Add MAAS support
At the moment, Watcher can use a single bare metal provisioning service: OpenStack Ironic. We're now adding support for Canonical's MAAS service [1], which is commonly used along with Juju [2] to deploy OpenStack. In order to do so, we're building a metal client abstraction with concrete implementations for Ironic and MAAS. We'll pick the MAAS client if the MAAS URL is provided, otherwise defaulting to Ironic. For now, we aren't updating the baremetal model collector, since it doesn't appear to be used by any of the existing Watcher strategy implementations. [1] https://maas.io/docs [2] https://juju.is/docs Implements: blueprint maas-support Change-Id: I6861995598f6c542fa9c006131f10203f358e0a6
This commit is contained in:
@@ -81,6 +81,7 @@ class BareMetalModelBuilder(base.BaseModelBuilder):
|
||||
def __init__(self, osc):
|
||||
self.osc = osc
|
||||
self.model = model_root.BaremetalModelRoot()
|
||||
# TODO(lpetrut): add MAAS support
|
||||
self.ironic_helper = ironic_helper.IronicHelper(osc=self.osc)
|
||||
|
||||
def add_ironic_node(self, node):
|
||||
|
||||
@@ -157,7 +157,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
if node_list:
|
||||
return node_list[0]
|
||||
else:
|
||||
raise exception.ComputeResourceNotFound
|
||||
raise exception.ComputeNodeNotFound(name=name)
|
||||
except exception.ComputeResourceNotFound:
|
||||
raise exception.ComputeNodeNotFound(name=name)
|
||||
|
||||
|
||||
@@ -23,6 +23,8 @@ from oslo_log import log
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.common import exception
|
||||
from watcher.common.metal_helper import constants as metal_constants
|
||||
from watcher.common.metal_helper import factory as metal_helper_factory
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
@@ -81,7 +83,7 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
|
||||
def __init__(self, config, osc=None):
|
||||
|
||||
super(SavingEnergy, self).__init__(config, osc)
|
||||
self._ironic_client = None
|
||||
self._metal_helper = None
|
||||
self._nova_client = None
|
||||
|
||||
self.with_vms_node_pool = []
|
||||
@@ -91,10 +93,10 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
|
||||
self.min_free_hosts_num = 1
|
||||
|
||||
@property
|
||||
def ironic_client(self):
|
||||
if not self._ironic_client:
|
||||
self._ironic_client = self.osc.ironic()
|
||||
return self._ironic_client
|
||||
def metal_helper(self):
|
||||
if not self._metal_helper:
|
||||
self._metal_helper = metal_helper_factory.get_helper(self.osc)
|
||||
return self._metal_helper
|
||||
|
||||
@property
|
||||
def nova_client(self):
|
||||
@@ -149,10 +151,10 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
|
||||
:return: None
|
||||
"""
|
||||
params = {'state': state,
|
||||
'resource_name': node.hostname}
|
||||
'resource_name': node.get_hypervisor_hostname()}
|
||||
self.solution.add_action(
|
||||
action_type='change_node_power_state',
|
||||
resource_id=node.uuid,
|
||||
resource_id=node.get_id(),
|
||||
input_parameters=params)
|
||||
|
||||
def get_hosts_pool(self):
|
||||
@@ -162,36 +164,36 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
|
||||
|
||||
"""
|
||||
|
||||
node_list = self.ironic_client.node.list()
|
||||
node_list = self.metal_helper.list_compute_nodes()
|
||||
for node in node_list:
|
||||
node_info = self.ironic_client.node.get(node.uuid)
|
||||
hypervisor_id = node_info.extra.get('compute_node_id', None)
|
||||
if hypervisor_id is None:
|
||||
LOG.warning(('Cannot find compute_node_id in extra '
|
||||
'of ironic node %s'), node.uuid)
|
||||
continue
|
||||
hypervisor_node = self.nova_client.hypervisors.get(hypervisor_id)
|
||||
if hypervisor_node is None:
|
||||
LOG.warning(('Cannot find hypervisor %s'), hypervisor_id)
|
||||
continue
|
||||
node.hostname = hypervisor_node.hypervisor_hostname
|
||||
hypervisor_node = hypervisor_node.to_dict()
|
||||
hypervisor_node = node.get_hypervisor_node().to_dict()
|
||||
|
||||
compute_service = hypervisor_node.get('service', None)
|
||||
host_name = compute_service.get('host')
|
||||
LOG.debug("Found hypervisor: %s", hypervisor_node)
|
||||
try:
|
||||
self.compute_model.get_node_by_name(host_name)
|
||||
except exception.ComputeNodeNotFound:
|
||||
LOG.info("The compute model does not contain the host: %s",
|
||||
host_name)
|
||||
continue
|
||||
|
||||
if not (hypervisor_node.get('state') == 'up'):
|
||||
"""filter nodes that are not in 'up' state"""
|
||||
if (node.hv_up_when_powered_off and
|
||||
hypervisor_node.get('state') != 'up'):
|
||||
# filter nodes that are not in 'up' state
|
||||
LOG.info("Ignoring node that isn't in 'up' state: %s",
|
||||
host_name)
|
||||
continue
|
||||
else:
|
||||
if (hypervisor_node['running_vms'] == 0):
|
||||
if (node_info.power_state == 'power on'):
|
||||
power_state = node.get_power_state()
|
||||
if power_state == metal_constants.PowerState.ON:
|
||||
self.free_poweron_node_pool.append(node)
|
||||
elif (node_info.power_state == 'power off'):
|
||||
elif power_state == metal_constants.PowerState.OFF:
|
||||
self.free_poweroff_node_pool.append(node)
|
||||
else:
|
||||
LOG.info("Ignoring node %s, unknown state: %s",
|
||||
node, power_state)
|
||||
else:
|
||||
self.with_vms_node_pool.append(node)
|
||||
|
||||
@@ -202,17 +204,21 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
|
||||
self.min_free_hosts_num)))
|
||||
len_poweron = len(self.free_poweron_node_pool)
|
||||
len_poweroff = len(self.free_poweroff_node_pool)
|
||||
LOG.debug("need_poweron: %s, len_poweron: %s, len_poweroff: %s",
|
||||
need_poweron, len_poweron, len_poweroff)
|
||||
if len_poweron > need_poweron:
|
||||
for node in random.sample(self.free_poweron_node_pool,
|
||||
(len_poweron - need_poweron)):
|
||||
self.add_action_poweronoff_node(node, 'off')
|
||||
LOG.info("power off %s", node.uuid)
|
||||
self.add_action_poweronoff_node(node,
|
||||
metal_constants.PowerState.OFF)
|
||||
LOG.info("power off %s", node.get_id())
|
||||
elif len_poweron < need_poweron:
|
||||
diff = need_poweron - len_poweron
|
||||
for node in random.sample(self.free_poweroff_node_pool,
|
||||
min(len_poweroff, diff)):
|
||||
self.add_action_poweronoff_node(node, 'on')
|
||||
LOG.info("power on %s", node.uuid)
|
||||
self.add_action_poweronoff_node(node,
|
||||
metal_constants.PowerState.ON)
|
||||
LOG.info("power on %s", node.get_id())
|
||||
|
||||
def pre_execute(self):
|
||||
self._pre_execute()
|
||||
|
||||
Reference in New Issue
Block a user