Refactored the compute model and its elements

In this changeset, I refactored the whole Watcher codebase to
adopt a naming convention for the various elements of the
Compute model so that it reflects the naming convention
used by Nova.

Change-Id: I28adba5e1f27175f025330417b072686134d5f51
Partially-Implements: blueprint cluster-model-objects-wrapper
This commit is contained in:
Vincent Françoise
2016-07-06 17:44:29 +02:00
parent dbde1afea0
commit 31c37342cd
53 changed files with 1865 additions and 1803 deletions

View File

@@ -172,7 +172,7 @@ Input parameter could cause audit creation failure, when:
Watcher service will compute an :ref:`Action Plan <action_plan_definition>` Watcher service will compute an :ref:`Action Plan <action_plan_definition>`
composed of a list of potential optimization :ref:`actions <action_definition>` composed of a list of potential optimization :ref:`actions <action_definition>`
(instance migration, disabling of an hypervisor, ...) according to the (instance migration, disabling of a compute node, ...) according to the
:ref:`goal <goal_definition>` to achieve. You can see all of the goals :ref:`goal <goal_definition>` to achieve. You can see all of the goals
available in section ``[watcher_strategies]`` of the Watcher service available in section ``[watcher_strategies]`` of the Watcher service
configuration file. configuration file.

View File

@@ -312,7 +312,7 @@ Using that you can now query the values for that specific metric:
.. code-block:: py .. code-block:: py
query_history.statistic_aggregation(resource_id=hypervisor.uuid, query_history.statistic_aggregation(resource_id=compute_node.uuid,
meter_name='compute.node.cpu.percent', meter_name='compute.node.cpu.percent',
period="7200", period="7200",
aggregate='avg' aggregate='avg'

View File

@@ -27,7 +27,7 @@ of the OpenStack :ref:`Cluster <cluster_definition>` such as:
- Live migration of an instance from one compute node to another compute - Live migration of an instance from one compute node to another compute
node with Nova node with Nova
- Changing the power level of a compute node (ACPI level, ...) - Changing the power level of a compute node (ACPI level, ...)
- Changing the current state of an hypervisor (enable or disable) with Nova - Changing the current state of a compute node (enable or disable) with Nova
In most cases, an :ref:`Action <action_definition>` triggers some concrete In most cases, an :ref:`Action <action_definition>` triggers some concrete
commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.). commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.).

View File

@@ -23,7 +23,7 @@ from watcher._i18n import _
from watcher.applier.actions import base from watcher.applier.actions import base
from watcher.common import exception from watcher.common import exception
from watcher.common import nova_helper from watcher.common import nova_helper
from watcher.decision_engine.model import hypervisor_state as hstate from watcher.decision_engine.model import element
class ChangeNovaServiceState(base.BaseAction): class ChangeNovaServiceState(base.BaseAction):
@@ -57,7 +57,7 @@ class ChangeNovaServiceState(base.BaseAction):
voluptuous.Length(min=1)), voluptuous.Length(min=1)),
voluptuous.Required(self.STATE): voluptuous.Required(self.STATE):
voluptuous.Any(*[state.value voluptuous.Any(*[state.value
for state in list(hstate.HypervisorState)]), for state in list(element.ServiceState)]),
}) })
@property @property
@@ -70,17 +70,17 @@ class ChangeNovaServiceState(base.BaseAction):
def execute(self): def execute(self):
target_state = None target_state = None
if self.state == hstate.HypervisorState.DISABLED.value: if self.state == element.ServiceState.DISABLED.value:
target_state = False target_state = False
elif self.state == hstate.HypervisorState.ENABLED.value: elif self.state == element.ServiceState.ENABLED.value:
target_state = True target_state = True
return self._nova_manage_service(target_state) return self._nova_manage_service(target_state)
def revert(self): def revert(self):
target_state = None target_state = None
if self.state == hstate.HypervisorState.DISABLED.value: if self.state == element.ServiceState.DISABLED.value:
target_state = True target_state = True
elif self.state == hstate.HypervisorState.ENABLED.value: elif self.state == element.ServiceState.ENABLED.value:
target_state = False target_state = False
return self._nova_manage_service(target_state) return self._nova_manage_service(target_state)

View File

@@ -44,12 +44,12 @@ class Migrate(base.BaseAction):
schema = Schema({ schema = Schema({
'resource_id': str, # should be a UUID 'resource_id': str, # should be a UUID
'migration_type': str, # choices -> "live", "cold" 'migration_type': str, # choices -> "live", "cold"
'dst_hypervisor': str, 'destination_node': str,
'src_hypervisor': str, 'source_node': str,
}) })
The `resource_id` is the UUID of the server to migrate. The `resource_id` is the UUID of the server to migrate.
The `src_hypervisor` and `dst_hypervisor` parameters are respectively the The `source_node` and `destination_node` parameters are respectively the
source and the destination compute hostname (list of available compute source and the destination compute hostname (list of available compute
hosts is returned by this command: ``nova service-list --binary hosts is returned by this command: ``nova service-list --binary
nova-compute``). nova-compute``).
@@ -59,8 +59,8 @@ class Migrate(base.BaseAction):
MIGRATION_TYPE = 'migration_type' MIGRATION_TYPE = 'migration_type'
LIVE_MIGRATION = 'live' LIVE_MIGRATION = 'live'
COLD_MIGRATION = 'cold' COLD_MIGRATION = 'cold'
DST_HYPERVISOR = 'dst_hypervisor' DESTINATION_NODE = 'destination_node'
SRC_HYPERVISOR = 'src_hypervisor' SOURCE_NODE = 'source_node'
def check_resource_id(self, value): def check_resource_id(self, value):
if (value is not None and if (value is not None and
@@ -73,14 +73,14 @@ class Migrate(base.BaseAction):
def schema(self): def schema(self):
return voluptuous.Schema({ return voluptuous.Schema({
voluptuous.Required(self.RESOURCE_ID): self.check_resource_id, voluptuous.Required(self.RESOURCE_ID): self.check_resource_id,
voluptuous.Required(self.MIGRATION_TYPE, voluptuous.Required(
default=self.LIVE_MIGRATION): self.MIGRATION_TYPE, default=self.LIVE_MIGRATION):
voluptuous.Any(*[self.LIVE_MIGRATION, voluptuous.Any(
self.COLD_MIGRATION]), *[self.LIVE_MIGRATION, self.COLD_MIGRATION]),
voluptuous.Required(self.DST_HYPERVISOR): voluptuous.Required(self.DESTINATION_NODE):
voluptuous.All(voluptuous.Any(*six.string_types), voluptuous.All(voluptuous.Any(*six.string_types),
voluptuous.Length(min=1)), voluptuous.Length(min=1)),
voluptuous.Required(self.SRC_HYPERVISOR): voluptuous.Required(self.SOURCE_NODE):
voluptuous.All(voluptuous.Any(*six.string_types), voluptuous.All(voluptuous.Any(*six.string_types),
voluptuous.Length(min=1)), voluptuous.Length(min=1)),
}) })
@@ -94,12 +94,12 @@ class Migrate(base.BaseAction):
return self.input_parameters.get(self.MIGRATION_TYPE) return self.input_parameters.get(self.MIGRATION_TYPE)
@property @property
def dst_hypervisor(self): def destination_node(self):
return self.input_parameters.get(self.DST_HYPERVISOR) return self.input_parameters.get(self.DESTINATION_NODE)
@property @property
def src_hypervisor(self): def source_node(self):
return self.input_parameters.get(self.SRC_HYPERVISOR) return self.input_parameters.get(self.SOURCE_NODE)
def _live_migrate_instance(self, nova, destination): def _live_migrate_instance(self, nova, destination):
result = None result = None
@@ -159,14 +159,14 @@ class Migrate(base.BaseAction):
raise exception.InstanceNotFound(name=self.instance_uuid) raise exception.InstanceNotFound(name=self.instance_uuid)
def execute(self): def execute(self):
return self.migrate(destination=self.dst_hypervisor) return self.migrate(destination=self.destination_node)
def revert(self): def revert(self):
return self.migrate(destination=self.src_hypervisor) return self.migrate(destination=self.source_node)
def precondition(self): def precondition(self):
# todo(jed) check if the instance exist/ check if the instance is on # todo(jed) check if the instance exist/ check if the instance is on
# the src_hypervisor # the source_node
pass pass
def postcondition(self): def postcondition(self):

View File

@@ -317,7 +317,7 @@ class KeystoneFailure(WatcherException):
class ClusterEmpty(WatcherException): class ClusterEmpty(WatcherException):
msg_fmt = _("The list of hypervisor(s) in the cluster is empty") msg_fmt = _("The list of compute node(s) in the cluster is empty")
class MetricCollectorNotDefined(WatcherException): class MetricCollectorNotDefined(WatcherException):
@@ -346,7 +346,7 @@ class GlobalEfficacyComputationError(WatcherException):
"goal using the '%(strategy)s' strategy.") "goal using the '%(strategy)s' strategy.")
class NoMetricValuesForVM(WatcherException): class NoMetricValuesForInstance(WatcherException):
msg_fmt = _("No values returned by %(resource_id)s for %(metric_name)s.") msg_fmt = _("No values returned by %(resource_id)s for %(metric_name)s.")
@@ -357,11 +357,11 @@ class NoSuchMetricForHost(WatcherException):
# Model # Model
class InstanceNotFound(WatcherException): class InstanceNotFound(WatcherException):
msg_fmt = _("The instance '%(name)s' is not found") msg_fmt = _("The instance '%(name)s' could not be found")
class HypervisorNotFound(WatcherException): class ComputeNodeNotFound(WatcherException):
msg_fmt = _("The hypervisor is not found") msg_fmt = _("The compute node %s could not be found")
class LoadingError(WatcherException): class LoadingError(WatcherException):

View File

@@ -40,7 +40,7 @@ class NovaHelper(object):
self.nova = self.osc.nova() self.nova = self.osc.nova()
self.glance = self.osc.glance() self.glance = self.osc.glance()
def get_hypervisors_list(self): def get_compute_node_list(self):
return self.nova.hypervisors.list() return self.nova.hypervisors.list()
def find_instance(self, instance_id): def find_instance(self, instance_id):
@@ -54,7 +54,26 @@ class NovaHelper(object):
break break
return instance return instance
def watcher_non_live_migrate_instance(self, instance_id, hypervisor_id, def wait_for_volume_status(self, volume, status, timeout=60,
poll_interval=1):
"""Wait until volume reaches given status.
:param volume: volume resource
:param status: expected status of volume
:param timeout: timeout in seconds
:param poll_interval: poll interval in seconds
"""
start_time = time.time()
while time.time() - start_time < timeout:
volume = self.cinder.volumes.get(volume.id)
if volume.status == status:
break
time.sleep(poll_interval)
else:
raise Exception("Volume %s did not reach status %s after %d s"
% (volume.id, status, timeout))
def watcher_non_live_migrate_instance(self, instance_id, node_id,
keep_original_image_name=True): keep_original_image_name=True):
"""This method migrates a given instance """This method migrates a given instance
@@ -218,7 +237,7 @@ class NovaHelper(object):
# We create the new instance from # We create the new instance from
# the intermediate image of the original instance # the intermediate image of the original instance
new_instance = self. \ new_instance = self. \
create_instance(hypervisor_id, create_instance(node_id,
instance_name, instance_name,
image_uuid, image_uuid,
flavor_name, flavor_name,
@@ -358,7 +377,7 @@ class NovaHelper(object):
# Sets the compute host's ability to accept new instances. # Sets the compute host's ability to accept new instances.
# host_maintenance_mode(self, host, mode): # host_maintenance_mode(self, host, mode):
# Start/Stop host maintenance window. # Start/Stop host maintenance window.
# On start, it triggers guest VMs evacuation. # On start, it triggers guest instances evacuation.
host = self.nova.hosts.get(hostname) host = self.nova.hosts.get(hostname)
if not host: if not host:
@@ -463,20 +482,20 @@ class NovaHelper(object):
else: else:
self.nova.servers.stop(instance_id) self.nova.servers.stop(instance_id)
if self.wait_for_vm_state(instance, "stopped", 8, 10): if self.wait_for_instance_state(instance, "stopped", 8, 10):
LOG.debug("Instance %s stopped." % instance_id) LOG.debug("Instance %s stopped." % instance_id)
return True return True
else: else:
return False return False
def wait_for_vm_state(self, server, vm_state, retry, sleep): def wait_for_instance_state(self, server, state, retry, sleep):
"""Waits for server to be in a specific vm_state """Waits for server to be in a specific state
The vm_state can be one of the following : The state can be one of the following :
active, stopped active, stopped
:param server: server object. :param server: server object.
:param vm_state: for which state we are waiting for :param state: for which state we are waiting for
:param retry: how many times to retry :param retry: how many times to retry
:param sleep: seconds to sleep between the retries :param sleep: seconds to sleep between the retries
""" """
@@ -484,11 +503,11 @@ class NovaHelper(object):
if not server: if not server:
return False return False
while getattr(server, 'OS-EXT-STS:vm_state') != vm_state and retry: while getattr(server, 'OS-EXT-STS:vm_state') != state and retry:
time.sleep(sleep) time.sleep(sleep)
server = self.nova.servers.get(server) server = self.nova.servers.get(server)
retry -= 1 retry -= 1
return getattr(server, 'OS-EXT-STS:vm_state') == vm_state return getattr(server, 'OS-EXT-STS:vm_state') == state
def wait_for_instance_status(self, instance, status_list, retry, sleep): def wait_for_instance_status(self, instance, status_list, retry, sleep):
"""Waits for instance to be in a specific status """Waits for instance to be in a specific status
@@ -514,7 +533,7 @@ class NovaHelper(object):
LOG.debug("Current instance status: %s" % instance.status) LOG.debug("Current instance status: %s" % instance.status)
return instance.status in status_list return instance.status in status_list
def create_instance(self, hypervisor_id, inst_name="test", image_id=None, def create_instance(self, node_id, inst_name="test", image_id=None,
flavor_name="m1.tiny", flavor_name="m1.tiny",
sec_group_list=["default"], sec_group_list=["default"],
network_names_list=["demo-net"], keypair_name="mykeys", network_names_list=["demo-net"], keypair_name="mykeys",
@@ -570,15 +589,14 @@ class NovaHelper(object):
net_obj = {"net-id": nic_id} net_obj = {"net-id": nic_id}
net_list.append(net_obj) net_list.append(net_obj)
instance = self.nova.servers. \ instance = self.nova.servers.create(
create(inst_name, inst_name, image,
image, flavor=flavor, flavor=flavor,
key_name=keypair_name, key_name=keypair_name,
security_groups=sec_group_list, security_groups=sec_group_list,
nics=net_list, nics=net_list,
block_device_mapping_v2=block_device_mapping_v2, block_device_mapping_v2=block_device_mapping_v2,
availability_zone="nova:" + availability_zone="nova:%s" % node_id)
hypervisor_id)
# Poll at 5 second intervals, until the status is no longer 'BUILD' # Poll at 5 second intervals, until the status is no longer 'BUILD'
if instance: if instance:
@@ -609,13 +627,13 @@ class NovaHelper(object):
return network_id return network_id
def get_vms_by_hypervisor(self, host): def get_instances_by_node(self, host):
return [vm for vm in return [instance for instance in
self.nova.servers.list(search_opts={"all_tenants": True}) self.nova.servers.list(search_opts={"all_tenants": True})
if self.get_hostname(vm) == host] if self.get_hostname(instance) == host]
def get_hostname(self, vm): def get_hostname(self, instance):
return str(getattr(vm, 'OS-EXT-SRV-ATTR:host')) return str(getattr(instance, 'OS-EXT-SRV-ATTR:host'))
def get_flavor_instance(self, instance, cache): def get_flavor_instance(self, instance, cache):
fid = instance.flavor['id'] fid = instance.flavor['id']

View File

@@ -118,11 +118,11 @@ class ReleasedComputeNodesCount(IndicatorSpecification):
voluptuous.Range(min=0), required=True) voluptuous.Range(min=0), required=True)
class VmMigrationsCount(IndicatorSpecification): class InstanceMigrationsCount(IndicatorSpecification):
def __init__(self): def __init__(self):
super(VmMigrationsCount, self).__init__( super(InstanceMigrationsCount, self).__init__(
name="vm_migrations_count", name="instance_migrations_count",
description=_("The number of migrations to be performed."), description=_("The number of VM migrations to be performed."),
unit=None, unit=None,
) )

View File

@@ -34,14 +34,14 @@ class ServerConsolidation(base.EfficacySpecification):
def get_indicators_specifications(self): def get_indicators_specifications(self):
return [ return [
indicators.ReleasedComputeNodesCount(), indicators.ReleasedComputeNodesCount(),
indicators.VmMigrationsCount(), indicators.InstanceMigrationsCount(),
] ]
def get_global_efficacy_indicator(self, indicators_map): def get_global_efficacy_indicator(self, indicators_map):
value = 0 value = 0
if indicators_map.vm_migrations_count > 0: if indicators_map.instance_migrations_count > 0:
value = (float(indicators_map.released_compute_nodes_count) / value = (float(indicators_map.released_compute_nodes_count) /
float(indicators_map.vm_migrations_count)) * 100 float(indicators_map.instance_migrations_count)) * 100
return efficacy.Indicator( return efficacy.Indicator(
name="released_nodes_ratio", name="released_nodes_ratio",

View File

@@ -16,7 +16,6 @@
# implied. # implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#
from oslo_config import cfg from oslo_config import cfg
@@ -40,7 +39,7 @@ class CollectorManager(object):
for collector_name in available_collectors: for collector_name in available_collectors:
collector = self.collector_loader.load(collector_name) collector = self.collector_loader.load(collector_name)
collectors[collector_name] = collector collectors[collector_name] = collector
self._collectors = collectors self._collectors = collectors
return self._collectors return self._collectors

View File

@@ -20,10 +20,8 @@ from oslo_log import log
from watcher.common import nova_helper from watcher.common import nova_helper
from watcher.decision_engine.model.collector import base from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import hypervisor as obj_hypervisor from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm as obj_vm
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -50,45 +48,46 @@ class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector):
LOG.debug("Building latest Nova cluster data model") LOG.debug("Building latest Nova cluster data model")
model = model_root.ModelRoot() model = model_root.ModelRoot()
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
disk_capacity = resource.Resource(resource.ResourceType.disk_capacity) disk_capacity = element.Resource(element.ResourceType.disk_capacity)
model.create_resource(mem) model.create_resource(mem)
model.create_resource(num_cores) model.create_resource(num_cores)
model.create_resource(disk) model.create_resource(disk)
model.create_resource(disk_capacity) model.create_resource(disk_capacity)
flavor_cache = {} flavor_cache = {}
hypervisors = self.wrapper.get_hypervisors_list() nodes = self.wrapper.get_compute_node_list()
for h in hypervisors: for n in nodes:
service = self.wrapper.nova.services.find(id=h.service['id']) service = self.wrapper.nova.services.find(id=n.service['id'])
# create hypervisor in cluster_model_collector # create node in cluster_model_collector
hypervisor = obj_hypervisor.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = service.host node.uuid = service.host
hypervisor.hostname = h.hypervisor_hostname node.hostname = n.hypervisor_hostname
# set capacity # set capacity
mem.set_capacity(hypervisor, h.memory_mb) mem.set_capacity(node, n.memory_mb)
disk.set_capacity(hypervisor, h.free_disk_gb) disk.set_capacity(node, n.free_disk_gb)
disk_capacity.set_capacity(hypervisor, h.local_gb) disk_capacity.set_capacity(node, n.local_gb)
num_cores.set_capacity(hypervisor, h.vcpus) num_cores.set_capacity(node, n.vcpus)
hypervisor.state = h.state node.state = n.state
hypervisor.status = h.status node.status = n.status
model.add_hypervisor(hypervisor) model.add_node(node)
vms = self.wrapper.get_vms_by_hypervisor(str(service.host)) instances = self.wrapper.get_instances_by_node(str(service.host))
for v in vms: for v in instances:
# create VM in cluster_model_collector # create VM in cluster_model_collector
vm = obj_vm.VM() instance = element.Instance()
vm.uuid = v.id instance.uuid = v.id
# nova/nova/compute/vm_states.py # nova/nova/compute/instance_states.py
vm.state = getattr(v, 'OS-EXT-STS:vm_state') instance.state = getattr(v, 'OS-EXT-STS:instance_state')
# set capacity # set capacity
self.wrapper.get_flavor_instance(v, flavor_cache) self.wrapper.get_flavor_instance(v, flavor_cache)
mem.set_capacity(vm, v.flavor['ram']) mem.set_capacity(instance, v.flavor['ram'])
disk.set_capacity(vm, v.flavor['disk']) disk.set_capacity(instance, v.flavor['disk'])
num_cores.set_capacity(vm, v.flavor['vcpus']) num_cores.set_capacity(instance, v.flavor['vcpus'])
model.get_mapping().map(node, instance)
model.add_instance(instance)
model.get_mapping().map(hypervisor, vm)
model.add_vm(vm)
return model return model

View File

@@ -0,0 +1,39 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Authors: Vincent FRANCOISE <vincent.francoise@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from watcher.decision_engine.model.element import disk_info
from watcher.decision_engine.model.element import instance
from watcher.decision_engine.model.element import node
from watcher.decision_engine.model.element import resource
ServiceState = node.ServiceState
PowerState = node.PowerState
ComputeNode = node.ComputeNode
InstanceState = instance.InstanceState
Instance = instance.Instance
DiskInfo = disk_info.DiskInfo
ResourceType = resource.ResourceType
Resource = resource.Resource
__all__ = [
'ServiceState', 'PowerState', 'ComputeNode', 'InstanceState', 'Instance',
'DiskInfo', 'ResourceType', 'Resource']

View File

@@ -1,11 +1,13 @@
# -*- encoding: utf-8 -*- # -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com # Copyright (c) 2016 b<>com
#
# Authors: Vincent FRANCOISE <vincent.francoise@b-com.com>
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,11 +16,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import enum import abc
import six
class HypervisorState(enum.Enum): @six.add_metaclass(abc.ABCMeta)
ONLINE = 'up' class Element(object):
OFFLINE = 'down'
ENABLED = 'enabled' @abc.abstractmethod
DISABLED = 'disabled' def accept(self, visitor):
raise NotImplementedError()

View File

@@ -14,8 +14,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import abc
class ComputeResource(object): import six
from watcher.decision_engine.model.element import base
@six.add_metaclass(abc.ABCMeta)
class ComputeResource(base.Element):
def __init__(self): def __init__(self):
self._uuid = "" self._uuid = ""

View File

@@ -14,8 +14,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from watcher.decision_engine.model.element import base
class DiskInfo(base.Element):
class DiskInfo(object):
def __init__(self): def __init__(self):
self.name = "" self.name = ""
self.major = 0 self.major = 0
@@ -23,6 +26,9 @@ class DiskInfo(object):
self.size = 0 self.size = 0
self.scheduler = "" self.scheduler = ""
def accept(self, visitor):
raise NotImplementedError()
def set_size(self, size): def set_size(self, size):
"""DiskInfo """DiskInfo

View File

@@ -0,0 +1,54 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from watcher.decision_engine.model.element import compute_resource
class InstanceState(enum.Enum):
ACTIVE = 'active' # Instance is running
BUILDING = 'building' # Instance only exists in DB
PAUSED = 'paused'
SUSPENDED = 'suspended' # Instance is suspended to disk.
STOPPED = 'stopped' # Instance is shut off, the disk image is still there.
RESCUED = 'rescued' # A rescue image is running with the original image
# attached.
RESIZED = 'resized' # a Instance with the new size is active.
SOFT_DELETED = 'soft-delete'
# still available to restore.
DELETED = 'deleted' # Instance is permanently deleted.
ERROR = 'error'
class Instance(compute_resource.ComputeResource):
def __init__(self):
super(Instance, self).__init__()
self._state = InstanceState.ACTIVE.value
def accept(self, visitor):
raise NotImplementedError()
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state

View File

@@ -14,17 +14,46 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from watcher.decision_engine.model import compute_resource import enum
from watcher.decision_engine.model import hypervisor_state
from watcher.decision_engine.model import power_state from watcher.decision_engine.model.element import compute_resource
class Hypervisor(compute_resource.ComputeResource): class ServiceState(enum.Enum):
ONLINE = 'up'
OFFLINE = 'down'
ENABLED = 'enabled'
DISABLED = 'disabled'
class PowerState(enum.Enum):
# away mode
g0 = "g0"
# power on suspend (processor caches are flushed)
# The power to the CPU(s) and RAM is maintained
g1_S1 = "g1_S1"
# CPU powered off. Dirty cache is flushed to RAM
g1_S2 = "g1_S2"
# Suspend to RAM
g1_S3 = "g1_S3"
# Suspend to Disk
g1_S4 = "g1_S4"
# switch outlet X OFF on the PDU (Power Distribution Unit)
switch_off = "switch_off"
# switch outlet X ON on the PDU (Power Distribution Unit)
switch_on = "switch_on"
class ComputeNode(compute_resource.ComputeResource):
def __init__(self): def __init__(self):
super(Hypervisor, self).__init__() super(ComputeNode, self).__init__()
self._state = hypervisor_state.HypervisorState.ONLINE self._state = ServiceState.ONLINE
self._status = hypervisor_state.HypervisorState.ENABLED self._status = ServiceState.ENABLED
self._power_state = power_state.PowerState.g0 self._power_state = PowerState.g0
def accept(self, visitor):
raise NotImplementedError()
@property @property
def state(self): def state(self):

View File

@@ -14,9 +14,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from oslo_log import log
import threading import threading
from oslo_log import log
from watcher._i18n import _LW from watcher._i18n import _LW
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -25,110 +26,108 @@ LOG = log.getLogger(__name__)
class Mapping(object): class Mapping(object):
def __init__(self, model): def __init__(self, model):
self.model = model self.model = model
self._mapping_hypervisors = {} self.compute_node_mapping = {}
self.mapping_vm = {} self.instance_mapping = {}
self.lock = threading.Lock() self.lock = threading.Lock()
def map(self, hypervisor, vm): def map(self, node, instance):
"""Select the hypervisor where the instance is launched """Select the node where the instance is launched
:param hypervisor: the hypervisor :param node: the node
:param vm: the virtual machine or instance :param instance: the virtual machine or instance
""" """
try: try:
self.lock.acquire() self.lock.acquire()
# init first # init first
if hypervisor.uuid not in self._mapping_hypervisors.keys(): if node.uuid not in self.compute_node_mapping.keys():
self._mapping_hypervisors[hypervisor.uuid] = [] self.compute_node_mapping[node.uuid] = []
# map node => vms # map node => instances
self._mapping_hypervisors[hypervisor.uuid].append( self.compute_node_mapping[node.uuid].append(
vm.uuid) instance.uuid)
# map vm => node # map instance => node
self.mapping_vm[vm.uuid] = hypervisor.uuid self.instance_mapping[instance.uuid] = node.uuid
finally: finally:
self.lock.release() self.lock.release()
def unmap(self, hypervisor, vm): def unmap(self, node, instance):
"""Remove the instance from the hypervisor """Remove the instance from the node
:param hypervisor: the hypervisor :param node: the node
:param vm: the virtual machine or instance :param instance: the virtual machine or instance
""" """
self.unmap_from_id(hypervisor.uuid, vm.uuid) self.unmap_from_id(node.uuid, instance.uuid)
def unmap_from_id(self, node_uuid, vm_uuid): def unmap_from_id(self, node_uuid, instance_uuid):
"""Remove the instance (by id) from the hypervisor (by id) """Remove the instance (by id) from the node (by id)
:rtype : object :rtype : object
""" """
try: try:
self.lock.acquire() self.lock.acquire()
if str(node_uuid) in self._mapping_hypervisors: if str(node_uuid) in self.compute_node_mapping:
self._mapping_hypervisors[str(node_uuid)].remove(str(vm_uuid)) self.compute_node_mapping[str(node_uuid)].remove(
# remove vm str(instance_uuid))
self.mapping_vm.pop(vm_uuid) # remove instance
self.instance_mapping.pop(instance_uuid)
else: else:
LOG.warning(_LW( LOG.warning(_LW(
"trying to delete the virtual machine %(vm)s but it was " "Trying to delete the instance %(instance)s but it was "
"not found on hypervisor %(hyp)s"), "not found on node %(node)s"),
{'vm': vm_uuid, 'hyp': node_uuid}) {'instance': instance_uuid, 'node': node_uuid})
finally: finally:
self.lock.release() self.lock.release()
def get_mapping(self): def get_mapping(self):
return self._mapping_hypervisors return self.compute_node_mapping
def get_mapping_vm(self): def get_node_from_instance(self, instance):
return self.mapping_vm return self.get_node_from_instance_id(instance.uuid)
def get_node_from_vm(self, vm): def get_node_from_instance_id(self, instance_uuid):
return self.get_node_from_vm_id(vm.uuid) """Getting host information from the guest instance
def get_node_from_vm_id(self, vm_uuid): :param instance: the uuid of the instance
"""Getting host information from the guest VM :return: node
:param vm: the uuid of the instance
:return: hypervisor
""" """
return self.model.get_hypervisor_from_id( return self.model.get_node_from_id(
self.get_mapping_vm()[str(vm_uuid)]) self.instance_mapping[str(instance_uuid)])
def get_node_vms(self, hypervisor): def get_node_instances(self, node):
"""Get the list of instances running on the hypervisor """Get the list of instances running on the node
:param hypervisor: :param node:
:return: :return:
""" """
return self.get_node_vms_from_id(hypervisor.uuid) return self.get_node_instances_from_id(node.uuid)
def get_node_vms_from_id(self, node_uuid): def get_node_instances_from_id(self, node_uuid):
if str(node_uuid) in self._mapping_hypervisors.keys(): if str(node_uuid) in self.compute_node_mapping.keys():
return self._mapping_hypervisors[str(node_uuid)] return self.compute_node_mapping[str(node_uuid)]
else: else:
# empty # empty
return [] return []
def migrate_vm(self, vm, src_hypervisor, dest_hypervisor): def migrate_instance(self, instance, source_node, destination_node):
"""Migrate single instance from src_hypervisor to dest_hypervisor """Migrate single instance from source_node to destination_node
:param vm: :param instance:
:param src_hypervisor: :param source_node:
:param dest_hypervisor: :param destination_node:
:return: :return:
""" """
if src_hypervisor == dest_hypervisor: if source_node == destination_node:
return False return False
# unmap # unmap
self.unmap(src_hypervisor, vm) self.unmap(source_node, instance)
# map # map
self.map(dest_hypervisor, vm) self.map(destination_node, instance)
return True return True

View File

@@ -17,16 +17,14 @@
from watcher._i18n import _ from watcher._i18n import _
from watcher.common import exception from watcher.common import exception
from watcher.common import utils from watcher.common import utils
from watcher.decision_engine.model import hypervisor from watcher.decision_engine.model import element
from watcher.decision_engine.model import mapping from watcher.decision_engine.model import mapping
from watcher.decision_engine.model import vm
class ModelRoot(object): class ModelRoot(object):
def __init__(self, stale=False): def __init__(self, stale=False):
self._hypervisors = utils.Struct() self._nodes = utils.Struct()
self._vms = utils.Struct() self._instances = utils.Struct()
self.mapping = mapping.Mapping(self) self.mapping = mapping.Mapping(self)
self.resource = utils.Struct() self.resource = utils.Struct()
self.stale = stale self.stale = stale
@@ -36,46 +34,46 @@ class ModelRoot(object):
__bool__ = __nonzero__ __bool__ = __nonzero__
def assert_hypervisor(self, obj): def assert_node(self, obj):
if not isinstance(obj, hypervisor.Hypervisor): if not isinstance(obj, element.ComputeNode):
raise exception.IllegalArgumentException( raise exception.IllegalArgumentException(
message=_("'obj' argument type is not valid")) message=_("'obj' argument type is not valid"))
def assert_vm(self, obj): def assert_instance(self, obj):
if not isinstance(obj, vm.VM): if not isinstance(obj, element.Instance):
raise exception.IllegalArgumentException( raise exception.IllegalArgumentException(
message=_("'obj' argument type is not valid")) message=_("'obj' argument type is not valid"))
def add_hypervisor(self, hypervisor): def add_node(self, node):
self.assert_hypervisor(hypervisor) self.assert_node(node)
self._hypervisors[hypervisor.uuid] = hypervisor self._nodes[node.uuid] = node
def remove_hypervisor(self, hypervisor): def remove_node(self, node):
self.assert_hypervisor(hypervisor) self.assert_node(node)
if str(hypervisor.uuid) not in self._hypervisors.keys(): if str(node.uuid) not in self._nodes:
raise exception.HypervisorNotFound(hypervisor.uuid) raise exception.ComputeNodeNotFound(node.uuid)
else: else:
del self._hypervisors[hypervisor.uuid] del self._nodes[node.uuid]
def add_vm(self, vm): def add_instance(self, instance):
self.assert_vm(vm) self.assert_instance(instance)
self._vms[vm.uuid] = vm self._instances[instance.uuid] = instance
def get_all_hypervisors(self): def get_all_compute_nodes(self):
return self._hypervisors return self._nodes
def get_hypervisor_from_id(self, hypervisor_uuid): def get_node_from_id(self, node_uuid):
if str(hypervisor_uuid) not in self._hypervisors.keys(): if str(node_uuid) not in self._nodes:
raise exception.HypervisorNotFound(hypervisor_uuid) raise exception.ComputeNodeNotFound(node_uuid)
return self._hypervisors[str(hypervisor_uuid)] return self._nodes[str(node_uuid)]
def get_vm_from_id(self, uuid): def get_instance_from_id(self, uuid):
if str(uuid) not in self._vms.keys(): if str(uuid) not in self._instances:
raise exception.InstanceNotFound(name=uuid) raise exception.InstanceNotFound(name=uuid)
return self._vms[str(uuid)] return self._instances[str(uuid)]
def get_all_vms(self): def get_all_instances(self):
return self._vms return self._instances
def get_mapping(self): def get_mapping(self):
return self.mapping return self.mapping
@@ -83,5 +81,5 @@ class ModelRoot(object):
def create_resource(self, r): def create_resource(self, r):
self.resource[str(r.name)] = r self.resource[str(r.name)] = r
def get_resource_from_id(self, id): def get_resource_from_id(self, resource_id):
return self.resource[str(id)] return self.resource[str(resource_id)]

View File

@@ -1,31 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class PowerState(enum.Enum):
# away mode
g0 = "g0"
# power on suspend (processor caches are flushed)
# The power to the CPU(s) and RAM is maintained
g1_S1 = "g1_S1"
# CPU powered off. Dirty cache is flushed to RAM
g1_S2 = "g1_S2"
# Suspend to RAM
g1_S3 = "g1_S3"
# Suspend to Disk
g1_S4 = "g1_S4"
# switch outlet X OFF on the PDU (Power Distribution Unit)
switch_off = "switch_off"
# switch outlet X ON on the PDU (Power Distribution Unit)
switch_on = "switch_on"

View File

@@ -1,31 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from watcher.decision_engine.model import compute_resource
from watcher.decision_engine.model import vm_state
class VM(compute_resource.ComputeResource):
def __init__(self):
super(VM, self).__init__()
self._state = vm_state.VMState.ACTIVE.value
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state

View File

@@ -1,34 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class VMState(enum.Enum):
ACTIVE = 'active' # VM is running
BUILDING = 'building' # VM only exists in DB
PAUSED = 'paused'
SUSPENDED = 'suspended' # VM is suspended to disk.
STOPPED = 'stopped' # VM is powered off, the disk image is still there.
RESCUED = 'rescued' # A rescue image is running with the original VM image
# attached.
RESIZED = 'resized' # a VM with the new size is active.
SOFT_DELETED = 'soft-delete'
# still available to restore.
DELETED = 'deleted' # VM is permanently deleted.
ERROR = 'error'

View File

@@ -32,9 +32,7 @@ from oslo_log import log
from watcher._i18n import _, _LE, _LI, _LW from watcher._i18n import _, _LE, _LI, _LW
from watcher.common import exception from watcher.common import exception
from watcher.decision_engine.cluster.history import ceilometer as cch from watcher.decision_engine.cluster.history import ceilometer as cch
from watcher.decision_engine.model import hypervisor_state as hyper_state from watcher.decision_engine.model import element
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -136,46 +134,47 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
""" """
self.migration_attempts = size_cluster * self.bound_migration self.migration_attempts = size_cluster * self.bound_migration
def check_migration(self, src_hypervisor, dest_hypervisor, vm_to_mig): def check_migration(self, source_node, destination_node,
instance_to_migrate):
"""Check if the migration is possible """Check if the migration is possible
:param src_hypervisor: the current node of the virtual machine :param source_node: the current node of the virtual machine
:param dest_hypervisor: the destination of the virtual machine :param destination_node: the destination of the virtual machine
:param vm_to_mig: the virtual machine :param instance_to_migrate: the instance / virtual machine
:return: True if the there is enough place otherwise false :return: True if the there is enough place otherwise false
""" """
if src_hypervisor == dest_hypervisor: if source_node == destination_node:
return False return False
LOG.debug('Migrate VM %s from %s to %s', LOG.debug('Migrate instance %s from %s to %s',
vm_to_mig, src_hypervisor, dest_hypervisor) instance_to_migrate, source_node, destination_node)
total_cores = 0 total_cores = 0
total_disk = 0 total_disk = 0
total_mem = 0 total_mem = 0
cpu_capacity = self.compute_model.get_resource_from_id( cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores) element.ResourceType.cpu_cores)
disk_capacity = self.compute_model.get_resource_from_id( disk_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.disk) element.ResourceType.disk)
memory_capacity = self.compute_model.get_resource_from_id( memory_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.memory) element.ResourceType.memory)
for vm_id in self.compute_model. \ for instance_id in self.compute_model. \
get_mapping().get_node_vms(dest_hypervisor): get_mapping().get_node_instances(destination_node):
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(instance_id)
total_cores += cpu_capacity.get_capacity(vm) total_cores += cpu_capacity.get_capacity(instance)
total_disk += disk_capacity.get_capacity(vm) total_disk += disk_capacity.get_capacity(instance)
total_mem += memory_capacity.get_capacity(vm) total_mem += memory_capacity.get_capacity(instance)
# capacity requested by hypervisor # capacity requested by the compute node
total_cores += cpu_capacity.get_capacity(vm_to_mig) total_cores += cpu_capacity.get_capacity(instance_to_migrate)
total_disk += disk_capacity.get_capacity(vm_to_mig) total_disk += disk_capacity.get_capacity(instance_to_migrate)
total_mem += memory_capacity.get_capacity(vm_to_mig) total_mem += memory_capacity.get_capacity(instance_to_migrate)
return self.check_threshold(dest_hypervisor, total_cores, total_disk, return self.check_threshold(destination_node, total_cores, total_disk,
total_mem) total_mem)
def check_threshold(self, dest_hypervisor, total_cores, def check_threshold(self, destination_node, total_cores,
total_disk, total_mem): total_disk, total_mem):
"""Check threshold """Check threshold
@@ -183,18 +182,18 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
aggregated CPU capacity of VMs on one node to CPU capacity aggregated CPU capacity of VMs on one node to CPU capacity
of this node must not exceed the threshold value. of this node must not exceed the threshold value.
:param dest_hypervisor: the destination of the virtual machine :param destination_node: the destination of the virtual machine
:param total_cores: total cores of the virtual machine :param total_cores: total cores of the virtual machine
:param total_disk: total disk size used by the virtual machine :param total_disk: total disk size used by the virtual machine
:param total_mem: total memory used by the virtual machine :param total_mem: total memory used by the virtual machine
:return: True if the threshold is not exceed :return: True if the threshold is not exceed
""" """
cpu_capacity = self.compute_model.get_resource_from_id( cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(dest_hypervisor) element.ResourceType.cpu_cores).get_capacity(destination_node)
disk_capacity = self.compute_model.get_resource_from_id( disk_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.disk).get_capacity(dest_hypervisor) element.ResourceType.disk).get_capacity(destination_node)
memory_capacity = self.compute_model.get_resource_from_id( memory_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.memory).get_capacity(dest_hypervisor) element.ResourceType.memory).get_capacity(destination_node)
return (cpu_capacity >= total_cores * self.threshold_cores and return (cpu_capacity >= total_cores * self.threshold_cores and
disk_capacity >= total_disk * self.threshold_disk and disk_capacity >= total_disk * self.threshold_disk and
@@ -210,7 +209,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
""" """
return self.migration_attempts return self.migration_attempts
def calculate_weight(self, element, total_cores_used, total_disk_used, def calculate_weight(self, node, total_cores_used, total_disk_used,
total_memory_used): total_memory_used):
"""Calculate weight of every resource """Calculate weight of every resource
@@ -221,13 +220,13 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
:return: :return:
""" """
cpu_capacity = self.compute_model.get_resource_from_id( cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(element) element.ResourceType.cpu_cores).get_capacity(node)
disk_capacity = self.compute_model.get_resource_from_id( disk_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.disk).get_capacity(element) element.ResourceType.disk).get_capacity(node)
memory_capacity = self.compute_model.get_resource_from_id( memory_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.memory).get_capacity(element) element.ResourceType.memory).get_capacity(node)
score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) / score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
float(cpu_capacity)) float(cpu_capacity))
@@ -245,13 +244,14 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
# TODO(jed): take in account weight # TODO(jed): take in account weight
return (score_cores + score_disk + score_memory) / 3 return (score_cores + score_disk + score_memory) / 3
def calculate_score_node(self, hypervisor): def calculate_score_node(self, node):
"""Calculate the score that represent the utilization level """Calculate the score that represent the utilization level
:param hypervisor: :param node: :py:class:`~.ComputeNode` instance
:return: :return: Score for the given compute node
:rtype: float
""" """
resource_id = "%s_%s" % (hypervisor.uuid, hypervisor.hostname) resource_id = "%s_%s" % (node.uuid, node.hostname)
host_avg_cpu_util = self.ceilometer. \ host_avg_cpu_util = self.ceilometer. \
statistic_aggregation(resource_id=resource_id, statistic_aggregation(resource_id=resource_id,
meter_name=self.HOST_CPU_USAGE_METRIC_NAME, meter_name=self.HOST_CPU_USAGE_METRIC_NAME,
@@ -268,11 +268,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
host_avg_cpu_util = 100 host_avg_cpu_util = 100
cpu_capacity = self.compute_model.get_resource_from_id( cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(hypervisor) element.ResourceType.cpu_cores).get_capacity(node)
total_cores_used = cpu_capacity * (host_avg_cpu_util / 100) total_cores_used = cpu_capacity * (host_avg_cpu_util / 100)
return self.calculate_weight(hypervisor, total_cores_used, 0, 0) return self.calculate_weight(node, total_cores_used, 0, 0)
def calculate_migration_efficacy(self): def calculate_migration_efficacy(self):
"""Calculate migration efficacy """Calculate migration efficacy
@@ -286,34 +286,34 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
else: else:
return 0 return 0
def calculate_score_vm(self, vm): def calculate_score_instance(self, instance):
"""Calculate Score of virtual machine """Calculate Score of virtual machine
:param vm: the virtual machine :param instance: the virtual machine
:return: score :return: score
""" """
vm_cpu_utilization = self.ceilometer. \ instance_cpu_utilization = self.ceilometer. \
statistic_aggregation( statistic_aggregation(
resource_id=vm.uuid, resource_id=instance.uuid,
meter_name=self.INSTANCE_CPU_USAGE_METRIC_NAME, meter_name=self.INSTANCE_CPU_USAGE_METRIC_NAME,
period="7200", period="7200",
aggregate='avg' aggregate='avg'
) )
if vm_cpu_utilization is None: if instance_cpu_utilization is None:
LOG.error( LOG.error(
_LE("No values returned by %(resource_id)s " _LE("No values returned by %(resource_id)s "
"for %(metric_name)s"), "for %(metric_name)s"),
resource_id=vm.uuid, resource_id=instance.uuid,
metric_name=self.INSTANCE_CPU_USAGE_METRIC_NAME, metric_name=self.INSTANCE_CPU_USAGE_METRIC_NAME,
) )
vm_cpu_utilization = 100 instance_cpu_utilization = 100
cpu_capacity = self.compute_model.get_resource_from_id( cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(vm) element.ResourceType.cpu_cores).get_capacity(instance)
total_cores_used = cpu_capacity * (vm_cpu_utilization / 100.0) total_cores_used = cpu_capacity * (instance_cpu_utilization / 100.0)
return self.calculate_weight(vm, total_cores_used, 0, 0) return self.calculate_weight(instance, total_cores_used, 0, 0)
def add_change_service_state(self, resource_id, state): def add_change_service_state(self, resource_id, state):
parameters = {'state': state} parameters = {'state': state}
@@ -324,79 +324,80 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
def add_migration(self, def add_migration(self,
resource_id, resource_id,
migration_type, migration_type,
src_hypervisor, source_node,
dst_hypervisor): destination_node):
parameters = {'migration_type': migration_type, parameters = {'migration_type': migration_type,
'src_hypervisor': src_hypervisor, 'source_node': source_node,
'dst_hypervisor': dst_hypervisor} 'destination_node': destination_node}
self.solution.add_action(action_type=self.MIGRATION, self.solution.add_action(action_type=self.MIGRATION,
resource_id=resource_id, resource_id=resource_id,
input_parameters=parameters) input_parameters=parameters)
def score_of_nodes(self, score): def score_of_nodes(self, score):
"""Calculate score of nodes based on load by VMs""" """Calculate score of nodes based on load by VMs"""
for hypervisor_id in self.compute_model.get_all_hypervisors(): for node_id in self.compute_model.get_all_compute_nodes():
hypervisor = self.compute_model. \ node = self.compute_model. \
get_hypervisor_from_id(hypervisor_id) get_node_from_id(node_id)
count = self.compute_model.get_mapping(). \ count = self.compute_model.get_mapping(). \
get_node_vms_from_id(hypervisor_id) get_node_instances_from_id(node_id)
if len(count) > 0: if len(count) > 0:
result = self.calculate_score_node(hypervisor) result = self.calculate_score_node(node)
else: else:
# The hypervisor has not VMs # The node has not VMs
result = 0 result = 0
if len(count) > 0: if len(count) > 0:
score.append((hypervisor_id, result)) score.append((node_id, result))
return score return score
def node_and_vm_score(self, sorted_score, score): def node_and_instance_score(self, sorted_score, score):
"""Get List of VMs from node""" """Get List of VMs from node"""
node_to_release = sorted_score[len(score) - 1][0] node_to_release = sorted_score[len(score) - 1][0]
vms_to_mig = self.compute_model.get_mapping().get_node_vms_from_id( instances_to_migrate = (
node_to_release) self.compute_model.mapping.get_node_instances_from_id(
node_to_release))
vm_score = [] instance_score = []
for vm_id in vms_to_mig: for instance_id in instances_to_migrate:
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(instance_id)
if vm.state == vm_state.VMState.ACTIVE.value: if instance.state == element.InstanceState.ACTIVE.value:
vm_score.append( instance_score.append(
(vm_id, self.calculate_score_vm(vm))) (instance_id, self.calculate_score_instance(instance)))
return node_to_release, vm_score return node_to_release, instance_score
def create_migration_vm(self, mig_vm, mig_src_hypervisor, def create_migration_instance(self, mig_instance, mig_source_node,
mig_dst_hypervisor): mig_destination_node):
"""Create migration VM""" """Create migration VM"""
if self.compute_model.get_mapping().migrate_vm( if self.compute_model.get_mapping().migrate_instance(
mig_vm, mig_src_hypervisor, mig_dst_hypervisor): mig_instance, mig_source_node, mig_destination_node):
self.add_migration(mig_vm.uuid, 'live', self.add_migration(mig_instance.uuid, 'live',
mig_src_hypervisor.uuid, mig_source_node.uuid,
mig_dst_hypervisor.uuid) mig_destination_node.uuid)
if len(self.compute_model.get_mapping().get_node_vms( if len(self.compute_model.get_mapping().get_node_instances(
mig_src_hypervisor)) == 0: mig_source_node)) == 0:
self.add_change_service_state(mig_src_hypervisor. self.add_change_service_state(mig_source_node.
uuid, uuid,
hyper_state.HypervisorState. element.ServiceState.DISABLED.value)
DISABLED.value)
self.number_of_released_nodes += 1 self.number_of_released_nodes += 1
def calculate_num_migrations(self, sorted_vms, node_to_release, def calculate_num_migrations(self, sorted_instances, node_to_release,
sorted_score): sorted_score):
number_migrations = 0 number_migrations = 0
for vm in sorted_vms: for instance in sorted_instances:
for j in range(0, len(sorted_score)): for j in range(0, len(sorted_score)):
mig_vm = self.compute_model.get_vm_from_id(vm[0]) mig_instance = self.compute_model.get_instance_from_id(
mig_src_hypervisor = self.compute_model.get_hypervisor_from_id( instance[0])
mig_source_node = self.compute_model.get_node_from_id(
node_to_release) node_to_release)
mig_dst_hypervisor = self.compute_model.get_hypervisor_from_id( mig_destination_node = self.compute_model.get_node_from_id(
sorted_score[j][0]) sorted_score[j][0])
result = self.check_migration( result = self.check_migration(
mig_src_hypervisor, mig_dst_hypervisor, mig_vm) mig_source_node, mig_destination_node, mig_instance)
if result: if result:
self.create_migration_vm( self.create_migration_instance(
mig_vm, mig_src_hypervisor, mig_dst_hypervisor) mig_instance, mig_source_node, mig_destination_node)
number_migrations += 1 number_migrations += 1
break break
return number_migrations return number_migrations
@@ -420,22 +421,20 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
unsuccessful_migration = 0 unsuccessful_migration = 0
first_migration = True first_migration = True
size_cluster = len(self.compute_model.get_all_hypervisors()) size_cluster = len(self.compute_model.get_all_compute_nodes())
if size_cluster == 0: if size_cluster == 0:
raise exception.ClusterEmpty() raise exception.ClusterEmpty()
self.compute_attempts(size_cluster) self.compute_attempts(size_cluster)
for hypervisor_id in self.compute_model.get_all_hypervisors(): for node_id in self.compute_model.get_all_compute_nodes():
hypervisor = self.compute_model.get_hypervisor_from_id( node = self.compute_model.get_node_from_id(node_id)
hypervisor_id)
count = self.compute_model.get_mapping(). \ count = self.compute_model.get_mapping(). \
get_node_vms_from_id(hypervisor_id) get_node_instances_from_id(node_id)
if len(count) == 0: if len(count) == 0:
if hypervisor.state == hyper_state.HypervisorState.ENABLED: if node.state == element.ServiceState.ENABLED:
self.add_change_service_state(hypervisor_id, self.add_change_service_state(
hyper_state.HypervisorState. node_id, element.ServiceState.DISABLED.value)
DISABLED.value)
while self.get_allowed_migration_attempts() >= unsuccessful_migration: while self.get_allowed_migration_attempts() >= unsuccessful_migration:
if not first_migration: if not first_migration:
@@ -449,7 +448,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
# Sort compute nodes by Score decreasing # Sort compute nodes by Score decreasing
sorted_score = sorted(score, reverse=True, key=lambda x: (x[1])) sorted_score = sorted(score, reverse=True, key=lambda x: (x[1]))
LOG.debug("Hypervisor(s) BFD %s", sorted_score) LOG.debug("Compute node(s) BFD %s", sorted_score)
# Get Node to be released # Get Node to be released
if len(score) == 0: if len(score) == 0:
@@ -458,16 +457,17 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
" of the cluster is zero")) " of the cluster is zero"))
break break
node_to_release, vm_score = self.node_and_vm_score( node_to_release, instance_score = self.node_and_instance_score(
sorted_score, score) sorted_score, score)
# Sort VMs by Score # Sort instances by Score
sorted_vms = sorted(vm_score, reverse=True, key=lambda x: (x[1])) sorted_instances = sorted(
instance_score, reverse=True, key=lambda x: (x[1]))
# BFD: Best Fit Decrease # BFD: Best Fit Decrease
LOG.debug("VM(s) BFD %s", sorted_vms) LOG.debug("VM(s) BFD %s", sorted_instances)
migrations = self.calculate_num_migrations( migrations = self.calculate_num_migrations(
sorted_vms, node_to_release, sorted_score) sorted_instances, node_to_release, sorted_score)
unsuccessful_migration = self.unsuccessful_migration_actualization( unsuccessful_migration = self.unsuccessful_migration_actualization(
migrations, unsuccessful_migration) migrations, unsuccessful_migration)
@@ -481,5 +481,5 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
def post_execute(self): def post_execute(self):
self.solution.set_efficacy_indicators( self.solution.set_efficacy_indicators(
released_compute_nodes_count=self.number_of_released_nodes, released_compute_nodes_count=self.number_of_released_nodes,
vm_migrations_count=self.number_of_migrations, instance_migrations_count=self.number_of_migrations,
) )

View File

@@ -30,11 +30,10 @@ telemetries to measure thermal/workload status of server.
from oslo_log import log from oslo_log import log
from watcher._i18n import _, _LI, _LW from watcher._i18n import _, _LW, _LI
from watcher.common import exception as wexc from watcher.common import exception as wexc
from watcher.decision_engine.cluster.history import ceilometer as ceil from watcher.decision_engine.cluster.history import ceilometer as ceil
from watcher.decision_engine.model import resource from watcher.decision_engine.model import element
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import base
@@ -122,35 +121,35 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
def ceilometer(self, c): def ceilometer(self, c):
self._ceilometer = c self._ceilometer = c
def calc_used_res(self, hypervisor, cpu_capacity, def calc_used_res(self, node, cpu_capacity,
memory_capacity, disk_capacity): memory_capacity, disk_capacity):
"""Calculate the used vcpus, memory and disk based on VM flavors""" """Calculate the used vcpus, memory and disk based on VM flavors"""
vms = self.compute_model.get_mapping().get_node_vms(hypervisor) instances = self.compute_model.mapping.get_node_instances(node)
vcpus_used = 0 vcpus_used = 0
memory_mb_used = 0 memory_mb_used = 0
disk_gb_used = 0 disk_gb_used = 0
if len(vms) > 0: if len(instances) > 0:
for vm_id in vms: for instance_id in instances:
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(instance_id)
vcpus_used += cpu_capacity.get_capacity(vm) vcpus_used += cpu_capacity.get_capacity(instance)
memory_mb_used += memory_capacity.get_capacity(vm) memory_mb_used += memory_capacity.get_capacity(instance)
disk_gb_used += disk_capacity.get_capacity(vm) disk_gb_used += disk_capacity.get_capacity(instance)
return vcpus_used, memory_mb_used, disk_gb_used return vcpus_used, memory_mb_used, disk_gb_used
def group_hosts_by_outlet_temp(self): def group_hosts_by_outlet_temp(self):
"""Group hosts based on outlet temp meters""" """Group hosts based on outlet temp meters"""
hypervisors = self.compute_model.get_all_hypervisors() nodes = self.compute_model.get_all_compute_nodes()
size_cluster = len(hypervisors) size_cluster = len(nodes)
if size_cluster == 0: if size_cluster == 0:
raise wexc.ClusterEmpty() raise wexc.ClusterEmpty()
hosts_need_release = [] hosts_need_release = []
hosts_target = [] hosts_target = []
for hypervisor_id in hypervisors: for node_id in nodes:
hypervisor = self.compute_model.get_hypervisor_from_id( node = self.compute_model.get_node_from_id(
hypervisor_id) node_id)
resource_id = hypervisor.uuid resource_id = node.uuid
outlet_temp = self.ceilometer.statistic_aggregation( outlet_temp = self.ceilometer.statistic_aggregation(
resource_id=resource_id, resource_id=resource_id,
@@ -163,53 +162,55 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
continue continue
LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp)) LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
hvmap = {'hv': hypervisor, 'outlet_temp': outlet_temp} instance_data = {'node': node, 'outlet_temp': outlet_temp}
if outlet_temp >= self.threshold: if outlet_temp >= self.threshold:
# mark the hypervisor to release resources # mark the node to release resources
hosts_need_release.append(hvmap) hosts_need_release.append(instance_data)
else: else:
hosts_target.append(hvmap) hosts_target.append(instance_data)
return hosts_need_release, hosts_target return hosts_need_release, hosts_target
def choose_vm_to_migrate(self, hosts): def choose_instance_to_migrate(self, hosts):
"""Pick up an active vm instance to migrate from provided hosts""" """Pick up an active instance to migrate from provided hosts"""
for hvmap in hosts: for instance_data in hosts:
mig_src_hypervisor = hvmap['hv'] mig_source_node = instance_data['node']
vms_of_src = self.compute_model.get_mapping().get_node_vms( instances_of_src = self.compute_model.mapping.get_node_instances(
mig_src_hypervisor) mig_source_node)
if len(vms_of_src) > 0: if len(instances_of_src) > 0:
for vm_id in vms_of_src: for instance_id in instances_of_src:
try: try:
# select the first active VM to migrate # select the first active instance to migrate
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(
if vm.state != vm_state.VMState.ACTIVE.value: instance_id)
LOG.info(_LI("VM not active, skipped: %s"), if (instance.state !=
vm.uuid) element.InstanceState.ACTIVE.value):
LOG.info(_LI("Instance not active, skipped: %s"),
instance.uuid)
continue continue
return mig_src_hypervisor, vm return mig_source_node, instance
except wexc.InstanceNotFound as e: except wexc.InstanceNotFound as e:
LOG.exception(e) LOG.exception(e)
LOG.info(_LI("VM not found")) LOG.info(_LI("Instance not found"))
return None return None
def filter_dest_servers(self, hosts, vm_to_migrate): def filter_dest_servers(self, hosts, instance_to_migrate):
"""Only return hosts with sufficient available resources""" """Only return hosts with sufficient available resources"""
cpu_capacity = self.compute_model.get_resource_from_id( cpu_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores) element.ResourceType.cpu_cores)
disk_capacity = self.compute_model.get_resource_from_id( disk_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.disk) element.ResourceType.disk)
memory_capacity = self.compute_model.get_resource_from_id( memory_capacity = self.compute_model.get_resource_from_id(
resource.ResourceType.memory) element.ResourceType.memory)
required_cores = cpu_capacity.get_capacity(vm_to_migrate) required_cores = cpu_capacity.get_capacity(instance_to_migrate)
required_disk = disk_capacity.get_capacity(vm_to_migrate) required_disk = disk_capacity.get_capacity(instance_to_migrate)
required_memory = memory_capacity.get_capacity(vm_to_migrate) required_memory = memory_capacity.get_capacity(instance_to_migrate)
# filter hypervisors without enough resource # filter nodes without enough resource
dest_servers = [] dest_servers = []
for hvmap in hosts: for instance_data in hosts:
host = hvmap['hv'] host = instance_data['node']
# available # available
cores_used, mem_used, disk_used = self.calc_used_res( cores_used, mem_used, disk_used = self.calc_used_res(
host, cpu_capacity, memory_capacity, disk_capacity) host, cpu_capacity, memory_capacity, disk_capacity)
@@ -219,7 +220,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
if cores_available >= required_cores \ if cores_available >= required_cores \
and disk_available >= required_disk \ and disk_available >= required_disk \
and mem_available >= required_memory: and mem_available >= required_memory:
dest_servers.append(hvmap) dest_servers.append(instance_data)
return dest_servers return dest_servers
@@ -251,13 +252,14 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
reverse=True, reverse=True,
key=lambda x: (x["outlet_temp"])) key=lambda x: (x["outlet_temp"]))
vm_to_migrate = self.choose_vm_to_migrate(hosts_need_release) instance_to_migrate = self.choose_instance_to_migrate(
# calculate the vm's cpu cores,memory,disk needs hosts_need_release)
if vm_to_migrate is None: # calculate the instance's cpu cores,memory,disk needs
if instance_to_migrate is None:
return self.solution return self.solution
mig_src_hypervisor, vm_src = vm_to_migrate mig_source_node, instance_src = instance_to_migrate
dest_servers = self.filter_dest_servers(hosts_target, vm_src) dest_servers = self.filter_dest_servers(hosts_target, instance_src)
# sort the filtered result by outlet temp # sort the filtered result by outlet temp
# pick up the lowest one as dest server # pick up the lowest one as dest server
if len(dest_servers) == 0: if len(dest_servers) == 0:
@@ -268,15 +270,15 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"])) dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"]))
# always use the host with lowerest outlet temperature # always use the host with lowerest outlet temperature
mig_dst_hypervisor = dest_servers[0]['hv'] mig_destination_node = dest_servers[0]['node']
# generate solution to migrate the vm to the dest server, # generate solution to migrate the instance to the dest server,
if self.compute_model.get_mapping().migrate_vm( if self.compute_model.mapping.migrate_instance(
vm_src, mig_src_hypervisor, mig_dst_hypervisor): instance_src, mig_source_node, mig_destination_node):
parameters = {'migration_type': 'live', parameters = {'migration_type': 'live',
'src_hypervisor': mig_src_hypervisor.uuid, 'source_node': mig_source_node.uuid,
'dst_hypervisor': mig_dst_hypervisor.uuid} 'destination_node': mig_destination_node.uuid}
self.solution.add_action(action_type=self.MIGRATION, self.solution.add_action(action_type=self.MIGRATION,
resource_id=vm_src.uuid, resource_id=instance_src.uuid,
input_parameters=parameters) input_parameters=parameters)
def post_execute(self): def post_execute(self):

View File

@@ -21,8 +21,7 @@ from oslo_log import log
from watcher._i18n import _, _LE, _LI, _LW from watcher._i18n import _, _LE, _LI, _LW
from watcher.common import exception as wexc from watcher.common import exception as wexc
from watcher.decision_engine.cluster.history import ceilometer as ceil from watcher.decision_engine.cluster.history import ceilometer as ceil
from watcher.decision_engine.model import resource from watcher.decision_engine.model import element
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -121,20 +120,20 @@ class UniformAirflow(base.BaseStrategy):
return { return {
"properties": { "properties": {
"threshold_airflow": { "threshold_airflow": {
"description": "airflow threshold for migration, Unit is\ "description": ("airflow threshold for migration, Unit is "
0.1CFM", "0.1CFM"),
"type": "number", "type": "number",
"default": 400.0 "default": 400.0
}, },
"threshold_inlet_t": { "threshold_inlet_t": {
"description": "inlet temperature threshold for migration\ "description": ("inlet temperature threshold for "
decision", "migration decision"),
"type": "number", "type": "number",
"default": 28.0 "default": 28.0
}, },
"threshold_power": { "threshold_power": {
"description": "system power threshold for migration\ "description": ("system power threshold for migration "
decision", "decision"),
"type": "number", "type": "number",
"default": 350.0 "default": 350.0
}, },
@@ -146,112 +145,120 @@ class UniformAirflow(base.BaseStrategy):
}, },
} }
def calculate_used_resource(self, hypervisor, cap_cores, cap_mem, def calculate_used_resource(self, node, cap_cores, cap_mem, cap_disk):
cap_disk): """Compute the used vcpus, memory and disk based on instance flavors"""
"""Calculate the used vcpus, memory and disk based on VM flavors""" instances = self.compute_model.mapping.get_node_instances(node)
vms = self.compute_model.get_mapping().get_node_vms(hypervisor)
vcpus_used = 0 vcpus_used = 0
memory_mb_used = 0 memory_mb_used = 0
disk_gb_used = 0 disk_gb_used = 0
for vm_id in vms: for instance_id in instances:
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(
vcpus_used += cap_cores.get_capacity(vm) instance_id)
memory_mb_used += cap_mem.get_capacity(vm) vcpus_used += cap_cores.get_capacity(instance)
disk_gb_used += cap_disk.get_capacity(vm) memory_mb_used += cap_mem.get_capacity(instance)
disk_gb_used += cap_disk.get_capacity(instance)
return vcpus_used, memory_mb_used, disk_gb_used return vcpus_used, memory_mb_used, disk_gb_used
def choose_vm_to_migrate(self, hosts): def choose_instance_to_migrate(self, hosts):
"""Pick up an active vm instance to migrate from provided hosts """Pick up an active instance instance to migrate from provided hosts
:param hosts: the array of dict which contains hypervisor object :param hosts: the array of dict which contains node object
""" """
vms_tobe_migrate = [] instances_tobe_migrate = []
for hvmap in hosts: for nodemap in hosts:
source_hypervisor = hvmap['hv'] source_node = nodemap['node']
source_vms = self.compute_model.get_mapping().get_node_vms( source_instances = self.compute_model.mapping.get_node_instances(
source_hypervisor) source_node)
if source_vms: if source_instances:
inlet_t = self.ceilometer.statistic_aggregation( inlet_t = self.ceilometer.statistic_aggregation(
resource_id=source_hypervisor.uuid, resource_id=source_node.uuid,
meter_name=self.meter_name_inlet_t, meter_name=self.meter_name_inlet_t,
period=self._period, period=self._period,
aggregate='avg') aggregate='avg')
power = self.ceilometer.statistic_aggregation( power = self.ceilometer.statistic_aggregation(
resource_id=source_hypervisor.uuid, resource_id=source_node.uuid,
meter_name=self.meter_name_power, meter_name=self.meter_name_power,
period=self._period, period=self._period,
aggregate='avg') aggregate='avg')
if (power < self.threshold_power and if (power < self.threshold_power and
inlet_t < self.threshold_inlet_t): inlet_t < self.threshold_inlet_t):
# hardware issue, migrate all vms from this hypervisor # hardware issue, migrate all instances from this node
for vm_id in source_vms: for instance_id in source_instances:
try: try:
vm = self.compute_model.get_vm_from_id(vm_id) instance = (self.compute_model.
vms_tobe_migrate.append(vm) get_instance_from_id(instance_id))
instances_tobe_migrate.append(instance)
except wexc.InstanceNotFound: except wexc.InstanceNotFound:
LOG.error(_LE("VM not found; error: %s"), vm_id) LOG.error(_LE("Instance not found; error: %s"),
return source_hypervisor, vms_tobe_migrate instance_id)
return source_node, instances_tobe_migrate
else: else:
# migrate the first active vm # migrate the first active instance
for vm_id in source_vms: for instance_id in source_instances:
try: try:
vm = self.compute_model.get_vm_from_id(vm_id) instance = (self.compute_model.
if vm.state != vm_state.VMState.ACTIVE.value: get_instance_from_id(instance_id))
LOG.info(_LI("VM not active; skipped: %s"), if (instance.state !=
vm.uuid) element.InstanceState.ACTIVE.value):
LOG.info(
_LI("Instance not active, skipped: %s"),
instance.uuid)
continue continue
vms_tobe_migrate.append(vm) instances_tobe_migrate.append(instance)
return source_hypervisor, vms_tobe_migrate return source_node, instances_tobe_migrate
except wexc.InstanceNotFound: except wexc.InstanceNotFound:
LOG.error(_LE("VM not found; error: %s"), vm_id) LOG.error(_LE("Instance not found; error: %s"),
instance_id)
else: else:
LOG.info(_LI("VM not found on hypervisor: %s"), LOG.info(_LI("Instance not found on node: %s"),
source_hypervisor.uuid) source_node.uuid)
def filter_destination_hosts(self, hosts, vms_to_migrate): def filter_destination_hosts(self, hosts, instances_to_migrate):
"""Return vm and host with sufficient available resources""" """Find instance and host with sufficient available resources"""
cap_cores = self.compute_model.get_resource_from_id( cap_cores = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores) element.ResourceType.cpu_cores)
cap_disk = self.compute_model.get_resource_from_id( cap_disk = self.compute_model.get_resource_from_id(
resource.ResourceType.disk) element.ResourceType.disk)
cap_mem = self.compute_model.get_resource_from_id( cap_mem = self.compute_model.get_resource_from_id(
resource.ResourceType.memory) element.ResourceType.memory)
# large vm go first # large instance go first
vms_to_migrate = sorted(vms_to_migrate, reverse=True, instances_to_migrate = sorted(
key=lambda x: (cap_cores.get_capacity(x))) instances_to_migrate, reverse=True,
# find hosts for VMs key=lambda x: (cap_cores.get_capacity(x)))
# find hosts for instances
destination_hosts = [] destination_hosts = []
for vm_to_migrate in vms_to_migrate: for instance_to_migrate in instances_to_migrate:
required_cores = cap_cores.get_capacity(vm_to_migrate) required_cores = cap_cores.get_capacity(instance_to_migrate)
required_disk = cap_disk.get_capacity(vm_to_migrate) required_disk = cap_disk.get_capacity(instance_to_migrate)
required_mem = cap_mem.get_capacity(vm_to_migrate) required_mem = cap_mem.get_capacity(instance_to_migrate)
dest_migrate_info = {} dest_migrate_info = {}
for hvmap in hosts: for nodemap in hosts:
host = hvmap['hv'] host = nodemap['node']
if 'cores_used' not in hvmap: if 'cores_used' not in nodemap:
# calculate the available resources # calculate the available resources
hvmap['cores_used'], hvmap['mem_used'],\ nodemap['cores_used'], nodemap['mem_used'],\
hvmap['disk_used'] = self.calculate_used_resource( nodemap['disk_used'] = self.calculate_used_resource(
host, cap_cores, cap_mem, cap_disk) host, cap_cores, cap_mem, cap_disk)
cores_available = (cap_cores.get_capacity(host) - cores_available = (cap_cores.get_capacity(host) -
hvmap['cores_used']) nodemap['cores_used'])
disk_available = (cap_disk.get_capacity(host) - disk_available = (cap_disk.get_capacity(host) -
hvmap['disk_used']) nodemap['disk_used'])
mem_available = cap_mem.get_capacity(host) - hvmap['mem_used'] mem_available = (
cap_mem.get_capacity(host) - nodemap['mem_used'])
if (cores_available >= required_cores and if (cores_available >= required_cores and
disk_available >= required_disk and disk_available >= required_disk and
mem_available >= required_mem): mem_available >= required_mem):
dest_migrate_info['vm'] = vm_to_migrate dest_migrate_info['instance'] = instance_to_migrate
dest_migrate_info['hv'] = host dest_migrate_info['node'] = host
hvmap['cores_used'] += required_cores nodemap['cores_used'] += required_cores
hvmap['mem_used'] += required_mem nodemap['mem_used'] += required_mem
hvmap['disk_used'] += required_disk nodemap['disk_used'] += required_disk
destination_hosts.append(dest_migrate_info) destination_hosts.append(dest_migrate_info)
break break
# check if all vms have target hosts # check if all instances have target hosts
if len(destination_hosts) != len(vms_to_migrate): if len(destination_hosts) != len(instances_to_migrate):
LOG.warning(_LW("Not all target hosts could be found; it might " LOG.warning(_LW("Not all target hosts could be found; it might "
"be because there is not enough resource")) "be because there is not enough resource"))
return None return None
@@ -260,15 +267,15 @@ class UniformAirflow(base.BaseStrategy):
def group_hosts_by_airflow(self): def group_hosts_by_airflow(self):
"""Group hosts based on airflow meters""" """Group hosts based on airflow meters"""
hypervisors = self.compute_model.get_all_hypervisors() nodes = self.compute_model.get_all_compute_nodes()
if not hypervisors: if not nodes:
raise wexc.ClusterEmpty() raise wexc.ClusterEmpty()
overload_hosts = [] overload_hosts = []
nonoverload_hosts = [] nonoverload_hosts = []
for hypervisor_id in hypervisors: for node_id in nodes:
hypervisor = self.compute_model.get_hypervisor_from_id( node = self.compute_model.get_node_from_id(
hypervisor_id) node_id)
resource_id = hypervisor.uuid resource_id = node.uuid
airflow = self.ceilometer.statistic_aggregation( airflow = self.ceilometer.statistic_aggregation(
resource_id=resource_id, resource_id=resource_id,
meter_name=self.meter_name_airflow, meter_name=self.meter_name_airflow,
@@ -280,12 +287,12 @@ class UniformAirflow(base.BaseStrategy):
continue continue
LOG.debug("%s: airflow %f" % (resource_id, airflow)) LOG.debug("%s: airflow %f" % (resource_id, airflow))
hvmap = {'hv': hypervisor, 'airflow': airflow} nodemap = {'node': node, 'airflow': airflow}
if airflow >= self.threshold_airflow: if airflow >= self.threshold_airflow:
# mark the hypervisor to release resources # mark the node to release resources
overload_hosts.append(hvmap) overload_hosts.append(nodemap)
else: else:
nonoverload_hosts.append(hvmap) nonoverload_hosts.append(nodemap)
return overload_hosts, nonoverload_hosts return overload_hosts, nonoverload_hosts
def pre_execute(self): def pre_execute(self):
@@ -299,49 +306,48 @@ class UniformAirflow(base.BaseStrategy):
self.threshold_inlet_t = self.input_parameters.threshold_inlet_t self.threshold_inlet_t = self.input_parameters.threshold_inlet_t
self.threshold_power = self.input_parameters.threshold_power self.threshold_power = self.input_parameters.threshold_power
self._period = self.input_parameters.period self._period = self.input_parameters.period
src_hypervisors, target_hypervisors = ( source_nodes, target_nodes = self.group_hosts_by_airflow()
self.group_hosts_by_airflow())
if not src_hypervisors: if not source_nodes:
LOG.debug("No hosts require optimization") LOG.debug("No hosts require optimization")
return self.solution return self.solution
if not target_hypervisors: if not target_nodes:
LOG.warning(_LW("No hosts currently have airflow under %s, " LOG.warning(_LW("No hosts currently have airflow under %s, "
"therefore there are no possible target " "therefore there are no possible target "
"hosts for any migration"), "hosts for any migration"),
self.threshold_airflow) self.threshold_airflow)
return self.solution return self.solution
# migrate the vm from server with largest airflow first # migrate the instance from server with largest airflow first
src_hypervisors = sorted(src_hypervisors, source_nodes = sorted(source_nodes,
reverse=True, reverse=True,
key=lambda x: (x["airflow"])) key=lambda x: (x["airflow"]))
vms_to_migrate = self.choose_vm_to_migrate(src_hypervisors) instances_to_migrate = self.choose_instance_to_migrate(source_nodes)
if not vms_to_migrate: if not instances_to_migrate:
return self.solution return self.solution
source_hypervisor, vms_src = vms_to_migrate source_node, instances_src = instances_to_migrate
# sort host with airflow # sort host with airflow
target_hypervisors = sorted(target_hypervisors, target_nodes = sorted(target_nodes, key=lambda x: (x["airflow"]))
key=lambda x: (x["airflow"])) # find the hosts that have enough resource
# find the hosts that have enough resource for the VM to be migrated # for the instance to be migrated
destination_hosts = self.filter_destination_hosts(target_hypervisors, destination_hosts = self.filter_destination_hosts(
vms_src) target_nodes, instances_src)
if not destination_hosts: if not destination_hosts:
LOG.warning(_LW("No target host could be found; it might " LOG.warning(_LW("No target host could be found; it might "
"be because there is not enough resources")) "be because there is not enough resources"))
return self.solution return self.solution
# generate solution to migrate the vm to the dest server, # generate solution to migrate the instance to the dest server,
for info in destination_hosts: for info in destination_hosts:
vm_src = info['vm'] instance = info['instance']
mig_dst_hypervisor = info['hv'] destination_node = info['node']
if self.compute_model.get_mapping().migrate_vm( if self.compute_model.mapping.migrate_instance(
vm_src, source_hypervisor, mig_dst_hypervisor): instance, source_node, destination_node):
parameters = {'migration_type': 'live', parameters = {'migration_type': 'live',
'src_hypervisor': source_hypervisor.uuid, 'source_node': source_node.uuid,
'dst_hypervisor': mig_dst_hypervisor.uuid} 'destination_node': destination_node.uuid}
self.solution.add_action(action_type=self.MIGRATION, self.solution.add_action(action_type=self.MIGRATION,
resource_id=vm_src.uuid, resource_id=instance.uuid,
input_parameters=parameters) input_parameters=parameters)
def post_execute(self): def post_execute(self):

View File

@@ -24,9 +24,7 @@ from watcher._i18n import _, _LE, _LI
from watcher.common import exception from watcher.common import exception
from watcher.decision_engine.cluster.history import ceilometer \ from watcher.decision_engine.cluster.history import ceilometer \
as ceilometer_cluster_history as ceilometer_cluster_history
from watcher.decision_engine.model import hypervisor_state as hyper_state from watcher.decision_engine.model import element
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -48,26 +46,26 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
* Offload phase - handling over-utilized resources * Offload phase - handling over-utilized resources
* Consolidation phase - handling under-utilized resources * Consolidation phase - handling under-utilized resources
* Solution optimization - reducing number of migrations * Solution optimization - reducing number of migrations
* Disability of unused hypervisors * Disability of unused compute nodes
A capacity coefficients (cc) might be used to adjust optimization A capacity coefficients (cc) might be used to adjust optimization
thresholds. Different resources may require different coefficient thresholds. Different resources may require different coefficient
values as well as setting up different coefficient values in both values as well as setting up different coefficient values in both
phases may lead to more efficient consolidation in the end. phases may lead to to more efficient consolidation in the end.
If the cc equals 1 the full resource capacity may be used, cc If the cc equals 1 the full resource capacity may be used, cc
values lower than 1 will lead to resource under utilization and values lower than 1 will lead to resource under utilization and
values higher than 1 will lead to resource overbooking. values higher than 1 will lead to resource overbooking.
e.g. If targeted utilization is 80 percent of hypervisor capacity, e.g. If targeted utilization is 80 percent of a compute node capacity,
the coefficient in the consolidation phase will be 0.8, but the coefficient in the consolidation phase will be 0.8, but
may any lower value in the offloading phase. The lower it gets may any lower value in the offloading phase. The lower it gets
the cluster will appear more released (distributed) for the the cluster will appear more released (distributed) for the
following consolidation phase. following consolidation phase.
As this strategy leverages VM live migration to move the load As this strategy laverages VM live migration to move the load
from one hypervisor to another, this feature needs to be set up from one compute node to another, this feature needs to be set up
correctly on all hypervisors within the cluster. correctly on all compute nodes within the cluster.
This strategy assumes it is possible to live migrate any VM from This strategy assumes it is possible to live migrate any VM from
an active hypervisor to any other active hypervisor. an active compute node to any other active compute node.
*Requirements* *Requirements*
@@ -86,8 +84,8 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
super(VMWorkloadConsolidation, self).__init__(config, osc) super(VMWorkloadConsolidation, self).__init__(config, osc)
self._ceilometer = None self._ceilometer = None
self.number_of_migrations = 0 self.number_of_migrations = 0
self.number_of_released_hypervisors = 0 self.number_of_released_nodes = 0
self.ceilometer_vm_data_cache = dict() self.ceilometer_instance_data_cache = dict()
@classmethod @classmethod
def get_name(cls): def get_name(cls):
@@ -119,200 +117,203 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
""" """
if isinstance(state, six.string_types): if isinstance(state, six.string_types):
return state return state
elif isinstance(state, (vm_state.VMState, elif isinstance(state, (element.InstanceState, element.ServiceState)):
hyper_state.HypervisorState)):
return state.value return state.value
else: else:
LOG.error(_LE('Unexpected resource state type, ' LOG.error(_LE('Unexpexted resource state type, '
'state=%(state)s, state_type=%(st)s.'), 'state=%(state)s, state_type=%(st)s.'),
state=state, state=state,
st=type(state)) st=type(state))
raise exception.WatcherException raise exception.WatcherException
def add_action_enable_hypervisor(self, hypervisor): def add_action_enable_compute_node(self, node):
"""Add an action for hypervisor enabler into the solution. """Add an action for node enabler into the solution.
:param hypervisor: hypervisor object :param node: node object
:return: None :return: None
""" """
params = {'state': hyper_state.HypervisorState.ENABLED.value} params = {'state': element.ServiceState.ENABLED.value}
self.solution.add_action( self.solution.add_action(
action_type='change_nova_service_state', action_type='change_nova_service_state',
resource_id=hypervisor.uuid, resource_id=node.uuid,
input_parameters=params) input_parameters=params)
self.number_of_released_hypervisors -= 1 self.number_of_released_nodes -= 1
def add_action_disable_hypervisor(self, hypervisor): def add_action_disable_node(self, node):
"""Add an action for hypervisor disablity into the solution. """Add an action for node disablity into the solution.
:param hypervisor: hypervisor object :param node: node object
:return: None :return: None
""" """
params = {'state': hyper_state.HypervisorState.DISABLED.value} params = {'state': element.ServiceState.DISABLED.value}
self.solution.add_action( self.solution.add_action(
action_type='change_nova_service_state', action_type='change_nova_service_state',
resource_id=hypervisor.uuid, resource_id=node.uuid,
input_parameters=params) input_parameters=params)
self.number_of_released_hypervisors += 1 self.number_of_released_nodes += 1
def add_migration(self, vm_uuid, src_hypervisor, def add_migration(self, instance_uuid, source_node,
dst_hypervisor, model): destination_node, model):
"""Add an action for VM migration into the solution. """Add an action for VM migration into the solution.
:param vm_uuid: vm uuid :param instance_uuid: instance uuid
:param src_hypervisor: hypervisor object :param source_node: node object
:param dst_hypervisor: hypervisor object :param destination_node: node object
:param model: model_root object :param model: model_root object
:return: None :return: None
""" """
vm = model.get_vm_from_id(vm_uuid) instance = model.get_instance_from_id(instance_uuid)
vm_state_str = self.get_state_str(vm.state) instance_state_str = self.get_state_str(instance.state)
if vm_state_str != vm_state.VMState.ACTIVE.value: if instance_state_str != element.InstanceState.ACTIVE.value:
# Watcher curently only supports live VM migration and block live # Watcher curently only supports live VM migration and block live
# VM migration which both requires migrated VM to be active. # VM migration which both requires migrated VM to be active.
# When supported, the cold migration may be used as a fallback # When supported, the cold migration may be used as a fallback
# migration mechanism to move non active VMs. # migration mechanism to move non active VMs.
LOG.error(_LE('Cannot live migrate: vm_uuid=%(vm_uuid)s, ' LOG.error(
'state=%(vm_state)s.'), _LE('Cannot live migrate: instance_uuid=%(instance_uuid)s, '
vm_uuid=vm_uuid, 'state=%(instance_state)s.'),
vm_state=vm_state_str) instance_uuid=instance_uuid,
instance_state=instance_state_str)
raise exception.WatcherException raise exception.WatcherException
migration_type = 'live' migration_type = 'live'
dst_hyper_state_str = self.get_state_str(dst_hypervisor.state) destination_node_state_str = self.get_state_str(destination_node.state)
if dst_hyper_state_str == hyper_state.HypervisorState.DISABLED.value: if destination_node_state_str == element.ServiceState.DISABLED.value:
self.add_action_enable_hypervisor(dst_hypervisor) self.add_action_enable_compute_node(destination_node)
model.get_mapping().unmap(src_hypervisor, vm) model.mapping.unmap(source_node, instance)
model.get_mapping().map(dst_hypervisor, vm) model.mapping.map(destination_node, instance)
params = {'migration_type': migration_type, params = {'migration_type': migration_type,
'src_hypervisor': src_hypervisor.uuid, 'source_node': source_node.uuid,
'dst_hypervisor': dst_hypervisor.uuid} 'destination_node': destination_node.uuid}
self.solution.add_action(action_type='migrate', self.solution.add_action(action_type='migrate',
resource_id=vm.uuid, resource_id=instance.uuid,
input_parameters=params) input_parameters=params)
self.number_of_migrations += 1 self.number_of_migrations += 1
def disable_unused_hypervisors(self, model): def disable_unused_nodes(self, model):
"""Generate actions for disablity of unused hypervisors. """Generate actions for disablity of unused nodes.
:param model: model_root object :param model: model_root object
:return: None :return: None
""" """
for hypervisor in model.get_all_hypervisors().values(): for node in model.get_all_compute_nodes().values():
if (len(model.get_mapping().get_node_vms(hypervisor)) == 0 and if (len(model.mapping.get_node_instances(node)) == 0 and
hypervisor.status != node.status !=
hyper_state.HypervisorState.DISABLED.value): element.ServiceState.DISABLED.value):
self.add_action_disable_hypervisor(hypervisor) self.add_action_disable_node(node)
def get_vm_utilization(self, vm_uuid, model, period=3600, aggr='avg'): def get_instance_utilization(self, instance_uuid, model,
period=3600, aggr='avg'):
"""Collect cpu, ram and disk utilization statistics of a VM. """Collect cpu, ram and disk utilization statistics of a VM.
:param vm_uuid: vm object :param instance_uuid: instance object
:param model: model_root object :param model: model_root object
:param period: seconds :param period: seconds
:param aggr: string :param aggr: string
:return: dict(cpu(number of vcpus used), ram(MB used), disk(B used)) :return: dict(cpu(number of vcpus used), ram(MB used), disk(B used))
""" """
if vm_uuid in self.ceilometer_vm_data_cache.keys(): if instance_uuid in self.ceilometer_instance_data_cache.keys():
return self.ceilometer_vm_data_cache.get(vm_uuid) return self.ceilometer_instance_data_cache.get(instance_uuid)
cpu_util_metric = 'cpu_util' cpu_util_metric = 'cpu_util'
ram_util_metric = 'memory.usage' ram_util_metric = 'memory.usage'
ram_alloc_metric = 'memory' ram_alloc_metric = 'memory'
disk_alloc_metric = 'disk.root.size' disk_alloc_metric = 'disk.root.size'
vm_cpu_util = self.ceilometer.statistic_aggregation( instance_cpu_util = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, meter_name=cpu_util_metric, resource_id=instance_uuid, meter_name=cpu_util_metric,
period=period, aggregate=aggr) period=period, aggregate=aggr)
vm_cpu_cores = model.get_resource_from_id( instance_cpu_cores = model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity( element.ResourceType.cpu_cores).get_capacity(
model.get_vm_from_id(vm_uuid)) model.get_instance_from_id(instance_uuid))
if vm_cpu_util: if instance_cpu_util:
total_cpu_utilization = vm_cpu_cores * (vm_cpu_util / 100.0) total_cpu_utilization = (
instance_cpu_cores * (instance_cpu_util / 100.0))
else: else:
total_cpu_utilization = vm_cpu_cores total_cpu_utilization = instance_cpu_cores
vm_ram_util = self.ceilometer.statistic_aggregation( instance_ram_util = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, meter_name=ram_util_metric, resource_id=instance_uuid, meter_name=ram_util_metric,
period=period, aggregate=aggr) period=period, aggregate=aggr)
if not vm_ram_util: if not instance_ram_util:
vm_ram_util = self.ceilometer.statistic_aggregation( instance_ram_util = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, meter_name=ram_alloc_metric, resource_id=instance_uuid, meter_name=ram_alloc_metric,
period=period, aggregate=aggr) period=period, aggregate=aggr)
vm_disk_util = self.ceilometer.statistic_aggregation( instance_disk_util = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, meter_name=disk_alloc_metric, resource_id=instance_uuid, meter_name=disk_alloc_metric,
period=period, aggregate=aggr) period=period, aggregate=aggr)
if not vm_ram_util or not vm_disk_util: if not instance_ram_util or not instance_disk_util:
LOG.error( LOG.error(
_LE('No values returned by %(resource_id)s ' _LE('No values returned by %(resource_id)s '
'for memory.usage or disk.root.size'), 'for memory.usage or disk.root.size'),
resource_id=vm_uuid resource_id=instance_uuid
) )
raise exception.NoDataFound raise exception.NoDataFound
self.ceilometer_vm_data_cache[vm_uuid] = dict( self.ceilometer_instance_data_cache[instance_uuid] = dict(
cpu=total_cpu_utilization, ram=vm_ram_util, disk=vm_disk_util) cpu=total_cpu_utilization, ram=instance_ram_util,
return self.ceilometer_vm_data_cache.get(vm_uuid) disk=instance_disk_util)
return self.ceilometer_instance_data_cache.get(instance_uuid)
def get_hypervisor_utilization(self, hypervisor, model, period=3600, def get_node_utilization(self, node, model, period=3600, aggr='avg'):
aggr='avg'): """Collect cpu, ram and disk utilization statistics of a node.
"""Collect cpu, ram and disk utilization statistics of a hypervisor.
:param hypervisor: hypervisor object :param node: node object
:param model: model_root object :param model: model_root object
:param period: seconds :param period: seconds
:param aggr: string :param aggr: string
:return: dict(cpu(number of cores used), ram(MB used), disk(B used)) :return: dict(cpu(number of cores used), ram(MB used), disk(B used))
""" """
hypervisor_vms = model.get_mapping().get_node_vms_from_id( node_instances = model.mapping.get_node_instances_from_id(
hypervisor.uuid) node.uuid)
hypervisor_ram_util = 0 node_ram_util = 0
hypervisor_disk_util = 0 node_disk_util = 0
hypervisor_cpu_util = 0 node_cpu_util = 0
for vm_uuid in hypervisor_vms: for instance_uuid in node_instances:
vm_util = self.get_vm_utilization(vm_uuid, model, period, aggr) instance_util = self.get_instance_utilization(
hypervisor_cpu_util += vm_util['cpu'] instance_uuid, model, period, aggr)
hypervisor_ram_util += vm_util['ram'] node_cpu_util += instance_util['cpu']
hypervisor_disk_util += vm_util['disk'] node_ram_util += instance_util['ram']
node_disk_util += instance_util['disk']
return dict(cpu=hypervisor_cpu_util, ram=hypervisor_ram_util, return dict(cpu=node_cpu_util, ram=node_ram_util,
disk=hypervisor_disk_util) disk=node_disk_util)
def get_hypervisor_capacity(self, hypervisor, model): def get_node_capacity(self, node, model):
"""Collect cpu, ram and disk capacity of a hypervisor. """Collect cpu, ram and disk capacity of a node.
:param hypervisor: hypervisor object :param node: node object
:param model: model_root object :param model: model_root object
:return: dict(cpu(cores), ram(MB), disk(B)) :return: dict(cpu(cores), ram(MB), disk(B))
""" """
hypervisor_cpu_capacity = model.get_resource_from_id( node_cpu_capacity = model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(hypervisor) element.ResourceType.cpu_cores).get_capacity(node)
hypervisor_disk_capacity = model.get_resource_from_id( node_disk_capacity = model.get_resource_from_id(
resource.ResourceType.disk_capacity).get_capacity(hypervisor) element.ResourceType.disk_capacity).get_capacity(node)
hypervisor_ram_capacity = model.get_resource_from_id( node_ram_capacity = model.get_resource_from_id(
resource.ResourceType.memory).get_capacity(hypervisor) element.ResourceType.memory).get_capacity(node)
return dict(cpu=hypervisor_cpu_capacity, ram=hypervisor_ram_capacity, return dict(cpu=node_cpu_capacity, ram=node_ram_capacity,
disk=hypervisor_disk_capacity) disk=node_disk_capacity)
def get_relative_hypervisor_utilization(self, hypervisor, model): def get_relative_node_utilization(self, node, model):
"""Return relative hypervisor utilization (rhu). """Return relative node utilization (rhu).
:param hypervisor: hypervisor object :param node: node object
:param model: model_root object :param model: model_root object
:return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>}
""" """
rhu = {} rhu = {}
util = self.get_hypervisor_utilization(hypervisor, model) util = self.get_node_utilization(node, model)
cap = self.get_hypervisor_capacity(hypervisor, model) cap = self.get_node_capacity(node, model)
for k in util.keys(): for k in util.keys():
rhu[k] = float(util[k]) / float(cap[k]) rhu[k] = float(util[k]) / float(cap[k])
return rhu return rhu
@@ -320,18 +321,18 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
def get_relative_cluster_utilization(self, model): def get_relative_cluster_utilization(self, model):
"""Calculate relative cluster utilization (rcu). """Calculate relative cluster utilization (rcu).
RCU is an average of relative utilizations (rhu) of active hypervisors. RCU is an average of relative utilizations (rhu) of active nodes.
:param model: model_root object :param model: model_root object
:return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>}
""" """
hypervisors = model.get_all_hypervisors().values() nodes = model.get_all_compute_nodes().values()
rcu = {} rcu = {}
counters = {} counters = {}
for hypervisor in hypervisors: for node in nodes:
hyper_state_str = self.get_state_str(hypervisor.state) node_state_str = self.get_state_str(node.state)
if hyper_state_str == hyper_state.HypervisorState.ENABLED.value: if node_state_str == element.ServiceState.ENABLED.value:
rhu = self.get_relative_hypervisor_utilization( rhu = self.get_relative_node_utilization(
hypervisor, model) node, model)
for k in rhu.keys(): for k in rhu.keys():
if k not in rcu: if k not in rcu:
rcu[k] = 0 rcu[k] = 0
@@ -343,42 +344,43 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
rcu[k] /= counters[k] rcu[k] /= counters[k]
return rcu return rcu
def is_overloaded(self, hypervisor, model, cc): def is_overloaded(self, node, model, cc):
"""Indicate whether a hypervisor is overloaded. """Indicate whether a node is overloaded.
This considers provided resource capacity coefficients (cc). This considers provided resource capacity coefficients (cc).
:param hypervisor: hypervisor object :param node: node object
:param model: model_root object :param model: model_root object
:param cc: dictionary containing resource capacity coefficients :param cc: dictionary containing resource capacity coefficients
:return: [True, False] :return: [True, False]
""" """
hypervisor_capacity = self.get_hypervisor_capacity(hypervisor, model) node_capacity = self.get_node_capacity(node, model)
hypervisor_utilization = self.get_hypervisor_utilization( node_utilization = self.get_node_utilization(
hypervisor, model) node, model)
metrics = ['cpu'] metrics = ['cpu']
for m in metrics: for m in metrics:
if hypervisor_utilization[m] > hypervisor_capacity[m] * cc[m]: if node_utilization[m] > node_capacity[m] * cc[m]:
return True return True
return False return False
def vm_fits(self, vm_uuid, hypervisor, model, cc): def instance_fits(self, instance_uuid, node, model, cc):
"""Indicate whether is a hypervisor able to accommodate a VM. """Indicate whether is a node able to accommodate a VM.
This considers provided resource capacity coefficients (cc). This considers provided resource capacity coefficients (cc).
:param vm_uuid: string :param instance_uuid: string
:param hypervisor: hypervisor object :param node: node object
:param model: model_root object :param model: model_root object
:param cc: dictionary containing resource capacity coefficients :param cc: dictionary containing resource capacity coefficients
:return: [True, False] :return: [True, False]
""" """
hypervisor_capacity = self.get_hypervisor_capacity(hypervisor, model) node_capacity = self.get_node_capacity(node, model)
hypervisor_utilization = self.get_hypervisor_utilization( node_utilization = self.get_node_utilization(
hypervisor, model) node, model)
vm_utilization = self.get_vm_utilization(vm_uuid, model) instance_utilization = self.get_instance_utilization(
instance_uuid, model)
metrics = ['cpu', 'ram', 'disk'] metrics = ['cpu', 'ram', 'disk']
for m in metrics: for m in metrics:
if (vm_utilization[m] + hypervisor_utilization[m] > if (instance_utilization[m] + node_utilization[m] >
hypervisor_capacity[m] * cc[m]): node_capacity[m] * cc[m]):
return False return False
return True return True
@@ -391,7 +393,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
* A->B, B->C => replace migrations A->B, B->C with * A->B, B->C => replace migrations A->B, B->C with
a single migration A->C as both solution result in a single migration A->C as both solution result in
VM running on hypervisor C which can be achieved with VM running on node C which can be achieved with
one migration instead of two. one migration instead of two.
* A->B, B->A => remove A->B and B->A as they do not result * A->B, B->A => remove A->B and B->A as they do not result
in a new VM placement. in a new VM placement.
@@ -401,58 +403,59 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
migrate_actions = ( migrate_actions = (
a for a in self.solution.actions if a[ a for a in self.solution.actions if a[
'action_type'] == 'migrate') 'action_type'] == 'migrate')
vm_to_be_migrated = (a['input_parameters']['resource_id'] instance_to_be_migrated = (
for a in migrate_actions) a['input_parameters']['resource_id'] for a in migrate_actions)
vm_uuids = list(set(vm_to_be_migrated)) instance_uuids = list(set(instance_to_be_migrated))
for vm_uuid in vm_uuids: for instance_uuid in instance_uuids:
actions = list( actions = list(
a for a in self.solution.actions if a[ a for a in self.solution.actions if a[
'input_parameters'][ 'input_parameters'][
'resource_id'] == vm_uuid) 'resource_id'] == instance_uuid)
if len(actions) > 1: if len(actions) > 1:
src = actions[0]['input_parameters']['src_hypervisor'] src = actions[0]['input_parameters']['source_node']
dst = actions[-1]['input_parameters']['dst_hypervisor'] dst = actions[-1]['input_parameters']['destination_node']
for a in actions: for a in actions:
self.solution.actions.remove(a) self.solution.actions.remove(a)
self.number_of_migrations -= 1 self.number_of_migrations -= 1
if src != dst: if src != dst:
self.add_migration(vm_uuid, src, dst, model) self.add_migration(instance_uuid, src, dst, model)
def offload_phase(self, model, cc): def offload_phase(self, model, cc):
"""Perform offloading phase. """Perform offloading phase.
This considers provided resource capacity coefficients. This considers provided resource capacity coefficients.
Offload phase performing first-fit based bin packing to offload Offload phase performing first-fit based bin packing to offload
overloaded hypervisors. This is done in a fashion of moving overloaded nodes. This is done in a fashion of moving
the least CPU utilized VM first as live migration these the least CPU utilized VM first as live migration these
generaly causes less troubles. This phase results in a cluster generaly causes less troubles. This phase results in a cluster
with no overloaded hypervisors. with no overloaded nodes.
* This phase is be able to enable disabled hypervisors (if needed * This phase is be able to enable disabled nodes (if needed
and any available) in the case of the resource capacity provided by and any available) in the case of the resource capacity provided by
active hypervisors is not able to accomodate all the load. active nodes is not able to accomodate all the load.
As the offload phase is later followed by the consolidation phase, As the offload phase is later followed by the consolidation phase,
the hypervisor enabler in this phase doesn't necessarily results the node enabler in this phase doesn't necessarily results
in more enabled hypervisors in the final solution. in more enabled nodes in the final solution.
:param model: model_root object :param model: model_root object
:param cc: dictionary containing resource capacity coefficients :param cc: dictionary containing resource capacity coefficients
""" """
sorted_hypervisors = sorted( sorted_nodes = sorted(
model.get_all_hypervisors().values(), model.get_all_compute_nodes().values(),
key=lambda x: self.get_hypervisor_utilization(x, model)['cpu']) key=lambda x: self.get_node_utilization(x, model)['cpu'])
for hypervisor in reversed(sorted_hypervisors): for node in reversed(sorted_nodes):
if self.is_overloaded(hypervisor, model, cc): if self.is_overloaded(node, model, cc):
for vm in sorted( for instance in sorted(
model.get_mapping().get_node_vms(hypervisor), model.mapping.get_node_instances(node),
key=lambda x: self.get_vm_utilization( key=lambda x: self.get_instance_utilization(
x, model)['cpu'] x, model)['cpu']
): ):
for dst_hypervisor in reversed(sorted_hypervisors): for destination_node in reversed(sorted_nodes):
if self.vm_fits(vm, dst_hypervisor, model, cc): if self.instance_fits(
self.add_migration(vm, hypervisor, instance, destination_node, model, cc):
dst_hypervisor, model) self.add_migration(instance, node,
destination_node, model)
break break
if not self.is_overloaded(hypervisor, model, cc): if not self.is_overloaded(node, model, cc):
break break
def consolidation_phase(self, model, cc): def consolidation_phase(self, model, cc):
@@ -460,8 +463,8 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
This considers provided resource capacity coefficients. This considers provided resource capacity coefficients.
Consolidation phase performing first-fit based bin packing. Consolidation phase performing first-fit based bin packing.
First, hypervisors with the lowest cpu utilization are consolidated First, nodes with the lowest cpu utilization are consolidated
by moving their load to hypervisors with the highest cpu utilization by moving their load to nodes with the highest cpu utilization
which can accomodate the load. In this phase the most cpu utilizied which can accomodate the load. In this phase the most cpu utilizied
VMs are prioritizied as their load is more difficult to accomodate VMs are prioritizied as their load is more difficult to accomodate
in the system than less cpu utilizied VMs which can be later used in the system than less cpu utilizied VMs which can be later used
@@ -470,22 +473,23 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
:param model: model_root object :param model: model_root object
:param cc: dictionary containing resource capacity coefficients :param cc: dictionary containing resource capacity coefficients
""" """
sorted_hypervisors = sorted( sorted_nodes = sorted(
model.get_all_hypervisors().values(), model.get_all_compute_nodes().values(),
key=lambda x: self.get_hypervisor_utilization(x, model)['cpu']) key=lambda x: self.get_node_utilization(x, model)['cpu'])
asc = 0 asc = 0
for hypervisor in sorted_hypervisors: for node in sorted_nodes:
vms = sorted(model.get_mapping().get_node_vms(hypervisor), instances = sorted(
key=lambda x: self.get_vm_utilization(x, model.mapping.get_node_instances(node),
model)['cpu']) key=lambda x: self.get_instance_utilization(x, model)['cpu'])
for vm in reversed(vms): for instance in reversed(instances):
dsc = len(sorted_hypervisors) - 1 dsc = len(sorted_nodes) - 1
for dst_hypervisor in reversed(sorted_hypervisors): for destination_node in reversed(sorted_nodes):
if asc >= dsc: if asc >= dsc:
break break
if self.vm_fits(vm, dst_hypervisor, model, cc): if self.instance_fits(
self.add_migration(vm, hypervisor, instance, destination_node, model, cc):
dst_hypervisor, model) self.add_migration(instance, node,
destination_node, model)
break break
dsc -= 1 dsc -= 1
asc += 1 asc += 1
@@ -504,7 +508,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
* Offload phase - handling over-utilized resources * Offload phase - handling over-utilized resources
* Consolidation phase - handling under-utilized resources * Consolidation phase - handling under-utilized resources
* Solution optimization - reducing number of migrations * Solution optimization - reducing number of migrations
* Disability of unused hypervisors * Disability of unused nodes
:param original_model: root_model object :param original_model: root_model object
""" """
@@ -524,14 +528,14 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
# Optimize solution # Optimize solution
self.optimize_solution(model) self.optimize_solution(model)
# disable unused hypervisors # disable unused nodes
self.disable_unused_hypervisors(model) self.disable_unused_nodes(model)
rcu_after = self.get_relative_cluster_utilization(model) rcu_after = self.get_relative_cluster_utilization(model)
info = { info = {
'number_of_migrations': self.number_of_migrations, 'number_of_migrations': self.number_of_migrations,
'number_of_released_hypervisors': 'number_of_released_nodes':
self.number_of_released_hypervisors, self.number_of_released_nodes,
'relative_cluster_utilization_before': str(rcu), 'relative_cluster_utilization_before': str(rcu),
'relative_cluster_utilization_after': str(rcu_after) 'relative_cluster_utilization_after': str(rcu_after)
} }
@@ -542,5 +546,5 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
# self.solution.efficacy = rcu_after['cpu'] # self.solution.efficacy = rcu_after['cpu']
self.solution.set_efficacy_indicators( self.solution.set_efficacy_indicators(
released_compute_nodes_count=self.number_of_migrations, released_compute_nodes_count=self.number_of_migrations,
vm_migrations_count=self.number_of_released_hypervisors, instance_migrations_count=self.number_of_released_nodes,
) )

View File

@@ -21,8 +21,7 @@ from oslo_log import log
from watcher._i18n import _, _LE, _LI, _LW from watcher._i18n import _, _LE, _LI, _LW
from watcher.common import exception as wexc from watcher.common import exception as wexc
from watcher.decision_engine.cluster.history import ceilometer as ceil from watcher.decision_engine.cluster.history import ceilometer as ceil
from watcher.decision_engine.model import resource from watcher.decision_engine.model import element
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -37,7 +36,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
servers. It generates solutions to move a workload whenever a server's servers. It generates solutions to move a workload whenever a server's
CPU utilization % is higher than the specified threshold. CPU utilization % is higher than the specified threshold.
The VM to be moved should make the host close to average workload The VM to be moved should make the host close to average workload
of all hypervisors. of all compute nodes.
*Requirements* *Requirements*
@@ -115,78 +114,83 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
}, },
} }
def calculate_used_resource(self, hypervisor, cap_cores, cap_mem, def calculate_used_resource(self, node, cap_cores, cap_mem,
cap_disk): cap_disk):
"""Calculate the used vcpus, memory and disk based on VM flavors""" """Calculate the used vcpus, memory and disk based on VM flavors"""
vms = self.compute_model.get_mapping().get_node_vms(hypervisor) instances = self.compute_model.mapping.get_node_instances(node)
vcpus_used = 0 vcpus_used = 0
memory_mb_used = 0 memory_mb_used = 0
disk_gb_used = 0 disk_gb_used = 0
for vm_id in vms: for instance_id in instances:
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(instance_id)
vcpus_used += cap_cores.get_capacity(vm) vcpus_used += cap_cores.get_capacity(instance)
memory_mb_used += cap_mem.get_capacity(vm) memory_mb_used += cap_mem.get_capacity(instance)
disk_gb_used += cap_disk.get_capacity(vm) disk_gb_used += cap_disk.get_capacity(instance)
return vcpus_used, memory_mb_used, disk_gb_used return vcpus_used, memory_mb_used, disk_gb_used
def choose_vm_to_migrate(self, hosts, avg_workload, workload_cache): def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache):
"""Pick up an active vm instance to migrate from provided hosts """Pick up an active instance instance to migrate from provided hosts
:param hosts: the array of dict which contains hypervisor object :param hosts: the array of dict which contains node object
:param avg_workload: the average workload value of all hypervisors :param avg_workload: the average workload value of all nodes
:param workload_cache: the map contains vm to workload mapping :param workload_cache: the map contains instance to workload mapping
""" """
for hvmap in hosts: for instance_data in hosts:
source_hypervisor = hvmap['hv'] source_node = instance_data['node']
source_vms = self.compute_model.get_mapping().get_node_vms( source_instances = self.compute_model.mapping.get_node_instances(
source_hypervisor) source_node)
if source_vms: if source_instances:
delta_workload = hvmap['workload'] - avg_workload delta_workload = instance_data['workload'] - avg_workload
min_delta = 1000000 min_delta = 1000000
instance_id = None instance_id = None
for vm_id in source_vms: for inst_id in source_instances:
try: try:
# select the first active VM to migrate # select the first active VM to migrate
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(
if vm.state != vm_state.VMState.ACTIVE.value: inst_id)
LOG.debug("VM not active; skipped: %s", if (instance.state !=
vm.uuid) element.InstanceState.ACTIVE.value):
LOG.debug("Instance not active, skipped: %s",
instance.uuid)
continue continue
current_delta = delta_workload - workload_cache[vm_id] current_delta = (
delta_workload - workload_cache[inst_id])
if 0 <= current_delta < min_delta: if 0 <= current_delta < min_delta:
min_delta = current_delta min_delta = current_delta
instance_id = vm_id instance_id = inst_id
except wexc.InstanceNotFound: except wexc.InstanceNotFound:
LOG.error(_LE("VM not found; error: %s"), vm_id) LOG.error(_LE("Instance not found; error: %s"),
instance_id)
if instance_id: if instance_id:
return (source_hypervisor, return (source_node,
self.compute_model.get_vm_from_id(instance_id)) self.compute_model.get_instance_from_id(
instance_id))
else: else:
LOG.info(_LI("VM not found on hypervisor: %s"), LOG.info(_LI("VM not found from node: %s"),
source_hypervisor.uuid) source_node.uuid)
def filter_destination_hosts(self, hosts, vm_to_migrate, def filter_destination_hosts(self, hosts, instance_to_migrate,
avg_workload, workload_cache): avg_workload, workload_cache):
'''Only return hosts with sufficient available resources''' '''Only return hosts with sufficient available resources'''
cap_cores = self.compute_model.get_resource_from_id( cap_cores = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores) element.ResourceType.cpu_cores)
cap_disk = self.compute_model.get_resource_from_id( cap_disk = self.compute_model.get_resource_from_id(
resource.ResourceType.disk) element.ResourceType.disk)
cap_mem = self.compute_model.get_resource_from_id( cap_mem = self.compute_model.get_resource_from_id(
resource.ResourceType.memory) element.ResourceType.memory)
required_cores = cap_cores.get_capacity(vm_to_migrate) required_cores = cap_cores.get_capacity(instance_to_migrate)
required_disk = cap_disk.get_capacity(vm_to_migrate) required_disk = cap_disk.get_capacity(instance_to_migrate)
required_mem = cap_mem.get_capacity(vm_to_migrate) required_mem = cap_mem.get_capacity(instance_to_migrate)
# filter hypervisors without enough resource # filter nodes without enough resource
destination_hosts = [] destination_hosts = []
src_vm_workload = workload_cache[vm_to_migrate.uuid] src_instance_workload = workload_cache[instance_to_migrate.uuid]
for hvmap in hosts: for instance_data in hosts:
host = hvmap['hv'] host = instance_data['node']
workload = hvmap['workload'] workload = instance_data['workload']
# calculate the available resources # calculate the available resources
cores_used, mem_used, disk_used = self.calculate_used_resource( cores_used, mem_used, disk_used = self.calculate_used_resource(
host, cap_cores, cap_mem, cap_disk) host, cap_cores, cap_mem, cap_disk)
@@ -197,29 +201,29 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
cores_available >= required_cores and cores_available >= required_cores and
disk_available >= required_disk and disk_available >= required_disk and
mem_available >= required_mem and mem_available >= required_mem and
(src_vm_workload + workload) < self.threshold / 100 * (src_instance_workload + workload) < self.threshold / 100 *
cap_cores.get_capacity(host) cap_cores.get_capacity(host)
): ):
destination_hosts.append(hvmap) destination_hosts.append(instance_data)
return destination_hosts return destination_hosts
def group_hosts_by_cpu_util(self): def group_hosts_by_cpu_util(self):
"""Calculate the workloads of each hypervisor """Calculate the workloads of each node
try to find out the hypervisors which have reached threshold try to find out the nodes which have reached threshold
and the hypervisors which are under threshold. and the nodes which are under threshold.
and also calculate the average workload value of all hypervisors. and also calculate the average workload value of all nodes.
and also generate the VM workload map. and also generate the instance workload map.
""" """
hypervisors = self.compute_model.get_all_hypervisors() nodes = self.compute_model.get_all_compute_nodes()
cluster_size = len(hypervisors) cluster_size = len(nodes)
if not hypervisors: if not nodes:
raise wexc.ClusterEmpty() raise wexc.ClusterEmpty()
# get cpu cores capacity of hypervisors and vms # get cpu cores capacity of nodes and instances
cap_cores = self.compute_model.get_resource_from_id( cap_cores = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores) element.ResourceType.cpu_cores)
overload_hosts = [] overload_hosts = []
nonoverload_hosts = [] nonoverload_hosts = []
# total workload of cluster # total workload of cluster
@@ -227,16 +231,16 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
cluster_workload = 0.0 cluster_workload = 0.0
# use workload_cache to store the workload of VMs for reuse purpose # use workload_cache to store the workload of VMs for reuse purpose
workload_cache = {} workload_cache = {}
for hypervisor_id in hypervisors: for node_id in nodes:
hypervisor = self.compute_model.get_hypervisor_from_id( node = self.compute_model.get_node_from_id(
hypervisor_id) node_id)
vms = self.compute_model.get_mapping().get_node_vms(hypervisor) instances = self.compute_model.mapping.get_node_instances(node)
hypervisor_workload = 0.0 node_workload = 0.0
for vm_id in vms: for instance_id in instances:
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(instance_id)
try: try:
cpu_util = self.ceilometer.statistic_aggregation( cpu_util = self.ceilometer.statistic_aggregation(
resource_id=vm_id, resource_id=instance_id,
meter_name=self._meter, meter_name=self._meter,
period=self._period, period=self._period,
aggregate='avg') aggregate='avg')
@@ -245,24 +249,25 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
LOG.error(_LE("Can not get cpu_util from Ceilometer")) LOG.error(_LE("Can not get cpu_util from Ceilometer"))
continue continue
if cpu_util is None: if cpu_util is None:
LOG.debug("VM (%s): cpu_util is None", vm_id) LOG.debug("Instance (%s): cpu_util is None", instance_id)
continue continue
vm_cores = cap_cores.get_capacity(vm) instance_cores = cap_cores.get_capacity(instance)
workload_cache[vm_id] = cpu_util * vm_cores / 100 workload_cache[instance_id] = cpu_util * instance_cores / 100
hypervisor_workload += workload_cache[vm_id] node_workload += workload_cache[instance_id]
LOG.debug("VM (%s): cpu_util %f", vm_id, cpu_util) LOG.debug("VM (%s): cpu_util %f", instance_id, cpu_util)
hypervisor_cores = cap_cores.get_capacity(hypervisor) node_cores = cap_cores.get_capacity(node)
hy_cpu_util = hypervisor_workload / hypervisor_cores * 100 hy_cpu_util = node_workload / node_cores * 100
cluster_workload += hypervisor_workload cluster_workload += node_workload
hvmap = {'hv': hypervisor, "cpu_util": hy_cpu_util, 'workload': instance_data = {
hypervisor_workload} 'node': node, "cpu_util": hy_cpu_util,
'workload': node_workload}
if hy_cpu_util >= self.threshold: if hy_cpu_util >= self.threshold:
# mark the hypervisor to release resources # mark the node to release resources
overload_hosts.append(hvmap) overload_hosts.append(instance_data)
else: else:
nonoverload_hosts.append(hvmap) nonoverload_hosts.append(instance_data)
avg_workload = cluster_workload / cluster_size avg_workload = cluster_workload / cluster_size
@@ -285,52 +290,52 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
""" """
self.threshold = self.input_parameters.threshold self.threshold = self.input_parameters.threshold
self._period = self.input_parameters.period self._period = self.input_parameters.period
src_hypervisors, target_hypervisors, avg_workload, workload_cache = ( source_nodes, target_nodes, avg_workload, workload_cache = (
self.group_hosts_by_cpu_util()) self.group_hosts_by_cpu_util())
if not src_hypervisors: if not source_nodes:
LOG.debug("No hosts require optimization") LOG.debug("No hosts require optimization")
return self.solution return self.solution
if not target_hypervisors: if not target_nodes:
LOG.warning(_LW("No hosts current have CPU utilization under %s " LOG.warning(_LW("No hosts current have CPU utilization under %s "
"percent, therefore there are no possible target " "percent, therefore there are no possible target "
"hosts for any migrations"), "hosts for any migration"),
self.threshold) self.threshold)
return self.solution return self.solution
# choose the server with largest cpu_util # choose the server with largest cpu_util
src_hypervisors = sorted(src_hypervisors, source_nodes = sorted(source_nodes,
reverse=True, reverse=True,
key=lambda x: (x[self.METER_NAME])) key=lambda x: (x[self.METER_NAME]))
vm_to_migrate = self.choose_vm_to_migrate( instance_to_migrate = self.choose_instance_to_migrate(
src_hypervisors, avg_workload, workload_cache) source_nodes, avg_workload, workload_cache)
if not vm_to_migrate: if not instance_to_migrate:
return self.solution return self.solution
source_hypervisor, vm_src = vm_to_migrate source_node, instance_src = instance_to_migrate
# find the hosts that have enough resource for the VM to be migrated # find the hosts that have enough resource for the VM to be migrated
destination_hosts = self.filter_destination_hosts( destination_hosts = self.filter_destination_hosts(
target_hypervisors, vm_src, avg_workload, workload_cache) target_nodes, instance_src, avg_workload, workload_cache)
# sort the filtered result by workload # sort the filtered result by workload
# pick up the lowest one as dest server # pick up the lowest one as dest server
if not destination_hosts: if not destination_hosts:
LOG.warning(_LW("No target host could be found; it might " # for instance.
"be because there is not enough CPU, memory " LOG.warning(_LW("No proper target host could be found, it might "
"or disk")) "be because of there's no enough CPU/Memory/DISK"))
return self.solution return self.solution
destination_hosts = sorted(destination_hosts, destination_hosts = sorted(destination_hosts,
key=lambda x: (x["cpu_util"])) key=lambda x: (x["cpu_util"]))
# always use the host with lowerest CPU utilization # always use the host with lowerest CPU utilization
mig_dst_hypervisor = destination_hosts[0]['hv'] mig_destination_node = destination_hosts[0]['node']
# generate solution to migrate the vm to the dest server, # generate solution to migrate the instance to the dest server,
if self.compute_model.get_mapping().migrate_vm( if self.compute_model.mapping.migrate_instance(
vm_src, source_hypervisor, mig_dst_hypervisor): instance_src, source_node, mig_destination_node):
parameters = {'migration_type': 'live', parameters = {'migration_type': 'live',
'src_hypervisor': source_hypervisor.uuid, 'source_node': source_node.uuid,
'dst_hypervisor': mig_dst_hypervisor.uuid} 'destination_node': mig_destination_node.uuid}
self.solution.add_action(action_type=self.MIGRATION, self.solution.add_action(action_type=self.MIGRATION,
resource_id=vm_src.uuid, resource_id=instance_src.uuid,
input_parameters=parameters) input_parameters=parameters)
def post_execute(self): def post_execute(self):

View File

@@ -30,8 +30,7 @@ from watcher._i18n import _LI, _
from watcher.common import exception from watcher.common import exception
from watcher.decision_engine.cluster.history import ceilometer as \ from watcher.decision_engine.cluster.history import ceilometer as \
ceilometer_cluster_history ceilometer_cluster_history
from watcher.decision_engine.model import resource from watcher.decision_engine.model import element
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -39,8 +38,8 @@ LOG = log.getLogger(__name__)
metrics = ['cpu_util', 'memory.resident'] metrics = ['cpu_util', 'memory.resident']
thresholds_dict = {'cpu_util': 0.2, 'memory.resident': 0.2} thresholds_dict = {'cpu_util': 0.2, 'memory.resident': 0.2}
weights_dict = {'cpu_util_weight': 1.0, 'memory.resident_weight': 1.0} weights_dict = {'cpu_util_weight': 1.0, 'memory.resident_weight': 1.0}
vm_host_measures = {'cpu_util': 'hardware.cpu.util', instance_host_measures = {'cpu_util': 'hardware.cpu.util',
'memory.resident': 'hardware.memory.used'} 'memory.resident': 'hardware.memory.used'}
ws_opts = [ ws_opts = [
cfg.ListOpt('metrics', cfg.ListOpt('metrics',
@@ -154,73 +153,75 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
def ceilometer(self, c): def ceilometer(self, c):
self._ceilometer = c self._ceilometer = c
def transform_vm_cpu(self, vm_load, host_vcpus): def transform_instance_cpu(self, instance_load, host_vcpus):
"""This method transforms vm cpu utilization to overall host cpu utilization. """Transform instance cpu utilization to overall host cpu utilization.
:param vm_load: dict that contains vm uuid and utilization info. :param instance_load: dict that contains instance uuid and
utilization info.
:param host_vcpus: int :param host_vcpus: int
:return: float value :return: float value
""" """
return vm_load['cpu_util'] * (vm_load['vcpus'] / float(host_vcpus)) return (instance_load['cpu_util'] *
(instance_load['vcpus'] / float(host_vcpus)))
@MEMOIZE @MEMOIZE
def get_vm_load(self, vm_uuid): def get_instance_load(self, instance_uuid):
"""Gathering vm load through ceilometer statistic. """Gathering instance load through ceilometer statistic.
:param vm_uuid: vm for which statistic is gathered. :param instance_uuid: instance for which statistic is gathered.
:return: dict :return: dict
""" """
LOG.debug('get_vm_load started') LOG.debug('get_instance_load started')
vm_vcpus = self.compute_model.get_resource_from_id( instance_vcpus = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity( element.ResourceType.cpu_cores).get_capacity(
self.compute_model.get_vm_from_id(vm_uuid)) self.compute_model.get_instance_from_id(instance_uuid))
vm_load = {'uuid': vm_uuid, 'vcpus': vm_vcpus} instance_load = {'uuid': instance_uuid, 'vcpus': instance_vcpus}
for meter in self.metrics: for meter in self.metrics:
avg_meter = self.ceilometer.statistic_aggregation( avg_meter = self.ceilometer.statistic_aggregation(
resource_id=vm_uuid, resource_id=instance_uuid,
meter_name=meter, meter_name=meter,
period="120", period="120",
aggregate='min' aggregate='min'
) )
if avg_meter is None: if avg_meter is None:
raise exception.NoMetricValuesForVM(resource_id=vm_uuid, raise exception.NoMetricValuesForInstance(
metric_name=meter) resource_id=instance_uuid, metric_name=meter)
vm_load[meter] = avg_meter instance_load[meter] = avg_meter
return vm_load return instance_load
def normalize_hosts_load(self, hosts): def normalize_hosts_load(self, hosts):
normalized_hosts = deepcopy(hosts) normalized_hosts = deepcopy(hosts)
for host in normalized_hosts: for host in normalized_hosts:
if 'memory.resident' in normalized_hosts[host]: if 'memory.resident' in normalized_hosts[host]:
h_memory = self.compute_model.get_resource_from_id( h_memory = self.compute_model.get_resource_from_id(
resource.ResourceType.memory).get_capacity( element.ResourceType.memory).get_capacity(
self.compute_model.get_hypervisor_from_id(host)) self.compute_model.get_node_from_id(host))
normalized_hosts[host]['memory.resident'] /= float(h_memory) normalized_hosts[host]['memory.resident'] /= float(h_memory)
return normalized_hosts return normalized_hosts
def get_hosts_load(self): def get_hosts_load(self):
"""Get load of every host by gathering vms load""" """Get load of every host by gathering instances load"""
hosts_load = {} hosts_load = {}
for hypervisor_id in self.compute_model.get_all_hypervisors(): for node_id in self.compute_model.get_all_compute_nodes():
hosts_load[hypervisor_id] = {} hosts_load[node_id] = {}
host_vcpus = self.compute_model.get_resource_from_id( host_vcpus = self.compute_model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity( element.ResourceType.cpu_cores).get_capacity(
self.compute_model.get_hypervisor_from_id(hypervisor_id)) self.compute_model.get_node_from_id(node_id))
hosts_load[hypervisor_id]['vcpus'] = host_vcpus hosts_load[node_id]['vcpus'] = host_vcpus
for metric in self.metrics: for metric in self.metrics:
avg_meter = self.ceilometer.statistic_aggregation( avg_meter = self.ceilometer.statistic_aggregation(
resource_id=hypervisor_id, resource_id=node_id,
meter_name=vm_host_measures[metric], meter_name=instance_host_measures[metric],
period="60", period="60",
aggregate='avg' aggregate='avg'
) )
if avg_meter is None: if avg_meter is None:
raise exception.NoSuchMetricForHost( raise exception.NoSuchMetricForHost(
metric=vm_host_measures[metric], metric=instance_host_measures[metric],
host=hypervisor_id) host=node_id)
hosts_load[hypervisor_id][metric] = avg_meter hosts_load[node_id][metric] = avg_meter
return hosts_load return hosts_load
def get_sd(self, hosts, meter_name): def get_sd(self, hosts, meter_name):
@@ -249,33 +250,34 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
" for %s in weight dict.") % metric) " for %s in weight dict.") % metric)
return weighted_sd return weighted_sd
def calculate_migration_case(self, hosts, vm_id, src_hp_id, dst_hp_id): def calculate_migration_case(self, hosts, instance_id,
src_node_id, dst_node_id):
"""Calculate migration case """Calculate migration case
Return list of standard deviation values that appear in the case of Return list of standard deviation values that appear in the case of
migration of vm from source host to destination host migration of instance from source host to destination host
:param hosts: hosts with their workload :param hosts: hosts with their workload
:param vm_id: the virtual machine :param instance_id: the virtual machine
:param src_hp_id: the source hypervisor id :param src_node_id: the source node id
:param dst_hp_id: the destination hypervisor id :param dst_node_id: the destination node id
:return: list of standard deviation values :return: list of standard deviation values
""" """
migration_case = [] migration_case = []
new_hosts = deepcopy(hosts) new_hosts = deepcopy(hosts)
vm_load = self.get_vm_load(vm_id) instance_load = self.get_instance_load(instance_id)
d_host_vcpus = new_hosts[dst_hp_id]['vcpus'] d_host_vcpus = new_hosts[dst_node_id]['vcpus']
s_host_vcpus = new_hosts[src_hp_id]['vcpus'] s_host_vcpus = new_hosts[src_node_id]['vcpus']
for metric in self.metrics: for metric in self.metrics:
if metric is 'cpu_util': if metric is 'cpu_util':
new_hosts[src_hp_id][metric] -= self.transform_vm_cpu( new_hosts[src_node_id][metric] -= self.transform_instance_cpu(
vm_load, instance_load,
s_host_vcpus) s_host_vcpus)
new_hosts[dst_hp_id][metric] += self.transform_vm_cpu( new_hosts[dst_node_id][metric] += self.transform_instance_cpu(
vm_load, instance_load,
d_host_vcpus) d_host_vcpus)
else: else:
new_hosts[src_hp_id][metric] -= vm_load[metric] new_hosts[src_node_id][metric] -= instance_load[metric]
new_hosts[dst_hp_id][metric] += vm_load[metric] new_hosts[dst_node_id][metric] += instance_load[metric]
normalized_hosts = self.normalize_hosts_load(new_hosts) normalized_hosts = self.normalize_hosts_load(new_hosts)
for metric in self.metrics: for metric in self.metrics:
migration_case.append(self.get_sd(normalized_hosts, metric)) migration_case.append(self.get_sd(normalized_hosts, metric))
@@ -283,45 +285,46 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
return migration_case return migration_case
def simulate_migrations(self, hosts): def simulate_migrations(self, hosts):
"""Make sorted list of pairs vm:dst_host""" """Make sorted list of pairs instance:dst_host"""
def yield_hypervisors(hypervisors): def yield_nodes(nodes):
ct = CONF['watcher_strategies.workload_stabilization'].retry_count ct = CONF['watcher_strategies.workload_stabilization'].retry_count
if self.host_choice == 'cycle': if self.host_choice == 'cycle':
for i in itertools.cycle(hypervisors): for i in itertools.cycle(nodes):
yield [i] yield [i]
if self.host_choice == 'retry': if self.host_choice == 'retry':
while True: while True:
yield random.sample(hypervisors, ct) yield random.sample(nodes, ct)
if self.host_choice == 'fullsearch': if self.host_choice == 'fullsearch':
while True: while True:
yield hypervisors yield nodes
vm_host_map = [] instance_host_map = []
for source_hp_id in self.compute_model.get_all_hypervisors(): for source_hp_id in self.compute_model.get_all_compute_nodes():
hypervisors = list(self.compute_model.get_all_hypervisors()) nodes = list(self.compute_model.get_all_compute_nodes())
hypervisors.remove(source_hp_id) nodes.remove(source_hp_id)
hypervisor_list = yield_hypervisors(hypervisors) node_list = yield_nodes(nodes)
vms_id = self.compute_model.get_mapping(). \ instances_id = self.compute_model.get_mapping(). \
get_node_vms_from_id(source_hp_id) get_node_instances_from_id(source_hp_id)
for vm_id in vms_id: for instance_id in instances_id:
min_sd_case = {'value': len(self.metrics)} min_sd_case = {'value': len(self.metrics)}
vm = self.compute_model.get_vm_from_id(vm_id) instance = self.compute_model.get_instance_from_id(instance_id)
if vm.state not in [vm_state.VMState.ACTIVE.value, if instance.state not in [element.InstanceState.ACTIVE.value,
vm_state.VMState.PAUSED.value]: element.InstanceState.PAUSED.value]:
continue continue
for dst_hp_id in next(hypervisor_list): for dst_node_id in next(node_list):
sd_case = self.calculate_migration_case(hosts, vm_id, sd_case = self.calculate_migration_case(hosts, instance_id,
source_hp_id, source_hp_id,
dst_hp_id) dst_node_id)
weighted_sd = self.calculate_weighted_sd(sd_case[:-1]) weighted_sd = self.calculate_weighted_sd(sd_case[:-1])
if weighted_sd < min_sd_case['value']: if weighted_sd < min_sd_case['value']:
min_sd_case = {'host': dst_hp_id, 'value': weighted_sd, min_sd_case = {
's_host': source_hp_id, 'vm': vm_id} 'host': dst_node_id, 'value': weighted_sd,
vm_host_map.append(min_sd_case) 's_host': source_hp_id, 'instance': instance_id}
instance_host_map.append(min_sd_case)
break break
return sorted(vm_host_map, key=lambda x: x['value']) return sorted(instance_host_map, key=lambda x: x['value'])
def check_threshold(self): def check_threshold(self):
"""Check if the cluster needs balancing""" """Check if the cluster needs balancing"""
@@ -335,32 +338,32 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
def add_migration(self, def add_migration(self,
resource_id, resource_id,
migration_type, migration_type,
src_hypervisor, source_node,
dst_hypervisor): destination_node):
parameters = {'migration_type': migration_type, parameters = {'migration_type': migration_type,
'src_hypervisor': src_hypervisor, 'source_node': source_node,
'dst_hypervisor': dst_hypervisor} 'destination_node': destination_node}
self.solution.add_action(action_type=self.MIGRATION, self.solution.add_action(action_type=self.MIGRATION,
resource_id=resource_id, resource_id=resource_id,
input_parameters=parameters) input_parameters=parameters)
def create_migration_vm(self, mig_vm, mig_src_hypervisor, def create_migration_instance(self, mig_instance, mig_source_node,
mig_dst_hypervisor): mig_destination_node):
"""Create migration VM """ """Create migration VM """
if self.compute_model.get_mapping().migrate_vm( if self.compute_model.get_mapping().migrate_instance(
mig_vm, mig_src_hypervisor, mig_dst_hypervisor): mig_instance, mig_source_node, mig_destination_node):
self.add_migration(mig_vm.uuid, 'live', self.add_migration(mig_instance.uuid, 'live',
mig_src_hypervisor.uuid, mig_source_node.uuid,
mig_dst_hypervisor.uuid) mig_destination_node.uuid)
def migrate(self, vm_uuid, src_host, dst_host): def migrate(self, instance_uuid, src_host, dst_host):
mig_vm = self.compute_model.get_vm_from_id(vm_uuid) mig_instance = self.compute_model.get_instance_from_id(instance_uuid)
mig_src_hypervisor = self.compute_model.get_hypervisor_from_id( mig_source_node = self.compute_model.get_node_from_id(
src_host) src_host)
mig_dst_hypervisor = self.compute_model.get_hypervisor_from_id( mig_destination_node = self.compute_model.get_node_from_id(
dst_host) dst_host)
self.create_migration_vm(mig_vm, mig_src_hypervisor, self.create_migration_instance(mig_instance, mig_source_node,
mig_dst_hypervisor) mig_destination_node)
def fill_solution(self): def fill_solution(self):
self.solution.model = self.compute_model self.solution.model = self.compute_model
@@ -378,28 +381,29 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
hosts_load = self.get_hosts_load() hosts_load = self.get_hosts_load()
min_sd = 1 min_sd = 1
balanced = False balanced = False
for vm_host in migration: for instance_host in migration:
dst_hp_disk = self.compute_model.get_resource_from_id( dst_hp_disk = self.compute_model.get_resource_from_id(
resource.ResourceType.disk).get_capacity( element.ResourceType.disk).get_capacity(
self.compute_model.get_hypervisor_from_id( self.compute_model.get_node_from_id(
vm_host['host'])) instance_host['host']))
vm_disk = self.compute_model.get_resource_from_id( instance_disk = self.compute_model.get_resource_from_id(
resource.ResourceType.disk).get_capacity( element.ResourceType.disk).get_capacity(
self.compute_model.get_vm_from_id(vm_host['vm'])) self.compute_model.get_instance_from_id(
if vm_disk > dst_hp_disk: instance_host['instance']))
if instance_disk > dst_hp_disk:
continue continue
vm_load = self.calculate_migration_case(hosts_load, instance_load = self.calculate_migration_case(
vm_host['vm'], hosts_load, instance_host['instance'],
vm_host['s_host'], instance_host['s_host'], instance_host['host'])
vm_host['host']) weighted_sd = self.calculate_weighted_sd(instance_load[:-1])
weighted_sd = self.calculate_weighted_sd(vm_load[:-1])
if weighted_sd < min_sd: if weighted_sd < min_sd:
min_sd = weighted_sd min_sd = weighted_sd
hosts_load = vm_load[-1] hosts_load = instance_load[-1]
self.migrate(vm_host['vm'], self.migrate(instance_host['instance'],
vm_host['s_host'], vm_host['host']) instance_host['s_host'],
instance_host['host'])
for metric, value in zip(self.metrics, vm_load[:-1]): for metric, value in zip(self.metrics, instance_load[:-1]):
if value < float(self.thresholds[metric]): if value < float(self.thresholds[metric]):
balanced = True balanced = True
break break

View File

@@ -23,7 +23,7 @@ from watcher.applier.actions import base as baction
from watcher.applier.actions import change_nova_service_state from watcher.applier.actions import change_nova_service_state
from watcher.common import clients from watcher.common import clients
from watcher.common import nova_helper from watcher.common import nova_helper
from watcher.decision_engine.model import hypervisor_state as hstate from watcher.decision_engine.model import element
from watcher.tests import base from watcher.tests import base
@@ -52,7 +52,7 @@ class TestChangeNovaServiceState(base.TestCase):
self.input_parameters = { self.input_parameters = {
baction.BaseAction.RESOURCE_ID: "compute-1", baction.BaseAction.RESOURCE_ID: "compute-1",
"state": hstate.HypervisorState.ENABLED.value, "state": element.ServiceState.ENABLED.value,
} }
self.action = change_nova_service_state.ChangeNovaServiceState( self.action = change_nova_service_state.ChangeNovaServiceState(
mock.Mock()) mock.Mock())
@@ -61,13 +61,13 @@ class TestChangeNovaServiceState(base.TestCase):
def test_parameters_down(self): def test_parameters_down(self):
self.action.input_parameters = { self.action.input_parameters = {
baction.BaseAction.RESOURCE_ID: "compute-1", baction.BaseAction.RESOURCE_ID: "compute-1",
self.action.STATE: hstate.HypervisorState.DISABLED.value} self.action.STATE: element.ServiceState.DISABLED.value}
self.assertTrue(self.action.validate_parameters()) self.assertTrue(self.action.validate_parameters())
def test_parameters_up(self): def test_parameters_up(self):
self.action.input_parameters = { self.action.input_parameters = {
baction.BaseAction.RESOURCE_ID: "compute-1", baction.BaseAction.RESOURCE_ID: "compute-1",
self.action.STATE: hstate.HypervisorState.ENABLED.value} self.action.STATE: element.ServiceState.ENABLED.value}
self.assertTrue(self.action.validate_parameters()) self.assertTrue(self.action.validate_parameters())
def test_parameters_exception_wrong_state(self): def test_parameters_exception_wrong_state(self):
@@ -82,7 +82,7 @@ class TestChangeNovaServiceState(base.TestCase):
def test_parameters_resource_id_empty(self): def test_parameters_resource_id_empty(self):
self.action.input_parameters = { self.action.input_parameters = {
self.action.STATE: hstate.HypervisorState.ENABLED.value, self.action.STATE: element.ServiceState.ENABLED.value,
} }
exc = self.assertRaises( exc = self.assertRaises(
voluptuous.Invalid, self.action.validate_parameters) voluptuous.Invalid, self.action.validate_parameters)
@@ -123,7 +123,7 @@ class TestChangeNovaServiceState(base.TestCase):
def test_execute_change_service_state_with_disable_target(self): def test_execute_change_service_state_with_disable_target(self):
self.action.input_parameters["state"] = ( self.action.input_parameters["state"] = (
hstate.HypervisorState.DISABLED.value) element.ServiceState.DISABLED.value)
self.action.execute() self.action.execute()
self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper_cls.assert_called_once_with(osc=self.m_osc)
@@ -139,7 +139,7 @@ class TestChangeNovaServiceState(base.TestCase):
def test_revert_change_service_state_with_disable_target(self): def test_revert_change_service_state_with_disable_target(self):
self.action.input_parameters["state"] = ( self.action.input_parameters["state"] = (
hstate.HypervisorState.DISABLED.value) element.ServiceState.DISABLED.value)
self.action.revert() self.action.revert()
self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper_cls.assert_called_once_with(osc=self.m_osc)

View File

@@ -54,8 +54,8 @@ class TestMigration(base.TestCase):
self.input_parameters = { self.input_parameters = {
"migration_type": "live", "migration_type": "live",
"src_hypervisor": "hypervisor1-hostname", "source_node": "compute1-hostname",
"dst_hypervisor": "hypervisor2-hostname", "destination_node": "compute2-hostname",
baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID,
} }
self.action = migration.Migrate(mock.Mock()) self.action = migration.Migrate(mock.Mock())
@@ -63,8 +63,8 @@ class TestMigration(base.TestCase):
self.input_parameters_cold = { self.input_parameters_cold = {
"migration_type": "cold", "migration_type": "cold",
"src_hypervisor": "hypervisor1-hostname", "source_node": "compute1-hostname",
"dst_hypervisor": "hypervisor2-hostname", "destination_node": "compute2-hostname",
baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID,
} }
self.action_cold = migration.Migrate(mock.Mock()) self.action_cold = migration.Migrate(mock.Mock())
@@ -74,8 +74,8 @@ class TestMigration(base.TestCase):
params = {baction.BaseAction.RESOURCE_ID: params = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID, self.INSTANCE_UUID,
self.action.MIGRATION_TYPE: 'live', self.action.MIGRATION_TYPE: 'live',
self.action.DST_HYPERVISOR: 'compute-2', self.action.DESTINATION_NODE: 'compute-2',
self.action.SRC_HYPERVISOR: 'compute-3'} self.action.SOURCE_NODE: 'compute-3'}
self.action.input_parameters = params self.action.input_parameters = params
self.assertTrue(self.action.validate_parameters()) self.assertTrue(self.action.validate_parameters())
@@ -83,31 +83,31 @@ class TestMigration(base.TestCase):
params = {baction.BaseAction.RESOURCE_ID: params = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID, self.INSTANCE_UUID,
self.action.MIGRATION_TYPE: 'cold', self.action.MIGRATION_TYPE: 'cold',
self.action.DST_HYPERVISOR: 'compute-2', self.action.DESTINATION_NODE: 'compute-2',
self.action.SRC_HYPERVISOR: 'compute-3'} self.action.SOURCE_NODE: 'compute-3'}
self.action_cold.input_parameters = params self.action_cold.input_parameters = params
self.assertTrue(self.action_cold.validate_parameters()) self.assertTrue(self.action_cold.validate_parameters())
def test_parameters_exception_empty_fields(self): def test_parameters_exception_empty_fields(self):
parameters = {baction.BaseAction.RESOURCE_ID: None, parameters = {baction.BaseAction.RESOURCE_ID: None,
'migration_type': None, 'migration_type': None,
'src_hypervisor': None, 'source_node': None,
'dst_hypervisor': None} 'destination_node': None}
self.action.input_parameters = parameters self.action.input_parameters = parameters
exc = self.assertRaises( exc = self.assertRaises(
voluptuous.MultipleInvalid, self.action.validate_parameters) voluptuous.MultipleInvalid, self.action.validate_parameters)
self.assertEqual( self.assertEqual(
sorted([(['migration_type'], voluptuous.ScalarInvalid), sorted([(['migration_type'], voluptuous.ScalarInvalid),
(['src_hypervisor'], voluptuous.TypeInvalid), (['source_node'], voluptuous.TypeInvalid),
(['dst_hypervisor'], voluptuous.TypeInvalid)]), (['destination_node'], voluptuous.TypeInvalid)]),
sorted([(e.path, type(e)) for e in exc.errors])) sorted([(e.path, type(e)) for e in exc.errors]))
def test_parameters_exception_migration_type(self): def test_parameters_exception_migration_type(self):
parameters = {baction.BaseAction.RESOURCE_ID: parameters = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID, self.INSTANCE_UUID,
'migration_type': 'unknown', 'migration_type': 'unknown',
'src_hypervisor': 'compute-2', 'source_node': 'compute-2',
'dst_hypervisor': 'compute-3'} 'destination_node': 'compute-3'}
self.action.input_parameters = parameters self.action.input_parameters = parameters
exc = self.assertRaises( exc = self.assertRaises(
voluptuous.Invalid, self.action.validate_parameters) voluptuous.Invalid, self.action.validate_parameters)
@@ -115,37 +115,37 @@ class TestMigration(base.TestCase):
[(['migration_type'], voluptuous.ScalarInvalid)], [(['migration_type'], voluptuous.ScalarInvalid)],
[(e.path, type(e)) for e in exc.errors]) [(e.path, type(e)) for e in exc.errors])
def test_parameters_exception_src_hypervisor(self): def test_parameters_exception_source_node(self):
parameters = {baction.BaseAction.RESOURCE_ID: parameters = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID, self.INSTANCE_UUID,
'migration_type': 'live', 'migration_type': 'live',
'src_hypervisor': None, 'source_node': None,
'dst_hypervisor': 'compute-3'} 'destination_node': 'compute-3'}
self.action.input_parameters = parameters self.action.input_parameters = parameters
exc = self.assertRaises( exc = self.assertRaises(
voluptuous.MultipleInvalid, self.action.validate_parameters) voluptuous.MultipleInvalid, self.action.validate_parameters)
self.assertEqual( self.assertEqual(
[(['src_hypervisor'], voluptuous.TypeInvalid)], [(['source_node'], voluptuous.TypeInvalid)],
[(e.path, type(e)) for e in exc.errors]) [(e.path, type(e)) for e in exc.errors])
def test_parameters_exception_dst_hypervisor(self): def test_parameters_exception_destination_node(self):
parameters = {baction.BaseAction.RESOURCE_ID: parameters = {baction.BaseAction.RESOURCE_ID:
self.INSTANCE_UUID, self.INSTANCE_UUID,
'migration_type': 'live', 'migration_type': 'live',
'src_hypervisor': 'compute-1', 'source_node': 'compute-1',
'dst_hypervisor': None} 'destination_node': None}
self.action.input_parameters = parameters self.action.input_parameters = parameters
exc = self.assertRaises( exc = self.assertRaises(
voluptuous.MultipleInvalid, self.action.validate_parameters) voluptuous.MultipleInvalid, self.action.validate_parameters)
self.assertEqual( self.assertEqual(
[(['dst_hypervisor'], voluptuous.TypeInvalid)], [(['destination_node'], voluptuous.TypeInvalid)],
[(e.path, type(e)) for e in exc.errors]) [(e.path, type(e)) for e in exc.errors])
def test_parameters_exception_resource_id(self): def test_parameters_exception_resource_id(self):
parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", parameters = {baction.BaseAction.RESOURCE_ID: "EFEF",
'migration_type': 'live', 'migration_type': 'live',
'src_hypervisor': 'compute-2', 'source_node': 'compute-2',
'dst_hypervisor': 'compute-3'} 'destination_node': 'compute-3'}
self.action.input_parameters = parameters self.action.input_parameters = parameters
exc = self.assertRaises( exc = self.assertRaises(
voluptuous.MultipleInvalid, self.action.validate_parameters) voluptuous.MultipleInvalid, self.action.validate_parameters)
@@ -189,7 +189,7 @@ class TestMigration(base.TestCase):
self.m_helper.live_migrate_instance.assert_called_once_with( self.m_helper.live_migrate_instance.assert_called_once_with(
instance_id=self.INSTANCE_UUID, instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname") dest_hostname="compute2-hostname")
def test_execute_cold_migration(self): def test_execute_cold_migration(self):
self.m_helper.find_instance.return_value = self.INSTANCE_UUID self.m_helper.find_instance.return_value = self.INSTANCE_UUID
@@ -202,7 +202,7 @@ class TestMigration(base.TestCase):
self.m_helper.watcher_non_live_migrate_instance.\ self.m_helper.watcher_non_live_migrate_instance.\
assert_called_once_with( assert_called_once_with(
instance_id=self.INSTANCE_UUID, instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname" dest_hostname="compute2-hostname"
) )
def test_revert_live_migration(self): def test_revert_live_migration(self):
@@ -213,7 +213,7 @@ class TestMigration(base.TestCase):
self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper_cls.assert_called_once_with(osc=self.m_osc)
self.m_helper.live_migrate_instance.assert_called_once_with( self.m_helper.live_migrate_instance.assert_called_once_with(
instance_id=self.INSTANCE_UUID, instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor1-hostname" dest_hostname="compute1-hostname"
) )
def test_revert_cold_migration(self): def test_revert_cold_migration(self):
@@ -225,7 +225,7 @@ class TestMigration(base.TestCase):
self.m_helper.watcher_non_live_migrate_instance.\ self.m_helper.watcher_non_live_migrate_instance.\
assert_called_once_with( assert_called_once_with(
instance_id=self.INSTANCE_UUID, instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor1-hostname" dest_hostname="compute1-hostname"
) )
def test_live_migrate_non_shared_storage_instance(self): def test_live_migrate_non_shared_storage_instance(self):
@@ -241,16 +241,16 @@ class TestMigration(base.TestCase):
self.m_helper.live_migrate_instance.assert_has_calls([ self.m_helper.live_migrate_instance.assert_has_calls([
mock.call(instance_id=self.INSTANCE_UUID, mock.call(instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname"), dest_hostname="compute2-hostname"),
mock.call(instance_id=self.INSTANCE_UUID, mock.call(instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname", dest_hostname="compute2-hostname",
block_migration=True) block_migration=True)
]) ])
expected = [mock.call.first(instance_id=self.INSTANCE_UUID, expected = [mock.call.first(instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname"), dest_hostname="compute2-hostname"),
mock.call.second(instance_id=self.INSTANCE_UUID, mock.call.second(instance_id=self.INSTANCE_UUID,
dest_hostname="hypervisor2-hostname", dest_hostname="compute2-hostname",
block_migration=True) block_migration=True)
] ]
self.m_helper.live_migrate_instance.mock_calls == expected self.m_helper.live_migrate_instance.mock_calls == expected

View File

@@ -56,7 +56,7 @@ class TestCeilometerHelper(base.BaseTestCase):
mock_ceilometer.return_value = ceilometer mock_ceilometer.return_value = ceilometer
cm = ceilometer_helper.CeilometerHelper() cm = ceilometer_helper.CeilometerHelper()
val = cm.statistic_aggregation( val = cm.statistic_aggregation(
resource_id="VM_ID", resource_id="INSTANCE_ID",
meter_name="cpu_util", meter_name="cpu_util",
period="7300" period="7300"
) )

View File

@@ -26,7 +26,7 @@ from watcher.common import clients
from watcher.tests import base from watcher.tests import base
class TestClients(base.BaseTestCase): class TestClients(base.TestCase):
def setUp(self): def setUp(self):
super(TestClients, self).setUp() super(TestClients, self).setUp()

View File

@@ -36,8 +36,8 @@ class TestNovaHelper(base.TestCase):
def setUp(self): def setUp(self):
super(TestNovaHelper, self).setUp() super(TestNovaHelper, self).setUp()
self.instance_uuid = "fb5311b7-37f3-457e-9cde-6494a3c59bfe" self.instance_uuid = "fb5311b7-37f3-457e-9cde-6494a3c59bfe"
self.source_hypervisor = "ldev-indeedsrv005" self.source_node = "ldev-indeedsrv005"
self.destination_hypervisor = "ldev-indeedsrv006" self.destination_node = "ldev-indeedsrv006"
def test_stop_instance(self, mock_glance, mock_cinder, mock_neutron, def test_stop_instance(self, mock_glance, mock_cinder, mock_neutron,
mock_nova): mock_nova):
@@ -71,7 +71,7 @@ class TestNovaHelper(base.TestCase):
nova_util.nova.servers = mock.MagicMock() nova_util.nova.servers = mock.MagicMock()
nova_util.nova.servers.list.return_value = [server] nova_util.nova.servers.list.return_value = [server]
instance = nova_util.live_migrate_instance( instance = nova_util.live_migrate_instance(
self.instance_uuid, self.destination_hypervisor self.instance_uuid, self.destination_node
) )
self.assertIsNotNone(instance) self.assertIsNotNone(instance)
@@ -83,7 +83,7 @@ class TestNovaHelper(base.TestCase):
is_success = nova_util.watcher_non_live_migrate_instance( is_success = nova_util.watcher_non_live_migrate_instance(
self.instance_uuid, self.instance_uuid,
self.destination_hypervisor) self.destination_node)
self.assertFalse(is_success) self.assertFalse(is_success)
@@ -92,12 +92,12 @@ class TestNovaHelper(base.TestCase):
self, mock_glance, mock_cinder, mock_neutron, mock_nova): self, mock_glance, mock_cinder, mock_neutron, mock_nova):
nova_util = nova_helper.NovaHelper() nova_util = nova_helper.NovaHelper()
instance = mock.MagicMock(id=self.instance_uuid) instance = mock.MagicMock(id=self.instance_uuid)
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_hypervisor) setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node)
nova_util.nova.servers.list.return_value = [instance] nova_util.nova.servers.list.return_value = [instance]
nova_util.nova.servers.find.return_value = instance nova_util.nova.servers.find.return_value = instance
instance = nova_util.watcher_non_live_migrate_instance( instance = nova_util.watcher_non_live_migrate_instance(
self.instance_uuid, self.instance_uuid,
self.destination_hypervisor) self.destination_node)
self.assertIsNotNone(instance) self.assertIsNotNone(instance)
@mock.patch.object(time, 'sleep', mock.Mock()) @mock.patch.object(time, 'sleep', mock.Mock())
@@ -105,7 +105,7 @@ class TestNovaHelper(base.TestCase):
self, mock_glance, mock_cinder, mock_neutron, mock_nova): self, mock_glance, mock_cinder, mock_neutron, mock_nova):
nova_util = nova_helper.NovaHelper() nova_util = nova_helper.NovaHelper()
instance = mock.MagicMock(id=self.instance_uuid) instance = mock.MagicMock(id=self.instance_uuid)
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_hypervisor) setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node)
addresses = mock.MagicMock() addresses = mock.MagicMock()
network_type = mock.MagicMock() network_type = mock.MagicMock()
networks = [] networks = []
@@ -119,7 +119,7 @@ class TestNovaHelper(base.TestCase):
nova_util.nova.servers.find.return_value = instance nova_util.nova.servers.find.return_value = instance
instance = nova_util.watcher_non_live_migrate_instance( instance = nova_util.watcher_non_live_migrate_instance(
self.instance_uuid, self.instance_uuid,
self.destination_hypervisor, keep_original_image_name=False) self.destination_node, keep_original_image_name=False)
self.assertIsNotNone(instance) self.assertIsNotNone(instance)
@mock.patch.object(time, 'sleep', mock.Mock()) @mock.patch.object(time, 'sleep', mock.Mock())
@@ -128,7 +128,7 @@ class TestNovaHelper(base.TestCase):
nova_util = nova_helper.NovaHelper() nova_util = nova_helper.NovaHelper()
instance = mock.MagicMock() instance = mock.MagicMock()
image = mock.MagicMock() image = mock.MagicMock()
setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_hypervisor) setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node)
nova_util.nova.servers.list.return_value = [instance] nova_util.nova.servers.list.return_value = [instance]
nova_util.nova.servers.find.return_value = instance nova_util.nova.servers.find.return_value = instance
image_uuid = 'fake-image-uuid' image_uuid = 'fake-image-uuid'

View File

@@ -35,7 +35,7 @@ class TestNovaClusterDataModelCollector(base.TestCase):
def test_nova_cdmc_execute(self, m_nova_helper_cls): def test_nova_cdmc_execute(self, m_nova_helper_cls):
m_nova_helper = mock.Mock() m_nova_helper = mock.Mock()
m_nova_helper_cls.return_value = m_nova_helper m_nova_helper_cls.return_value = m_nova_helper
fake_hypervisor = mock.Mock( fake_compute_node = mock.Mock(
service={'id': 123}, service={'id': 123},
hypervisor_hostname='test_hostname', hypervisor_hostname='test_hostname',
memory_mb=333, memory_mb=333,
@@ -45,19 +45,19 @@ class TestNovaClusterDataModelCollector(base.TestCase):
state='TEST_STATE', state='TEST_STATE',
status='TEST_STATUS', status='TEST_STATUS',
) )
fake_vm = mock.Mock( fake_instance = mock.Mock(
id='ef500f7e-dac8-470f-960c-169486fce71b', id='ef500f7e-dac8-470f-960c-169486fce71b',
state=mock.Mock(**{'OS-EXT-STS:vm_state': 'VM_STATE'}), state=mock.Mock(**{'OS-EXT-STS:instance_state': 'VM_STATE'}),
flavor={'ram': 333, 'disk': 222, 'vcpus': 4}, flavor={'ram': 333, 'disk': 222, 'vcpus': 4},
) )
m_nova_helper.get_hypervisors_list.return_value = [fake_hypervisor] m_nova_helper.get_compute_node_list.return_value = [fake_compute_node]
m_nova_helper.get_vms_by_hypervisor.return_value = [fake_vm] m_nova_helper.get_instances_by_node.return_value = [fake_instance]
m_nova_helper.nova.services.find.return_value = mock.Mock( m_nova_helper.nova.services.find.return_value = mock.Mock(
host='test_hostname') host='test_hostname')
def m_get_flavor_instance(vm, cache): def m_get_flavor_instance(instance, cache):
vm.flavor = {'ram': 333, 'disk': 222, 'vcpus': 4} instance.flavor = {'ram': 333, 'disk': 222, 'vcpus': 4}
return vm return instance
m_nova_helper.get_flavor_instance.side_effect = m_get_flavor_instance m_nova_helper.get_flavor_instance.side_effect = m_get_flavor_instance
@@ -69,14 +69,14 @@ class TestNovaClusterDataModelCollector(base.TestCase):
model = nova_cdmc.execute() model = nova_cdmc.execute()
hypervisors = model.get_all_hypervisors() compute_nodes = model.get_all_compute_nodes()
vms = model.get_all_vms() instances = model.get_all_instances()
self.assertEqual(1, len(hypervisors)) self.assertEqual(1, len(compute_nodes))
self.assertEqual(1, len(vms)) self.assertEqual(1, len(instances))
hypervisor = list(hypervisors.values())[0] node = list(compute_nodes.values())[0]
vm = list(vms.values())[0] instance = list(instances.values())[0]
self.assertEqual(hypervisor.uuid, 'test_hostname') self.assertEqual(node.uuid, 'test_hostname')
self.assertEqual(vm.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b') self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')

View File

@@ -17,13 +17,13 @@
# limitations under the License. # limitations under the License.
# #
from watcher.decision_engine.model import disk_info from watcher.decision_engine.model import element
from watcher.tests import base from watcher.tests import base
class TestDiskInfo(base.BaseTestCase): class TestDiskInfo(base.TestCase):
def test_all(self): def test_all(self):
disk_information = disk_info.DiskInfo() disk_information = element.DiskInfo()
disk_information.set_size(1024) disk_information.set_size(1024)
self.assertEqual(1024, disk_information.get_size()) self.assertEqual(1024, disk_information.get_size())

View File

@@ -16,15 +16,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# #
from watcher.decision_engine.model import vm as vm_model from watcher.decision_engine.model import element
from watcher.decision_engine.model import vm_state
from watcher.tests import base from watcher.tests import base
class TestVm(base.BaseTestCase): class TestInstance(base.TestCase):
def test_namedelement(self): def test_namedelement(self):
vm = vm_model.VM() instance = element.Instance()
vm.state = vm_state.VMState.ACTIVE instance.state = element.InstanceState.ACTIVE
self.assertEqual(vm_state.VMState.ACTIVE, vm.state) self.assertEqual(element.InstanceState.ACTIVE, instance.state)
vm.human_id = "human_05" instance.human_id = "human_05"
self.assertEqual("human_05", vm.human_id) self.assertEqual("human_05", instance.human_id)

View File

@@ -18,92 +18,99 @@
# #
import uuid import uuid
from watcher.decision_engine.model import hypervisor as modelhyp from watcher.decision_engine.model import element
from watcher.decision_engine.model import vm_state
from watcher.tests import base from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies import \ from watcher.tests.decision_engine.strategy.strategies import \
faker_cluster_state faker_cluster_state
class TestMapping(base.BaseTestCase): class TestMapping(base.TestCase):
VM1_UUID = "73b09e16-35b7-4922-804e-e8f5d9b740fc" INST1_UUID = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
VM2_UUID = "a4cab39b-9828-413a-bf88-f76921bf1517" INST2_UUID = "a4cab39b-9828-413a-bf88-f76921bf1517"
def setUp(self): def setUp(self):
super(TestMapping, self).setUp() super(TestMapping, self).setUp()
self.fake_cluster = faker_cluster_state.FakerModelCollector() self.fake_cluster = faker_cluster_state.FakerModelCollector()
def test_get_node_from_vm(self): def test_get_node_from_instance(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms() instances = model.get_all_instances()
keys = list(vms.keys()) keys = list(instances.keys())
vm = vms[keys[0]] instance = instances[keys[0]]
if vm.uuid != self.VM1_UUID: if instance.uuid != self.INST1_UUID:
vm = vms[keys[1]] instance = instances[keys[1]]
node = model.mapping.get_node_from_vm(vm) node = model.mapping.get_node_from_instance(instance)
self.assertEqual('Node_0', node.uuid) self.assertEqual('Node_0', node.uuid)
def test_get_node_from_vm_id(self): def test_get_node_from_instance_id(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
hyps = model.mapping.get_node_vms_from_id("BLABLABLA") nodes = model.mapping.get_node_instances_from_id("BLABLABLA")
self.assertEqual(0, hyps.__len__()) self.assertEqual(0, len(nodes))
def test_get_all_vms(self): def test_get_all_instances(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms() instances = model.get_all_instances()
self.assertEqual(2, vms.__len__()) self.assertEqual(2, len(instances))
self.assertEqual(vm_state.VMState.ACTIVE.value, self.assertEqual(element.InstanceState.ACTIVE.value,
vms[self.VM1_UUID].state) instances[self.INST1_UUID].state)
self.assertEqual(self.VM1_UUID, vms[self.VM1_UUID].uuid) self.assertEqual(self.INST1_UUID, instances[self.INST1_UUID].uuid)
self.assertEqual(vm_state.VMState.ACTIVE.value, self.assertEqual(element.InstanceState.ACTIVE.value,
vms[self.VM2_UUID].state) instances[self.INST2_UUID].state)
self.assertEqual(self.VM2_UUID, vms[self.VM2_UUID].uuid) self.assertEqual(self.INST2_UUID, instances[self.INST2_UUID].uuid)
def test_get_mapping(self): def test_get_mapping(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
mapping_vm = model.mapping.get_mapping_vm() instance_mapping = model.mapping.instance_mapping
self.assertEqual(2, mapping_vm.__len__()) self.assertEqual(2, len(instance_mapping))
self.assertEqual('Node_0', mapping_vm[self.VM1_UUID]) self.assertEqual('Node_0', instance_mapping[self.INST1_UUID])
self.assertEqual('Node_1', mapping_vm[self.VM2_UUID]) self.assertEqual('Node_1', instance_mapping[self.INST2_UUID])
def test_migrate_vm(self): def test_migrate_instance(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms() instances = model.get_all_instances()
keys = list(vms.keys()) keys = list(instances.keys())
vm0 = vms[keys[0]] instance0 = instances[keys[0]]
hyp0 = model.mapping.get_node_from_vm_id(vm0.uuid) node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
vm1 = vms[keys[1]] instance1 = instances[keys[1]]
hyp1 = model.mapping.get_node_from_vm_id(vm1.uuid) node1 = model.mapping.get_node_from_instance_id(instance1.uuid)
self.assertEqual(False, model.mapping.migrate_vm(vm1, hyp1, hyp1)) self.assertEqual(
self.assertEqual(False, model.mapping.migrate_vm(vm1, hyp0, hyp0)) False,
self.assertEqual(True, model.mapping.migrate_vm(vm1, hyp1, hyp0)) model.mapping.migrate_instance(instance1, node1, node1))
self.assertEqual(True, model.mapping.migrate_vm(vm1, hyp0, hyp1)) self.assertEqual(
False,
model.mapping.migrate_instance(instance1, node0, node0))
self.assertEqual(
True,
model.mapping.migrate_instance(instance1, node1, node0))
self.assertEqual(
True,
model.mapping.migrate_instance(instance1, node0, node1))
def test_unmap_from_id_log_warning(self): def test_unmap_from_id_log_warning(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms() instances = model.get_all_instances()
keys = list(vms.keys()) keys = list(instances.keys())
vm0 = vms[keys[0]] instance0 = instances[keys[0]]
id = "{0}".format(uuid.uuid4()) id_ = "{0}".format(uuid.uuid4())
hypervisor = modelhyp.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = id node.uuid = id_
model.mapping.unmap_from_id(hypervisor.uuid, vm0.uuid) model.mapping.unmap_from_id(node.uuid, instance0.uuid)
# self.assertEqual(len(model.mapping.get_node_vms_from_id( # self.assertEqual(len(model.mapping.get_node_instances_from_id(
# hypervisor.uuid)), 1) # node.uuid)), 1)
def test_unmap_from_id(self): def test_unmap_from_id(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms() instances = model.get_all_instances()
keys = list(vms.keys()) keys = list(instances.keys())
vm0 = vms[keys[0]] instance0 = instances[keys[0]]
hyp0 = model.mapping.get_node_from_vm_id(vm0.uuid) node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
model.mapping.unmap_from_id(hyp0.uuid, vm0.uuid) model.mapping.unmap_from_id(node0.uuid, instance0.uuid)
self.assertEqual(0, len(model.mapping.get_node_vms_from_id( self.assertEqual(0, len(model.mapping.get_node_instances_from_id(
hyp0.uuid))) node0.uuid)))

View File

@@ -19,120 +19,107 @@
import uuid import uuid
from watcher.common import exception from watcher.common import exception
from watcher.decision_engine.model import hypervisor as hypervisor_model from watcher.decision_engine.model import element
from watcher.decision_engine.model import hypervisor_state
from watcher.decision_engine.model import model_root from watcher.decision_engine.model import model_root
from watcher.tests import base from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \ from watcher.tests.decision_engine.strategy.strategies \
import faker_cluster_state import faker_cluster_state
class TestModel(base.BaseTestCase): class TestModel(base.TestCase):
def test_model(self): def test_model(self):
fake_cluster = faker_cluster_state.FakerModelCollector() fake_cluster = faker_cluster_state.FakerModelCollector()
model = fake_cluster.generate_scenario_1() model = fake_cluster.generate_scenario_1()
self.assertEqual(5, len(model._hypervisors)) self.assertEqual(5, len(model._nodes))
self.assertEqual(35, len(model._vms)) self.assertEqual(35, len(model._instances))
self.assertEqual(5, len(model.get_mapping().get_mapping())) self.assertEqual(5, len(model.mapping.get_mapping()))
def test_add_hypervisor(self): def test_add_node(self):
model = model_root.ModelRoot() model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4()) id_ = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = id node.uuid = id_
model.add_hypervisor(hypervisor) model.add_node(node)
self.assertEqual(hypervisor, model.get_hypervisor_from_id(id)) self.assertEqual(node, model.get_node_from_id(id_))
def test_delete_hypervisor(self): def test_delete_node(self):
model = model_root.ModelRoot() model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4()) id_ = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = id node.uuid = id_
model.add_hypervisor(hypervisor) model.add_node(node)
self.assertEqual(hypervisor, model.get_hypervisor_from_id(id)) self.assertEqual(node, model.get_node_from_id(id_))
model.remove_hypervisor(hypervisor) model.remove_node(node)
self.assertRaises(exception.HypervisorNotFound, self.assertRaises(exception.ComputeNodeNotFound,
model.get_hypervisor_from_id, id) model.get_node_from_id, id_)
def test_get_all_hypervisors(self): def test_get_all_compute_nodes(self):
model = model_root.ModelRoot() model = model_root.ModelRoot()
for i in range(10): for _ in range(10):
id = "{0}".format(uuid.uuid4()) id_ = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = id node.uuid = id_
model.add_hypervisor(hypervisor) model.add_node(node)
all_hypervisors = model.get_all_hypervisors() all_nodes = model.get_all_compute_nodes()
for id in all_hypervisors: for id_ in all_nodes:
hyp = model.get_hypervisor_from_id(id) node = model.get_node_from_id(id_)
model.assert_hypervisor(hyp) model.assert_node(node)
def test_set_get_state_hypervisors(self): def test_set_get_state_nodes(self):
model = model_root.ModelRoot() model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4()) id_ = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = id node.uuid = id_
model.add_hypervisor(hypervisor) model.add_node(node)
self.assertIsInstance(hypervisor.state, self.assertIsInstance(node.state, element.ServiceState)
hypervisor_state.HypervisorState)
hyp = model.get_hypervisor_from_id(id) node = model.get_node_from_id(id_)
hyp.state = hypervisor_state.HypervisorState.OFFLINE node.state = element.ServiceState.OFFLINE
self.assertIsInstance(hyp.state, hypervisor_state.HypervisorState) self.assertIsInstance(node.state, element.ServiceState)
# /watcher/decision_engine/framework/model/hypervisor.py def test_node_from_id_raise(self):
# set_state accept any char chain.
# verification (IsInstance) should be used in the function
# hyp.set_state('blablabla')
# self.assertEqual(hyp.get_state(), 'blablabla')
# self.assertIsInstance(hyp.get_state(), HypervisorState)
# def test_get_all_vms(self):
# model = ModelRoot()
# vms = model.get_all_vms()
# self.assert(len(model._vms))
def test_hypervisor_from_id_raise(self):
model = model_root.ModelRoot() model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4()) id_ = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = id node.uuid = id_
model.add_hypervisor(hypervisor) model.add_node(node)
id2 = "{0}".format(uuid.uuid4()) id2 = "{0}".format(uuid.uuid4())
self.assertRaises(exception.HypervisorNotFound, self.assertRaises(exception.ComputeNodeNotFound,
model.get_hypervisor_from_id, id2) model.get_node_from_id, id2)
def test_remove_hypervisor_raise(self): def test_remove_node_raise(self):
model = model_root.ModelRoot() model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4()) id_ = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = id node.uuid = id_
model.add_hypervisor(hypervisor) model.add_node(node)
id2 = "{0}".format(uuid.uuid4()) id2 = "{0}".format(uuid.uuid4())
hypervisor2 = hypervisor_model.Hypervisor() node2 = element.ComputeNode()
hypervisor2.uuid = id2 node2.uuid = id2
self.assertRaises(exception.HypervisorNotFound, self.assertRaises(exception.ComputeNodeNotFound,
model.remove_hypervisor, hypervisor2) model.remove_node, node2)
def test_assert_hypervisor_raise(self): def test_assert_node_raise(self):
model = model_root.ModelRoot() model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4()) id_ = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor() node = element.ComputeNode()
hypervisor.uuid = id node.uuid = id_
model.add_hypervisor(hypervisor) model.add_node(node)
self.assertRaises(exception.IllegalArgumentException, self.assertRaises(exception.IllegalArgumentException,
model.assert_hypervisor, "objet_qcq") model.assert_node, "objet_qcq")
def test_vm_from_id_raise(self): def test_instance_from_id_raise(self):
fake_cluster = faker_cluster_state.FakerModelCollector() fake_cluster = faker_cluster_state.FakerModelCollector()
model = fake_cluster.generate_scenario_1() model = fake_cluster.generate_scenario_1()
self.assertRaises(exception.InstanceNotFound, self.assertRaises(exception.InstanceNotFound,
model.get_vm_from_id, "valeur_qcq") model.get_instance_from_id, "valeur_qcq")
def test_assert_vm_raise(self): def test_assert_instance_raise(self):
model = model_root.ModelRoot() model = model_root.ModelRoot()
self.assertRaises(exception.IllegalArgumentException, self.assertRaises(exception.IllegalArgumentException,
model.assert_vm, "valeur_qcq") model.assert_instance, "valeur_qcq")

View File

@@ -1,32 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from watcher.decision_engine.model import compute_resource
from watcher.tests import base
class TestNamedElement(base.BaseTestCase):
def test_namedelement(self):
id = compute_resource.ComputeResource()
id.uuid = "BLABLABLA"
self.assertEqual("BLABLABLA", id.uuid)
def test_set_get_human_id(self):
id = compute_resource.ComputeResource()
id.human_id = "BLABLABLA"
self.assertEqual("BLABLABLA", id.human_id)

View File

@@ -50,7 +50,7 @@ class SolutionFakerSingleHyp(object):
current_state_cluster = faker_cluster_state.FakerModelCollector() current_state_cluster = faker_cluster_state.FakerModelCollector()
sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon = strategies.BasicConsolidation(config=mock.Mock())
sercon._compute_model = ( sercon._compute_model = (
current_state_cluster.generate_scenario_3_with_2_hypervisors()) current_state_cluster.generate_scenario_3_with_2_nodes())
sercon.ceilometer = mock.MagicMock( sercon.ceilometer = mock.MagicMock(
get_statistics=metrics.mock_get_statistics) get_statistics=metrics.mock_get_statistics)
@@ -66,8 +66,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock()) goal=mock.Mock(), strategy=mock.Mock())
parameters = { parameters = {
"src_uuid_hypervisor": "server1", "source_node": "server1",
"dst_uuid_hypervisor": "server2", "destination_node": "server2",
} }
solution.add_action(action_type="migrate", solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@@ -93,8 +93,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock()) goal=mock.Mock(), strategy=mock.Mock())
parameters = { parameters = {
"src_uuid_hypervisor": "server1", "source_node": "server1",
"dst_uuid_hypervisor": "server2", "destination_node": "server2",
} }
solution.add_action(action_type="migrate", solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@@ -125,8 +125,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock()) goal=mock.Mock(), strategy=mock.Mock())
parameters = { parameters = {
"src_uuid_hypervisor": "server1", "src_uuid_node": "server1",
"dst_uuid_hypervisor": "server2", "dst_uuid_node": "server2",
} }
solution.add_action(action_type="migrate", solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",

View File

@@ -20,13 +20,14 @@ from watcher.decision_engine.solution import default
from watcher.tests import base from watcher.tests import base
class TestDefaultSolution(base.BaseTestCase): class TestDefaultSolution(base.TestCase):
def test_default_solution(self): def test_default_solution(self):
solution = default.DefaultSolution( solution = default.DefaultSolution(
goal=mock.Mock(), strategy=mock.Mock()) goal=mock.Mock(), strategy=mock.Mock())
parameters = { parameters = {
"src_uuid_hypervisor": "server1", "source_node": "server1",
"dst_uuid_hypervisor": "server2", "destination_node": "server2",
} }
solution.add_action(action_type="nop", solution.add_action(action_type="nop",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@@ -34,8 +35,8 @@ class TestDefaultSolution(base.BaseTestCase):
self.assertEqual(1, len(solution.actions)) self.assertEqual(1, len(solution.actions))
expected_action_type = "nop" expected_action_type = "nop"
expected_parameters = { expected_parameters = {
"src_uuid_hypervisor": "server1", "source_node": "server1",
"dst_uuid_hypervisor": "server2", "destination_node": "server2",
"resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36"
} }
self.assertEqual(expected_action_type, self.assertEqual(expected_action_type,

View File

@@ -20,11 +20,8 @@
import mock import mock
from watcher.decision_engine.model.collector import base from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import hypervisor from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root as modelroot from watcher.decision_engine.model import model_root as modelroot
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm as modelvm
from watcher.decision_engine.model import vm_state
class FakerModelCollector(base.BaseClusterDataModelCollector): class FakerModelCollector(base.BaseClusterDataModelCollector):
@@ -38,17 +35,17 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
return self.generate_scenario_1() return self.generate_scenario_1()
def generate_scenario_1(self): def generate_scenario_1(self):
"""Simulates cluster with 2 hypervisors and 2 VMs using 1:1 mapping""" """Simulates cluster with 2 nodes and 2 instances using 1:1 mapping"""
current_state_cluster = modelroot.ModelRoot() current_state_cluster = modelroot.ModelRoot()
count_node = 2 count_node = 2
count_vm = 2 count_instance = 2
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
disk_capacity =\ disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity) element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem) current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores) current_state_cluster.create_resource(num_cores)
@@ -57,7 +54,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node): for i in range(0, count_node):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
node.state = 'enabled' node.state = 'enabled'
@@ -65,43 +62,43 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64) mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250) disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 40) num_cores.set_capacity(node, 40)
current_state_cluster.add_hypervisor(node) current_state_cluster.add_node(node)
for i in range(0, count_vm): for i in range(0, count_instance):
vm_uuid = "VM_{0}".format(i) instance_uuid = "INSTANCE_{0}".format(i)
vm = modelvm.VM() instance = element.Instance()
vm.uuid = vm_uuid instance.uuid = instance_uuid
vm.state = vm_state.VMState.ACTIVE instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(vm, 2) mem.set_capacity(instance, 2)
disk.set_capacity(vm, 20) disk.set_capacity(instance, 20)
num_cores.set_capacity(vm, 10) num_cores.set_capacity(instance, 10)
current_state_cluster.add_vm(vm) current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"), current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0")) current_state_cluster.get_instance_from_id("INSTANCE_0"))
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_1"), current_state_cluster.get_node_from_id("Node_1"),
current_state_cluster.get_vm_from_id("VM_1")) current_state_cluster.get_instance_from_id("INSTANCE_1"))
return current_state_cluster return current_state_cluster
def generate_scenario_2(self): def generate_scenario_2(self):
"""Simulates a cluster """Simulates a cluster
With 4 hypervisors and 6 VMs all mapped to one hypervisor With 4 nodes and 6 instances all mapped to a single node
""" """
current_state_cluster = modelroot.ModelRoot() current_state_cluster = modelroot.ModelRoot()
count_node = 4 count_node = 4
count_vm = 6 count_instance = 6
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
disk_capacity =\ disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity) element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem) current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores) current_state_cluster.create_resource(num_cores)
@@ -110,7 +107,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node): for i in range(0, count_node):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
node.state = 'up' node.state = 'up'
@@ -118,39 +115,39 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64) mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250) disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 16) num_cores.set_capacity(node, 16)
current_state_cluster.add_hypervisor(node) current_state_cluster.add_node(node)
for i in range(0, count_vm): for i in range(0, count_instance):
vm_uuid = "VM_{0}".format(i) instance_uuid = "INSTANCE_{0}".format(i)
vm = modelvm.VM() instance = element.Instance()
vm.uuid = vm_uuid instance.uuid = instance_uuid
vm.state = vm_state.VMState.ACTIVE instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(vm, 2) mem.set_capacity(instance, 2)
disk.set_capacity(vm, 20) disk.set_capacity(instance, 20)
num_cores.set_capacity(vm, 10) num_cores.set_capacity(instance, 10)
current_state_cluster.add_vm(vm) current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"), current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_%s" % str(i))) current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
return current_state_cluster return current_state_cluster
def generate_scenario_3(self): def generate_scenario_3(self):
"""Simulates a cluster """Simulates a cluster
With 4 hypervisors and 6 VMs all mapped to one hypervisor With 4 nodes and 6 instances all mapped to one node
""" """
current_state_cluster = modelroot.ModelRoot() current_state_cluster = modelroot.ModelRoot()
count_node = 2 count_node = 2
count_vm = 4 count_instance = 4
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
disk_capacity =\ disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity) element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem) current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores) current_state_cluster.create_resource(num_cores)
@@ -159,7 +156,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node): for i in range(0, count_node):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
node.state = 'up' node.state = 'up'
@@ -167,21 +164,21 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64) mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250) disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 10) num_cores.set_capacity(node, 10)
current_state_cluster.add_hypervisor(node) current_state_cluster.add_node(node)
for i in range(6, 6 + count_vm): for i in range(6, 6 + count_instance):
vm_uuid = "VM_{0}".format(i) instance_uuid = "INSTANCE_{0}".format(i)
vm = modelvm.VM() instance = element.Instance()
vm.uuid = vm_uuid instance.uuid = instance_uuid
vm.state = vm_state.VMState.ACTIVE instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(vm, 2) mem.set_capacity(instance, 2)
disk.set_capacity(vm, 20) disk.set_capacity(instance, 20)
num_cores.set_capacity(vm, 2 ** (i-6)) num_cores.set_capacity(instance, 2 ** (i-6))
current_state_cluster.add_vm(vm) current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"), current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_%s" % str(i))) current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
return current_state_cluster return current_state_cluster
@@ -193,76 +190,77 @@ class FakeCeilometerMetrics(object):
def mock_get_statistics(self, resource_id, meter_name, period=3600, def mock_get_statistics(self, resource_id, meter_name, period=3600,
aggregate='avg'): aggregate='avg'):
if meter_name == "compute.node.cpu.percent": if meter_name == "compute.node.cpu.percent":
return self.get_hypervisor_cpu_util(resource_id) return self.get_node_cpu_util(resource_id)
elif meter_name == "cpu_util": elif meter_name == "cpu_util":
return self.get_vm_cpu_util(resource_id) return self.get_instance_cpu_util(resource_id)
elif meter_name == "memory.usage": elif meter_name == "memory.usage":
return self.get_vm_ram_util(resource_id) return self.get_instance_ram_util(resource_id)
elif meter_name == "disk.root.size": elif meter_name == "disk.root.size":
return self.get_vm_disk_root_size(resource_id) return self.get_instance_disk_root_size(resource_id)
def get_hypervisor_cpu_util(self, r_id): def get_node_cpu_util(self, r_id):
"""Calculates hypervisor utilization dynamicaly. """Calculates node utilization dynamicaly.
Hypervisor CPU utilization should consider node CPU utilization should consider
and corelate with actual VM-hypervisor mappings and corelate with actual instance-node mappings
provided within a cluster model. provided within a cluster model.
Returns relative hypervisor CPU utilization <0, 100>. Returns relative node CPU utilization <0, 100>.
:param r_id: resource id :param r_id: resource id
""" """
id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1]) id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
vms = self.model.get_mapping().get_node_vms_from_id(id) instances = self.model.get_mapping().get_node_instances_from_id(id)
util_sum = 0.0 util_sum = 0.0
hypervisor_cpu_cores = self.model.get_resource_from_id( node_cpu_cores = self.model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity_from_id(id) element.ResourceType.cpu_cores).get_capacity_from_id(id)
for vm_uuid in vms: for instance_uuid in instances:
vm_cpu_cores = self.model.get_resource_from_id( instance_cpu_cores = self.model.get_resource_from_id(
resource.ResourceType.cpu_cores).\ element.ResourceType.cpu_cores).\
get_capacity(self.model.get_vm_from_id(vm_uuid)) get_capacity(self.model.get_instance_from_id(instance_uuid))
total_cpu_util = vm_cpu_cores * self.get_vm_cpu_util(vm_uuid) total_cpu_util = instance_cpu_cores * self.get_instance_cpu_util(
instance_uuid)
util_sum += total_cpu_util / 100.0 util_sum += total_cpu_util / 100.0
util_sum /= hypervisor_cpu_cores util_sum /= node_cpu_cores
return util_sum * 100.0 return util_sum * 100.0
def get_vm_cpu_util(self, r_id): def get_instance_cpu_util(self, r_id):
vm_cpu_util = dict() instance_cpu_util = dict()
vm_cpu_util['VM_0'] = 10 instance_cpu_util['INSTANCE_0'] = 10
vm_cpu_util['VM_1'] = 30 instance_cpu_util['INSTANCE_1'] = 30
vm_cpu_util['VM_2'] = 60 instance_cpu_util['INSTANCE_2'] = 60
vm_cpu_util['VM_3'] = 20 instance_cpu_util['INSTANCE_3'] = 20
vm_cpu_util['VM_4'] = 40 instance_cpu_util['INSTANCE_4'] = 40
vm_cpu_util['VM_5'] = 50 instance_cpu_util['INSTANCE_5'] = 50
vm_cpu_util['VM_6'] = 100 instance_cpu_util['INSTANCE_6'] = 100
vm_cpu_util['VM_7'] = 100 instance_cpu_util['INSTANCE_7'] = 100
vm_cpu_util['VM_8'] = 100 instance_cpu_util['INSTANCE_8'] = 100
vm_cpu_util['VM_9'] = 100 instance_cpu_util['INSTANCE_9'] = 100
return vm_cpu_util[str(r_id)] return instance_cpu_util[str(r_id)]
def get_vm_ram_util(self, r_id): def get_instance_ram_util(self, r_id):
vm_ram_util = dict() instance_ram_util = dict()
vm_ram_util['VM_0'] = 1 instance_ram_util['INSTANCE_0'] = 1
vm_ram_util['VM_1'] = 2 instance_ram_util['INSTANCE_1'] = 2
vm_ram_util['VM_2'] = 4 instance_ram_util['INSTANCE_2'] = 4
vm_ram_util['VM_3'] = 8 instance_ram_util['INSTANCE_3'] = 8
vm_ram_util['VM_4'] = 3 instance_ram_util['INSTANCE_4'] = 3
vm_ram_util['VM_5'] = 2 instance_ram_util['INSTANCE_5'] = 2
vm_ram_util['VM_6'] = 1 instance_ram_util['INSTANCE_6'] = 1
vm_ram_util['VM_7'] = 2 instance_ram_util['INSTANCE_7'] = 2
vm_ram_util['VM_8'] = 4 instance_ram_util['INSTANCE_8'] = 4
vm_ram_util['VM_9'] = 8 instance_ram_util['INSTANCE_9'] = 8
return vm_ram_util[str(r_id)] return instance_ram_util[str(r_id)]
def get_vm_disk_root_size(self, r_id): def get_instance_disk_root_size(self, r_id):
vm_disk_util = dict() instance_disk_util = dict()
vm_disk_util['VM_0'] = 10 instance_disk_util['INSTANCE_0'] = 10
vm_disk_util['VM_1'] = 15 instance_disk_util['INSTANCE_1'] = 15
vm_disk_util['VM_2'] = 30 instance_disk_util['INSTANCE_2'] = 30
vm_disk_util['VM_3'] = 35 instance_disk_util['INSTANCE_3'] = 35
vm_disk_util['VM_4'] = 20 instance_disk_util['INSTANCE_4'] = 20
vm_disk_util['VM_5'] = 25 instance_disk_util['INSTANCE_5'] = 25
vm_disk_util['VM_6'] = 25 instance_disk_util['INSTANCE_6'] = 25
vm_disk_util['VM_7'] = 25 instance_disk_util['INSTANCE_7'] = 25
vm_disk_util['VM_8'] = 25 instance_disk_util['INSTANCE_8'] = 25
vm_disk_util['VM_9'] = 25 instance_disk_util['INSTANCE_9'] = 25
return vm_disk_util[str(r_id)] return instance_disk_util[str(r_id)]

View File

@@ -19,10 +19,8 @@
import mock import mock
from watcher.decision_engine.model.collector import base from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import hypervisor from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root as modelroot from watcher.decision_engine.model import model_root as modelroot
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm as modelvm
class FakerModelCollector(base.BaseClusterDataModelCollector): class FakerModelCollector(base.BaseClusterDataModelCollector):
@@ -36,292 +34,292 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
return self.generate_scenario_1() return self.generate_scenario_1()
def generate_scenario_1(self): def generate_scenario_1(self):
vms = [] instances = []
current_state_cluster = modelroot.ModelRoot() current_state_cluster = modelroot.ModelRoot()
# number of nodes # number of nodes
count_node = 5 node_count = 5
# number max of vm per node # number max of instance per node
node_count_vm = 7 node_instance_count = 7
# total number of virtual machine # total number of virtual machine
count_vm = (count_node * node_count_vm) instance_count = (node_count * node_instance_count)
# define ressouce ( CPU, MEM disk, ... ) # define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz # 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem) current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores) current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk) current_state_cluster.create_resource(disk)
for i in range(0, count_node): for i in range(0, node_count):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132) mem.set_capacity(node, 132)
disk.set_capacity(node, 250) disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40) num_cores.set_capacity(node, 40)
current_state_cluster.add_hypervisor(node) current_state_cluster.add_node(node)
for i in range(0, count_vm): for i in range(0, instance_count):
vm_uuid = "VM_{0}".format(i) instance_uuid = "INSTANCE_{0}".format(i)
vm = modelvm.VM() instance = element.Instance()
vm.uuid = vm_uuid instance.uuid = instance_uuid
mem.set_capacity(vm, 2) mem.set_capacity(instance, 2)
disk.set_capacity(vm, 20) disk.set_capacity(instance, 20)
num_cores.set_capacity(vm, 10) num_cores.set_capacity(instance, 10)
vms.append(vm) instances.append(instance)
current_state_cluster.add_vm(vm) current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"), current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0")) current_state_cluster.get_instance_from_id("INSTANCE_0"))
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"), current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_1")) current_state_cluster.get_instance_from_id("INSTANCE_1"))
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_1"), current_state_cluster.get_node_from_id("Node_1"),
current_state_cluster.get_vm_from_id("VM_2")) current_state_cluster.get_instance_from_id("INSTANCE_2"))
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"), current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_3")) current_state_cluster.get_instance_from_id("INSTANCE_3"))
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"), current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_4")) current_state_cluster.get_instance_from_id("INSTANCE_4"))
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"), current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_5")) current_state_cluster.get_instance_from_id("INSTANCE_5"))
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_3"), current_state_cluster.get_node_from_id("Node_3"),
current_state_cluster.get_vm_from_id("VM_6")) current_state_cluster.get_instance_from_id("INSTANCE_6"))
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_4"), current_state_cluster.get_node_from_id("Node_4"),
current_state_cluster.get_vm_from_id("VM_7")) current_state_cluster.get_instance_from_id("INSTANCE_7"))
return current_state_cluster return current_state_cluster
def map(self, model, h_id, vm_id): def map(self, model, h_id, instance_id):
model.get_mapping().map( model.get_mapping().map(
model.get_hypervisor_from_id(h_id), model.get_node_from_id(h_id),
model.get_vm_from_id(vm_id)) model.get_instance_from_id(instance_id))
def generate_scenario_3_with_2_hypervisors(self): def generate_scenario_3_with_2_nodes(self):
vms = [] instances = []
root = modelroot.ModelRoot() root = modelroot.ModelRoot()
# number of nodes # number of nodes
count_node = 2 node_count = 2
# define ressouce ( CPU, MEM disk, ... ) # define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz # 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem) root.create_resource(mem)
root.create_resource(num_cores) root.create_resource(num_cores)
root.create_resource(disk) root.create_resource(disk)
for i in range(0, count_node): for i in range(0, node_count):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132) mem.set_capacity(node, 132)
disk.set_capacity(node, 250) disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40) num_cores.set_capacity(node, 40)
root.add_hypervisor(node) root.add_node(node)
vm1 = modelvm.VM() instance1 = element.Instance()
vm1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc" instance1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm1, 2) mem.set_capacity(instance1, 2)
disk.set_capacity(vm1, 20) disk.set_capacity(instance1, 20)
num_cores.set_capacity(vm1, 10) num_cores.set_capacity(instance1, 10)
vms.append(vm1) instances.append(instance1)
root.add_vm(vm1) root.add_instance(instance1)
vm2 = modelvm.VM() instance2 = element.Instance()
vm2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517" instance2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517"
mem.set_capacity(vm2, 2) mem.set_capacity(instance2, 2)
disk.set_capacity(vm2, 20) disk.set_capacity(instance2, 20)
num_cores.set_capacity(vm2, 10) num_cores.set_capacity(instance2, 10)
vms.append(vm2) instances.append(instance2)
root.add_vm(vm2) root.add_instance(instance2)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"), root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid))) root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"), root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid))) root.get_instance_from_id(str(instance2.uuid)))
return root return root
def generate_scenario_4_with_1_hypervisor_no_vm(self): def generate_scenario_4_with_1_node_no_instance(self):
current_state_cluster = modelroot.ModelRoot() current_state_cluster = modelroot.ModelRoot()
# number of nodes # number of nodes
count_node = 1 node_count = 1
# define ressouce ( CPU, MEM disk, ... ) # define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz # 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem) current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores) current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk) current_state_cluster.create_resource(disk)
for i in range(0, count_node): for i in range(0, node_count):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 1) mem.set_capacity(node, 1)
disk.set_capacity(node, 1) disk.set_capacity(node, 1)
num_cores.set_capacity(node, 1) num_cores.set_capacity(node, 1)
current_state_cluster.add_hypervisor(node) current_state_cluster.add_node(node)
return current_state_cluster return current_state_cluster
def generate_scenario_5_with_vm_disk_0(self): def generate_scenario_5_with_instance_disk_0(self):
vms = [] instances = []
current_state_cluster = modelroot.ModelRoot() current_state_cluster = modelroot.ModelRoot()
# number of nodes # number of nodes
count_node = 1 node_count = 1
# number of vms # number of instances
count_vm = 1 instance_count = 1
# define ressouce ( CPU, MEM disk, ... ) # define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz # 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem) current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores) current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk) current_state_cluster.create_resource(disk)
for i in range(0, count_node): for i in range(0, node_count):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 4) mem.set_capacity(node, 4)
disk.set_capacity(node, 4) disk.set_capacity(node, 4)
num_cores.set_capacity(node, 4) num_cores.set_capacity(node, 4)
current_state_cluster.add_hypervisor(node) current_state_cluster.add_node(node)
for i in range(0, count_vm): for i in range(0, instance_count):
vm_uuid = "VM_{0}".format(i) instance_uuid = "INSTANCE_{0}".format(i)
vm = modelvm.VM() instance = element.Instance()
vm.uuid = vm_uuid instance.uuid = instance_uuid
mem.set_capacity(vm, 2) mem.set_capacity(instance, 2)
disk.set_capacity(vm, 0) disk.set_capacity(instance, 0)
num_cores.set_capacity(vm, 4) num_cores.set_capacity(instance, 4)
vms.append(vm) instances.append(instance)
current_state_cluster.add_vm(vm) current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map( current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"), current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0")) current_state_cluster.get_instance_from_id("INSTANCE_0"))
return current_state_cluster return current_state_cluster
def generate_scenario_6_with_2_hypervisors(self): def generate_scenario_6_with_2_nodes(self):
vms = [] instances = []
root = modelroot.ModelRoot() root = modelroot.ModelRoot()
# number of nodes # number of nodes
count_node = 2 node_count = 2
# define ressouce ( CPU, MEM disk, ... ) # define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz # 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem) root.create_resource(mem)
root.create_resource(num_cores) root.create_resource(num_cores)
root.create_resource(disk) root.create_resource(disk)
for i in range(0, count_node): for i in range(0, node_count):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132) mem.set_capacity(node, 132)
disk.set_capacity(node, 250) disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40) num_cores.set_capacity(node, 40)
root.add_hypervisor(node) root.add_node(node)
vm1 = modelvm.VM() instance1 = element.Instance()
vm1.uuid = "VM_1" instance1.uuid = "INSTANCE_1"
mem.set_capacity(vm1, 2) mem.set_capacity(instance1, 2)
disk.set_capacity(vm1, 20) disk.set_capacity(instance1, 20)
num_cores.set_capacity(vm1, 10) num_cores.set_capacity(instance1, 10)
vms.append(vm1) instances.append(instance1)
root.add_vm(vm1) root.add_instance(instance1)
vm11 = modelvm.VM() instance11 = element.Instance()
vm11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc" instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm11, 2) mem.set_capacity(instance11, 2)
disk.set_capacity(vm11, 20) disk.set_capacity(instance11, 20)
num_cores.set_capacity(vm11, 10) num_cores.set_capacity(instance11, 10)
vms.append(vm11) instances.append(instance11)
root.add_vm(vm11) root.add_instance(instance11)
vm2 = modelvm.VM() instance2 = element.Instance()
vm2.uuid = "VM_3" instance2.uuid = "INSTANCE_3"
mem.set_capacity(vm2, 2) mem.set_capacity(instance2, 2)
disk.set_capacity(vm2, 20) disk.set_capacity(instance2, 20)
num_cores.set_capacity(vm2, 10) num_cores.set_capacity(instance2, 10)
vms.append(vm2) instances.append(instance2)
root.add_vm(vm2) root.add_instance(instance2)
vm21 = modelvm.VM() instance21 = element.Instance()
vm21.uuid = "VM_4" instance21.uuid = "INSTANCE_4"
mem.set_capacity(vm21, 2) mem.set_capacity(instance21, 2)
disk.set_capacity(vm21, 20) disk.set_capacity(instance21, 20)
num_cores.set_capacity(vm21, 10) num_cores.set_capacity(instance21, 10)
vms.append(vm21) instances.append(instance21)
root.add_vm(vm21) root.add_instance(instance21)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"), root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid))) root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"), root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_vm_from_id(str(vm11.uuid))) root.get_instance_from_id(str(instance11.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"), root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid))) root.get_instance_from_id(str(instance2.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"), root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_vm_from_id(str(vm21.uuid))) root.get_instance_from_id(str(instance21.uuid)))
return root return root
def generate_scenario_7_with_2_hypervisors(self): def generate_scenario_7_with_2_nodes(self):
vms = [] instances = []
root = modelroot.ModelRoot() root = modelroot.ModelRoot()
# number of nodes # number of nodes
count_node = 2 count_node = 2
# define ressouce ( CPU, MEM disk, ... ) # define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory) mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz # 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores) num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk) disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem) root.create_resource(mem)
root.create_resource(num_cores) root.create_resource(num_cores)
@@ -329,54 +327,54 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node): for i in range(0, count_node):
node_uuid = "Node_{0}".format(i) node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor() node = element.ComputeNode()
node.uuid = node_uuid node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i) node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132) mem.set_capacity(node, 132)
disk.set_capacity(node, 250) disk.set_capacity(node, 250)
num_cores.set_capacity(node, 50) num_cores.set_capacity(node, 50)
root.add_hypervisor(node) root.add_node(node)
vm1 = modelvm.VM() instance1 = element.Instance()
vm1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906" instance1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906"
mem.set_capacity(vm1, 2) mem.set_capacity(instance1, 2)
disk.set_capacity(vm1, 20) disk.set_capacity(instance1, 20)
num_cores.set_capacity(vm1, 15) num_cores.set_capacity(instance1, 15)
vms.append(vm1) instances.append(instance1)
root.add_vm(vm1) root.add_instance(instance1)
vm11 = modelvm.VM() instance11 = element.Instance()
vm11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc" instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm11, 2) mem.set_capacity(instance11, 2)
disk.set_capacity(vm11, 20) disk.set_capacity(instance11, 20)
num_cores.set_capacity(vm11, 10) num_cores.set_capacity(instance11, 10)
vms.append(vm11) instances.append(instance11)
root.add_vm(vm11) root.add_instance(instance11)
vm2 = modelvm.VM() instance2 = element.Instance()
vm2.uuid = "VM_3" instance2.uuid = "INSTANCE_3"
mem.set_capacity(vm2, 2) mem.set_capacity(instance2, 2)
disk.set_capacity(vm2, 20) disk.set_capacity(instance2, 20)
num_cores.set_capacity(vm2, 10) num_cores.set_capacity(instance2, 10)
vms.append(vm2) instances.append(instance2)
root.add_vm(vm2) root.add_instance(instance2)
vm21 = modelvm.VM() instance21 = element.Instance()
vm21.uuid = "VM_4" instance21.uuid = "INSTANCE_4"
mem.set_capacity(vm21, 2) mem.set_capacity(instance21, 2)
disk.set_capacity(vm21, 20) disk.set_capacity(instance21, 20)
num_cores.set_capacity(vm21, 10) num_cores.set_capacity(instance21, 10)
vms.append(vm21) instances.append(instance21)
root.add_vm(vm21) root.add_instance(instance21)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"), root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid))) root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"), root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_vm_from_id(str(vm11.uuid))) root.get_instance_from_id(str(instance11.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"), root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid))) root.get_instance_from_id(str(instance2.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"), root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_vm_from_id(str(vm21.uuid))) root.get_instance_from_id(str(instance21.uuid)))
return root return root

View File

@@ -15,7 +15,6 @@
# implied. # implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#
import random import random
@@ -37,9 +36,9 @@ class FakerMetricsCollector(object):
elif meter_name == "hardware.memory.used": elif meter_name == "hardware.memory.used":
result = self.get_usage_node_ram(resource_id) result = self.get_usage_node_ram(resource_id)
elif meter_name == "cpu_util": elif meter_name == "cpu_util":
result = self.get_average_usage_vm_cpu(resource_id) result = self.get_average_usage_instance_cpu(resource_id)
elif meter_name == "memory.resident": elif meter_name == "memory.resident":
result = self.get_average_usage_vm_memory(resource_id) result = self.get_average_usage_instance_memory(resource_id)
elif meter_name == "hardware.ipmi.node.outlet_temperature": elif meter_name == "hardware.ipmi.node.outlet_temperature":
result = self.get_average_outlet_temperature(resource_id) result = self.get_average_outlet_temperature(resource_id)
elif meter_name == "hardware.ipmi.node.airflow": elif meter_name == "hardware.ipmi.node.airflow":
@@ -54,7 +53,7 @@ class FakerMetricsCollector(object):
aggregate='avg'): aggregate='avg'):
result = 0 result = 0
if meter_name == "cpu_util": if meter_name == "cpu_util":
result = self.get_average_usage_vm_cpu_wb(resource_id) result = self.get_average_usage_instance_cpu_wb(resource_id)
return result return result
def get_average_outlet_temperature(self, uuid): def get_average_outlet_temperature(self, uuid):
@@ -135,7 +134,7 @@ class FakerMetricsCollector(object):
mock['Node_6_hostname_6'] = 8 mock['Node_6_hostname_6'] = 8
mock['Node_19_hostname_19'] = 10 mock['Node_19_hostname_19'] = 10
# node 4 # node 4
mock['VM_7_hostname_7'] = 4 mock['INSTANCE_7_hostname_7'] = 4
mock['Node_0'] = 0.07 mock['Node_0'] = 0.07
mock['Node_1'] = 0.05 mock['Node_1'] = 0.05
@@ -149,7 +148,7 @@ class FakerMetricsCollector(object):
return float(mock[str(uuid)]) return float(mock[str(uuid)])
def get_average_usage_vm_cpu_wb(self, uuid): def get_average_usage_instance_cpu_wb(self, uuid):
"""The last VM CPU usage values to average """The last VM CPU usage values to average
:param uuid:00 :param uuid:00
@@ -162,14 +161,14 @@ class FakerMetricsCollector(object):
# Normalize # Normalize
mock = {} mock = {}
# node 0 # node 0
mock['VM_1'] = 80 mock['INSTANCE_1'] = 80
mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50
# node 1 # node 1
mock['VM_3'] = 20 mock['INSTANCE_3'] = 20
mock['VM_4'] = 10 mock['INSTANCE_4'] = 10
return float(mock[str(uuid)]) return float(mock[str(uuid)])
def get_average_usage_vm_cpu(self, uuid): def get_average_usage_instance_cpu(self, uuid):
"""The last VM CPU usage values to average """The last VM CPU usage values to average
:param uuid:00 :param uuid:00
@@ -182,66 +181,66 @@ class FakerMetricsCollector(object):
# Normalize # Normalize
mock = {} mock = {}
# node 0 # node 0
mock['VM_0'] = 7 mock['INSTANCE_0'] = 7
mock['VM_1'] = 7 mock['INSTANCE_1'] = 7
# node 1 # node 1
mock['VM_2'] = 10 mock['INSTANCE_2'] = 10
# node 2 # node 2
mock['VM_3'] = 5 mock['INSTANCE_3'] = 5
mock['VM_4'] = 5 mock['INSTANCE_4'] = 5
mock['VM_5'] = 10 mock['INSTANCE_5'] = 10
# node 3 # node 3
mock['VM_6'] = 8 mock['INSTANCE_6'] = 8
# node 4 # node 4
mock['VM_7'] = 4 mock['INSTANCE_7'] = 4
if uuid not in mock.keys(): if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4) # mock[uuid] = random.randint(1, 4)
mock[uuid] = 8 mock[uuid] = 8
return mock[str(uuid)] return mock[str(uuid)]
def get_average_usage_vm_memory(self, uuid): def get_average_usage_instance_memory(self, uuid):
mock = {} mock = {}
# node 0 # node 0
mock['VM_0'] = 2 mock['INSTANCE_0'] = 2
mock['VM_1'] = 5 mock['INSTANCE_1'] = 5
# node 1 # node 1
mock['VM_2'] = 5 mock['INSTANCE_2'] = 5
# node 2 # node 2
mock['VM_3'] = 8 mock['INSTANCE_3'] = 8
mock['VM_4'] = 5 mock['INSTANCE_4'] = 5
mock['VM_5'] = 16 mock['INSTANCE_5'] = 16
# node 3 # node 3
mock['VM_6'] = 8 mock['INSTANCE_6'] = 8
# node 4 # node 4
mock['VM_7'] = 4 mock['INSTANCE_7'] = 4
if uuid not in mock.keys(): if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4) # mock[uuid] = random.randint(1, 4)
mock[uuid] = 10 mock[uuid] = 10
return mock[str(uuid)] return mock[str(uuid)]
def get_average_usage_vm_disk(self, uuid): def get_average_usage_instance_disk(self, uuid):
mock = {} mock = {}
# node 0 # node 0
mock['VM_0'] = 2 mock['INSTANCE_0'] = 2
mock['VM_1'] = 2 mock['INSTANCE_1'] = 2
# node 1 # node 1
mock['VM_2'] = 2 mock['INSTANCE_2'] = 2
# node 2 # node 2
mock['VM_3'] = 10 mock['INSTANCE_3'] = 10
mock['VM_4'] = 15 mock['INSTANCE_4'] = 15
mock['VM_5'] = 20 mock['INSTANCE_5'] = 20
# node 3 # node 3
mock['VM_6'] = 8 mock['INSTANCE_6'] = 8
# node 4 # node 4
mock['VM_7'] = 4 mock['INSTANCE_7'] = 4
if uuid not in mock.keys(): if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4) # mock[uuid] = random.randint(1, 4)
@@ -249,7 +248,7 @@ class FakerMetricsCollector(object):
return mock[str(uuid)] return mock[str(uuid)]
def get_virtual_machine_capacity(self, vm_uuid): def get_virtual_machine_capacity(self, instance_uuid):
return random.randint(1, 4) return random.randint(1, 4)
def get_average_network_incomming(self, node): def get_average_network_incomming(self, node):

View File

@@ -30,7 +30,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector import faker_metrics_collector
class TestBasicConsolidation(base.BaseTestCase): class TestBasicConsolidation(base.TestCase):
def setUp(self): def setUp(self):
super(TestBasicConsolidation, self).setUp() super(TestBasicConsolidation, self).setUp()
@@ -58,63 +58,75 @@ class TestBasicConsolidation(base.BaseTestCase):
def test_cluster_size(self): def test_cluster_size(self):
size_cluster = len( size_cluster = len(
self.fake_cluster.generate_scenario_1().get_all_hypervisors()) self.fake_cluster.generate_scenario_1().get_all_compute_nodes())
size_cluster_assert = 5 size_cluster_assert = 5
self.assertEqual(size_cluster_assert, size_cluster) self.assertEqual(size_cluster_assert, size_cluster)
def test_basic_consolidation_score_hypervisor(self): def test_basic_consolidation_score_node(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
node_1_score = 0.023333333333333317 node_1_score = 0.023333333333333317
self.assertEqual(node_1_score, self.strategy.calculate_score_node( self.assertEqual(node_1_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_1"))) model.get_node_from_id("Node_1")))
node_2_score = 0.26666666666666666 node_2_score = 0.26666666666666666
self.assertEqual(node_2_score, self.strategy.calculate_score_node( self.assertEqual(node_2_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_2"))) model.get_node_from_id("Node_2")))
node_0_score = 0.023333333333333317 node_0_score = 0.023333333333333317
self.assertEqual(node_0_score, self.strategy.calculate_score_node( self.assertEqual(node_0_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_0"))) model.get_node_from_id("Node_0")))
def test_basic_consolidation_score_vm(self): def test_basic_consolidation_score_instance(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0") instance_0 = model.get_instance_from_id("INSTANCE_0")
vm_0_score = 0.023333333333333317 instance_0_score = 0.023333333333333317
self.assertEqual(vm_0_score, self.strategy.calculate_score_vm(vm_0)) self.assertEqual(
instance_0_score,
self.strategy.calculate_score_instance(instance_0))
vm_1 = model.get_vm_from_id("VM_1") instance_1 = model.get_instance_from_id("INSTANCE_1")
vm_1_score = 0.023333333333333317 instance_1_score = 0.023333333333333317
self.assertEqual(vm_1_score, self.strategy.calculate_score_vm(vm_1)) self.assertEqual(
vm_2 = model.get_vm_from_id("VM_2") instance_1_score,
vm_2_score = 0.033333333333333326 self.strategy.calculate_score_instance(instance_1))
self.assertEqual(vm_2_score, self.strategy.calculate_score_vm(vm_2)) instance_2 = model.get_instance_from_id("INSTANCE_2")
vm_6 = model.get_vm_from_id("VM_6") instance_2_score = 0.033333333333333326
vm_6_score = 0.02666666666666669 self.assertEqual(
self.assertEqual(vm_6_score, self.strategy.calculate_score_vm(vm_6)) instance_2_score,
vm_7 = model.get_vm_from_id("VM_7") self.strategy.calculate_score_instance(instance_2))
vm_7_score = 0.013333333333333345 instance_6 = model.get_instance_from_id("INSTANCE_6")
self.assertEqual(vm_7_score, self.strategy.calculate_score_vm(vm_7)) instance_6_score = 0.02666666666666669
self.assertEqual(
instance_6_score,
self.strategy.calculate_score_instance(instance_6))
instance_7 = model.get_instance_from_id("INSTANCE_7")
instance_7_score = 0.013333333333333345
self.assertEqual(
instance_7_score,
self.strategy.calculate_score_instance(instance_7))
def test_basic_consolidation_score_vm_disk(self): def test_basic_consolidation_score_instance_disk(self):
model = self.fake_cluster.generate_scenario_5_with_vm_disk_0() model = self.fake_cluster.generate_scenario_5_with_instance_disk_0()
self.m_model.return_value = model self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0") instance_0 = model.get_instance_from_id("INSTANCE_0")
vm_0_score = 0.023333333333333355 instance_0_score = 0.023333333333333355
self.assertEqual(vm_0_score, self.strategy.calculate_score_vm(vm_0, )) self.assertEqual(
instance_0_score,
self.strategy.calculate_score_instance(instance_0, ))
def test_basic_consolidation_weight(self): def test_basic_consolidation_weight(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0") instance_0 = model.get_instance_from_id("INSTANCE_0")
cores = 16 cores = 16
# 80 Go # 80 Go
disk = 80 disk = 80
# mem 8 Go # mem 8 Go
mem = 8 mem = 8
vm_0_weight_assert = 3.1999999999999997 instance_0_weight_assert = 3.1999999999999997
self.assertEqual( self.assertEqual(
vm_0_weight_assert, instance_0_weight_assert,
self.strategy.calculate_weight(vm_0, cores, disk, mem)) self.strategy.calculate_weight(instance_0, cores, disk, mem))
def test_calculate_migration_efficacy(self): def test_calculate_migration_efficacy(self):
self.strategy.calculate_migration_efficacy() self.strategy.calculate_migration_efficacy()
@@ -130,28 +142,28 @@ class TestBasicConsolidation(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute) self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_check_migration(self): def test_check_migration(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
all_vms = model.get_all_vms() all_instances = model.get_all_instances()
all_hyps = model.get_all_hypervisors() all_nodes = model.get_all_compute_nodes()
vm0 = all_vms[list(all_vms.keys())[0]] instance0 = all_instances[list(all_instances.keys())[0]]
hyp0 = all_hyps[list(all_hyps.keys())[0]] node0 = all_nodes[list(all_nodes.keys())[0]]
self.strategy.check_migration(hyp0, hyp0, vm0) self.strategy.check_migration(node0, node0, instance0)
def test_threshold(self): def test_threshold(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
all_hyps = model.get_all_hypervisors() all_nodes = model.get_all_compute_nodes()
hyp0 = all_hyps[list(all_hyps.keys())[0]] node0 = all_nodes[list(all_nodes.keys())[0]]
self.assertFalse(self.strategy.check_threshold( self.assertFalse(self.strategy.check_threshold(
hyp0, 1000, 1000, 1000)) node0, 1000, 1000, 1000))
def test_basic_consolidation_migration(self): def test_basic_consolidation_migration(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
@@ -163,10 +175,10 @@ class TestBasicConsolidation(base.BaseTestCase):
expected_power_state = 0 expected_power_state = 0
num_migrations = actions_counter.get("migrate", 0) num_migrations = actions_counter.get("migrate", 0)
num_hypervisor_state_change = actions_counter.get( num_node_state_change = actions_counter.get(
"change_hypervisor_state", 0) "change_node_state", 0)
self.assertEqual(expected_num_migrations, num_migrations) self.assertEqual(expected_num_migrations, num_migrations)
self.assertEqual(expected_power_state, num_hypervisor_state_change) self.assertEqual(expected_power_state, num_node_state_change)
def test_exception_stale_cdm(self): def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale() self.fake_cluster.set_cluster_data_model_as_stale()
@@ -180,7 +192,7 @@ class TestBasicConsolidation(base.BaseTestCase):
def test_execute_no_workload(self): def test_execute_no_workload(self):
model = ( model = (
self.fake_cluster self.fake_cluster
.generate_scenario_4_with_1_hypervisor_no_vm()) .generate_scenario_4_with_1_node_no_instance())
self.m_model.return_value = model self.m_model.return_value = model
with mock.patch.object( with mock.patch.object(
@@ -191,7 +203,7 @@ class TestBasicConsolidation(base.BaseTestCase):
self.assertEqual(0, solution.efficacy.global_efficacy.value) self.assertEqual(0, solution.efficacy.global_efficacy.value)
def test_check_parameters(self): def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
loader = default.DefaultActionLoader() loader = default.DefaultActionLoader()

View File

@@ -52,7 +52,7 @@ class TestDummyStrategy(base.TestCase):
self.assertEqual(3, len(solution.actions)) self.assertEqual(3, len(solution.actions))
def test_check_parameters(self): def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'})

View File

@@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default from watcher.applier.loading import default
from watcher.common import exception from watcher.common import exception
from watcher.common import utils from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies from watcher.decision_engine.strategy import strategies
from watcher.tests import base from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \ from watcher.tests.decision_engine.strategy.strategies \
@@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector import faker_metrics_collector
class TestOutletTempControl(base.BaseTestCase): class TestOutletTempControl(base.TestCase):
def setUp(self): def setUp(self):
super(TestOutletTempControl, self).setUp() super(TestOutletTempControl, self).setUp()
@@ -63,41 +63,41 @@ class TestOutletTempControl(base.BaseTestCase):
self.strategy.threshold = 34.3 self.strategy.threshold = 34.3
def test_calc_used_res(self): def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0') node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores) cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory) cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk) cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = self.strategy.calc_used_res( cores_used, mem_used, disk_used = self.strategy.calc_used_res(
hypervisor, cap_cores, cap_mem, cap_disk) node, cap_cores, cap_mem, cap_disk)
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used)) self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
def test_group_hosts_by_outlet_temp(self): def test_group_hosts_by_outlet_temp(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp() n1, n2 = self.strategy.group_hosts_by_outlet_temp()
self.assertEqual('Node_1', h1[0]['hv'].uuid) self.assertEqual('Node_1', n1[0]['node'].uuid)
self.assertEqual('Node_0', h2[0]['hv'].uuid) self.assertEqual('Node_0', n2[0]['node'].uuid)
def test_choose_vm_to_migrate(self): def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp() n1, n2 = self.strategy.group_hosts_by_outlet_temp()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1) instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual('Node_1', vm_to_mig[0].uuid) self.assertEqual('Node_1', instance_to_mig[0].uuid)
self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517', self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517',
vm_to_mig[1].uuid) instance_to_mig[1].uuid)
def test_filter_dest_servers(self): def test_filter_dest_servers(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp() n1, n2 = self.strategy.group_hosts_by_outlet_temp()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1) instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
dest_hosts = self.strategy.filter_dest_servers(h2, vm_to_mig[1]) dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1])
self.assertEqual(1, len(dest_hosts)) self.assertEqual(1, len(dest_hosts))
self.assertEqual('Node_0', dest_hosts[0]['hv'].uuid) self.assertEqual('Node_0', dest_hosts[0]['node'].uuid)
def test_exception_model(self): def test_exception_model(self):
self.m_model.return_value = None self.m_model.return_value = None
@@ -123,14 +123,14 @@ class TestOutletTempControl(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute) self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self): def test_execute_no_workload(self):
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm() model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
self.assertEqual([], solution.actions) self.assertEqual([], solution.actions)
def test_execute(self): def test_execute(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
actions_counter = collections.Counter( actions_counter = collections.Counter(
@@ -140,7 +140,7 @@ class TestOutletTempControl(base.BaseTestCase):
self.assertEqual(1, num_migrations) self.assertEqual(1, num_migrations)
def test_check_parameters(self): def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors() model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
loader = default.DefaultActionLoader() loader = default.DefaultActionLoader()

View File

@@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default from watcher.applier.loading import default
from watcher.common import exception from watcher.common import exception
from watcher.common import utils from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies from watcher.decision_engine.strategy import strategies
from watcher.tests import base from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \ from watcher.tests.decision_engine.strategy.strategies \
@@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector import faker_metrics_collector
class TestUniformAirflow(base.BaseTestCase): class TestUniformAirflow(base.TestCase):
def setUp(self): def setUp(self):
super(TestUniformAirflow, self).setUp() super(TestUniformAirflow, self).setUp()
@@ -68,72 +68,73 @@ class TestUniformAirflow(base.BaseTestCase):
self._period = 300 self._period = 300
def test_calc_used_res(self): def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors() model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0') node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores) cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory) cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk) cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = self.\ cores_used, mem_used, disk_used = self.\
strategy.calculate_used_resource( strategy.calculate_used_resource(
hypervisor, cap_cores, cap_mem, cap_disk) node, cap_cores, cap_mem, cap_disk)
self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40)) self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40))
def test_group_hosts_by_airflow(self): def test_group_hosts_by_airflow(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors() model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
self.strategy.threshold_airflow = 300 self.strategy.threshold_airflow = 300
h1, h2 = self.strategy.group_hosts_by_airflow() n1, n2 = self.strategy.group_hosts_by_airflow()
# print h1, h2, avg, w_map # print n1, n2, avg, w_map
self.assertEqual(h1[0]['hv'].uuid, 'Node_0') self.assertEqual(n1[0]['node'].uuid, 'Node_0')
self.assertEqual(h2[0]['hv'].uuid, 'Node_1') self.assertEqual(n2[0]['node'].uuid, 'Node_1')
def test_choose_vm_to_migrate(self): def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors() model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
self.strategy.threshold_airflow = 300 self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22 self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow() n1, n2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1) instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual(vm_to_mig[0].uuid, 'Node_0') self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(vm_to_mig[1]), 1) self.assertEqual(len(instance_to_mig[1]), 1)
self.assertEqual(vm_to_mig[1][0].uuid, self.assertEqual(instance_to_mig[1][0].uuid,
"cae81432-1631-4d4e-b29c-6f3acdcde906") "cae81432-1631-4d4e-b29c-6f3acdcde906")
def test_choose_vm_to_migrate_all(self): def test_choose_instance_to_migrate_all(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors() model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
self.strategy.threshold_airflow = 300 self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25 self.strategy.threshold_inlet_t = 25
h1, h2 = self.strategy.group_hosts_by_airflow() n1, n2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1) instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual(vm_to_mig[0].uuid, 'Node_0') self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(vm_to_mig[1]), 2) self.assertEqual(len(instance_to_mig[1]), 2)
self.assertEqual(vm_to_mig[1][1].uuid, self.assertEqual(instance_to_mig[1][1].uuid,
"73b09e16-35b7-4922-804e-e8f5d9b740fc") "73b09e16-35b7-4922-804e-e8f5d9b740fc")
def test_choose_vm_notfound(self): def test_choose_instance_notfound(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors() model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
self.strategy.threshold_airflow = 300 self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22 self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow() n1, n2 = self.strategy.group_hosts_by_airflow()
vms = model.get_all_vms() instances = model.get_all_instances()
vms.clear() instances.clear()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1) instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertIsNone(vm_to_mig) self.assertIsNone(instance_to_mig)
def test_filter_destination_hosts(self): def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors() model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
self.strategy.threshold_airflow = 300 self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22 self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow() n1, n2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1) instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
dest_hosts = self.strategy.filter_destination_hosts(h2, vm_to_mig[1]) dest_hosts = self.strategy.filter_destination_hosts(
n2, instance_to_mig[1])
self.assertEqual(len(dest_hosts), 1) self.assertEqual(len(dest_hosts), 1)
self.assertEqual(dest_hosts[0]['hv'].uuid, 'Node_1') self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1')
self.assertEqual(dest_hosts[0]['vm'].uuid, self.assertEqual(dest_hosts[0]['instance'].uuid,
'cae81432-1631-4d4e-b29c-6f3acdcde906') 'cae81432-1631-4d4e-b29c-6f3acdcde906')
def test_exception_model(self): def test_exception_model(self):
@@ -163,7 +164,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.strategy.threshold_airflow = 300 self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25 self.strategy.threshold_inlet_t = 25
self.strategy.threshold_power = 300 self.strategy.threshold_power = 300
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm() model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
self.assertEqual([], solution.actions) self.assertEqual([], solution.actions)
@@ -172,7 +173,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.strategy.threshold_airflow = 300 self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25 self.strategy.threshold_inlet_t = 25
self.strategy.threshold_power = 300 self.strategy.threshold_power = 300
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors() model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
actions_counter = collections.Counter( actions_counter = collections.Counter(
@@ -182,7 +183,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.assertEqual(num_migrations, 2) self.assertEqual(num_migrations, 2)
def test_check_parameters(self): def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors() model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
loader = default.DefaultActionLoader() loader = default.DefaultActionLoader()

View File

@@ -28,7 +28,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_cluster_and_metrics import faker_cluster_and_metrics
class TestVMWorkloadConsolidation(base.BaseTestCase): class TestVMWorkloadConsolidation(base.TestCase):
def setUp(self): def setUp(self):
super(TestVMWorkloadConsolidation, self).setUp() super(TestVMWorkloadConsolidation, self).setUp()
@@ -65,41 +65,42 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
exception.ClusterStateNotDefined, exception.ClusterStateNotDefined,
self.strategy.execute) self.strategy.execute)
def test_get_vm_utilization(self): def test_get_instance_utilization(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
vm_0 = model.get_vm_from_id("VM_0") instance_0 = model.get_instance_from_id("INSTANCE_0")
vm_util = dict(cpu=1.0, ram=1, disk=10) instance_util = dict(cpu=1.0, ram=1, disk=10)
self.assertEqual(vm_util, self.assertEqual(
self.strategy.get_vm_utilization(vm_0.uuid, model)) instance_util,
self.strategy.get_instance_utilization(instance_0.uuid, model))
def test_get_hypervisor_utilization(self): def test_get_node_utilization(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
node_0 = model.get_hypervisor_from_id("Node_0") node_0 = model.get_node_from_id("Node_0")
node_util = dict(cpu=1.0, ram=1, disk=10) node_util = dict(cpu=1.0, ram=1, disk=10)
self.assertEqual( self.assertEqual(
node_util, node_util,
self.strategy.get_hypervisor_utilization(node_0, model)) self.strategy.get_node_utilization(node_0, model))
def test_get_hypervisor_capacity(self): def test_get_node_capacity(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
node_0 = model.get_hypervisor_from_id("Node_0") node_0 = model.get_node_from_id("Node_0")
node_util = dict(cpu=40, ram=64, disk=250) node_util = dict(cpu=40, ram=64, disk=250)
self.assertEqual(node_util, self.assertEqual(node_util,
self.strategy.get_hypervisor_capacity(node_0, model)) self.strategy.get_node_capacity(node_0, model))
def test_get_relative_hypervisor_utilization(self): def test_get_relative_node_utilization(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
hypervisor = model.get_hypervisor_from_id('Node_0') node = model.get_node_from_id('Node_0')
rhu = self.strategy.get_relative_hypervisor_utilization( rhu = self.strategy.get_relative_node_utilization(
hypervisor, model) node, model)
expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025} expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025}
self.assertEqual(expected_rhu, rhu) self.assertEqual(expected_rhu, rhu)
@@ -115,85 +116,85 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0') n1 = model.get_node_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1') n2 = model.get_node_from_id('Node_1')
vm_uuid = 'VM_0' instance_uuid = 'INSTANCE_0'
self.strategy.add_migration(vm_uuid, h1, h2, model) self.strategy.add_migration(instance_uuid, n1, n2, model)
self.assertEqual(1, len(self.strategy.solution.actions)) self.assertEqual(1, len(self.strategy.solution.actions))
expected = {'action_type': 'migrate', expected = {'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid, 'input_parameters': {'destination_node': n2.uuid,
'src_hypervisor': h1.uuid, 'source_node': n1.uuid,
'migration_type': 'live', 'migration_type': 'live',
'resource_id': vm_uuid}} 'resource_id': instance_uuid}}
self.assertEqual(expected, self.strategy.solution.actions[0]) self.assertEqual(expected, self.strategy.solution.actions[0])
def test_is_overloaded(self): def test_is_overloaded(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0') n1 = model.get_node_from_id('Node_0')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc) res = self.strategy.is_overloaded(n1, model, cc)
self.assertFalse(res) self.assertFalse(res)
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc) res = self.strategy.is_overloaded(n1, model, cc)
self.assertFalse(res) self.assertFalse(res)
cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc) res = self.strategy.is_overloaded(n1, model, cc)
self.assertTrue(res) self.assertTrue(res)
def test_vm_fits(self): def test_instance_fits(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_1') n = model.get_node_from_id('Node_1')
vm_uuid = 'VM_0' instance_uuid = 'INSTANCE_0'
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.vm_fits(vm_uuid, h, model, cc) res = self.strategy.instance_fits(instance_uuid, n, model, cc)
self.assertTrue(res) self.assertTrue(res)
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.vm_fits(vm_uuid, h, model, cc) res = self.strategy.instance_fits(instance_uuid, n, model, cc)
self.assertFalse(res) self.assertFalse(res)
def test_add_action_enable_hypervisor(self): def test_add_action_enable_compute_node(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_0') n = model.get_node_from_id('Node_0')
self.strategy.add_action_enable_hypervisor(h) self.strategy.add_action_enable_compute_node(n)
expected = [{'action_type': 'change_nova_service_state', expected = [{'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'enabled', 'input_parameters': {'state': 'enabled',
'resource_id': 'Node_0'}}] 'resource_id': 'Node_0'}}]
self.assertEqual(expected, self.strategy.solution.actions) self.assertEqual(expected, self.strategy.solution.actions)
def test_add_action_disable_hypervisor(self): def test_add_action_disable_node(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_0') n = model.get_node_from_id('Node_0')
self.strategy.add_action_disable_hypervisor(h) self.strategy.add_action_disable_node(n)
expected = [{'action_type': 'change_nova_service_state', expected = [{'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'disabled', 'input_parameters': {'state': 'disabled',
'resource_id': 'Node_0'}}] 'resource_id': 'Node_0'}}]
self.assertEqual(expected, self.strategy.solution.actions) self.assertEqual(expected, self.strategy.solution.actions)
def test_disable_unused_hypervisors(self): def test_disable_unused_nodes(self):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0') n1 = model.get_node_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1') n2 = model.get_node_from_id('Node_1')
vm_uuid = 'VM_0' instance_uuid = 'INSTANCE_0'
self.strategy.disable_unused_hypervisors(model) self.strategy.disable_unused_nodes(model)
self.assertEqual(0, len(self.strategy.solution.actions)) self.assertEqual(0, len(self.strategy.solution.actions))
# Migrate VM to free the hypervisor # Migrate VM to free the node
self.strategy.add_migration(vm_uuid, h1, h2, model) self.strategy.add_migration(instance_uuid, n1, n2, model)
self.strategy.disable_unused_hypervisors(model) self.strategy.disable_unused_nodes(model)
expected = {'action_type': 'change_nova_service_state', expected = {'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'disabled', 'input_parameters': {'state': 'disabled',
'resource_id': 'Node_0'}} 'resource_id': 'Node_0'}}
@@ -213,39 +214,39 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_1() model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0') n1 = model.get_node_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1') n2 = model.get_node_from_id('Node_1')
vm_uuid = 'VM_0' instance_uuid = 'INSTANCE_0'
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.consolidation_phase(model, cc) self.strategy.consolidation_phase(model, cc)
expected = [{'action_type': 'migrate', expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid, 'input_parameters': {'destination_node': n2.uuid,
'src_hypervisor': h1.uuid, 'source_node': n1.uuid,
'migration_type': 'live', 'migration_type': 'live',
'resource_id': vm_uuid}}] 'resource_id': instance_uuid}}]
self.assertEqual(expected, self.strategy.solution.actions) self.assertEqual(expected, self.strategy.solution.actions)
def test_strategy(self): def test_strategy(self):
model = self.fake_cluster.generate_scenario_2() model = self.fake_cluster.generate_scenario_2()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0') n1 = model.get_node_from_id('Node_0')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.offload_phase(model, cc) self.strategy.offload_phase(model, cc)
self.strategy.consolidation_phase(model, cc) self.strategy.consolidation_phase(model, cc)
self.strategy.optimize_solution(model) self.strategy.optimize_solution(model)
h2 = self.strategy.solution.actions[0][ n2 = self.strategy.solution.actions[0][
'input_parameters']['dst_hypervisor'] 'input_parameters']['destination_node']
expected = [{'action_type': 'migrate', expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2, 'input_parameters': {'destination_node': n2,
'src_hypervisor': h1.uuid, 'source_node': n1.uuid,
'migration_type': 'live', 'migration_type': 'live',
'resource_id': 'VM_3'}}, 'resource_id': 'INSTANCE_3'}},
{'action_type': 'migrate', {'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2, 'input_parameters': {'destination_node': n2,
'src_hypervisor': h1.uuid, 'source_node': n1.uuid,
'migration_type': 'live', 'migration_type': 'live',
'resource_id': 'VM_1'}}] 'resource_id': 'INSTANCE_1'}}]
self.assertEqual(expected, self.strategy.solution.actions) self.assertEqual(expected, self.strategy.solution.actions)
@@ -253,32 +254,32 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_3() model = self.fake_cluster.generate_scenario_3()
self.m_model.return_value = model self.m_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0') n1 = model.get_node_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1') n2 = model.get_node_from_id('Node_1')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.offload_phase(model, cc) self.strategy.offload_phase(model, cc)
expected = [{'action_type': 'migrate', expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid, 'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live', 'migration_type': 'live',
'resource_id': 'VM_6', 'resource_id': 'INSTANCE_6',
'src_hypervisor': h1.uuid}}, 'source_node': n1.uuid}},
{'action_type': 'migrate', {'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid, 'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live', 'migration_type': 'live',
'resource_id': 'VM_7', 'resource_id': 'INSTANCE_7',
'src_hypervisor': h1.uuid}}, 'source_node': n1.uuid}},
{'action_type': 'migrate', {'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid, 'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live', 'migration_type': 'live',
'resource_id': 'VM_8', 'resource_id': 'INSTANCE_8',
'src_hypervisor': h1.uuid}}] 'source_node': n1.uuid}}]
self.assertEqual(expected, self.strategy.solution.actions) self.assertEqual(expected, self.strategy.solution.actions)
self.strategy.consolidation_phase(model, cc) self.strategy.consolidation_phase(model, cc)
expected.append({'action_type': 'migrate', expected.append({'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h1.uuid, 'input_parameters': {'destination_node': n1.uuid,
'migration_type': 'live', 'migration_type': 'live',
'resource_id': 'VM_7', 'resource_id': 'INSTANCE_7',
'src_hypervisor': h2.uuid}}) 'source_node': n2.uuid}})
self.assertEqual(expected, self.strategy.solution.actions) self.assertEqual(expected, self.strategy.solution.actions)
self.strategy.optimize_solution(model) self.strategy.optimize_solution(model)
del expected[3] del expected[3]

View File

@@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default from watcher.applier.loading import default
from watcher.common import exception from watcher.common import exception
from watcher.common import utils from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies from watcher.decision_engine.strategy import strategies
from watcher.tests import base from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \ from watcher.tests.decision_engine.strategy.strategies \
@@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector import faker_metrics_collector
class TestWorkloadBalance(base.BaseTestCase): class TestWorkloadBalance(base.TestCase):
def setUp(self): def setUp(self):
super(TestWorkloadBalance, self).setUp() super(TestWorkloadBalance, self).setUp()
@@ -59,59 +59,64 @@ class TestWorkloadBalance(base.BaseTestCase):
self.strategy = strategies.WorkloadBalance(config=mock.Mock()) self.strategy = strategies.WorkloadBalance(config=mock.Mock())
self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'threshold': 25.0, self.strategy.input_parameters.update({'threshold': 25.0,
'period': 300}) 'period': 300})
self.strategy.threshold = 25.0 self.strategy.threshold = 25.0
self.strategy._period = 300 self.strategy._period = 300
def test_calc_used_res(self): def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors() model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0') node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores) cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory) cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk) cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = ( cores_used, mem_used, disk_used = (
self.strategy.calculate_used_resource( self.strategy.calculate_used_resource(
hypervisor, cap_cores, cap_mem, cap_disk)) node, cap_cores, cap_mem, cap_disk))
self.assertEqual((cores_used, mem_used, disk_used), (20, 4, 40)) self.assertEqual((cores_used, mem_used, disk_used), (20, 4, 40))
def test_group_hosts_by_cpu_util(self): def test_group_hosts_by_cpu_util(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors() model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
self.strategy.threshold = 30 self.strategy.threshold = 30
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util() n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
self.assertEqual(h1[0]['hv'].uuid, 'Node_0') self.assertEqual(n1[0]['node'].uuid, 'Node_0')
self.assertEqual(h2[0]['hv'].uuid, 'Node_1') self.assertEqual(n2[0]['node'].uuid, 'Node_1')
self.assertEqual(avg, 8.0) self.assertEqual(avg, 8.0)
def test_choose_vm_to_migrate(self): def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors() model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util() n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map) instance_to_mig = self.strategy.choose_instance_to_migrate(
self.assertEqual(vm_to_mig[0].uuid, 'Node_0') n1, avg, w_map)
self.assertEqual(vm_to_mig[1].uuid, self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(instance_to_mig[1].uuid,
"73b09e16-35b7-4922-804e-e8f5d9b740fc") "73b09e16-35b7-4922-804e-e8f5d9b740fc")
def test_choose_vm_notfound(self): def test_choose_instance_notfound(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors() model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util() n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
vms = model.get_all_vms() instances = model.get_all_instances()
vms.clear() instances.clear()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map) instance_to_mig = self.strategy.choose_instance_to_migrate(
self.assertIsNone(vm_to_mig) n1, avg, w_map)
self.assertIsNone(instance_to_mig)
def test_filter_destination_hosts(self): def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors() model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util() self.strategy.ceilometer = mock.MagicMock(
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map) statistic_aggregation=self.fake_metrics.mock_get_statistics_wb)
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
instance_to_mig = self.strategy.choose_instance_to_migrate(
n1, avg, w_map)
dest_hosts = self.strategy.filter_destination_hosts( dest_hosts = self.strategy.filter_destination_hosts(
h2, vm_to_mig[1], avg, w_map) n2, instance_to_mig[1], avg, w_map)
self.assertEqual(len(dest_hosts), 1) self.assertEqual(len(dest_hosts), 1)
self.assertEqual(dest_hosts[0]['hv'].uuid, 'Node_1') self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1')
def test_exception_model(self): def test_exception_model(self):
self.m_model.return_value = None self.m_model.return_value = None
@@ -137,13 +142,13 @@ class TestWorkloadBalance(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute) self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self): def test_execute_no_workload(self):
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm() model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
self.assertEqual([], solution.actions) self.assertEqual([], solution.actions)
def test_execute(self): def test_execute(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors() model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
actions_counter = collections.Counter( actions_counter = collections.Counter(
@@ -153,7 +158,7 @@ class TestWorkloadBalance(base.BaseTestCase):
self.assertEqual(num_migrations, 1) self.assertEqual(num_migrations, 1)
def test_check_parameters(self): def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors() model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model self.m_model.return_value = model
solution = self.strategy.execute() solution = self.strategy.execute()
loader = default.DefaultActionLoader() loader = default.DefaultActionLoader()

View File

@@ -28,7 +28,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector import faker_metrics_collector
class TestWorkloadStabilization(base.BaseTestCase): class TestWorkloadStabilization(base.TestCase):
def setUp(self): def setUp(self):
super(TestWorkloadStabilization, self).setUp() super(TestWorkloadStabilization, self).setUp()
@@ -63,11 +63,13 @@ class TestWorkloadStabilization(base.BaseTestCase):
statistic_aggregation=self.fake_metrics.mock_get_statistics) statistic_aggregation=self.fake_metrics.mock_get_statistics)
self.strategy = strategies.WorkloadStabilization(config=mock.Mock()) self.strategy = strategies.WorkloadStabilization(config=mock.Mock())
def test_get_vm_load(self): def test_get_instance_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1() self.m_model.return_value = self.fake_cluster.generate_scenario_1()
vm_0_dict = {'uuid': 'VM_0', 'vcpus': 10, instance_0_dict = {
'cpu_util': 7, 'memory.resident': 2} 'uuid': 'INSTANCE_0', 'vcpus': 10,
self.assertEqual(vm_0_dict, self.strategy.get_vm_load("VM_0")) 'cpu_util': 7, 'memory.resident': 2}
self.assertEqual(
instance_0_dict, self.strategy.get_instance_load("INSTANCE_0"))
def test_normalize_hosts_load(self): def test_normalize_hosts_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1() self.m_model.return_value = self.fake_cluster.generate_scenario_1()
@@ -109,7 +111,7 @@ class TestWorkloadStabilization(base.BaseTestCase):
self.m_model.return_value = self.fake_cluster.generate_scenario_1() self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.assertEqual( self.assertEqual(
self.strategy.calculate_migration_case( self.strategy.calculate_migration_case(
self.hosts_load_assert, "VM_5", self.hosts_load_assert, "INSTANCE_5",
"Node_2", "Node_1")[-1]["Node_1"], "Node_2", "Node_1")[-1]["Node_1"],
{'cpu_util': 2.55, 'memory.resident': 21, 'vcpus': 40}) {'cpu_util': 2.55, 'memory.resident': 21, 'vcpus': 40})
@@ -131,20 +133,25 @@ class TestWorkloadStabilization(base.BaseTestCase):
self.m_model.return_value = self.fake_cluster.generate_scenario_1() self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2} self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2}
self.strategy.simulate_migrations = mock.Mock( self.strategy.simulate_migrations = mock.Mock(
return_value=[{'vm': 'VM_4', 's_host': 'Node_2', 'host': 'Node_1'}] return_value=[
{'instance': 'INSTANCE_4', 's_host': 'Node_2',
'host': 'Node_1'}]
) )
with mock.patch.object(self.strategy, 'migrate') as mock_migration: with mock.patch.object(self.strategy, 'migrate') as mock_migration:
self.strategy.execute() self.strategy.execute()
mock_migration.assert_called_once_with( mock_migration.assert_called_once_with(
'VM_4', 'Node_2', 'Node_1') 'INSTANCE_4', 'Node_2', 'Node_1')
def test_execute_multiply_migrations(self): def test_execute_multiply_migrations(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1() self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.00001, self.strategy.thresholds = {'cpu_util': 0.00001,
'memory.resident': 0.0001} 'memory.resident': 0.0001}
self.strategy.simulate_migrations = mock.Mock( self.strategy.simulate_migrations = mock.Mock(
return_value=[{'vm': 'VM_4', 's_host': 'Node_2', 'host': 'Node_1'}, return_value=[
{'vm': 'VM_3', 's_host': 'Node_2', 'host': 'Node_3'}] {'instance': 'INSTANCE_4', 's_host': 'Node_2',
'host': 'Node_1'},
{'instance': 'INSTANCE_3', 's_host': 'Node_2',
'host': 'Node_3'}]
) )
with mock.patch.object(self.strategy, 'migrate') as mock_migrate: with mock.patch.object(self.strategy, 'migrate') as mock_migrate:
self.strategy.execute() self.strategy.execute()