remove id field from CDM
There are three related fields (id, uuid and hostname) in ComputeNode [1]. According to [2], since nova API microversion 2.53 the id of a hypervisor is reported as a UUID, and service.host is equal to the hypervisor name for a compute node. Therefore we can remove the id field, keep only uuid, and set uuid from node.id. [1]:https://github.com/openstack/watcher/blob/master/watcher/decision_engine/model/collector/nova.py#L306 [2]:https://developer.openstack.org/api-ref/compute/?expanded=list-hypervisors-details-detail#list-hypervisors-details Change-Id: Ie1d1ad56808270d936ec25186061f7f12cc49fdc Closes-Bug: #1835192 Depends-on: I752fbfa560313e28e87d83e46431c283b4db4f23 Depends-on: I0975500f359de92b6d6fdea2e01614cf0ba73f05
This commit is contained in:
@@ -50,6 +50,7 @@ class ChangeNovaServiceState(base.BaseAction):
|
|||||||
|
|
||||||
STATE = 'state'
|
STATE = 'state'
|
||||||
REASON = 'disabled_reason'
|
REASON = 'disabled_reason'
|
||||||
|
RESOURCE_NAME = 'resource_name'
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def schema(self):
|
def schema(self):
|
||||||
@@ -82,7 +83,7 @@ class ChangeNovaServiceState(base.BaseAction):
|
|||||||
|
|
||||||
@property
|
@property
|
||||||
def host(self):
|
def host(self):
|
||||||
return self.resource_id
|
return self.input_parameters.get(self.RESOURCE_NAME)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def state(self):
|
def state(self):
|
||||||
|
|||||||
@@ -175,7 +175,7 @@ class CeilometerHelper(base.DataSourceBase):
|
|||||||
|
|
||||||
resource_id = resource.uuid
|
resource_id = resource.uuid
|
||||||
if resource_type == 'compute_node':
|
if resource_type == 'compute_node':
|
||||||
resource_id = "%s_%s" % (resource.uuid, resource.hostname)
|
resource_id = "%s_%s" % (resource.hostname, resource.hostname)
|
||||||
|
|
||||||
query = self.build_query(
|
query = self.build_query(
|
||||||
resource_id=resource_id, start_time=start_time, end_time=end_time)
|
resource_id=resource_id, start_time=start_time, end_time=end_time)
|
||||||
|
|||||||
@@ -84,7 +84,7 @@ class GnocchiHelper(base.DataSourceBase):
|
|||||||
|
|
||||||
resource_id = resource.uuid
|
resource_id = resource.uuid
|
||||||
if resource_type == 'compute_node':
|
if resource_type == 'compute_node':
|
||||||
resource_id = "%s_%s" % (resource.uuid, resource.hostname)
|
resource_id = "%s_%s" % (resource.hostname, resource.hostname)
|
||||||
kwargs = dict(query={"=": {"original_resource_id": resource_id}},
|
kwargs = dict(query={"=": {"original_resource_id": resource_id}},
|
||||||
limit=1)
|
limit=1)
|
||||||
resources = self.query_retry(
|
resources = self.query_retry(
|
||||||
|
|||||||
@@ -349,9 +349,9 @@ class NovaModelBuilder(base.BaseModelBuilder):
|
|||||||
|
|
||||||
# build up the compute node.
|
# build up the compute node.
|
||||||
node_attributes = {
|
node_attributes = {
|
||||||
"id": node.id,
|
# The id of the hypervisor as a UUID from version 2.53.
|
||||||
"uuid": node.service["host"],
|
"uuid": node.id,
|
||||||
"hostname": node.hypervisor_hostname,
|
"hostname": node.service["host"],
|
||||||
"memory": memory_mb,
|
"memory": memory_mb,
|
||||||
"memory_ratio": memory_ratio,
|
"memory_ratio": memory_ratio,
|
||||||
"memory_mb_reserved": memory_mb_reserved,
|
"memory_mb_reserved": memory_mb_reserved,
|
||||||
@@ -379,7 +379,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
|
|||||||
LOG.info("no instances on compute_node: {0}".format(node))
|
LOG.info("no instances on compute_node: {0}".format(node))
|
||||||
return
|
return
|
||||||
host = node.service["host"]
|
host = node.service["host"]
|
||||||
compute_node = self.model.get_node_by_uuid(host)
|
compute_node = self.model.get_node_by_uuid(node.id)
|
||||||
filters = {'host': host}
|
filters = {'host': host}
|
||||||
limit = len(instances) if len(instances) <= 1000 else -1
|
limit = len(instances) if len(instances) <= 1000 else -1
|
||||||
# Get all servers on this compute host.
|
# Get all servers on this compute host.
|
||||||
|
|||||||
@@ -34,7 +34,6 @@ class ServiceState(enum.Enum):
|
|||||||
class ComputeNode(compute_resource.ComputeResource):
|
class ComputeNode(compute_resource.ComputeResource):
|
||||||
|
|
||||||
fields = {
|
fields = {
|
||||||
"id": wfields.StringField(),
|
|
||||||
"hostname": wfields.StringField(),
|
"hostname": wfields.StringField(),
|
||||||
"status": wfields.StringField(default=ServiceState.ENABLED.value),
|
"status": wfields.StringField(default=ServiceState.ENABLED.value),
|
||||||
"disabled_reason": wfields.StringField(nullable=True),
|
"disabled_reason": wfields.StringField(nullable=True),
|
||||||
|
|||||||
@@ -19,6 +19,7 @@
|
|||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import nova_helper
|
from watcher.common import nova_helper
|
||||||
|
from watcher.common import utils
|
||||||
from watcher.decision_engine.model import element
|
from watcher.decision_engine.model import element
|
||||||
from watcher.decision_engine.model.notification import base
|
from watcher.decision_engine.model.notification import base
|
||||||
from watcher.decision_engine.model.notification import filtering
|
from watcher.decision_engine.model.notification import filtering
|
||||||
@@ -38,15 +39,15 @@ class NovaNotification(base.NotificationEndpoint):
|
|||||||
self._nova = nova_helper.NovaHelper()
|
self._nova = nova_helper.NovaHelper()
|
||||||
return self._nova
|
return self._nova
|
||||||
|
|
||||||
def get_or_create_instance(self, instance_uuid, node_uuid=None):
|
def get_or_create_instance(self, instance_uuid, node_name=None):
|
||||||
try:
|
try:
|
||||||
node = None
|
node = None
|
||||||
if node_uuid:
|
if node_name:
|
||||||
node = self.get_or_create_node(node_uuid)
|
node = self.get_or_create_node(node_name)
|
||||||
except exception.ComputeNodeNotFound:
|
except exception.ComputeNodeNotFound:
|
||||||
LOG.warning("Could not find compute node %(node)s for "
|
LOG.warning("Could not find compute node %(node)s for "
|
||||||
"instance %(instance)s",
|
"instance %(instance)s",
|
||||||
dict(node=node_uuid, instance=instance_uuid))
|
dict(node=node_name, instance=instance_uuid))
|
||||||
try:
|
try:
|
||||||
instance = self.cluster_data_model.get_instance_by_uuid(
|
instance = self.cluster_data_model.get_instance_by_uuid(
|
||||||
instance_uuid)
|
instance_uuid)
|
||||||
@@ -117,13 +118,16 @@ class NovaNotification(base.NotificationEndpoint):
|
|||||||
'disabled_reason': disabled_reason,
|
'disabled_reason': disabled_reason,
|
||||||
})
|
})
|
||||||
|
|
||||||
def create_compute_node(self, node_hostname):
|
def create_compute_node(self, uuid_or_name):
|
||||||
"""Update the compute node by querying the Nova API."""
|
"""Create the computeNode node."""
|
||||||
try:
|
try:
|
||||||
_node = self.nova.get_compute_node_by_hostname(node_hostname)
|
if utils.is_uuid_like(uuid_or_name):
|
||||||
|
_node = self.nova.get_compute_node_by_uuid(uuid_or_name)
|
||||||
|
else:
|
||||||
|
_node = self.nova.get_compute_node_by_hostname(uuid_or_name)
|
||||||
|
|
||||||
node = element.ComputeNode(
|
node = element.ComputeNode(
|
||||||
id=_node.id,
|
uuid=_node.id,
|
||||||
uuid=node_hostname,
|
|
||||||
hostname=_node.hypervisor_hostname,
|
hostname=_node.hypervisor_hostname,
|
||||||
state=_node.state,
|
state=_node.state,
|
||||||
status=_node.status,
|
status=_node.status,
|
||||||
@@ -132,26 +136,27 @@ class NovaNotification(base.NotificationEndpoint):
|
|||||||
disk=_node.free_disk_gb,
|
disk=_node.free_disk_gb,
|
||||||
disk_capacity=_node.local_gb,
|
disk_capacity=_node.local_gb,
|
||||||
)
|
)
|
||||||
|
self.cluster_data_model.add_node(node)
|
||||||
|
LOG.debug("New compute node mapped: %s", node.uuid)
|
||||||
return node
|
return node
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
LOG.exception(exc)
|
LOG.exception(exc)
|
||||||
LOG.debug("Could not refresh the node %s.", node_hostname)
|
LOG.debug("Could not refresh the node %s.", uuid_or_name)
|
||||||
raise exception.ComputeNodeNotFound(name=node_hostname)
|
raise exception.ComputeNodeNotFound(name=uuid_or_name)
|
||||||
|
|
||||||
return False
|
def get_or_create_node(self, uuid_or_name):
|
||||||
|
if uuid_or_name is None:
|
||||||
def get_or_create_node(self, uuid):
|
LOG.debug("Compute node UUID or name not provided: skipping")
|
||||||
if uuid is None:
|
|
||||||
LOG.debug("Compute node UUID not provided: skipping")
|
|
||||||
return
|
return
|
||||||
try:
|
try:
|
||||||
return self.cluster_data_model.get_node_by_uuid(uuid)
|
if utils.is_uuid_like(uuid_or_name):
|
||||||
|
return self.cluster_data_model.get_node_by_uuid(uuid_or_name)
|
||||||
|
else:
|
||||||
|
return self.cluster_data_model.get_node_by_name(uuid_or_name)
|
||||||
except exception.ComputeNodeNotFound:
|
except exception.ComputeNodeNotFound:
|
||||||
# The node didn't exist yet so we create a new node object
|
# The node didn't exist yet so we create a new node object
|
||||||
node = self.create_compute_node(uuid)
|
node = self.create_compute_node(uuid_or_name)
|
||||||
LOG.debug("New compute node created: %s", uuid)
|
LOG.debug("New compute node created: %s", uuid_or_name)
|
||||||
self.cluster_data_model.add_node(node)
|
|
||||||
LOG.debug("New compute node mapped: %s", uuid)
|
|
||||||
return node
|
return node
|
||||||
|
|
||||||
def update_instance_mapping(self, instance, node):
|
def update_instance_mapping(self, instance, node):
|
||||||
@@ -202,18 +207,18 @@ class VersionedNotification(NovaNotification):
|
|||||||
|
|
||||||
def service_updated(self, payload):
|
def service_updated(self, payload):
|
||||||
node_data = payload['nova_object.data']
|
node_data = payload['nova_object.data']
|
||||||
node_uuid = node_data['host']
|
node_name = node_data['host']
|
||||||
try:
|
try:
|
||||||
node = self.get_or_create_node(node_uuid)
|
node = self.get_or_create_node(node_name)
|
||||||
self.update_compute_node(node, payload)
|
self.update_compute_node(node, payload)
|
||||||
except exception.ComputeNodeNotFound as exc:
|
except exception.ComputeNodeNotFound as exc:
|
||||||
LOG.exception(exc)
|
LOG.exception(exc)
|
||||||
|
|
||||||
def service_deleted(self, payload):
|
def service_deleted(self, payload):
|
||||||
node_data = payload['nova_object.data']
|
node_data = payload['nova_object.data']
|
||||||
node_uuid = node_data['host']
|
node_name = node_data['host']
|
||||||
try:
|
try:
|
||||||
node = self.get_or_create_node(node_uuid)
|
node = self.get_or_create_node(node_name)
|
||||||
self.delete_node(node)
|
self.delete_node(node)
|
||||||
except exception.ComputeNodeNotFound as exc:
|
except exception.ComputeNodeNotFound as exc:
|
||||||
LOG.exception(exc)
|
LOG.exception(exc)
|
||||||
@@ -222,12 +227,12 @@ class VersionedNotification(NovaNotification):
|
|||||||
instance_data = payload['nova_object.data']
|
instance_data = payload['nova_object.data']
|
||||||
instance_uuid = instance_data['uuid']
|
instance_uuid = instance_data['uuid']
|
||||||
instance_state = instance_data['state']
|
instance_state = instance_data['state']
|
||||||
node_uuid = instance_data.get('host')
|
node_name = instance_data.get('host')
|
||||||
# if instance state is building, don't update data model
|
# if instance state is building, don't update data model
|
||||||
if instance_state == 'building':
|
if instance_state == 'building':
|
||||||
return
|
return
|
||||||
|
|
||||||
instance = self.get_or_create_instance(instance_uuid, node_uuid)
|
instance = self.get_or_create_instance(instance_uuid, node_name)
|
||||||
|
|
||||||
self.update_instance(instance, payload)
|
self.update_instance(instance, payload)
|
||||||
|
|
||||||
@@ -237,9 +242,9 @@ class VersionedNotification(NovaNotification):
|
|||||||
instance = element.Instance(uuid=instance_uuid)
|
instance = element.Instance(uuid=instance_uuid)
|
||||||
self.cluster_data_model.add_instance(instance)
|
self.cluster_data_model.add_instance(instance)
|
||||||
|
|
||||||
node_uuid = instance_data.get('host')
|
node_name = instance_data.get('host')
|
||||||
if node_uuid:
|
if node_name:
|
||||||
node = self.get_or_create_node(node_uuid)
|
node = self.get_or_create_node(node_name)
|
||||||
self.cluster_data_model.map_instance(instance, node)
|
self.cluster_data_model.map_instance(instance, node)
|
||||||
|
|
||||||
self.update_instance(instance, payload)
|
self.update_instance(instance, payload)
|
||||||
@@ -247,8 +252,8 @@ class VersionedNotification(NovaNotification):
|
|||||||
def instance_deleted(self, payload):
|
def instance_deleted(self, payload):
|
||||||
instance_data = payload['nova_object.data']
|
instance_data = payload['nova_object.data']
|
||||||
instance_uuid = instance_data['uuid']
|
instance_uuid = instance_data['uuid']
|
||||||
node_uuid = instance_data.get('host')
|
node_name = instance_data.get('host')
|
||||||
instance = self.get_or_create_instance(instance_uuid, node_uuid)
|
instance = self.get_or_create_instance(instance_uuid, node_name)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
node = self.get_or_create_node(instance_data['host'])
|
node = self.get_or_create_node(instance_data['host'])
|
||||||
|
|||||||
@@ -32,12 +32,12 @@ class ComputeScope(base.BaseScope):
|
|||||||
self._osc = osc
|
self._osc = osc
|
||||||
self.wrapper = nova_helper.NovaHelper(osc=self._osc)
|
self.wrapper = nova_helper.NovaHelper(osc=self._osc)
|
||||||
|
|
||||||
def remove_instance(self, cluster_model, instance, node_name):
|
def remove_instance(self, cluster_model, instance, node_uuid):
|
||||||
node = cluster_model.get_node_by_uuid(node_name)
|
node = cluster_model.get_node_by_uuid(node_uuid)
|
||||||
cluster_model.delete_instance(instance, node)
|
cluster_model.delete_instance(instance, node)
|
||||||
|
|
||||||
def update_exclude_instance(self, cluster_model, instance, node_name):
|
def update_exclude_instance(self, cluster_model, instance, node_uuid):
|
||||||
node = cluster_model.get_node_by_uuid(node_name)
|
node = cluster_model.get_node_by_uuid(node_uuid)
|
||||||
cluster_model.unmap_instance(instance, node)
|
cluster_model.unmap_instance(instance, node)
|
||||||
instance.update({"watcher_exclude": True})
|
instance.update({"watcher_exclude": True})
|
||||||
cluster_model.map_instance(instance, node)
|
cluster_model.map_instance(instance, node)
|
||||||
@@ -109,18 +109,18 @@ class ComputeScope(base.BaseScope):
|
|||||||
[project['uuid'] for project in resource['projects']])
|
[project['uuid'] for project in resource['projects']])
|
||||||
|
|
||||||
def remove_nodes_from_model(self, nodes_to_remove, cluster_model):
|
def remove_nodes_from_model(self, nodes_to_remove, cluster_model):
|
||||||
for node_uuid in nodes_to_remove:
|
for node_name in nodes_to_remove:
|
||||||
node = cluster_model.get_node_by_uuid(node_uuid)
|
node = cluster_model.get_node_by_name(node_name)
|
||||||
instances = cluster_model.get_node_instances(node)
|
instances = cluster_model.get_node_instances(node)
|
||||||
for instance in instances:
|
for instance in instances:
|
||||||
self.remove_instance(cluster_model, instance, node_uuid)
|
self.remove_instance(cluster_model, instance, node.uuid)
|
||||||
cluster_model.remove_node(node)
|
cluster_model.remove_node(node)
|
||||||
|
|
||||||
def update_exclude_instance_in_model(
|
def update_exclude_instance_in_model(
|
||||||
self, instances_to_exclude, cluster_model):
|
self, instances_to_exclude, cluster_model):
|
||||||
for instance_uuid in instances_to_exclude:
|
for instance_uuid in instances_to_exclude:
|
||||||
try:
|
try:
|
||||||
node_name = cluster_model.get_node_by_instance_uuid(
|
node_uuid = cluster_model.get_node_by_instance_uuid(
|
||||||
instance_uuid).uuid
|
instance_uuid).uuid
|
||||||
except exception.ComputeResourceNotFound:
|
except exception.ComputeResourceNotFound:
|
||||||
LOG.warning("The following instance %s cannot be found. "
|
LOG.warning("The following instance %s cannot be found. "
|
||||||
@@ -131,7 +131,7 @@ class ComputeScope(base.BaseScope):
|
|||||||
self.update_exclude_instance(
|
self.update_exclude_instance(
|
||||||
cluster_model,
|
cluster_model,
|
||||||
cluster_model.get_instance_by_uuid(instance_uuid),
|
cluster_model.get_instance_by_uuid(instance_uuid),
|
||||||
node_name)
|
node_uuid)
|
||||||
|
|
||||||
def exclude_instances_with_given_metadata(
|
def exclude_instances_with_given_metadata(
|
||||||
self, instance_metadata, cluster_model, instances_to_remove):
|
self, instance_metadata, cluster_model, instances_to_remove):
|
||||||
@@ -166,7 +166,8 @@ class ComputeScope(base.BaseScope):
|
|||||||
projects_to_exclude = []
|
projects_to_exclude = []
|
||||||
compute_scope = []
|
compute_scope = []
|
||||||
found_nothing_flag = False
|
found_nothing_flag = False
|
||||||
model_hosts = list(cluster_model.get_all_compute_nodes().keys())
|
model_hosts = [n.hostname for n in
|
||||||
|
cluster_model.get_all_compute_nodes().values()]
|
||||||
|
|
||||||
if not self.scope:
|
if not self.scope:
|
||||||
return cluster_model
|
return cluster_model
|
||||||
|
|||||||
@@ -449,8 +449,8 @@ class BaseStrategy(loadable.Loadable):
|
|||||||
source_node,
|
source_node,
|
||||||
destination_node):
|
destination_node):
|
||||||
parameters = {'migration_type': migration_type,
|
parameters = {'migration_type': migration_type,
|
||||||
'source_node': source_node.uuid,
|
'source_node': source_node.hostname,
|
||||||
'destination_node': destination_node.uuid,
|
'destination_node': destination_node.hostname,
|
||||||
'resource_name': instance.name}
|
'resource_name': instance.name}
|
||||||
self.solution.add_action(action_type=self.MIGRATION,
|
self.solution.add_action(action_type=self.MIGRATION,
|
||||||
resource_id=instance.uuid,
|
resource_id=instance.uuid,
|
||||||
|
|||||||
@@ -304,7 +304,7 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
|
|||||||
backup_node = self.input_parameters.get('backup_node')
|
backup_node = self.input_parameters.get('backup_node')
|
||||||
|
|
||||||
# if no VMs in the maintenance_node, just maintain the compute node
|
# if no VMs in the maintenance_node, just maintain the compute node
|
||||||
src_node = self.compute_model.get_node_by_uuid(maintenance_node)
|
src_node = self.compute_model.get_node_by_name(maintenance_node)
|
||||||
if len(self.compute_model.get_node_instances(src_node)) == 0:
|
if len(self.compute_model.get_node_instances(src_node)) == 0:
|
||||||
if (src_node.disabled_reason !=
|
if (src_node.disabled_reason !=
|
||||||
self.REASON_FOR_MAINTAINING):
|
self.REASON_FOR_MAINTAINING):
|
||||||
@@ -312,7 +312,7 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
|
|||||||
return
|
return
|
||||||
|
|
||||||
if backup_node:
|
if backup_node:
|
||||||
des_node = self.compute_model.get_node_by_uuid(backup_node)
|
des_node = self.compute_model.get_node_by_name(backup_node)
|
||||||
else:
|
else:
|
||||||
des_node = None
|
des_node = None
|
||||||
|
|
||||||
|
|||||||
@@ -177,9 +177,9 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
|
|||||||
node.hostname = hypervisor_node.hypervisor_hostname
|
node.hostname = hypervisor_node.hypervisor_hostname
|
||||||
hypervisor_node = hypervisor_node.to_dict()
|
hypervisor_node = hypervisor_node.to_dict()
|
||||||
compute_service = hypervisor_node.get('service', None)
|
compute_service = hypervisor_node.get('service', None)
|
||||||
host_uuid = compute_service.get('host')
|
host_name = compute_service.get('host')
|
||||||
try:
|
try:
|
||||||
self.compute_model.get_node_by_uuid(host_uuid)
|
self.compute_model.get_node_by_name(host_name)
|
||||||
except exception.ComputeNodeNotFound:
|
except exception.ComputeNodeNotFound:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
|||||||
@@ -419,13 +419,13 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
'input_parameters'][
|
'input_parameters'][
|
||||||
'resource_id'] == instance_uuid)
|
'resource_id'] == instance_uuid)
|
||||||
if len(actions) > 1:
|
if len(actions) > 1:
|
||||||
src_uuid = actions[0]['input_parameters']['source_node']
|
src_name = actions[0]['input_parameters']['source_node']
|
||||||
dst_uuid = actions[-1]['input_parameters']['destination_node']
|
dst_name = actions[-1]['input_parameters']['destination_node']
|
||||||
for a in actions:
|
for a in actions:
|
||||||
self.solution.actions.remove(a)
|
self.solution.actions.remove(a)
|
||||||
self.number_of_migrations -= 1
|
self.number_of_migrations -= 1
|
||||||
src_node = self.compute_model.get_node_by_uuid(src_uuid)
|
src_node = self.compute_model.get_node_by_name(src_name)
|
||||||
dst_node = self.compute_model.get_node_by_uuid(dst_uuid)
|
dst_node = self.compute_model.get_node_by_name(dst_name)
|
||||||
instance = self.compute_model.get_instance_by_uuid(
|
instance = self.compute_model.get_instance_by_uuid(
|
||||||
instance_uuid)
|
instance_uuid)
|
||||||
if self.compute_model.migrate_instance(
|
if self.compute_model.migrate_instance(
|
||||||
|
|||||||
@@ -50,7 +50,7 @@ class TestChangeNovaServiceState(base.TestCase):
|
|||||||
self.addCleanup(m_nova_helper.stop)
|
self.addCleanup(m_nova_helper.stop)
|
||||||
|
|
||||||
self.input_parameters = {
|
self.input_parameters = {
|
||||||
baction.BaseAction.RESOURCE_ID: "compute-1",
|
"resource_name": "compute-1",
|
||||||
"state": element.ServiceState.ENABLED.value,
|
"state": element.ServiceState.ENABLED.value,
|
||||||
}
|
}
|
||||||
self.action = change_nova_service_state.ChangeNovaServiceState(
|
self.action = change_nova_service_state.ChangeNovaServiceState(
|
||||||
|
|||||||
@@ -78,7 +78,7 @@ class TestNovaClusterDataModelCollector(base.TestCase):
|
|||||||
disabled_reason='',
|
disabled_reason='',
|
||||||
)
|
)
|
||||||
minimal_node = dict(
|
minimal_node = dict(
|
||||||
id=1337,
|
id='160a0e7b-8b0b-4854-8257-9c71dff4efcc',
|
||||||
hypervisor_hostname='test_hostname',
|
hypervisor_hostname='test_hostname',
|
||||||
state='TEST_STATE',
|
state='TEST_STATE',
|
||||||
status='TEST_STATUS',
|
status='TEST_STATUS',
|
||||||
@@ -149,7 +149,7 @@ class TestNovaClusterDataModelCollector(base.TestCase):
|
|||||||
node = list(compute_nodes.values())[0]
|
node = list(compute_nodes.values())[0]
|
||||||
instance = list(instances.values())[0]
|
instance = list(instances.values())[0]
|
||||||
|
|
||||||
self.assertEqual(node.uuid, 'test_hostname')
|
self.assertEqual(node.uuid, '160a0e7b-8b0b-4854-8257-9c71dff4efcc')
|
||||||
self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')
|
self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')
|
||||||
|
|
||||||
memory_total = node.memory - node.memory_mb_reserved
|
memory_total = node.memory - node.memory_mb_reserved
|
||||||
|
|||||||
@@ -112,9 +112,9 @@ class FakeCeilometerMetrics(object):
|
|||||||
|
|
||||||
uuid = resource.uuid
|
uuid = resource.uuid
|
||||||
mock = {}
|
mock = {}
|
||||||
mock['Node_0'] = 30
|
mock["fa69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 30
|
||||||
# use a big value to make sure it exceeds threshold
|
# use a big value to make sure it exceeds threshold
|
||||||
mock['Node_1'] = 100
|
mock["af69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 100
|
||||||
if uuid not in mock.keys():
|
if uuid not in mock.keys():
|
||||||
mock[uuid] = 100
|
mock[uuid] = 100
|
||||||
return float(mock[str(uuid)])
|
return float(mock[str(uuid)])
|
||||||
@@ -189,6 +189,8 @@ class FakeCeilometerMetrics(object):
|
|||||||
# node 0
|
# node 0
|
||||||
measurements['Node_0_hostname_0'] = 7
|
measurements['Node_0_hostname_0'] = 7
|
||||||
measurements['Node_1_hostname_1'] = 7
|
measurements['Node_1_hostname_1'] = 7
|
||||||
|
measurements['fa69c544-906b-4a6a-a9c6-c1f7a8078c73_hostname_0'] = 7
|
||||||
|
measurements['af69c544-906b-4a6a-a9c6-c1f7a8078c73_hostname_1'] = 7
|
||||||
# node 1
|
# node 1
|
||||||
measurements['Node_2_hostname_2'] = 80
|
measurements['Node_2_hostname_2'] = 80
|
||||||
# node 2
|
# node 2
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
<ModelRoot>
|
<ModelRoot>
|
||||||
<ComputeNode uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
<ComputeNode uuid="fa69c544-906b-4a6a-a9c6-c1f7a8078c73" status="enabled" state="up" id="0" hostname="hostname_0" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||||
<Instance watcher_exclude="False" state="active" name="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
|
<Instance watcher_exclude="False" state="active" human_id="" name="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
|
||||||
</ComputeNode>
|
</ComputeNode>
|
||||||
<ComputeNode uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
<ComputeNode uuid="af69c544-906b-4a6a-a9c6-c1f7a8078c73" status="enabled" state="up" id="1" hostname="hostname_1" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||||
<Instance watcher_exclude="False" state="active" name="" uuid="a4cab39b-9828-413a-bf88-f76921bf1517" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
|
<Instance watcher_exclude="False" state="active" human_id="" name="" uuid="a4cab39b-9828-413a-bf88-f76921bf1517" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
|
||||||
</ComputeNode>
|
</ComputeNode>
|
||||||
</ModelRoot>
|
</ModelRoot>
|
||||||
|
|||||||
@@ -108,9 +108,10 @@ class FakeGnocchiMetrics(object):
|
|||||||
uuid = resource.uuid
|
uuid = resource.uuid
|
||||||
|
|
||||||
mock = {}
|
mock = {}
|
||||||
mock['Node_0'] = 30
|
mock["Node_0"] = 30
|
||||||
|
mock["fa69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 30
|
||||||
# use a big value to make sure it exceeds threshold
|
# use a big value to make sure it exceeds threshold
|
||||||
mock['Node_1'] = 100
|
mock["af69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 100
|
||||||
|
|
||||||
return mock[str(uuid)]
|
return mock[str(uuid)]
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
"nova_object.name": "ServiceStatusPayload",
|
"nova_object.name": "ServiceStatusPayload",
|
||||||
"nova_object.version": "1.0",
|
"nova_object.version": "1.0",
|
||||||
"nova_object.data": {
|
"nova_object.data": {
|
||||||
"host": "Node_0",
|
"host": "hostname_0",
|
||||||
"disabled": true,
|
"disabled": true,
|
||||||
"last_seen_up": "2012-10-29T13:42:05Z",
|
"last_seen_up": "2012-10-29T13:42:05Z",
|
||||||
"binary": "nova-compute",
|
"binary": "nova-compute",
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
"nova_object.name": "ServiceStatusPayload",
|
"nova_object.name": "ServiceStatusPayload",
|
||||||
"nova_object.version": "1.0",
|
"nova_object.version": "1.0",
|
||||||
"nova_object.data": {
|
"nova_object.data": {
|
||||||
"host": "Node_0",
|
"host": "hostname_0",
|
||||||
"disabled": false,
|
"disabled": false,
|
||||||
"last_seen_up": "2012-10-29T13:42:05Z",
|
"last_seen_up": "2012-10-29T13:42:05Z",
|
||||||
"binary": "nova-compute",
|
"binary": "nova-compute",
|
||||||
|
|||||||
@@ -11,7 +11,7 @@
|
|||||||
"last_seen_up": null,
|
"last_seen_up": null,
|
||||||
"report_count": 0,
|
"report_count": 0,
|
||||||
"topic": "compute",
|
"topic": "compute",
|
||||||
"uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
"uuid": "fafac544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||||
"version": 23
|
"version": 23
|
||||||
},
|
},
|
||||||
"nova_object.name": "ServiceStatusPayload",
|
"nova_object.name": "ServiceStatusPayload",
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
"disabled": false,
|
"disabled": false,
|
||||||
"disabled_reason": null,
|
"disabled_reason": null,
|
||||||
"forced_down": false,
|
"forced_down": false,
|
||||||
"host": "Node_0",
|
"host": "hostname_0",
|
||||||
"last_seen_up": null,
|
"last_seen_up": null,
|
||||||
"report_count": 0,
|
"report_count": 0,
|
||||||
"topic": "compute",
|
"topic": "compute",
|
||||||
|
|||||||
@@ -1,21 +1,23 @@
|
|||||||
{
|
{
|
||||||
"priority": "INFO",
|
|
||||||
"payload": {
|
|
||||||
"nova_object.namespace": "nova",
|
|
||||||
"nova_object.name": "ServiceStatusPayload",
|
|
||||||
"nova_object.version": "1.0",
|
|
||||||
"nova_object.data": {
|
|
||||||
"host": "host1",
|
|
||||||
"disabled": false,
|
|
||||||
"last_seen_up": "2012-10-29T13:42:05Z",
|
|
||||||
"binary": "nova-compute",
|
|
||||||
"topic": "compute",
|
|
||||||
"disabled_reason": null,
|
|
||||||
"report_count": 1,
|
|
||||||
"forced_down": false,
|
|
||||||
"version": 15
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"event_type": "service.update",
|
"event_type": "service.update",
|
||||||
|
"payload": {
|
||||||
|
"nova_object.data": {
|
||||||
|
"availability_zone": null,
|
||||||
|
"binary": "nova-compute",
|
||||||
|
"disabled": false,
|
||||||
|
"disabled_reason": null,
|
||||||
|
"forced_down": false,
|
||||||
|
"host": "host1",
|
||||||
|
"last_seen_up": "2012-10-29T13:42:05Z",
|
||||||
|
"report_count": 1,
|
||||||
|
"topic": "compute",
|
||||||
|
"uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||||
|
"version": 23
|
||||||
|
},
|
||||||
|
"nova_object.name": "ServiceStatusPayload",
|
||||||
|
"nova_object.namespace": "nova",
|
||||||
|
"nova_object.version": "1.1"
|
||||||
|
},
|
||||||
|
"priority": "INFO",
|
||||||
"publisher_id": "nova-compute:host1"
|
"publisher_id": "nova-compute:host1"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -122,8 +122,8 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
self.fake_cdmc.cluster_data_model = compute_model
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
||||||
|
|
||||||
node0_uuid = 'Node_0'
|
node0_name = "hostname_0"
|
||||||
node0 = compute_model.get_node_by_uuid(node0_uuid)
|
node0 = compute_model.get_node_by_name(node0_name)
|
||||||
|
|
||||||
message = self.load_message('scenario3_service-update-disabled.json')
|
message = self.load_message('scenario3_service-update-disabled.json')
|
||||||
|
|
||||||
@@ -139,7 +139,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
|
|
||||||
self.assertEqual('Node_0', node0.hostname)
|
self.assertEqual("hostname_0", node0.hostname)
|
||||||
self.assertEqual(element.ServiceState.OFFLINE.value, node0.state)
|
self.assertEqual(element.ServiceState.OFFLINE.value, node0.state)
|
||||||
self.assertEqual(element.ServiceState.DISABLED.value, node0.status)
|
self.assertEqual(element.ServiceState.DISABLED.value, node0.status)
|
||||||
|
|
||||||
@@ -153,7 +153,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
|
|
||||||
self.assertEqual('Node_0', node0.hostname)
|
self.assertEqual("hostname_0", node0.hostname)
|
||||||
self.assertEqual(element.ServiceState.ONLINE.value, node0.state)
|
self.assertEqual(element.ServiceState.ONLINE.value, node0.state)
|
||||||
self.assertEqual(element.ServiceState.ENABLED.value, node0.status)
|
self.assertEqual(element.ServiceState.ENABLED.value, node0.status)
|
||||||
|
|
||||||
@@ -161,12 +161,11 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
def test_nova_service_create(self, m_nova_helper_cls):
|
def test_nova_service_create(self, m_nova_helper_cls):
|
||||||
m_get_compute_node_by_hostname = mock.Mock(
|
m_get_compute_node_by_hostname = mock.Mock(
|
||||||
side_effect=lambda uuid: mock.Mock(
|
side_effect=lambda uuid: mock.Mock(
|
||||||
name='m_get_compute_node_by_hostname',
|
name='m_get_compute_node_by_uuid',
|
||||||
id=3,
|
id="fafac544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||||
hypervisor_hostname="host2",
|
hypervisor_hostname="host2",
|
||||||
state='up',
|
state='up',
|
||||||
status='enabled',
|
status='enabled',
|
||||||
uuid=uuid,
|
|
||||||
memory_mb=7777,
|
memory_mb=7777,
|
||||||
vcpus=42,
|
vcpus=42,
|
||||||
free_disk_gb=974,
|
free_disk_gb=974,
|
||||||
@@ -179,11 +178,11 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
self.fake_cdmc.cluster_data_model = compute_model
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
||||||
|
|
||||||
new_node_uuid = 'host2'
|
new_node_name = "host2"
|
||||||
|
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
exception.ComputeNodeNotFound,
|
exception.ComputeNodeNotFound,
|
||||||
compute_model.get_node_by_uuid, new_node_uuid)
|
compute_model.get_node_by_name, new_node_name)
|
||||||
|
|
||||||
message = self.load_message('service-create.json')
|
message = self.load_message('service-create.json')
|
||||||
handler.info(
|
handler.info(
|
||||||
@@ -194,7 +193,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
|
|
||||||
new_node = compute_model.get_node_by_uuid(new_node_uuid)
|
new_node = compute_model.get_node_by_name(new_node_name)
|
||||||
self.assertEqual('host2', new_node.hostname)
|
self.assertEqual('host2', new_node.hostname)
|
||||||
self.assertEqual(element.ServiceState.ONLINE.value, new_node.state)
|
self.assertEqual(element.ServiceState.ONLINE.value, new_node.state)
|
||||||
self.assertEqual(element.ServiceState.ENABLED.value, new_node.status)
|
self.assertEqual(element.ServiceState.ENABLED.value, new_node.status)
|
||||||
@@ -204,10 +203,10 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
self.fake_cdmc.cluster_data_model = compute_model
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
||||||
|
|
||||||
node0_uuid = 'Node_0'
|
node0_name = "hostname_0"
|
||||||
|
|
||||||
# Before
|
# Before
|
||||||
self.assertTrue(compute_model.get_node_by_uuid(node0_uuid))
|
self.assertTrue(compute_model.get_node_by_name(node0_name))
|
||||||
|
|
||||||
message = self.load_message('service-delete.json')
|
message = self.load_message('service-delete.json')
|
||||||
handler.info(
|
handler.info(
|
||||||
@@ -221,7 +220,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
# After
|
# After
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
exception.ComputeNodeNotFound,
|
exception.ComputeNodeNotFound,
|
||||||
compute_model.get_node_by_uuid, node0_uuid)
|
compute_model.get_node_by_name, node0_name)
|
||||||
|
|
||||||
def test_nova_instance_update(self):
|
def test_nova_instance_update(self):
|
||||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||||
@@ -276,11 +275,10 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
m_get_compute_node_by_hostname = mock.Mock(
|
m_get_compute_node_by_hostname = mock.Mock(
|
||||||
side_effect=lambda uuid: mock.Mock(
|
side_effect=lambda uuid: mock.Mock(
|
||||||
name='m_get_compute_node_by_hostname',
|
name='m_get_compute_node_by_hostname',
|
||||||
id=3,
|
id='669966bd-a45c-4e1c-9d57-3054899a3ec7',
|
||||||
hypervisor_hostname="Node_2",
|
hypervisor_hostname="Node_2",
|
||||||
state='up',
|
state='up',
|
||||||
status='enabled',
|
status='enabled',
|
||||||
uuid=uuid,
|
|
||||||
memory_mb=7777,
|
memory_mb=7777,
|
||||||
vcpus=42,
|
vcpus=42,
|
||||||
free_disk_gb=974,
|
free_disk_gb=974,
|
||||||
@@ -312,7 +310,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
self.assertEqual(512, instance0.memory)
|
self.assertEqual(512, instance0.memory)
|
||||||
|
|
||||||
m_get_compute_node_by_hostname.assert_called_once_with('Node_2')
|
m_get_compute_node_by_hostname.assert_called_once_with('Node_2')
|
||||||
node_2 = compute_model.get_node_by_uuid('Node_2')
|
node_2 = compute_model.get_node_by_name('Node_2')
|
||||||
self.assertEqual(7777, node_2.memory)
|
self.assertEqual(7777, node_2.memory)
|
||||||
self.assertEqual(42, node_2.vcpus)
|
self.assertEqual(42, node_2.vcpus)
|
||||||
self.assertEqual(974, node_2.disk)
|
self.assertEqual(974, node_2.disk)
|
||||||
@@ -463,7 +461,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_0', node.uuid)
|
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||||
message = self.load_message(
|
message = self.load_message(
|
||||||
'instance-live_migration_force_complete-end.json')
|
'instance-live_migration_force_complete-end.json')
|
||||||
handler.info(
|
handler.info(
|
||||||
@@ -474,7 +472,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_1', node.uuid)
|
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||||
|
|
||||||
def test_live_migrated_end(self):
|
def test_live_migrated_end(self):
|
||||||
@@ -484,7 +482,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_0', node.uuid)
|
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||||
message = self.load_message(
|
message = self.load_message(
|
||||||
'instance-live_migration_post-end.json')
|
'instance-live_migration_post-end.json')
|
||||||
handler.info(
|
handler.info(
|
||||||
@@ -495,7 +493,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_1', node.uuid)
|
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||||
|
|
||||||
def test_nova_instance_lock(self):
|
def test_nova_instance_lock(self):
|
||||||
@@ -605,7 +603,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_0', node.uuid)
|
self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73", node.uuid)
|
||||||
message = self.load_message('instance-rebuild-end.json')
|
message = self.load_message('instance-rebuild-end.json')
|
||||||
handler.info(
|
handler.info(
|
||||||
ctxt=self.context,
|
ctxt=self.context,
|
||||||
@@ -615,7 +613,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_1', node.uuid)
|
self.assertEqual('hostname_0', node.hostname)
|
||||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||||
|
|
||||||
def test_nova_instance_rescue(self):
|
def test_nova_instance_rescue(self):
|
||||||
@@ -659,7 +657,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_0', node.uuid)
|
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||||
message = self.load_message(
|
message = self.load_message(
|
||||||
'instance-resize_confirm-end.json')
|
'instance-resize_confirm-end.json')
|
||||||
handler.info(
|
handler.info(
|
||||||
@@ -670,7 +668,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_1', node.uuid)
|
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||||
|
|
||||||
def test_nova_instance_restore_end(self):
|
def test_nova_instance_restore_end(self):
|
||||||
|
|||||||
@@ -39,7 +39,7 @@ class TestComputeScope(base.TestCase):
|
|||||||
audit_scope = fake_scopes.fake_scope_1
|
audit_scope = fake_scopes.fake_scope_1
|
||||||
mock_zone_list.return_value = [
|
mock_zone_list.return_value = [
|
||||||
mock.Mock(zone='AZ{0}'.format(i),
|
mock.Mock(zone='AZ{0}'.format(i),
|
||||||
host={'Node_{0}'.format(i): {}})
|
host={'hostname_{0}'.format(i): {}})
|
||||||
for i in range(4)]
|
for i in range(4)]
|
||||||
model = compute.ComputeScope(audit_scope, mock.Mock(),
|
model = compute.ComputeScope(audit_scope, mock.Mock(),
|
||||||
osc=mock.Mock()).get_scoped_model(cluster)
|
osc=mock.Mock()).get_scoped_model(cluster)
|
||||||
@@ -240,7 +240,7 @@ class TestComputeScope(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
compute.ComputeScope([], mock.Mock(),
|
compute.ComputeScope([], mock.Mock(),
|
||||||
osc=mock.Mock()).remove_nodes_from_model(
|
osc=mock.Mock()).remove_nodes_from_model(
|
||||||
['Node_1', 'Node_2'], model)
|
['hostname_1', 'hostname_2'], model)
|
||||||
expected_edges = [
|
expected_edges = [
|
||||||
('INSTANCE_0', 'Node_0'),
|
('INSTANCE_0', 'Node_0'),
|
||||||
('INSTANCE_1', 'Node_0'),
|
('INSTANCE_1', 'Node_0'),
|
||||||
@@ -290,7 +290,7 @@ class TestComputeScope(base.TestCase):
|
|||||||
audit_scope.extend(fake_scopes.fake_scope_2)
|
audit_scope.extend(fake_scopes.fake_scope_2)
|
||||||
mock_zone_list.return_value = [
|
mock_zone_list.return_value = [
|
||||||
mock.Mock(zone='AZ{0}'.format(i),
|
mock.Mock(zone='AZ{0}'.format(i),
|
||||||
host={'Node_{0}'.format(i): {}})
|
host={'hostname_{0}'.format(i): {}})
|
||||||
for i in range(4)]
|
for i in range(4)]
|
||||||
model = compute.ComputeScope(audit_scope, mock.Mock(),
|
model = compute.ComputeScope(audit_scope, mock.Mock(),
|
||||||
osc=mock.Mock()).get_scoped_model(cluster)
|
osc=mock.Mock()).get_scoped_model(cluster)
|
||||||
|
|||||||
@@ -212,8 +212,8 @@ class TestHostMaintenance(TestBaseStrategy):
|
|||||||
result = self.strategy.pre_execute()
|
result = self.strategy.pre_execute()
|
||||||
self.assertIsNone(result)
|
self.assertIsNone(result)
|
||||||
|
|
||||||
self.strategy.input_parameters = {"maintenance_node": 'Node_2',
|
self.strategy.input_parameters = {"maintenance_node": 'hostname_2',
|
||||||
"backup_node": 'Node_3'}
|
"backup_node": 'hostname_3'}
|
||||||
self.strategy.do_execute()
|
self.strategy.do_execute()
|
||||||
|
|
||||||
expected = [{'action_type': 'change_nova_service_state',
|
expected = [{'action_type': 'change_nova_service_state',
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ class TestNoisyNeighbor(TestBaseStrategy):
|
|||||||
def test_calc_used_resource(self):
|
def test_calc_used_resource(self):
|
||||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||||
self.m_c_model.return_value = model
|
self.m_c_model.return_value = model
|
||||||
node = model.get_node_by_uuid('Node_0')
|
node = model.get_node_by_uuid("fa69c544-906b-4a6a-a9c6-c1f7a8078c73")
|
||||||
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
|
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
|
||||||
node)
|
node)
|
||||||
|
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ class TestOutletTempControl(TestBaseStrategy):
|
|||||||
def test_calc_used_resource(self):
|
def test_calc_used_resource(self):
|
||||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||||
self.m_c_model.return_value = model
|
self.m_c_model.return_value = model
|
||||||
node = model.get_node_by_uuid('Node_0')
|
node = model.get_node_by_uuid("fa69c544-906b-4a6a-a9c6-c1f7a8078c73")
|
||||||
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
|
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
|
||||||
node)
|
node)
|
||||||
|
|
||||||
@@ -73,15 +73,18 @@ class TestOutletTempControl(TestBaseStrategy):
|
|||||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||||
self.m_c_model.return_value = model
|
self.m_c_model.return_value = model
|
||||||
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
|
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
|
||||||
self.assertEqual('Node_1', n1[0]['compute_node'].uuid)
|
self.assertEqual("af69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||||
self.assertEqual('Node_0', n2[0]['compute_node'].uuid)
|
n1[0]['compute_node'].uuid)
|
||||||
|
self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||||
|
n2[0]['compute_node'].uuid)
|
||||||
|
|
||||||
def test_choose_instance_to_migrate(self):
|
def test_choose_instance_to_migrate(self):
|
||||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||||
self.m_c_model.return_value = model
|
self.m_c_model.return_value = model
|
||||||
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
|
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
|
||||||
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
|
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
|
||||||
self.assertEqual('Node_1', instance_to_mig[0].uuid)
|
self.assertEqual('af69c544-906b-4a6a-a9c6-c1f7a8078c73',
|
||||||
|
instance_to_mig[0].uuid)
|
||||||
self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517',
|
self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517',
|
||||||
instance_to_mig[1].uuid)
|
instance_to_mig[1].uuid)
|
||||||
|
|
||||||
@@ -92,7 +95,8 @@ class TestOutletTempControl(TestBaseStrategy):
|
|||||||
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
|
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
|
||||||
dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1])
|
dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1])
|
||||||
self.assertEqual(1, len(dest_hosts))
|
self.assertEqual(1, len(dest_hosts))
|
||||||
self.assertEqual('Node_0', dest_hosts[0]['compute_node'].uuid)
|
self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||||
|
dest_hosts[0]['compute_node'].uuid)
|
||||||
|
|
||||||
def test_execute_no_workload(self):
|
def test_execute_no_workload(self):
|
||||||
model = self.fake_c_cluster.\
|
model = self.fake_c_cluster.\
|
||||||
|
|||||||
@@ -76,9 +76,9 @@ class TestSavingEnergy(TestBaseStrategy):
|
|||||||
mock_hyper1 = mock.Mock()
|
mock_hyper1 = mock.Mock()
|
||||||
mock_hyper2 = mock.Mock()
|
mock_hyper2 = mock.Mock()
|
||||||
mock_hyper1.to_dict.return_value = {
|
mock_hyper1.to_dict.return_value = {
|
||||||
'running_vms': 2, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
'running_vms': 2, 'service': {'host': 'hostname_0'}, 'state': 'up'}
|
||||||
mock_hyper2.to_dict.return_value = {
|
mock_hyper2.to_dict.return_value = {
|
||||||
'running_vms': 2, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
'running_vms': 2, 'service': {'host': 'hostname_1'}, 'state': 'up'}
|
||||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||||
|
|
||||||
self.strategy.get_hosts_pool()
|
self.strategy.get_hosts_pool()
|
||||||
@@ -101,9 +101,9 @@ class TestSavingEnergy(TestBaseStrategy):
|
|||||||
mock_hyper1 = mock.Mock()
|
mock_hyper1 = mock.Mock()
|
||||||
mock_hyper2 = mock.Mock()
|
mock_hyper2 = mock.Mock()
|
||||||
mock_hyper1.to_dict.return_value = {
|
mock_hyper1.to_dict.return_value = {
|
||||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'}
|
||||||
mock_hyper2.to_dict.return_value = {
|
mock_hyper2.to_dict.return_value = {
|
||||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'}
|
||||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||||
|
|
||||||
self.strategy.get_hosts_pool()
|
self.strategy.get_hosts_pool()
|
||||||
@@ -126,9 +126,9 @@ class TestSavingEnergy(TestBaseStrategy):
|
|||||||
mock_hyper1 = mock.Mock()
|
mock_hyper1 = mock.Mock()
|
||||||
mock_hyper2 = mock.Mock()
|
mock_hyper2 = mock.Mock()
|
||||||
mock_hyper1.to_dict.return_value = {
|
mock_hyper1.to_dict.return_value = {
|
||||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'}
|
||||||
mock_hyper2.to_dict.return_value = {
|
mock_hyper2.to_dict.return_value = {
|
||||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'}
|
||||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||||
|
|
||||||
self.strategy.get_hosts_pool()
|
self.strategy.get_hosts_pool()
|
||||||
@@ -151,9 +151,11 @@ class TestSavingEnergy(TestBaseStrategy):
|
|||||||
mock_hyper1 = mock.Mock()
|
mock_hyper1 = mock.Mock()
|
||||||
mock_hyper2 = mock.Mock()
|
mock_hyper2 = mock.Mock()
|
||||||
mock_hyper1.to_dict.return_value = {
|
mock_hyper1.to_dict.return_value = {
|
||||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
'running_vms': 0, 'service': {'host': 'hostname_0'},
|
||||||
|
'state': 'up'}
|
||||||
mock_hyper2.to_dict.return_value = {
|
mock_hyper2.to_dict.return_value = {
|
||||||
'running_vms': 0, 'service': {'host': 'Node_10'}, 'state': 'up'}
|
'running_vms': 0, 'service': {'host': 'hostname_10'},
|
||||||
|
'state': 'up'}
|
||||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||||
|
|
||||||
self.strategy.get_hosts_pool()
|
self.strategy.get_hosts_pool()
|
||||||
@@ -196,9 +198,9 @@ class TestSavingEnergy(TestBaseStrategy):
|
|||||||
mock_hyper1 = mock.Mock()
|
mock_hyper1 = mock.Mock()
|
||||||
mock_hyper2 = mock.Mock()
|
mock_hyper2 = mock.Mock()
|
||||||
mock_hyper1.to_dict.return_value = {
|
mock_hyper1.to_dict.return_value = {
|
||||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'}
|
||||||
mock_hyper2.to_dict.return_value = {
|
mock_hyper2.to_dict.return_value = {
|
||||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'}
|
||||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||||
|
|
||||||
model = self.fake_c_cluster.generate_scenario_1()
|
model = self.fake_c_cluster.generate_scenario_1()
|
||||||
|
|||||||
@@ -124,8 +124,8 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
|||||||
self.strategy.add_migration(instance, n1, n2)
|
self.strategy.add_migration(instance, n1, n2)
|
||||||
self.assertEqual(1, len(self.strategy.solution.actions))
|
self.assertEqual(1, len(self.strategy.solution.actions))
|
||||||
expected = {'action_type': 'migrate',
|
expected = {'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n2.uuid,
|
'input_parameters': {'destination_node': n2.hostname,
|
||||||
'source_node': n1.uuid,
|
'source_node': n1.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': instance.uuid,
|
'resource_id': instance.uuid,
|
||||||
'resource_name': instance.name}}
|
'resource_name': instance.name}}
|
||||||
@@ -147,8 +147,8 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
|||||||
self.strategy.add_migration(instance, n1, n2)
|
self.strategy.add_migration(instance, n1, n2)
|
||||||
self.assertEqual(1, len(self.strategy.solution.actions))
|
self.assertEqual(1, len(self.strategy.solution.actions))
|
||||||
expected = {'action_type': 'migrate',
|
expected = {'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n2.uuid,
|
'input_parameters': {'destination_node': n2.hostname,
|
||||||
'source_node': n1.uuid,
|
'source_node': n1.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': instance.uuid,
|
'resource_id': instance.uuid,
|
||||||
'resource_name': instance.name}}
|
'resource_name': instance.name}}
|
||||||
@@ -255,8 +255,8 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
|||||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||||
self.strategy.consolidation_phase(cc)
|
self.strategy.consolidation_phase(cc)
|
||||||
expected = [{'action_type': 'migrate',
|
expected = [{'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n2.uuid,
|
'input_parameters': {'destination_node': n2.hostname,
|
||||||
'source_node': n1.uuid,
|
'source_node': n1.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': instance.uuid,
|
'resource_id': instance.uuid,
|
||||||
'resource_name': instance.name}}]
|
'resource_name': instance.name}}]
|
||||||
@@ -273,9 +273,9 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
|||||||
n1 = model.get_node_by_uuid('Node_0')
|
n1 = model.get_node_by_uuid('Node_0')
|
||||||
self.strategy.get_relative_cluster_utilization = mock.MagicMock()
|
self.strategy.get_relative_cluster_utilization = mock.MagicMock()
|
||||||
self.strategy.do_execute()
|
self.strategy.do_execute()
|
||||||
n2_uuid = self.strategy.solution.actions[0][
|
n2_name = self.strategy.solution.actions[0][
|
||||||
'input_parameters']['destination_node']
|
'input_parameters']['destination_node']
|
||||||
n2 = model.get_node_by_uuid(n2_uuid)
|
n2 = model.get_node_by_name(n2_name)
|
||||||
n3_uuid = self.strategy.solution.actions[2][
|
n3_uuid = self.strategy.solution.actions[2][
|
||||||
'input_parameters']['resource_id']
|
'input_parameters']['resource_id']
|
||||||
n3 = model.get_node_by_uuid(n3_uuid)
|
n3 = model.get_node_by_uuid(n3_uuid)
|
||||||
@@ -283,14 +283,14 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
|||||||
'input_parameters']['resource_id']
|
'input_parameters']['resource_id']
|
||||||
n4 = model.get_node_by_uuid(n4_uuid)
|
n4 = model.get_node_by_uuid(n4_uuid)
|
||||||
expected = [{'action_type': 'migrate',
|
expected = [{'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n2.uuid,
|
'input_parameters': {'destination_node': n2.hostname,
|
||||||
'source_node': n1.uuid,
|
'source_node': n1.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': 'INSTANCE_3',
|
'resource_id': 'INSTANCE_3',
|
||||||
'resource_name': ''}},
|
'resource_name': ''}},
|
||||||
{'action_type': 'migrate',
|
{'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n2.uuid,
|
'input_parameters': {'destination_node': n2.hostname,
|
||||||
'source_node': n1.uuid,
|
'source_node': n1.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': 'INSTANCE_1',
|
'resource_id': 'INSTANCE_1',
|
||||||
'resource_name': ''}},
|
'resource_name': ''}},
|
||||||
@@ -330,31 +330,31 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
|||||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||||
self.strategy.offload_phase(cc)
|
self.strategy.offload_phase(cc)
|
||||||
expected = [{'action_type': 'migrate',
|
expected = [{'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n2.uuid,
|
'input_parameters': {'destination_node': n2.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': 'INSTANCE_6',
|
'resource_id': 'INSTANCE_6',
|
||||||
'resource_name': '',
|
'resource_name': '',
|
||||||
'source_node': n1.uuid}},
|
'source_node': n1.hostname}},
|
||||||
{'action_type': 'migrate',
|
{'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n2.uuid,
|
'input_parameters': {'destination_node': n2.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': 'INSTANCE_7',
|
'resource_id': 'INSTANCE_7',
|
||||||
'resource_name': '',
|
'resource_name': '',
|
||||||
'source_node': n1.uuid}},
|
'source_node': n1.hostname}},
|
||||||
{'action_type': 'migrate',
|
{'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n2.uuid,
|
'input_parameters': {'destination_node': n2.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': 'INSTANCE_8',
|
'resource_id': 'INSTANCE_8',
|
||||||
'resource_name': '',
|
'resource_name': '',
|
||||||
'source_node': n1.uuid}}]
|
'source_node': n1.hostname}}]
|
||||||
self.assertEqual(expected, self.strategy.solution.actions)
|
self.assertEqual(expected, self.strategy.solution.actions)
|
||||||
self.strategy.consolidation_phase(cc)
|
self.strategy.consolidation_phase(cc)
|
||||||
expected.append({'action_type': 'migrate',
|
expected.append({'action_type': 'migrate',
|
||||||
'input_parameters': {'destination_node': n1.uuid,
|
'input_parameters': {'destination_node': n1.hostname,
|
||||||
'migration_type': 'live',
|
'migration_type': 'live',
|
||||||
'resource_id': 'INSTANCE_7',
|
'resource_id': 'INSTANCE_7',
|
||||||
'resource_name': '',
|
'resource_name': '',
|
||||||
'source_node': n2.uuid}})
|
'source_node': n2.hostname}})
|
||||||
self.assertEqual(expected, self.strategy.solution.actions)
|
self.assertEqual(expected, self.strategy.solution.actions)
|
||||||
self.strategy.optimize_solution()
|
self.strategy.optimize_solution()
|
||||||
del expected[3]
|
del expected[3]
|
||||||
|
|||||||
Reference in New Issue
Block a user