Remove the id field from the CDM
There are three related fields (id, uuid and hostname) in ComputeNode [1]. According to [2], since Nova API microversion 2.53 the id of the hypervisor is reported as a UUID, and service.host is equal to the hypervisor name for a compute node. Therefore we can remove the id field and keep only uuid, setting uuid from node.id. [1]:https://github.com/openstack/watcher/blob/master/watcher/decision_engine/model/collector/nova.py#L306 [2]:https://developer.openstack.org/api-ref/compute/?expanded=list-hypervisors-details-detail#list-hypervisors-details Change-Id: Ie1d1ad56808270d936ec25186061f7f12cc49fdc Closes-Bug: #1835192 Depends-on: I752fbfa560313e28e87d83e46431c283b4db4f23 Depends-on: I0975500f359de92b6d6fdea2e01614cf0ba73f05
This commit is contained in:
@@ -50,6 +50,7 @@ class ChangeNovaServiceState(base.BaseAction):
|
||||
|
||||
STATE = 'state'
|
||||
REASON = 'disabled_reason'
|
||||
RESOURCE_NAME = 'resource_name'
|
||||
|
||||
@property
|
||||
def schema(self):
|
||||
@@ -82,7 +83,7 @@ class ChangeNovaServiceState(base.BaseAction):
|
||||
|
||||
@property
|
||||
def host(self):
|
||||
return self.resource_id
|
||||
return self.input_parameters.get(self.RESOURCE_NAME)
|
||||
|
||||
@property
|
||||
def state(self):
|
||||
|
||||
@@ -175,7 +175,7 @@ class CeilometerHelper(base.DataSourceBase):
|
||||
|
||||
resource_id = resource.uuid
|
||||
if resource_type == 'compute_node':
|
||||
resource_id = "%s_%s" % (resource.uuid, resource.hostname)
|
||||
resource_id = "%s_%s" % (resource.hostname, resource.hostname)
|
||||
|
||||
query = self.build_query(
|
||||
resource_id=resource_id, start_time=start_time, end_time=end_time)
|
||||
|
||||
@@ -84,7 +84,7 @@ class GnocchiHelper(base.DataSourceBase):
|
||||
|
||||
resource_id = resource.uuid
|
||||
if resource_type == 'compute_node':
|
||||
resource_id = "%s_%s" % (resource.uuid, resource.hostname)
|
||||
resource_id = "%s_%s" % (resource.hostname, resource.hostname)
|
||||
kwargs = dict(query={"=": {"original_resource_id": resource_id}},
|
||||
limit=1)
|
||||
resources = self.query_retry(
|
||||
|
||||
@@ -349,9 +349,9 @@ class NovaModelBuilder(base.BaseModelBuilder):
|
||||
|
||||
# build up the compute node.
|
||||
node_attributes = {
|
||||
"id": node.id,
|
||||
"uuid": node.service["host"],
|
||||
"hostname": node.hypervisor_hostname,
|
||||
# The id of the hypervisor as a UUID from version 2.53.
|
||||
"uuid": node.id,
|
||||
"hostname": node.service["host"],
|
||||
"memory": memory_mb,
|
||||
"memory_ratio": memory_ratio,
|
||||
"memory_mb_reserved": memory_mb_reserved,
|
||||
@@ -379,7 +379,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
|
||||
LOG.info("no instances on compute_node: {0}".format(node))
|
||||
return
|
||||
host = node.service["host"]
|
||||
compute_node = self.model.get_node_by_uuid(host)
|
||||
compute_node = self.model.get_node_by_uuid(node.id)
|
||||
filters = {'host': host}
|
||||
limit = len(instances) if len(instances) <= 1000 else -1
|
||||
# Get all servers on this compute host.
|
||||
|
||||
@@ -34,7 +34,6 @@ class ServiceState(enum.Enum):
|
||||
class ComputeNode(compute_resource.ComputeResource):
|
||||
|
||||
fields = {
|
||||
"id": wfields.StringField(),
|
||||
"hostname": wfields.StringField(),
|
||||
"status": wfields.StringField(default=ServiceState.ENABLED.value),
|
||||
"disabled_reason": wfields.StringField(nullable=True),
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
from oslo_log import log
|
||||
from watcher.common import exception
|
||||
from watcher.common import nova_helper
|
||||
from watcher.common import utils
|
||||
from watcher.decision_engine.model import element
|
||||
from watcher.decision_engine.model.notification import base
|
||||
from watcher.decision_engine.model.notification import filtering
|
||||
@@ -38,15 +39,15 @@ class NovaNotification(base.NotificationEndpoint):
|
||||
self._nova = nova_helper.NovaHelper()
|
||||
return self._nova
|
||||
|
||||
def get_or_create_instance(self, instance_uuid, node_uuid=None):
|
||||
def get_or_create_instance(self, instance_uuid, node_name=None):
|
||||
try:
|
||||
node = None
|
||||
if node_uuid:
|
||||
node = self.get_or_create_node(node_uuid)
|
||||
if node_name:
|
||||
node = self.get_or_create_node(node_name)
|
||||
except exception.ComputeNodeNotFound:
|
||||
LOG.warning("Could not find compute node %(node)s for "
|
||||
"instance %(instance)s",
|
||||
dict(node=node_uuid, instance=instance_uuid))
|
||||
dict(node=node_name, instance=instance_uuid))
|
||||
try:
|
||||
instance = self.cluster_data_model.get_instance_by_uuid(
|
||||
instance_uuid)
|
||||
@@ -117,13 +118,16 @@ class NovaNotification(base.NotificationEndpoint):
|
||||
'disabled_reason': disabled_reason,
|
||||
})
|
||||
|
||||
def create_compute_node(self, node_hostname):
|
||||
"""Update the compute node by querying the Nova API."""
|
||||
def create_compute_node(self, uuid_or_name):
|
||||
"""Create the computeNode node."""
|
||||
try:
|
||||
_node = self.nova.get_compute_node_by_hostname(node_hostname)
|
||||
if utils.is_uuid_like(uuid_or_name):
|
||||
_node = self.nova.get_compute_node_by_uuid(uuid_or_name)
|
||||
else:
|
||||
_node = self.nova.get_compute_node_by_hostname(uuid_or_name)
|
||||
|
||||
node = element.ComputeNode(
|
||||
id=_node.id,
|
||||
uuid=node_hostname,
|
||||
uuid=_node.id,
|
||||
hostname=_node.hypervisor_hostname,
|
||||
state=_node.state,
|
||||
status=_node.status,
|
||||
@@ -132,26 +136,27 @@ class NovaNotification(base.NotificationEndpoint):
|
||||
disk=_node.free_disk_gb,
|
||||
disk_capacity=_node.local_gb,
|
||||
)
|
||||
self.cluster_data_model.add_node(node)
|
||||
LOG.debug("New compute node mapped: %s", node.uuid)
|
||||
return node
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
LOG.debug("Could not refresh the node %s.", node_hostname)
|
||||
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||
LOG.debug("Could not refresh the node %s.", uuid_or_name)
|
||||
raise exception.ComputeNodeNotFound(name=uuid_or_name)
|
||||
|
||||
return False
|
||||
|
||||
def get_or_create_node(self, uuid):
|
||||
if uuid is None:
|
||||
LOG.debug("Compute node UUID not provided: skipping")
|
||||
def get_or_create_node(self, uuid_or_name):
|
||||
if uuid_or_name is None:
|
||||
LOG.debug("Compute node UUID or name not provided: skipping")
|
||||
return
|
||||
try:
|
||||
return self.cluster_data_model.get_node_by_uuid(uuid)
|
||||
if utils.is_uuid_like(uuid_or_name):
|
||||
return self.cluster_data_model.get_node_by_uuid(uuid_or_name)
|
||||
else:
|
||||
return self.cluster_data_model.get_node_by_name(uuid_or_name)
|
||||
except exception.ComputeNodeNotFound:
|
||||
# The node didn't exist yet so we create a new node object
|
||||
node = self.create_compute_node(uuid)
|
||||
LOG.debug("New compute node created: %s", uuid)
|
||||
self.cluster_data_model.add_node(node)
|
||||
LOG.debug("New compute node mapped: %s", uuid)
|
||||
node = self.create_compute_node(uuid_or_name)
|
||||
LOG.debug("New compute node created: %s", uuid_or_name)
|
||||
return node
|
||||
|
||||
def update_instance_mapping(self, instance, node):
|
||||
@@ -202,18 +207,18 @@ class VersionedNotification(NovaNotification):
|
||||
|
||||
def service_updated(self, payload):
|
||||
node_data = payload['nova_object.data']
|
||||
node_uuid = node_data['host']
|
||||
node_name = node_data['host']
|
||||
try:
|
||||
node = self.get_or_create_node(node_uuid)
|
||||
node = self.get_or_create_node(node_name)
|
||||
self.update_compute_node(node, payload)
|
||||
except exception.ComputeNodeNotFound as exc:
|
||||
LOG.exception(exc)
|
||||
|
||||
def service_deleted(self, payload):
|
||||
node_data = payload['nova_object.data']
|
||||
node_uuid = node_data['host']
|
||||
node_name = node_data['host']
|
||||
try:
|
||||
node = self.get_or_create_node(node_uuid)
|
||||
node = self.get_or_create_node(node_name)
|
||||
self.delete_node(node)
|
||||
except exception.ComputeNodeNotFound as exc:
|
||||
LOG.exception(exc)
|
||||
@@ -222,12 +227,12 @@ class VersionedNotification(NovaNotification):
|
||||
instance_data = payload['nova_object.data']
|
||||
instance_uuid = instance_data['uuid']
|
||||
instance_state = instance_data['state']
|
||||
node_uuid = instance_data.get('host')
|
||||
node_name = instance_data.get('host')
|
||||
# if instance state is building, don't update data model
|
||||
if instance_state == 'building':
|
||||
return
|
||||
|
||||
instance = self.get_or_create_instance(instance_uuid, node_uuid)
|
||||
instance = self.get_or_create_instance(instance_uuid, node_name)
|
||||
|
||||
self.update_instance(instance, payload)
|
||||
|
||||
@@ -237,9 +242,9 @@ class VersionedNotification(NovaNotification):
|
||||
instance = element.Instance(uuid=instance_uuid)
|
||||
self.cluster_data_model.add_instance(instance)
|
||||
|
||||
node_uuid = instance_data.get('host')
|
||||
if node_uuid:
|
||||
node = self.get_or_create_node(node_uuid)
|
||||
node_name = instance_data.get('host')
|
||||
if node_name:
|
||||
node = self.get_or_create_node(node_name)
|
||||
self.cluster_data_model.map_instance(instance, node)
|
||||
|
||||
self.update_instance(instance, payload)
|
||||
@@ -247,8 +252,8 @@ class VersionedNotification(NovaNotification):
|
||||
def instance_deleted(self, payload):
|
||||
instance_data = payload['nova_object.data']
|
||||
instance_uuid = instance_data['uuid']
|
||||
node_uuid = instance_data.get('host')
|
||||
instance = self.get_or_create_instance(instance_uuid, node_uuid)
|
||||
node_name = instance_data.get('host')
|
||||
instance = self.get_or_create_instance(instance_uuid, node_name)
|
||||
|
||||
try:
|
||||
node = self.get_or_create_node(instance_data['host'])
|
||||
|
||||
@@ -32,12 +32,12 @@ class ComputeScope(base.BaseScope):
|
||||
self._osc = osc
|
||||
self.wrapper = nova_helper.NovaHelper(osc=self._osc)
|
||||
|
||||
def remove_instance(self, cluster_model, instance, node_name):
|
||||
node = cluster_model.get_node_by_uuid(node_name)
|
||||
def remove_instance(self, cluster_model, instance, node_uuid):
|
||||
node = cluster_model.get_node_by_uuid(node_uuid)
|
||||
cluster_model.delete_instance(instance, node)
|
||||
|
||||
def update_exclude_instance(self, cluster_model, instance, node_name):
|
||||
node = cluster_model.get_node_by_uuid(node_name)
|
||||
def update_exclude_instance(self, cluster_model, instance, node_uuid):
|
||||
node = cluster_model.get_node_by_uuid(node_uuid)
|
||||
cluster_model.unmap_instance(instance, node)
|
||||
instance.update({"watcher_exclude": True})
|
||||
cluster_model.map_instance(instance, node)
|
||||
@@ -109,18 +109,18 @@ class ComputeScope(base.BaseScope):
|
||||
[project['uuid'] for project in resource['projects']])
|
||||
|
||||
def remove_nodes_from_model(self, nodes_to_remove, cluster_model):
|
||||
for node_uuid in nodes_to_remove:
|
||||
node = cluster_model.get_node_by_uuid(node_uuid)
|
||||
for node_name in nodes_to_remove:
|
||||
node = cluster_model.get_node_by_name(node_name)
|
||||
instances = cluster_model.get_node_instances(node)
|
||||
for instance in instances:
|
||||
self.remove_instance(cluster_model, instance, node_uuid)
|
||||
self.remove_instance(cluster_model, instance, node.uuid)
|
||||
cluster_model.remove_node(node)
|
||||
|
||||
def update_exclude_instance_in_model(
|
||||
self, instances_to_exclude, cluster_model):
|
||||
for instance_uuid in instances_to_exclude:
|
||||
try:
|
||||
node_name = cluster_model.get_node_by_instance_uuid(
|
||||
node_uuid = cluster_model.get_node_by_instance_uuid(
|
||||
instance_uuid).uuid
|
||||
except exception.ComputeResourceNotFound:
|
||||
LOG.warning("The following instance %s cannot be found. "
|
||||
@@ -131,7 +131,7 @@ class ComputeScope(base.BaseScope):
|
||||
self.update_exclude_instance(
|
||||
cluster_model,
|
||||
cluster_model.get_instance_by_uuid(instance_uuid),
|
||||
node_name)
|
||||
node_uuid)
|
||||
|
||||
def exclude_instances_with_given_metadata(
|
||||
self, instance_metadata, cluster_model, instances_to_remove):
|
||||
@@ -166,7 +166,8 @@ class ComputeScope(base.BaseScope):
|
||||
projects_to_exclude = []
|
||||
compute_scope = []
|
||||
found_nothing_flag = False
|
||||
model_hosts = list(cluster_model.get_all_compute_nodes().keys())
|
||||
model_hosts = [n.hostname for n in
|
||||
cluster_model.get_all_compute_nodes().values()]
|
||||
|
||||
if not self.scope:
|
||||
return cluster_model
|
||||
|
||||
@@ -449,8 +449,8 @@ class BaseStrategy(loadable.Loadable):
|
||||
source_node,
|
||||
destination_node):
|
||||
parameters = {'migration_type': migration_type,
|
||||
'source_node': source_node.uuid,
|
||||
'destination_node': destination_node.uuid,
|
||||
'source_node': source_node.hostname,
|
||||
'destination_node': destination_node.hostname,
|
||||
'resource_name': instance.name}
|
||||
self.solution.add_action(action_type=self.MIGRATION,
|
||||
resource_id=instance.uuid,
|
||||
|
||||
@@ -304,7 +304,7 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
|
||||
backup_node = self.input_parameters.get('backup_node')
|
||||
|
||||
# if no VMs in the maintenance_node, just maintain the compute node
|
||||
src_node = self.compute_model.get_node_by_uuid(maintenance_node)
|
||||
src_node = self.compute_model.get_node_by_name(maintenance_node)
|
||||
if len(self.compute_model.get_node_instances(src_node)) == 0:
|
||||
if (src_node.disabled_reason !=
|
||||
self.REASON_FOR_MAINTAINING):
|
||||
@@ -312,7 +312,7 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
|
||||
return
|
||||
|
||||
if backup_node:
|
||||
des_node = self.compute_model.get_node_by_uuid(backup_node)
|
||||
des_node = self.compute_model.get_node_by_name(backup_node)
|
||||
else:
|
||||
des_node = None
|
||||
|
||||
|
||||
@@ -177,9 +177,9 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
|
||||
node.hostname = hypervisor_node.hypervisor_hostname
|
||||
hypervisor_node = hypervisor_node.to_dict()
|
||||
compute_service = hypervisor_node.get('service', None)
|
||||
host_uuid = compute_service.get('host')
|
||||
host_name = compute_service.get('host')
|
||||
try:
|
||||
self.compute_model.get_node_by_uuid(host_uuid)
|
||||
self.compute_model.get_node_by_name(host_name)
|
||||
except exception.ComputeNodeNotFound:
|
||||
continue
|
||||
|
||||
|
||||
@@ -419,13 +419,13 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
'input_parameters'][
|
||||
'resource_id'] == instance_uuid)
|
||||
if len(actions) > 1:
|
||||
src_uuid = actions[0]['input_parameters']['source_node']
|
||||
dst_uuid = actions[-1]['input_parameters']['destination_node']
|
||||
src_name = actions[0]['input_parameters']['source_node']
|
||||
dst_name = actions[-1]['input_parameters']['destination_node']
|
||||
for a in actions:
|
||||
self.solution.actions.remove(a)
|
||||
self.number_of_migrations -= 1
|
||||
src_node = self.compute_model.get_node_by_uuid(src_uuid)
|
||||
dst_node = self.compute_model.get_node_by_uuid(dst_uuid)
|
||||
src_node = self.compute_model.get_node_by_name(src_name)
|
||||
dst_node = self.compute_model.get_node_by_name(dst_name)
|
||||
instance = self.compute_model.get_instance_by_uuid(
|
||||
instance_uuid)
|
||||
if self.compute_model.migrate_instance(
|
||||
|
||||
@@ -50,7 +50,7 @@ class TestChangeNovaServiceState(base.TestCase):
|
||||
self.addCleanup(m_nova_helper.stop)
|
||||
|
||||
self.input_parameters = {
|
||||
baction.BaseAction.RESOURCE_ID: "compute-1",
|
||||
"resource_name": "compute-1",
|
||||
"state": element.ServiceState.ENABLED.value,
|
||||
}
|
||||
self.action = change_nova_service_state.ChangeNovaServiceState(
|
||||
|
||||
@@ -78,7 +78,7 @@ class TestNovaClusterDataModelCollector(base.TestCase):
|
||||
disabled_reason='',
|
||||
)
|
||||
minimal_node = dict(
|
||||
id=1337,
|
||||
id='160a0e7b-8b0b-4854-8257-9c71dff4efcc',
|
||||
hypervisor_hostname='test_hostname',
|
||||
state='TEST_STATE',
|
||||
status='TEST_STATUS',
|
||||
@@ -149,7 +149,7 @@ class TestNovaClusterDataModelCollector(base.TestCase):
|
||||
node = list(compute_nodes.values())[0]
|
||||
instance = list(instances.values())[0]
|
||||
|
||||
self.assertEqual(node.uuid, 'test_hostname')
|
||||
self.assertEqual(node.uuid, '160a0e7b-8b0b-4854-8257-9c71dff4efcc')
|
||||
self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')
|
||||
|
||||
memory_total = node.memory - node.memory_mb_reserved
|
||||
|
||||
@@ -112,9 +112,9 @@ class FakeCeilometerMetrics(object):
|
||||
|
||||
uuid = resource.uuid
|
||||
mock = {}
|
||||
mock['Node_0'] = 30
|
||||
mock["fa69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 30
|
||||
# use a big value to make sure it exceeds threshold
|
||||
mock['Node_1'] = 100
|
||||
mock["af69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 100
|
||||
if uuid not in mock.keys():
|
||||
mock[uuid] = 100
|
||||
return float(mock[str(uuid)])
|
||||
@@ -189,6 +189,8 @@ class FakeCeilometerMetrics(object):
|
||||
# node 0
|
||||
measurements['Node_0_hostname_0'] = 7
|
||||
measurements['Node_1_hostname_1'] = 7
|
||||
measurements['fa69c544-906b-4a6a-a9c6-c1f7a8078c73_hostname_0'] = 7
|
||||
measurements['af69c544-906b-4a6a-a9c6-c1f7a8078c73_hostname_1'] = 7
|
||||
# node 1
|
||||
measurements['Node_2_hostname_2'] = 80
|
||||
# node 2
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
<ModelRoot>
|
||||
<ComputeNode uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance watcher_exclude="False" state="active" name="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
|
||||
<ComputeNode uuid="fa69c544-906b-4a6a-a9c6-c1f7a8078c73" status="enabled" state="up" id="0" hostname="hostname_0" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance watcher_exclude="False" state="active" human_id="" name="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance watcher_exclude="False" state="active" name="" uuid="a4cab39b-9828-413a-bf88-f76921bf1517" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
|
||||
<ComputeNode uuid="af69c544-906b-4a6a-a9c6-c1f7a8078c73" status="enabled" state="up" id="1" hostname="hostname_1" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance watcher_exclude="False" state="active" human_id="" name="" uuid="a4cab39b-9828-413a-bf88-f76921bf1517" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}' project_id="91FFFE30-78A0-4152-ACD2-8310FF274DC9"/>
|
||||
</ComputeNode>
|
||||
</ModelRoot>
|
||||
|
||||
@@ -108,9 +108,10 @@ class FakeGnocchiMetrics(object):
|
||||
uuid = resource.uuid
|
||||
|
||||
mock = {}
|
||||
mock['Node_0'] = 30
|
||||
mock["Node_0"] = 30
|
||||
mock["fa69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 30
|
||||
# use a big value to make sure it exceeds threshold
|
||||
mock['Node_1'] = 100
|
||||
mock["af69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 100
|
||||
|
||||
return mock[str(uuid)]
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"nova_object.name": "ServiceStatusPayload",
|
||||
"nova_object.version": "1.0",
|
||||
"nova_object.data": {
|
||||
"host": "Node_0",
|
||||
"host": "hostname_0",
|
||||
"disabled": true,
|
||||
"last_seen_up": "2012-10-29T13:42:05Z",
|
||||
"binary": "nova-compute",
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"nova_object.name": "ServiceStatusPayload",
|
||||
"nova_object.version": "1.0",
|
||||
"nova_object.data": {
|
||||
"host": "Node_0",
|
||||
"host": "hostname_0",
|
||||
"disabled": false,
|
||||
"last_seen_up": "2012-10-29T13:42:05Z",
|
||||
"binary": "nova-compute",
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"last_seen_up": null,
|
||||
"report_count": 0,
|
||||
"topic": "compute",
|
||||
"uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||
"uuid": "fafac544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||
"version": 23
|
||||
},
|
||||
"nova_object.name": "ServiceStatusPayload",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"disabled": false,
|
||||
"disabled_reason": null,
|
||||
"forced_down": false,
|
||||
"host": "Node_0",
|
||||
"host": "hostname_0",
|
||||
"last_seen_up": null,
|
||||
"report_count": 0,
|
||||
"topic": "compute",
|
||||
|
||||
@@ -1,21 +1,23 @@
|
||||
{
|
||||
"priority": "INFO",
|
||||
"payload": {
|
||||
"nova_object.namespace": "nova",
|
||||
"nova_object.name": "ServiceStatusPayload",
|
||||
"nova_object.version": "1.0",
|
||||
"nova_object.data": {
|
||||
"host": "host1",
|
||||
"disabled": false,
|
||||
"last_seen_up": "2012-10-29T13:42:05Z",
|
||||
"binary": "nova-compute",
|
||||
"topic": "compute",
|
||||
"disabled_reason": null,
|
||||
"report_count": 1,
|
||||
"forced_down": false,
|
||||
"version": 15
|
||||
}
|
||||
},
|
||||
"event_type": "service.update",
|
||||
"payload": {
|
||||
"nova_object.data": {
|
||||
"availability_zone": null,
|
||||
"binary": "nova-compute",
|
||||
"disabled": false,
|
||||
"disabled_reason": null,
|
||||
"forced_down": false,
|
||||
"host": "host1",
|
||||
"last_seen_up": "2012-10-29T13:42:05Z",
|
||||
"report_count": 1,
|
||||
"topic": "compute",
|
||||
"uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||
"version": 23
|
||||
},
|
||||
"nova_object.name": "ServiceStatusPayload",
|
||||
"nova_object.namespace": "nova",
|
||||
"nova_object.version": "1.1"
|
||||
},
|
||||
"priority": "INFO",
|
||||
"publisher_id": "nova-compute:host1"
|
||||
}
|
||||
|
||||
@@ -122,8 +122,8 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
self.fake_cdmc.cluster_data_model = compute_model
|
||||
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
||||
|
||||
node0_uuid = 'Node_0'
|
||||
node0 = compute_model.get_node_by_uuid(node0_uuid)
|
||||
node0_name = "hostname_0"
|
||||
node0 = compute_model.get_node_by_name(node0_name)
|
||||
|
||||
message = self.load_message('scenario3_service-update-disabled.json')
|
||||
|
||||
@@ -139,7 +139,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
|
||||
self.assertEqual('Node_0', node0.hostname)
|
||||
self.assertEqual("hostname_0", node0.hostname)
|
||||
self.assertEqual(element.ServiceState.OFFLINE.value, node0.state)
|
||||
self.assertEqual(element.ServiceState.DISABLED.value, node0.status)
|
||||
|
||||
@@ -153,7 +153,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
|
||||
self.assertEqual('Node_0', node0.hostname)
|
||||
self.assertEqual("hostname_0", node0.hostname)
|
||||
self.assertEqual(element.ServiceState.ONLINE.value, node0.state)
|
||||
self.assertEqual(element.ServiceState.ENABLED.value, node0.status)
|
||||
|
||||
@@ -161,12 +161,11 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
def test_nova_service_create(self, m_nova_helper_cls):
|
||||
m_get_compute_node_by_hostname = mock.Mock(
|
||||
side_effect=lambda uuid: mock.Mock(
|
||||
name='m_get_compute_node_by_hostname',
|
||||
id=3,
|
||||
name='m_get_compute_node_by_uuid',
|
||||
id="fafac544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||
hypervisor_hostname="host2",
|
||||
state='up',
|
||||
status='enabled',
|
||||
uuid=uuid,
|
||||
memory_mb=7777,
|
||||
vcpus=42,
|
||||
free_disk_gb=974,
|
||||
@@ -179,11 +178,11 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
self.fake_cdmc.cluster_data_model = compute_model
|
||||
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
||||
|
||||
new_node_uuid = 'host2'
|
||||
new_node_name = "host2"
|
||||
|
||||
self.assertRaises(
|
||||
exception.ComputeNodeNotFound,
|
||||
compute_model.get_node_by_uuid, new_node_uuid)
|
||||
compute_model.get_node_by_name, new_node_name)
|
||||
|
||||
message = self.load_message('service-create.json')
|
||||
handler.info(
|
||||
@@ -194,7 +193,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
|
||||
new_node = compute_model.get_node_by_uuid(new_node_uuid)
|
||||
new_node = compute_model.get_node_by_name(new_node_name)
|
||||
self.assertEqual('host2', new_node.hostname)
|
||||
self.assertEqual(element.ServiceState.ONLINE.value, new_node.state)
|
||||
self.assertEqual(element.ServiceState.ENABLED.value, new_node.status)
|
||||
@@ -204,10 +203,10 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
self.fake_cdmc.cluster_data_model = compute_model
|
||||
handler = novanotification.VersionedNotification(self.fake_cdmc)
|
||||
|
||||
node0_uuid = 'Node_0'
|
||||
node0_name = "hostname_0"
|
||||
|
||||
# Before
|
||||
self.assertTrue(compute_model.get_node_by_uuid(node0_uuid))
|
||||
self.assertTrue(compute_model.get_node_by_name(node0_name))
|
||||
|
||||
message = self.load_message('service-delete.json')
|
||||
handler.info(
|
||||
@@ -221,7 +220,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
# After
|
||||
self.assertRaises(
|
||||
exception.ComputeNodeNotFound,
|
||||
compute_model.get_node_by_uuid, node0_uuid)
|
||||
compute_model.get_node_by_name, node0_name)
|
||||
|
||||
def test_nova_instance_update(self):
|
||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||
@@ -276,11 +275,10 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
m_get_compute_node_by_hostname = mock.Mock(
|
||||
side_effect=lambda uuid: mock.Mock(
|
||||
name='m_get_compute_node_by_hostname',
|
||||
id=3,
|
||||
id='669966bd-a45c-4e1c-9d57-3054899a3ec7',
|
||||
hypervisor_hostname="Node_2",
|
||||
state='up',
|
||||
status='enabled',
|
||||
uuid=uuid,
|
||||
memory_mb=7777,
|
||||
vcpus=42,
|
||||
free_disk_gb=974,
|
||||
@@ -312,7 +310,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
self.assertEqual(512, instance0.memory)
|
||||
|
||||
m_get_compute_node_by_hostname.assert_called_once_with('Node_2')
|
||||
node_2 = compute_model.get_node_by_uuid('Node_2')
|
||||
node_2 = compute_model.get_node_by_name('Node_2')
|
||||
self.assertEqual(7777, node_2.memory)
|
||||
self.assertEqual(42, node_2.vcpus)
|
||||
self.assertEqual(974, node_2.disk)
|
||||
@@ -463,7 +461,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_0', node.uuid)
|
||||
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||
message = self.load_message(
|
||||
'instance-live_migration_force_complete-end.json')
|
||||
handler.info(
|
||||
@@ -474,7 +472,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_1', node.uuid)
|
||||
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||
|
||||
def test_live_migrated_end(self):
|
||||
@@ -484,7 +482,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_0', node.uuid)
|
||||
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||
message = self.load_message(
|
||||
'instance-live_migration_post-end.json')
|
||||
handler.info(
|
||||
@@ -495,7 +493,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_1', node.uuid)
|
||||
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||
|
||||
def test_nova_instance_lock(self):
|
||||
@@ -605,7 +603,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_0', node.uuid)
|
||||
self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73", node.uuid)
|
||||
message = self.load_message('instance-rebuild-end.json')
|
||||
handler.info(
|
||||
ctxt=self.context,
|
||||
@@ -615,7 +613,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_1', node.uuid)
|
||||
self.assertEqual('hostname_0', node.hostname)
|
||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||
|
||||
def test_nova_instance_rescue(self):
|
||||
@@ -659,7 +657,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_0', node.uuid)
|
||||
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||
message = self.load_message(
|
||||
'instance-resize_confirm-end.json')
|
||||
handler.info(
|
||||
@@ -670,7 +668,7 @@ class TestNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_1', node.uuid)
|
||||
self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid)
|
||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||
|
||||
def test_nova_instance_restore_end(self):
|
||||
|
||||
@@ -39,7 +39,7 @@ class TestComputeScope(base.TestCase):
|
||||
audit_scope = fake_scopes.fake_scope_1
|
||||
mock_zone_list.return_value = [
|
||||
mock.Mock(zone='AZ{0}'.format(i),
|
||||
host={'Node_{0}'.format(i): {}})
|
||||
host={'hostname_{0}'.format(i): {}})
|
||||
for i in range(4)]
|
||||
model = compute.ComputeScope(audit_scope, mock.Mock(),
|
||||
osc=mock.Mock()).get_scoped_model(cluster)
|
||||
@@ -240,7 +240,7 @@ class TestComputeScope(base.TestCase):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
compute.ComputeScope([], mock.Mock(),
|
||||
osc=mock.Mock()).remove_nodes_from_model(
|
||||
['Node_1', 'Node_2'], model)
|
||||
['hostname_1', 'hostname_2'], model)
|
||||
expected_edges = [
|
||||
('INSTANCE_0', 'Node_0'),
|
||||
('INSTANCE_1', 'Node_0'),
|
||||
@@ -290,7 +290,7 @@ class TestComputeScope(base.TestCase):
|
||||
audit_scope.extend(fake_scopes.fake_scope_2)
|
||||
mock_zone_list.return_value = [
|
||||
mock.Mock(zone='AZ{0}'.format(i),
|
||||
host={'Node_{0}'.format(i): {}})
|
||||
host={'hostname_{0}'.format(i): {}})
|
||||
for i in range(4)]
|
||||
model = compute.ComputeScope(audit_scope, mock.Mock(),
|
||||
osc=mock.Mock()).get_scoped_model(cluster)
|
||||
|
||||
@@ -212,8 +212,8 @@ class TestHostMaintenance(TestBaseStrategy):
|
||||
result = self.strategy.pre_execute()
|
||||
self.assertIsNone(result)
|
||||
|
||||
self.strategy.input_parameters = {"maintenance_node": 'Node_2',
|
||||
"backup_node": 'Node_3'}
|
||||
self.strategy.input_parameters = {"maintenance_node": 'hostname_2',
|
||||
"backup_node": 'hostname_3'}
|
||||
self.strategy.do_execute()
|
||||
|
||||
expected = [{'action_type': 'change_nova_service_state',
|
||||
|
||||
@@ -63,7 +63,7 @@ class TestNoisyNeighbor(TestBaseStrategy):
|
||||
def test_calc_used_resource(self):
|
||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
node = model.get_node_by_uuid('Node_0')
|
||||
node = model.get_node_by_uuid("fa69c544-906b-4a6a-a9c6-c1f7a8078c73")
|
||||
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
|
||||
node)
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ class TestOutletTempControl(TestBaseStrategy):
|
||||
def test_calc_used_resource(self):
|
||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
node = model.get_node_by_uuid('Node_0')
|
||||
node = model.get_node_by_uuid("fa69c544-906b-4a6a-a9c6-c1f7a8078c73")
|
||||
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
|
||||
node)
|
||||
|
||||
@@ -73,15 +73,18 @@ class TestOutletTempControl(TestBaseStrategy):
|
||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
|
||||
self.assertEqual('Node_1', n1[0]['compute_node'].uuid)
|
||||
self.assertEqual('Node_0', n2[0]['compute_node'].uuid)
|
||||
self.assertEqual("af69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||
n1[0]['compute_node'].uuid)
|
||||
self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||
n2[0]['compute_node'].uuid)
|
||||
|
||||
def test_choose_instance_to_migrate(self):
|
||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
|
||||
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
|
||||
self.assertEqual('Node_1', instance_to_mig[0].uuid)
|
||||
self.assertEqual('af69c544-906b-4a6a-a9c6-c1f7a8078c73',
|
||||
instance_to_mig[0].uuid)
|
||||
self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517',
|
||||
instance_to_mig[1].uuid)
|
||||
|
||||
@@ -92,7 +95,8 @@ class TestOutletTempControl(TestBaseStrategy):
|
||||
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
|
||||
dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1])
|
||||
self.assertEqual(1, len(dest_hosts))
|
||||
self.assertEqual('Node_0', dest_hosts[0]['compute_node'].uuid)
|
||||
self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73",
|
||||
dest_hosts[0]['compute_node'].uuid)
|
||||
|
||||
def test_execute_no_workload(self):
|
||||
model = self.fake_c_cluster.\
|
||||
|
||||
@@ -76,9 +76,9 @@ class TestSavingEnergy(TestBaseStrategy):
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 2, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
'running_vms': 2, 'service': {'host': 'hostname_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 2, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
||||
'running_vms': 2, 'service': {'host': 'hostname_1'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
self.strategy.get_hosts_pool()
|
||||
@@ -101,9 +101,9 @@ class TestSavingEnergy(TestBaseStrategy):
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
||||
'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
self.strategy.get_hosts_pool()
|
||||
@@ -126,9 +126,9 @@ class TestSavingEnergy(TestBaseStrategy):
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
||||
'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
self.strategy.get_hosts_pool()
|
||||
@@ -151,9 +151,11 @@ class TestSavingEnergy(TestBaseStrategy):
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
'running_vms': 0, 'service': {'host': 'hostname_0'},
|
||||
'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_10'}, 'state': 'up'}
|
||||
'running_vms': 0, 'service': {'host': 'hostname_10'},
|
||||
'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
self.strategy.get_hosts_pool()
|
||||
@@ -196,9 +198,9 @@ class TestSavingEnergy(TestBaseStrategy):
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
||||
'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
model = self.fake_c_cluster.generate_scenario_1()
|
||||
|
||||
@@ -124,8 +124,8 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
||||
self.strategy.add_migration(instance, n1, n2)
|
||||
self.assertEqual(1, len(self.strategy.solution.actions))
|
||||
expected = {'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.uuid,
|
||||
'source_node': n1.uuid,
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'source_node': n1.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': instance.uuid,
|
||||
'resource_name': instance.name}}
|
||||
@@ -147,8 +147,8 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
||||
self.strategy.add_migration(instance, n1, n2)
|
||||
self.assertEqual(1, len(self.strategy.solution.actions))
|
||||
expected = {'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.uuid,
|
||||
'source_node': n1.uuid,
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'source_node': n1.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': instance.uuid,
|
||||
'resource_name': instance.name}}
|
||||
@@ -255,8 +255,8 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
self.strategy.consolidation_phase(cc)
|
||||
expected = [{'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.uuid,
|
||||
'source_node': n1.uuid,
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'source_node': n1.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': instance.uuid,
|
||||
'resource_name': instance.name}}]
|
||||
@@ -273,9 +273,9 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
||||
n1 = model.get_node_by_uuid('Node_0')
|
||||
self.strategy.get_relative_cluster_utilization = mock.MagicMock()
|
||||
self.strategy.do_execute()
|
||||
n2_uuid = self.strategy.solution.actions[0][
|
||||
n2_name = self.strategy.solution.actions[0][
|
||||
'input_parameters']['destination_node']
|
||||
n2 = model.get_node_by_uuid(n2_uuid)
|
||||
n2 = model.get_node_by_name(n2_name)
|
||||
n3_uuid = self.strategy.solution.actions[2][
|
||||
'input_parameters']['resource_id']
|
||||
n3 = model.get_node_by_uuid(n3_uuid)
|
||||
@@ -283,14 +283,14 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
||||
'input_parameters']['resource_id']
|
||||
n4 = model.get_node_by_uuid(n4_uuid)
|
||||
expected = [{'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.uuid,
|
||||
'source_node': n1.uuid,
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'source_node': n1.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'INSTANCE_3',
|
||||
'resource_name': ''}},
|
||||
{'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.uuid,
|
||||
'source_node': n1.uuid,
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'source_node': n1.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'INSTANCE_1',
|
||||
'resource_name': ''}},
|
||||
@@ -330,31 +330,31 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
self.strategy.offload_phase(cc)
|
||||
expected = [{'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.uuid,
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'INSTANCE_6',
|
||||
'resource_name': '',
|
||||
'source_node': n1.uuid}},
|
||||
'source_node': n1.hostname}},
|
||||
{'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.uuid,
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'INSTANCE_7',
|
||||
'resource_name': '',
|
||||
'source_node': n1.uuid}},
|
||||
'source_node': n1.hostname}},
|
||||
{'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.uuid,
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'INSTANCE_8',
|
||||
'resource_name': '',
|
||||
'source_node': n1.uuid}}]
|
||||
'source_node': n1.hostname}}]
|
||||
self.assertEqual(expected, self.strategy.solution.actions)
|
||||
self.strategy.consolidation_phase(cc)
|
||||
expected.append({'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n1.uuid,
|
||||
'input_parameters': {'destination_node': n1.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'INSTANCE_7',
|
||||
'resource_name': '',
|
||||
'source_node': n2.uuid}})
|
||||
'source_node': n2.hostname}})
|
||||
self.assertEqual(expected, self.strategy.solution.actions)
|
||||
self.strategy.optimize_solution()
|
||||
del expected[3]
|
||||
|
||||
Reference in New Issue
Block a user