Refactored the compute model and its elements

In this changeset, I refactored the whole Watcher codebase to
adopt a naming convention about the various elements of the
Compute model so that it reflects the same naming convention
adopted by Nova.

Change-Id: I28adba5e1f27175f025330417b072686134d5f51
Partially-Implements: blueprint cluster-model-objects-wrapper
This commit is contained in:
Vincent Françoise
2016-07-06 17:44:29 +02:00
parent dbde1afea0
commit 31c37342cd
53 changed files with 1865 additions and 1803 deletions

View File

@@ -35,7 +35,7 @@ class TestNovaClusterDataModelCollector(base.TestCase):
def test_nova_cdmc_execute(self, m_nova_helper_cls):
m_nova_helper = mock.Mock()
m_nova_helper_cls.return_value = m_nova_helper
fake_hypervisor = mock.Mock(
fake_compute_node = mock.Mock(
service={'id': 123},
hypervisor_hostname='test_hostname',
memory_mb=333,
@@ -45,19 +45,19 @@ class TestNovaClusterDataModelCollector(base.TestCase):
state='TEST_STATE',
status='TEST_STATUS',
)
fake_vm = mock.Mock(
fake_instance = mock.Mock(
id='ef500f7e-dac8-470f-960c-169486fce71b',
state=mock.Mock(**{'OS-EXT-STS:vm_state': 'VM_STATE'}),
state=mock.Mock(**{'OS-EXT-STS:instance_state': 'VM_STATE'}),
flavor={'ram': 333, 'disk': 222, 'vcpus': 4},
)
m_nova_helper.get_hypervisors_list.return_value = [fake_hypervisor]
m_nova_helper.get_vms_by_hypervisor.return_value = [fake_vm]
m_nova_helper.get_compute_node_list.return_value = [fake_compute_node]
m_nova_helper.get_instances_by_node.return_value = [fake_instance]
m_nova_helper.nova.services.find.return_value = mock.Mock(
host='test_hostname')
def m_get_flavor_instance(vm, cache):
vm.flavor = {'ram': 333, 'disk': 222, 'vcpus': 4}
return vm
def m_get_flavor_instance(instance, cache):
instance.flavor = {'ram': 333, 'disk': 222, 'vcpus': 4}
return instance
m_nova_helper.get_flavor_instance.side_effect = m_get_flavor_instance
@@ -69,14 +69,14 @@ class TestNovaClusterDataModelCollector(base.TestCase):
model = nova_cdmc.execute()
hypervisors = model.get_all_hypervisors()
vms = model.get_all_vms()
compute_nodes = model.get_all_compute_nodes()
instances = model.get_all_instances()
self.assertEqual(1, len(hypervisors))
self.assertEqual(1, len(vms))
self.assertEqual(1, len(compute_nodes))
self.assertEqual(1, len(instances))
hypervisor = list(hypervisors.values())[0]
vm = list(vms.values())[0]
node = list(compute_nodes.values())[0]
instance = list(instances.values())[0]
self.assertEqual(hypervisor.uuid, 'test_hostname')
self.assertEqual(vm.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')
self.assertEqual(node.uuid, 'test_hostname')
self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')

View File

@@ -17,13 +17,13 @@
# limitations under the License.
#
from watcher.decision_engine.model import disk_info
from watcher.decision_engine.model import element
from watcher.tests import base
class TestDiskInfo(base.BaseTestCase):
class TestDiskInfo(base.TestCase):
def test_all(self):
disk_information = disk_info.DiskInfo()
disk_information = element.DiskInfo()
disk_information.set_size(1024)
self.assertEqual(1024, disk_information.get_size())

View File

@@ -16,15 +16,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from watcher.decision_engine.model import vm as vm_model
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.tests import base
class TestVm(base.BaseTestCase):
class TestInstance(base.TestCase):
def test_namedelement(self):
vm = vm_model.VM()
vm.state = vm_state.VMState.ACTIVE
self.assertEqual(vm_state.VMState.ACTIVE, vm.state)
vm.human_id = "human_05"
self.assertEqual("human_05", vm.human_id)
instance = element.Instance()
instance.state = element.InstanceState.ACTIVE
self.assertEqual(element.InstanceState.ACTIVE, instance.state)
instance.human_id = "human_05"
self.assertEqual("human_05", instance.human_id)

View File

@@ -18,92 +18,99 @@
#
import uuid
from watcher.decision_engine.model import hypervisor as modelhyp
from watcher.decision_engine.model import vm_state
from watcher.decision_engine.model import element
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies import \
faker_cluster_state
class TestMapping(base.BaseTestCase):
class TestMapping(base.TestCase):
VM1_UUID = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
VM2_UUID = "a4cab39b-9828-413a-bf88-f76921bf1517"
INST1_UUID = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
INST2_UUID = "a4cab39b-9828-413a-bf88-f76921bf1517"
def setUp(self):
super(TestMapping, self).setUp()
self.fake_cluster = faker_cluster_state.FakerModelCollector()
def test_get_node_from_vm(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
def test_get_node_from_instance(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms()
keys = list(vms.keys())
vm = vms[keys[0]]
if vm.uuid != self.VM1_UUID:
vm = vms[keys[1]]
node = model.mapping.get_node_from_vm(vm)
instances = model.get_all_instances()
keys = list(instances.keys())
instance = instances[keys[0]]
if instance.uuid != self.INST1_UUID:
instance = instances[keys[1]]
node = model.mapping.get_node_from_instance(instance)
self.assertEqual('Node_0', node.uuid)
def test_get_node_from_vm_id(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
def test_get_node_from_instance_id(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
hyps = model.mapping.get_node_vms_from_id("BLABLABLA")
self.assertEqual(0, hyps.__len__())
nodes = model.mapping.get_node_instances_from_id("BLABLABLA")
self.assertEqual(0, len(nodes))
def test_get_all_vms(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
def test_get_all_instances(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
vms = model.get_all_vms()
self.assertEqual(2, vms.__len__())
self.assertEqual(vm_state.VMState.ACTIVE.value,
vms[self.VM1_UUID].state)
self.assertEqual(self.VM1_UUID, vms[self.VM1_UUID].uuid)
self.assertEqual(vm_state.VMState.ACTIVE.value,
vms[self.VM2_UUID].state)
self.assertEqual(self.VM2_UUID, vms[self.VM2_UUID].uuid)
instances = model.get_all_instances()
self.assertEqual(2, len(instances))
self.assertEqual(element.InstanceState.ACTIVE.value,
instances[self.INST1_UUID].state)
self.assertEqual(self.INST1_UUID, instances[self.INST1_UUID].uuid)
self.assertEqual(element.InstanceState.ACTIVE.value,
instances[self.INST2_UUID].state)
self.assertEqual(self.INST2_UUID, instances[self.INST2_UUID].uuid)
def test_get_mapping(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
mapping_vm = model.mapping.get_mapping_vm()
self.assertEqual(2, mapping_vm.__len__())
self.assertEqual('Node_0', mapping_vm[self.VM1_UUID])
self.assertEqual('Node_1', mapping_vm[self.VM2_UUID])
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
instance_mapping = model.mapping.instance_mapping
self.assertEqual(2, len(instance_mapping))
self.assertEqual('Node_0', instance_mapping[self.INST1_UUID])
self.assertEqual('Node_1', instance_mapping[self.INST2_UUID])
def test_migrate_vm(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
vms = model.get_all_vms()
keys = list(vms.keys())
vm0 = vms[keys[0]]
hyp0 = model.mapping.get_node_from_vm_id(vm0.uuid)
vm1 = vms[keys[1]]
hyp1 = model.mapping.get_node_from_vm_id(vm1.uuid)
def test_migrate_instance(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
instances = model.get_all_instances()
keys = list(instances.keys())
instance0 = instances[keys[0]]
node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
instance1 = instances[keys[1]]
node1 = model.mapping.get_node_from_instance_id(instance1.uuid)
self.assertEqual(False, model.mapping.migrate_vm(vm1, hyp1, hyp1))
self.assertEqual(False, model.mapping.migrate_vm(vm1, hyp0, hyp0))
self.assertEqual(True, model.mapping.migrate_vm(vm1, hyp1, hyp0))
self.assertEqual(True, model.mapping.migrate_vm(vm1, hyp0, hyp1))
self.assertEqual(
False,
model.mapping.migrate_instance(instance1, node1, node1))
self.assertEqual(
False,
model.mapping.migrate_instance(instance1, node0, node0))
self.assertEqual(
True,
model.mapping.migrate_instance(instance1, node1, node0))
self.assertEqual(
True,
model.mapping.migrate_instance(instance1, node0, node1))
def test_unmap_from_id_log_warning(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
vms = model.get_all_vms()
keys = list(vms.keys())
vm0 = vms[keys[0]]
id = "{0}".format(uuid.uuid4())
hypervisor = modelhyp.Hypervisor()
hypervisor.uuid = id
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
instances = model.get_all_instances()
keys = list(instances.keys())
instance0 = instances[keys[0]]
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.mapping.unmap_from_id(hypervisor.uuid, vm0.uuid)
# self.assertEqual(len(model.mapping.get_node_vms_from_id(
# hypervisor.uuid)), 1)
model.mapping.unmap_from_id(node.uuid, instance0.uuid)
# self.assertEqual(len(model.mapping.get_node_instances_from_id(
# node.uuid)), 1)
def test_unmap_from_id(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
vms = model.get_all_vms()
keys = list(vms.keys())
vm0 = vms[keys[0]]
hyp0 = model.mapping.get_node_from_vm_id(vm0.uuid)
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
instances = model.get_all_instances()
keys = list(instances.keys())
instance0 = instances[keys[0]]
node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
model.mapping.unmap_from_id(hyp0.uuid, vm0.uuid)
self.assertEqual(0, len(model.mapping.get_node_vms_from_id(
hyp0.uuid)))
model.mapping.unmap_from_id(node0.uuid, instance0.uuid)
self.assertEqual(0, len(model.mapping.get_node_instances_from_id(
node0.uuid)))

View File

@@ -19,120 +19,107 @@
import uuid
from watcher.common import exception
from watcher.decision_engine.model import hypervisor as hypervisor_model
from watcher.decision_engine.model import hypervisor_state
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \
import faker_cluster_state
class TestModel(base.BaseTestCase):
class TestModel(base.TestCase):
def test_model(self):
fake_cluster = faker_cluster_state.FakerModelCollector()
model = fake_cluster.generate_scenario_1()
self.assertEqual(5, len(model._hypervisors))
self.assertEqual(35, len(model._vms))
self.assertEqual(5, len(model.get_mapping().get_mapping()))
self.assertEqual(5, len(model._nodes))
self.assertEqual(35, len(model._instances))
self.assertEqual(5, len(model.mapping.get_mapping()))
def test_add_hypervisor(self):
def test_add_node(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
self.assertEqual(hypervisor, model.get_hypervisor_from_id(id))
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
self.assertEqual(node, model.get_node_from_id(id_))
def test_delete_hypervisor(self):
def test_delete_node(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
self.assertEqual(hypervisor, model.get_hypervisor_from_id(id))
model.remove_hypervisor(hypervisor)
self.assertRaises(exception.HypervisorNotFound,
model.get_hypervisor_from_id, id)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
self.assertEqual(node, model.get_node_from_id(id_))
model.remove_node(node)
self.assertRaises(exception.ComputeNodeNotFound,
model.get_node_from_id, id_)
def test_get_all_hypervisors(self):
def test_get_all_compute_nodes(self):
model = model_root.ModelRoot()
for i in range(10):
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
all_hypervisors = model.get_all_hypervisors()
for id in all_hypervisors:
hyp = model.get_hypervisor_from_id(id)
model.assert_hypervisor(hyp)
for _ in range(10):
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
all_nodes = model.get_all_compute_nodes()
for id_ in all_nodes:
node = model.get_node_from_id(id_)
model.assert_node(node)
def test_set_get_state_hypervisors(self):
def test_set_get_state_nodes(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
self.assertIsInstance(hypervisor.state,
hypervisor_state.HypervisorState)
self.assertIsInstance(node.state, element.ServiceState)
hyp = model.get_hypervisor_from_id(id)
hyp.state = hypervisor_state.HypervisorState.OFFLINE
self.assertIsInstance(hyp.state, hypervisor_state.HypervisorState)
node = model.get_node_from_id(id_)
node.state = element.ServiceState.OFFLINE
self.assertIsInstance(node.state, element.ServiceState)
# /watcher/decision_engine/framework/model/hypervisor.py
# set_state accept any char chain.
# verification (IsInstance) should be used in the function
# hyp.set_state('blablabla')
# self.assertEqual(hyp.get_state(), 'blablabla')
# self.assertIsInstance(hyp.get_state(), HypervisorState)
# def test_get_all_vms(self):
# model = ModelRoot()
# vms = model.get_all_vms()
# self.assert(len(model._vms))
def test_hypervisor_from_id_raise(self):
def test_node_from_id_raise(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
id2 = "{0}".format(uuid.uuid4())
self.assertRaises(exception.HypervisorNotFound,
model.get_hypervisor_from_id, id2)
self.assertRaises(exception.ComputeNodeNotFound,
model.get_node_from_id, id2)
def test_remove_hypervisor_raise(self):
def test_remove_node_raise(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
id2 = "{0}".format(uuid.uuid4())
hypervisor2 = hypervisor_model.Hypervisor()
hypervisor2.uuid = id2
node2 = element.ComputeNode()
node2.uuid = id2
self.assertRaises(exception.HypervisorNotFound,
model.remove_hypervisor, hypervisor2)
self.assertRaises(exception.ComputeNodeNotFound,
model.remove_node, node2)
def test_assert_hypervisor_raise(self):
def test_assert_node_raise(self):
model = model_root.ModelRoot()
id = "{0}".format(uuid.uuid4())
hypervisor = hypervisor_model.Hypervisor()
hypervisor.uuid = id
model.add_hypervisor(hypervisor)
id_ = "{0}".format(uuid.uuid4())
node = element.ComputeNode()
node.uuid = id_
model.add_node(node)
self.assertRaises(exception.IllegalArgumentException,
model.assert_hypervisor, "objet_qcq")
model.assert_node, "objet_qcq")
def test_vm_from_id_raise(self):
def test_instance_from_id_raise(self):
fake_cluster = faker_cluster_state.FakerModelCollector()
model = fake_cluster.generate_scenario_1()
self.assertRaises(exception.InstanceNotFound,
model.get_vm_from_id, "valeur_qcq")
model.get_instance_from_id, "valeur_qcq")
def test_assert_vm_raise(self):
def test_assert_instance_raise(self):
model = model_root.ModelRoot()
self.assertRaises(exception.IllegalArgumentException,
model.assert_vm, "valeur_qcq")
model.assert_instance, "valeur_qcq")

View File

@@ -1,32 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from watcher.decision_engine.model import compute_resource
from watcher.tests import base
class TestNamedElement(base.BaseTestCase):
def test_namedelement(self):
id = compute_resource.ComputeResource()
id.uuid = "BLABLABLA"
self.assertEqual("BLABLABLA", id.uuid)
def test_set_get_human_id(self):
id = compute_resource.ComputeResource()
id.human_id = "BLABLABLA"
self.assertEqual("BLABLABLA", id.human_id)

View File

@@ -50,7 +50,7 @@ class SolutionFakerSingleHyp(object):
current_state_cluster = faker_cluster_state.FakerModelCollector()
sercon = strategies.BasicConsolidation(config=mock.Mock())
sercon._compute_model = (
current_state_cluster.generate_scenario_3_with_2_hypervisors())
current_state_cluster.generate_scenario_3_with_2_nodes())
sercon.ceilometer = mock.MagicMock(
get_statistics=metrics.mock_get_statistics)
@@ -66,8 +66,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock())
parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"source_node": "server1",
"destination_node": "server2",
}
solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@@ -93,8 +93,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock())
parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"source_node": "server1",
"destination_node": "server2",
}
solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@@ -125,8 +125,8 @@ class TestActionScheduling(base.DbTestCase):
goal=mock.Mock(), strategy=mock.Mock())
parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"src_uuid_node": "server1",
"dst_uuid_node": "server2",
}
solution.add_action(action_type="migrate",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",

View File

@@ -20,13 +20,14 @@ from watcher.decision_engine.solution import default
from watcher.tests import base
class TestDefaultSolution(base.BaseTestCase):
class TestDefaultSolution(base.TestCase):
def test_default_solution(self):
solution = default.DefaultSolution(
goal=mock.Mock(), strategy=mock.Mock())
parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"source_node": "server1",
"destination_node": "server2",
}
solution.add_action(action_type="nop",
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
@@ -34,8 +35,8 @@ class TestDefaultSolution(base.BaseTestCase):
self.assertEqual(1, len(solution.actions))
expected_action_type = "nop"
expected_parameters = {
"src_uuid_hypervisor": "server1",
"dst_uuid_hypervisor": "server2",
"source_node": "server1",
"destination_node": "server2",
"resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36"
}
self.assertEqual(expected_action_type,

View File

@@ -20,11 +20,8 @@
import mock
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import hypervisor
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root as modelroot
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm as modelvm
from watcher.decision_engine.model import vm_state
class FakerModelCollector(base.BaseClusterDataModelCollector):
@@ -38,17 +35,17 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
return self.generate_scenario_1()
def generate_scenario_1(self):
"""Simulates cluster with 2 hypervisors and 2 VMs using 1:1 mapping"""
"""Simulates cluster with 2 nodes and 2 instances using 1:1 mapping"""
current_state_cluster = modelroot.ModelRoot()
count_node = 2
count_vm = 2
count_instance = 2
mem = resource.Resource(resource.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
mem = element.Resource(element.ResourceType.memory)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity)
element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
@@ -57,7 +54,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
node.state = 'enabled'
@@ -65,43 +62,43 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 40)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(0, count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
vm.state = vm_state.VMState.ACTIVE
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 20)
num_cores.set_capacity(vm, 10)
current_state_cluster.add_vm(vm)
for i in range(0, count_instance):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 20)
num_cores.set_capacity(instance, 10)
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0"))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_0"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_1"),
current_state_cluster.get_vm_from_id("VM_1"))
current_state_cluster.get_node_from_id("Node_1"),
current_state_cluster.get_instance_from_id("INSTANCE_1"))
return current_state_cluster
def generate_scenario_2(self):
"""Simulates a cluster
With 4 hypervisors and 6 VMs all mapped to one hypervisor
With 4 nodes and 6 instances all mapped to a single node
"""
current_state_cluster = modelroot.ModelRoot()
count_node = 4
count_vm = 6
count_instance = 6
mem = resource.Resource(resource.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
mem = element.Resource(element.ResourceType.memory)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity)
element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
@@ -110,7 +107,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
node.state = 'up'
@@ -118,39 +115,39 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 16)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(0, count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
vm.state = vm_state.VMState.ACTIVE
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 20)
num_cores.set_capacity(vm, 10)
current_state_cluster.add_vm(vm)
for i in range(0, count_instance):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 20)
num_cores.set_capacity(instance, 10)
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_%s" % str(i)))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
return current_state_cluster
def generate_scenario_3(self):
"""Simulates a cluster
With 4 hypervisors and 6 VMs all mapped to one hypervisor
With 2 nodes and 4 instances all mapped to one node
"""
current_state_cluster = modelroot.ModelRoot()
count_node = 2
count_vm = 4
count_instance = 4
mem = resource.Resource(resource.ResourceType.memory)
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
mem = element.Resource(element.ResourceType.memory)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
disk_capacity =\
resource.Resource(resource.ResourceType.disk_capacity)
element.Resource(element.ResourceType.disk_capacity)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
@@ -159,7 +156,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
node.state = 'up'
@@ -167,21 +164,21 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
mem.set_capacity(node, 64)
disk_capacity.set_capacity(node, 250)
num_cores.set_capacity(node, 10)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(6, 6 + count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
vm.state = vm_state.VMState.ACTIVE
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 20)
num_cores.set_capacity(vm, 2 ** (i-6))
current_state_cluster.add_vm(vm)
for i in range(6, 6 + count_instance):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
instance.state = element.InstanceState.ACTIVE.value
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 20)
num_cores.set_capacity(instance, 2 ** (i-6))
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_%s" % str(i)))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
return current_state_cluster
@@ -193,76 +190,77 @@ class FakeCeilometerMetrics(object):
def mock_get_statistics(self, resource_id, meter_name, period=3600,
aggregate='avg'):
if meter_name == "compute.node.cpu.percent":
return self.get_hypervisor_cpu_util(resource_id)
return self.get_node_cpu_util(resource_id)
elif meter_name == "cpu_util":
return self.get_vm_cpu_util(resource_id)
return self.get_instance_cpu_util(resource_id)
elif meter_name == "memory.usage":
return self.get_vm_ram_util(resource_id)
return self.get_instance_ram_util(resource_id)
elif meter_name == "disk.root.size":
return self.get_vm_disk_root_size(resource_id)
return self.get_instance_disk_root_size(resource_id)
def get_hypervisor_cpu_util(self, r_id):
"""Calculates hypervisor utilization dynamicaly.
def get_node_cpu_util(self, r_id):
"""Calculates node utilization dynamically.
Hypervisor CPU utilization should consider
and corelate with actual VM-hypervisor mappings
Node CPU utilization should consider
and correlate with actual instance-node mappings
provided within a cluster model.
Returns relative hypervisor CPU utilization <0, 100>.
Returns relative node CPU utilization <0, 100>.
:param r_id: resource id
"""
id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
vms = self.model.get_mapping().get_node_vms_from_id(id)
instances = self.model.get_mapping().get_node_instances_from_id(id)
util_sum = 0.0
hypervisor_cpu_cores = self.model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity_from_id(id)
for vm_uuid in vms:
vm_cpu_cores = self.model.get_resource_from_id(
resource.ResourceType.cpu_cores).\
get_capacity(self.model.get_vm_from_id(vm_uuid))
total_cpu_util = vm_cpu_cores * self.get_vm_cpu_util(vm_uuid)
node_cpu_cores = self.model.get_resource_from_id(
element.ResourceType.cpu_cores).get_capacity_from_id(id)
for instance_uuid in instances:
instance_cpu_cores = self.model.get_resource_from_id(
element.ResourceType.cpu_cores).\
get_capacity(self.model.get_instance_from_id(instance_uuid))
total_cpu_util = instance_cpu_cores * self.get_instance_cpu_util(
instance_uuid)
util_sum += total_cpu_util / 100.0
util_sum /= hypervisor_cpu_cores
util_sum /= node_cpu_cores
return util_sum * 100.0
def get_vm_cpu_util(self, r_id):
vm_cpu_util = dict()
vm_cpu_util['VM_0'] = 10
vm_cpu_util['VM_1'] = 30
vm_cpu_util['VM_2'] = 60
vm_cpu_util['VM_3'] = 20
vm_cpu_util['VM_4'] = 40
vm_cpu_util['VM_5'] = 50
vm_cpu_util['VM_6'] = 100
vm_cpu_util['VM_7'] = 100
vm_cpu_util['VM_8'] = 100
vm_cpu_util['VM_9'] = 100
return vm_cpu_util[str(r_id)]
def get_instance_cpu_util(self, r_id):
instance_cpu_util = dict()
instance_cpu_util['INSTANCE_0'] = 10
instance_cpu_util['INSTANCE_1'] = 30
instance_cpu_util['INSTANCE_2'] = 60
instance_cpu_util['INSTANCE_3'] = 20
instance_cpu_util['INSTANCE_4'] = 40
instance_cpu_util['INSTANCE_5'] = 50
instance_cpu_util['INSTANCE_6'] = 100
instance_cpu_util['INSTANCE_7'] = 100
instance_cpu_util['INSTANCE_8'] = 100
instance_cpu_util['INSTANCE_9'] = 100
return instance_cpu_util[str(r_id)]
def get_vm_ram_util(self, r_id):
vm_ram_util = dict()
vm_ram_util['VM_0'] = 1
vm_ram_util['VM_1'] = 2
vm_ram_util['VM_2'] = 4
vm_ram_util['VM_3'] = 8
vm_ram_util['VM_4'] = 3
vm_ram_util['VM_5'] = 2
vm_ram_util['VM_6'] = 1
vm_ram_util['VM_7'] = 2
vm_ram_util['VM_8'] = 4
vm_ram_util['VM_9'] = 8
return vm_ram_util[str(r_id)]
def get_instance_ram_util(self, r_id):
instance_ram_util = dict()
instance_ram_util['INSTANCE_0'] = 1
instance_ram_util['INSTANCE_1'] = 2
instance_ram_util['INSTANCE_2'] = 4
instance_ram_util['INSTANCE_3'] = 8
instance_ram_util['INSTANCE_4'] = 3
instance_ram_util['INSTANCE_5'] = 2
instance_ram_util['INSTANCE_6'] = 1
instance_ram_util['INSTANCE_7'] = 2
instance_ram_util['INSTANCE_8'] = 4
instance_ram_util['INSTANCE_9'] = 8
return instance_ram_util[str(r_id)]
def get_vm_disk_root_size(self, r_id):
vm_disk_util = dict()
vm_disk_util['VM_0'] = 10
vm_disk_util['VM_1'] = 15
vm_disk_util['VM_2'] = 30
vm_disk_util['VM_3'] = 35
vm_disk_util['VM_4'] = 20
vm_disk_util['VM_5'] = 25
vm_disk_util['VM_6'] = 25
vm_disk_util['VM_7'] = 25
vm_disk_util['VM_8'] = 25
vm_disk_util['VM_9'] = 25
return vm_disk_util[str(r_id)]
def get_instance_disk_root_size(self, r_id):
instance_disk_util = dict()
instance_disk_util['INSTANCE_0'] = 10
instance_disk_util['INSTANCE_1'] = 15
instance_disk_util['INSTANCE_2'] = 30
instance_disk_util['INSTANCE_3'] = 35
instance_disk_util['INSTANCE_4'] = 20
instance_disk_util['INSTANCE_5'] = 25
instance_disk_util['INSTANCE_6'] = 25
instance_disk_util['INSTANCE_7'] = 25
instance_disk_util['INSTANCE_8'] = 25
instance_disk_util['INSTANCE_9'] = 25
return instance_disk_util[str(r_id)]

View File

@@ -19,10 +19,8 @@
import mock
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import hypervisor
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root as modelroot
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm as modelvm
class FakerModelCollector(base.BaseClusterDataModelCollector):
@@ -36,292 +34,292 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
return self.generate_scenario_1()
def generate_scenario_1(self):
vms = []
instances = []
current_state_cluster = modelroot.ModelRoot()
# number of nodes
count_node = 5
# number max of vm per node
node_count_vm = 7
node_count = 5
# number max of instance per node
node_instance_count = 7
# total number of virtual machine
count_vm = (count_node * node_count_vm)
instance_count = (node_count * node_instance_count)
# define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132)
disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(0, count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 20)
num_cores.set_capacity(vm, 10)
vms.append(vm)
current_state_cluster.add_vm(vm)
for i in range(0, instance_count):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 20)
num_cores.set_capacity(instance, 10)
instances.append(instance)
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0"))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_0"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_1"))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_1"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_1"),
current_state_cluster.get_vm_from_id("VM_2"))
current_state_cluster.get_node_from_id("Node_1"),
current_state_cluster.get_instance_from_id("INSTANCE_2"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_3"))
current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_instance_from_id("INSTANCE_3"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_4"))
current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_instance_from_id("INSTANCE_4"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_2"),
current_state_cluster.get_vm_from_id("VM_5"))
current_state_cluster.get_node_from_id("Node_2"),
current_state_cluster.get_instance_from_id("INSTANCE_5"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_3"),
current_state_cluster.get_vm_from_id("VM_6"))
current_state_cluster.get_node_from_id("Node_3"),
current_state_cluster.get_instance_from_id("INSTANCE_6"))
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_4"),
current_state_cluster.get_vm_from_id("VM_7"))
current_state_cluster.get_node_from_id("Node_4"),
current_state_cluster.get_instance_from_id("INSTANCE_7"))
return current_state_cluster
def map(self, model, h_id, vm_id):
def map(self, model, h_id, instance_id):
model.get_mapping().map(
model.get_hypervisor_from_id(h_id),
model.get_vm_from_id(vm_id))
model.get_node_from_id(h_id),
model.get_instance_from_id(instance_id))
def generate_scenario_3_with_2_hypervisors(self):
vms = []
def generate_scenario_3_with_2_nodes(self):
instances = []
root = modelroot.ModelRoot()
# number of nodes
count_node = 2
node_count = 2
# define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem)
root.create_resource(num_cores)
root.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132)
disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40)
root.add_hypervisor(node)
root.add_node(node)
vm1 = modelvm.VM()
vm1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm1, 2)
disk.set_capacity(vm1, 20)
num_cores.set_capacity(vm1, 10)
vms.append(vm1)
root.add_vm(vm1)
instance1 = element.Instance()
instance1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(instance1, 2)
disk.set_capacity(instance1, 20)
num_cores.set_capacity(instance1, 10)
instances.append(instance1)
root.add_instance(instance1)
vm2 = modelvm.VM()
vm2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517"
mem.set_capacity(vm2, 2)
disk.set_capacity(vm2, 20)
num_cores.set_capacity(vm2, 10)
vms.append(vm2)
root.add_vm(vm2)
instance2 = element.Instance()
instance2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517"
mem.set_capacity(instance2, 2)
disk.set_capacity(instance2, 20)
num_cores.set_capacity(instance2, 10)
instances.append(instance2)
root.add_instance(instance2)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance2.uuid)))
return root
def generate_scenario_4_with_1_hypervisor_no_vm(self):
def generate_scenario_4_with_1_node_no_instance(self):
current_state_cluster = modelroot.ModelRoot()
# number of nodes
count_node = 1
node_count = 1
# define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 1)
disk.set_capacity(node, 1)
num_cores.set_capacity(node, 1)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
return current_state_cluster
def generate_scenario_5_with_vm_disk_0(self):
vms = []
def generate_scenario_5_with_instance_disk_0(self):
instances = []
current_state_cluster = modelroot.ModelRoot()
# number of nodes
count_node = 1
# number of vms
count_vm = 1
node_count = 1
# number of instances
instance_count = 1
# define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
current_state_cluster.create_resource(mem)
current_state_cluster.create_resource(num_cores)
current_state_cluster.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 4)
disk.set_capacity(node, 4)
num_cores.set_capacity(node, 4)
current_state_cluster.add_hypervisor(node)
current_state_cluster.add_node(node)
for i in range(0, count_vm):
vm_uuid = "VM_{0}".format(i)
vm = modelvm.VM()
vm.uuid = vm_uuid
mem.set_capacity(vm, 2)
disk.set_capacity(vm, 0)
num_cores.set_capacity(vm, 4)
vms.append(vm)
current_state_cluster.add_vm(vm)
for i in range(0, instance_count):
instance_uuid = "INSTANCE_{0}".format(i)
instance = element.Instance()
instance.uuid = instance_uuid
mem.set_capacity(instance, 2)
disk.set_capacity(instance, 0)
num_cores.set_capacity(instance, 4)
instances.append(instance)
current_state_cluster.add_instance(instance)
current_state_cluster.get_mapping().map(
current_state_cluster.get_hypervisor_from_id("Node_0"),
current_state_cluster.get_vm_from_id("VM_0"))
current_state_cluster.get_node_from_id("Node_0"),
current_state_cluster.get_instance_from_id("INSTANCE_0"))
return current_state_cluster
def generate_scenario_6_with_2_hypervisors(self):
vms = []
def generate_scenario_6_with_2_nodes(self):
instances = []
root = modelroot.ModelRoot()
# number of nodes
count_node = 2
node_count = 2
# define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem)
root.create_resource(num_cores)
root.create_resource(disk)
for i in range(0, count_node):
for i in range(0, node_count):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132)
disk.set_capacity(node, 250)
num_cores.set_capacity(node, 40)
root.add_hypervisor(node)
root.add_node(node)
vm1 = modelvm.VM()
vm1.uuid = "VM_1"
mem.set_capacity(vm1, 2)
disk.set_capacity(vm1, 20)
num_cores.set_capacity(vm1, 10)
vms.append(vm1)
root.add_vm(vm1)
instance1 = element.Instance()
instance1.uuid = "INSTANCE_1"
mem.set_capacity(instance1, 2)
disk.set_capacity(instance1, 20)
num_cores.set_capacity(instance1, 10)
instances.append(instance1)
root.add_instance(instance1)
vm11 = modelvm.VM()
vm11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm11, 2)
disk.set_capacity(vm11, 20)
num_cores.set_capacity(vm11, 10)
vms.append(vm11)
root.add_vm(vm11)
instance11 = element.Instance()
instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(instance11, 2)
disk.set_capacity(instance11, 20)
num_cores.set_capacity(instance11, 10)
instances.append(instance11)
root.add_instance(instance11)
vm2 = modelvm.VM()
vm2.uuid = "VM_3"
mem.set_capacity(vm2, 2)
disk.set_capacity(vm2, 20)
num_cores.set_capacity(vm2, 10)
vms.append(vm2)
root.add_vm(vm2)
instance2 = element.Instance()
instance2.uuid = "INSTANCE_3"
mem.set_capacity(instance2, 2)
disk.set_capacity(instance2, 20)
num_cores.set_capacity(instance2, 10)
instances.append(instance2)
root.add_instance(instance2)
vm21 = modelvm.VM()
vm21.uuid = "VM_4"
mem.set_capacity(vm21, 2)
disk.set_capacity(vm21, 20)
num_cores.set_capacity(vm21, 10)
vms.append(vm21)
root.add_vm(vm21)
instance21 = element.Instance()
instance21.uuid = "INSTANCE_4"
mem.set_capacity(instance21, 2)
disk.set_capacity(instance21, 20)
num_cores.set_capacity(instance21, 10)
instances.append(instance21)
root.add_instance(instance21)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm11.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance11.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm21.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance2.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance21.uuid)))
return root
def generate_scenario_7_with_2_hypervisors(self):
vms = []
def generate_scenario_7_with_2_nodes(self):
instances = []
root = modelroot.ModelRoot()
# number of nodes
count_node = 2
# define ressouce ( CPU, MEM disk, ... )
mem = resource.Resource(resource.ResourceType.memory)
mem = element.Resource(element.ResourceType.memory)
# 2199.954 Mhz
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
disk = resource.Resource(resource.ResourceType.disk)
num_cores = element.Resource(element.ResourceType.cpu_cores)
disk = element.Resource(element.ResourceType.disk)
root.create_resource(mem)
root.create_resource(num_cores)
@@ -329,54 +327,54 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
for i in range(0, count_node):
node_uuid = "Node_{0}".format(i)
node = hypervisor.Hypervisor()
node = element.ComputeNode()
node.uuid = node_uuid
node.hostname = "hostname_{0}".format(i)
mem.set_capacity(node, 132)
disk.set_capacity(node, 250)
num_cores.set_capacity(node, 50)
root.add_hypervisor(node)
root.add_node(node)
vm1 = modelvm.VM()
vm1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906"
mem.set_capacity(vm1, 2)
disk.set_capacity(vm1, 20)
num_cores.set_capacity(vm1, 15)
vms.append(vm1)
root.add_vm(vm1)
instance1 = element.Instance()
instance1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906"
mem.set_capacity(instance1, 2)
disk.set_capacity(instance1, 20)
num_cores.set_capacity(instance1, 15)
instances.append(instance1)
root.add_instance(instance1)
vm11 = modelvm.VM()
vm11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(vm11, 2)
disk.set_capacity(vm11, 20)
num_cores.set_capacity(vm11, 10)
vms.append(vm11)
root.add_vm(vm11)
instance11 = element.Instance()
instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
mem.set_capacity(instance11, 2)
disk.set_capacity(instance11, 20)
num_cores.set_capacity(instance11, 10)
instances.append(instance11)
root.add_instance(instance11)
vm2 = modelvm.VM()
vm2.uuid = "VM_3"
mem.set_capacity(vm2, 2)
disk.set_capacity(vm2, 20)
num_cores.set_capacity(vm2, 10)
vms.append(vm2)
root.add_vm(vm2)
instance2 = element.Instance()
instance2.uuid = "INSTANCE_3"
mem.set_capacity(instance2, 2)
disk.set_capacity(instance2, 20)
num_cores.set_capacity(instance2, 10)
instances.append(instance2)
root.add_instance(instance2)
vm21 = modelvm.VM()
vm21.uuid = "VM_4"
mem.set_capacity(vm21, 2)
disk.set_capacity(vm21, 20)
num_cores.set_capacity(vm21, 10)
vms.append(vm21)
root.add_vm(vm21)
instance21 = element.Instance()
instance21.uuid = "INSTANCE_4"
mem.set_capacity(instance21, 2)
disk.set_capacity(instance21, 20)
num_cores.set_capacity(instance21, 10)
instances.append(instance21)
root.add_instance(instance21)
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm1.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_0"),
root.get_vm_from_id(str(vm11.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance1.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_0"),
root.get_instance_from_id(str(instance11.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm2.uuid)))
root.get_mapping().map(root.get_hypervisor_from_id("Node_1"),
root.get_vm_from_id(str(vm21.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance2.uuid)))
root.get_mapping().map(root.get_node_from_id("Node_1"),
root.get_instance_from_id(str(instance21.uuid)))
return root

View File

@@ -15,7 +15,6 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
@@ -37,9 +36,9 @@ class FakerMetricsCollector(object):
elif meter_name == "hardware.memory.used":
result = self.get_usage_node_ram(resource_id)
elif meter_name == "cpu_util":
result = self.get_average_usage_vm_cpu(resource_id)
result = self.get_average_usage_instance_cpu(resource_id)
elif meter_name == "memory.resident":
result = self.get_average_usage_vm_memory(resource_id)
result = self.get_average_usage_instance_memory(resource_id)
elif meter_name == "hardware.ipmi.node.outlet_temperature":
result = self.get_average_outlet_temperature(resource_id)
elif meter_name == "hardware.ipmi.node.airflow":
@@ -54,7 +53,7 @@ class FakerMetricsCollector(object):
aggregate='avg'):
result = 0
if meter_name == "cpu_util":
result = self.get_average_usage_vm_cpu_wb(resource_id)
result = self.get_average_usage_instance_cpu_wb(resource_id)
return result
def get_average_outlet_temperature(self, uuid):
@@ -135,7 +134,7 @@ class FakerMetricsCollector(object):
mock['Node_6_hostname_6'] = 8
mock['Node_19_hostname_19'] = 10
# node 4
mock['VM_7_hostname_7'] = 4
mock['INSTANCE_7_hostname_7'] = 4
mock['Node_0'] = 0.07
mock['Node_1'] = 0.05
@@ -149,7 +148,7 @@ class FakerMetricsCollector(object):
return float(mock[str(uuid)])
def get_average_usage_vm_cpu_wb(self, uuid):
def get_average_usage_instance_cpu_wb(self, uuid):
"""The last VM CPU usage values to average
:param uuid:00
@@ -162,14 +161,14 @@ class FakerMetricsCollector(object):
# Normalize
mock = {}
# node 0
mock['VM_1'] = 80
mock['INSTANCE_1'] = 80
mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50
# node 1
mock['VM_3'] = 20
mock['VM_4'] = 10
mock['INSTANCE_3'] = 20
mock['INSTANCE_4'] = 10
return float(mock[str(uuid)])
def get_average_usage_vm_cpu(self, uuid):
def get_average_usage_instance_cpu(self, uuid):
"""The last VM CPU usage values to average
:param uuid:00
@@ -182,66 +181,66 @@ class FakerMetricsCollector(object):
# Normalize
mock = {}
# node 0
mock['VM_0'] = 7
mock['VM_1'] = 7
mock['INSTANCE_0'] = 7
mock['INSTANCE_1'] = 7
# node 1
mock['VM_2'] = 10
mock['INSTANCE_2'] = 10
# node 2
mock['VM_3'] = 5
mock['VM_4'] = 5
mock['VM_5'] = 10
mock['INSTANCE_3'] = 5
mock['INSTANCE_4'] = 5
mock['INSTANCE_5'] = 10
# node 3
mock['VM_6'] = 8
mock['INSTANCE_6'] = 8
# node 4
mock['VM_7'] = 4
mock['INSTANCE_7'] = 4
if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4)
mock[uuid] = 8
return mock[str(uuid)]
def get_average_usage_vm_memory(self, uuid):
def get_average_usage_instance_memory(self, uuid):
mock = {}
# node 0
mock['VM_0'] = 2
mock['VM_1'] = 5
mock['INSTANCE_0'] = 2
mock['INSTANCE_1'] = 5
# node 1
mock['VM_2'] = 5
mock['INSTANCE_2'] = 5
# node 2
mock['VM_3'] = 8
mock['VM_4'] = 5
mock['VM_5'] = 16
mock['INSTANCE_3'] = 8
mock['INSTANCE_4'] = 5
mock['INSTANCE_5'] = 16
# node 3
mock['VM_6'] = 8
mock['INSTANCE_6'] = 8
# node 4
mock['VM_7'] = 4
mock['INSTANCE_7'] = 4
if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4)
mock[uuid] = 10
return mock[str(uuid)]
def get_average_usage_vm_disk(self, uuid):
def get_average_usage_instance_disk(self, uuid):
mock = {}
# node 0
mock['VM_0'] = 2
mock['VM_1'] = 2
mock['INSTANCE_0'] = 2
mock['INSTANCE_1'] = 2
# node 1
mock['VM_2'] = 2
mock['INSTANCE_2'] = 2
# node 2
mock['VM_3'] = 10
mock['VM_4'] = 15
mock['VM_5'] = 20
mock['INSTANCE_3'] = 10
mock['INSTANCE_4'] = 15
mock['INSTANCE_5'] = 20
# node 3
mock['VM_6'] = 8
mock['INSTANCE_6'] = 8
# node 4
mock['VM_7'] = 4
mock['INSTANCE_7'] = 4
if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4)
@@ -249,7 +248,7 @@ class FakerMetricsCollector(object):
return mock[str(uuid)]
def get_virtual_machine_capacity(self, vm_uuid):
def get_virtual_machine_capacity(self, instance_uuid):
return random.randint(1, 4)
def get_average_network_incomming(self, node):

View File

@@ -30,7 +30,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestBasicConsolidation(base.BaseTestCase):
class TestBasicConsolidation(base.TestCase):
def setUp(self):
super(TestBasicConsolidation, self).setUp()
@@ -58,63 +58,75 @@ class TestBasicConsolidation(base.BaseTestCase):
def test_cluster_size(self):
size_cluster = len(
self.fake_cluster.generate_scenario_1().get_all_hypervisors())
self.fake_cluster.generate_scenario_1().get_all_compute_nodes())
size_cluster_assert = 5
self.assertEqual(size_cluster_assert, size_cluster)
def test_basic_consolidation_score_hypervisor(self):
def test_basic_consolidation_score_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
node_1_score = 0.023333333333333317
self.assertEqual(node_1_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_1")))
model.get_node_from_id("Node_1")))
node_2_score = 0.26666666666666666
self.assertEqual(node_2_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_2")))
model.get_node_from_id("Node_2")))
node_0_score = 0.023333333333333317
self.assertEqual(node_0_score, self.strategy.calculate_score_node(
model.get_hypervisor_from_id("Node_0")))
model.get_node_from_id("Node_0")))
def test_basic_consolidation_score_vm(self):
def test_basic_consolidation_score_instance(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0")
vm_0_score = 0.023333333333333317
self.assertEqual(vm_0_score, self.strategy.calculate_score_vm(vm_0))
instance_0 = model.get_instance_from_id("INSTANCE_0")
instance_0_score = 0.023333333333333317
self.assertEqual(
instance_0_score,
self.strategy.calculate_score_instance(instance_0))
vm_1 = model.get_vm_from_id("VM_1")
vm_1_score = 0.023333333333333317
self.assertEqual(vm_1_score, self.strategy.calculate_score_vm(vm_1))
vm_2 = model.get_vm_from_id("VM_2")
vm_2_score = 0.033333333333333326
self.assertEqual(vm_2_score, self.strategy.calculate_score_vm(vm_2))
vm_6 = model.get_vm_from_id("VM_6")
vm_6_score = 0.02666666666666669
self.assertEqual(vm_6_score, self.strategy.calculate_score_vm(vm_6))
vm_7 = model.get_vm_from_id("VM_7")
vm_7_score = 0.013333333333333345
self.assertEqual(vm_7_score, self.strategy.calculate_score_vm(vm_7))
instance_1 = model.get_instance_from_id("INSTANCE_1")
instance_1_score = 0.023333333333333317
self.assertEqual(
instance_1_score,
self.strategy.calculate_score_instance(instance_1))
instance_2 = model.get_instance_from_id("INSTANCE_2")
instance_2_score = 0.033333333333333326
self.assertEqual(
instance_2_score,
self.strategy.calculate_score_instance(instance_2))
instance_6 = model.get_instance_from_id("INSTANCE_6")
instance_6_score = 0.02666666666666669
self.assertEqual(
instance_6_score,
self.strategy.calculate_score_instance(instance_6))
instance_7 = model.get_instance_from_id("INSTANCE_7")
instance_7_score = 0.013333333333333345
self.assertEqual(
instance_7_score,
self.strategy.calculate_score_instance(instance_7))
def test_basic_consolidation_score_vm_disk(self):
model = self.fake_cluster.generate_scenario_5_with_vm_disk_0()
def test_basic_consolidation_score_instance_disk(self):
model = self.fake_cluster.generate_scenario_5_with_instance_disk_0()
self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0")
vm_0_score = 0.023333333333333355
self.assertEqual(vm_0_score, self.strategy.calculate_score_vm(vm_0, ))
instance_0 = model.get_instance_from_id("INSTANCE_0")
instance_0_score = 0.023333333333333355
self.assertEqual(
instance_0_score,
self.strategy.calculate_score_instance(instance_0, ))
def test_basic_consolidation_weight(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
vm_0 = model.get_vm_from_id("VM_0")
instance_0 = model.get_instance_from_id("INSTANCE_0")
cores = 16
# 80 Go
disk = 80
# mem 8 Go
mem = 8
vm_0_weight_assert = 3.1999999999999997
instance_0_weight_assert = 3.1999999999999997
self.assertEqual(
vm_0_weight_assert,
self.strategy.calculate_weight(vm_0, cores, disk, mem))
instance_0_weight_assert,
self.strategy.calculate_weight(instance_0, cores, disk, mem))
def test_calculate_migration_efficacy(self):
self.strategy.calculate_migration_efficacy()
@@ -130,28 +142,28 @@ class TestBasicConsolidation(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_check_migration(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
all_vms = model.get_all_vms()
all_hyps = model.get_all_hypervisors()
vm0 = all_vms[list(all_vms.keys())[0]]
hyp0 = all_hyps[list(all_hyps.keys())[0]]
all_instances = model.get_all_instances()
all_nodes = model.get_all_compute_nodes()
instance0 = all_instances[list(all_instances.keys())[0]]
node0 = all_nodes[list(all_nodes.keys())[0]]
self.strategy.check_migration(hyp0, hyp0, vm0)
self.strategy.check_migration(node0, node0, instance0)
def test_threshold(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
all_hyps = model.get_all_hypervisors()
hyp0 = all_hyps[list(all_hyps.keys())[0]]
all_nodes = model.get_all_compute_nodes()
node0 = all_nodes[list(all_nodes.keys())[0]]
self.assertFalse(self.strategy.check_threshold(
hyp0, 1000, 1000, 1000))
node0, 1000, 1000, 1000))
def test_basic_consolidation_migration(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
@@ -163,10 +175,10 @@ class TestBasicConsolidation(base.BaseTestCase):
expected_power_state = 0
num_migrations = actions_counter.get("migrate", 0)
num_hypervisor_state_change = actions_counter.get(
"change_hypervisor_state", 0)
num_node_state_change = actions_counter.get(
"change_node_state", 0)
self.assertEqual(expected_num_migrations, num_migrations)
self.assertEqual(expected_power_state, num_hypervisor_state_change)
self.assertEqual(expected_power_state, num_node_state_change)
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
@@ -180,7 +192,7 @@ class TestBasicConsolidation(base.BaseTestCase):
def test_execute_no_workload(self):
model = (
self.fake_cluster
.generate_scenario_4_with_1_hypervisor_no_vm())
.generate_scenario_4_with_1_node_no_instance())
self.m_model.return_value = model
with mock.patch.object(
@@ -191,7 +203,7 @@ class TestBasicConsolidation(base.BaseTestCase):
self.assertEqual(0, solution.efficacy.global_efficacy.value)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()

View File

@@ -52,7 +52,7 @@ class TestDummyStrategy(base.TestCase):
self.assertEqual(3, len(solution.actions))
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'})

View File

@@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \
@@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestOutletTempControl(base.BaseTestCase):
class TestOutletTempControl(base.TestCase):
def setUp(self):
super(TestOutletTempControl, self).setUp()
@@ -63,41 +63,41 @@ class TestOutletTempControl(base.BaseTestCase):
self.strategy.threshold = 34.3
def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk)
node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = self.strategy.calc_used_res(
hypervisor, cap_cores, cap_mem, cap_disk)
node, cap_cores, cap_mem, cap_disk)
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
def test_group_hosts_by_outlet_temp(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp()
self.assertEqual('Node_1', h1[0]['hv'].uuid)
self.assertEqual('Node_0', h2[0]['hv'].uuid)
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
self.assertEqual('Node_1', n1[0]['node'].uuid)
self.assertEqual('Node_0', n2[0]['node'].uuid)
def test_choose_vm_to_migrate(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
self.assertEqual('Node_1', vm_to_mig[0].uuid)
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual('Node_1', instance_to_mig[0].uuid)
self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517',
vm_to_mig[1].uuid)
instance_to_mig[1].uuid)
def test_filter_dest_servers(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
h1, h2 = self.strategy.group_hosts_by_outlet_temp()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
dest_hosts = self.strategy.filter_dest_servers(h2, vm_to_mig[1])
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1])
self.assertEqual(1, len(dest_hosts))
self.assertEqual('Node_0', dest_hosts[0]['hv'].uuid)
self.assertEqual('Node_0', dest_hosts[0]['node'].uuid)
def test_exception_model(self):
self.m_model.return_value = None
@@ -123,14 +123,14 @@ class TestOutletTempControl(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self):
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm()
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
def test_execute(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
@@ -140,7 +140,7 @@ class TestOutletTempControl(base.BaseTestCase):
self.assertEqual(1, num_migrations)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()

View File

@@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \
@@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestUniformAirflow(base.BaseTestCase):
class TestUniformAirflow(base.TestCase):
def setUp(self):
super(TestUniformAirflow, self).setUp()
@@ -68,72 +68,73 @@ class TestUniformAirflow(base.BaseTestCase):
self._period = 300
def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk)
node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = self.\
strategy.calculate_used_resource(
hypervisor, cap_cores, cap_mem, cap_disk)
node, cap_cores, cap_mem, cap_disk)
self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40))
def test_group_hosts_by_airflow(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
h1, h2 = self.strategy.group_hosts_by_airflow()
# print h1, h2, avg, w_map
self.assertEqual(h1[0]['hv'].uuid, 'Node_0')
self.assertEqual(h2[0]['hv'].uuid, 'Node_1')
n1, n2 = self.strategy.group_hosts_by_airflow()
# print n1, n2, avg, w_map
self.assertEqual(n1[0]['node'].uuid, 'Node_0')
self.assertEqual(n2[0]['node'].uuid, 'Node_1')
def test_choose_vm_to_migrate(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
self.assertEqual(vm_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(vm_to_mig[1]), 1)
self.assertEqual(vm_to_mig[1][0].uuid,
n1, n2 = self.strategy.group_hosts_by_airflow()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(instance_to_mig[1]), 1)
self.assertEqual(instance_to_mig[1][0].uuid,
"cae81432-1631-4d4e-b29c-6f3acdcde906")
def test_choose_vm_to_migrate_all(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
def test_choose_instance_to_migrate_all(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
h1, h2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
self.assertEqual(vm_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(vm_to_mig[1]), 2)
self.assertEqual(vm_to_mig[1][1].uuid,
n1, n2 = self.strategy.group_hosts_by_airflow()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(len(instance_to_mig[1]), 2)
self.assertEqual(instance_to_mig[1][1].uuid,
"73b09e16-35b7-4922-804e-e8f5d9b740fc")
def test_choose_vm_notfound(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
def test_choose_instance_notfound(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow()
vms = model.get_all_vms()
vms.clear()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
self.assertIsNone(vm_to_mig)
n1, n2 = self.strategy.group_hosts_by_airflow()
instances = model.get_all_instances()
instances.clear()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertIsNone(instance_to_mig)
def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
h1, h2 = self.strategy.group_hosts_by_airflow()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1)
dest_hosts = self.strategy.filter_destination_hosts(h2, vm_to_mig[1])
n1, n2 = self.strategy.group_hosts_by_airflow()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
dest_hosts = self.strategy.filter_destination_hosts(
n2, instance_to_mig[1])
self.assertEqual(len(dest_hosts), 1)
self.assertEqual(dest_hosts[0]['hv'].uuid, 'Node_1')
self.assertEqual(dest_hosts[0]['vm'].uuid,
self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1')
self.assertEqual(dest_hosts[0]['instance'].uuid,
'cae81432-1631-4d4e-b29c-6f3acdcde906')
def test_exception_model(self):
@@ -163,7 +164,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
self.strategy.threshold_power = 300
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm()
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
@@ -172,7 +173,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
self.strategy.threshold_power = 300
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
@@ -182,7 +183,7 @@ class TestUniformAirflow(base.BaseTestCase):
self.assertEqual(num_migrations, 2)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_7_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()

View File

@@ -28,7 +28,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_cluster_and_metrics
class TestVMWorkloadConsolidation(base.BaseTestCase):
class TestVMWorkloadConsolidation(base.TestCase):
def setUp(self):
super(TestVMWorkloadConsolidation, self).setUp()
@@ -65,41 +65,42 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
exception.ClusterStateNotDefined,
self.strategy.execute)
def test_get_vm_utilization(self):
def test_get_instance_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
vm_0 = model.get_vm_from_id("VM_0")
vm_util = dict(cpu=1.0, ram=1, disk=10)
self.assertEqual(vm_util,
self.strategy.get_vm_utilization(vm_0.uuid, model))
instance_0 = model.get_instance_from_id("INSTANCE_0")
instance_util = dict(cpu=1.0, ram=1, disk=10)
self.assertEqual(
instance_util,
self.strategy.get_instance_utilization(instance_0.uuid, model))
def test_get_hypervisor_utilization(self):
def test_get_node_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
node_0 = model.get_hypervisor_from_id("Node_0")
node_0 = model.get_node_from_id("Node_0")
node_util = dict(cpu=1.0, ram=1, disk=10)
self.assertEqual(
node_util,
self.strategy.get_hypervisor_utilization(node_0, model))
self.strategy.get_node_utilization(node_0, model))
def test_get_hypervisor_capacity(self):
def test_get_node_capacity(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
node_0 = model.get_hypervisor_from_id("Node_0")
node_0 = model.get_node_from_id("Node_0")
node_util = dict(cpu=40, ram=64, disk=250)
self.assertEqual(node_util,
self.strategy.get_hypervisor_capacity(node_0, model))
self.strategy.get_node_capacity(node_0, model))
def test_get_relative_hypervisor_utilization(self):
def test_get_relative_node_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
hypervisor = model.get_hypervisor_from_id('Node_0')
rhu = self.strategy.get_relative_hypervisor_utilization(
hypervisor, model)
node = model.get_node_from_id('Node_0')
rhu = self.strategy.get_relative_node_utilization(
node, model)
expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025}
self.assertEqual(expected_rhu, rhu)
@@ -115,85 +116,85 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1')
vm_uuid = 'VM_0'
self.strategy.add_migration(vm_uuid, h1, h2, model)
n1 = model.get_node_from_id('Node_0')
n2 = model.get_node_from_id('Node_1')
instance_uuid = 'INSTANCE_0'
self.strategy.add_migration(instance_uuid, n1, n2, model)
self.assertEqual(1, len(self.strategy.solution.actions))
expected = {'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'src_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n2.uuid,
'source_node': n1.uuid,
'migration_type': 'live',
'resource_id': vm_uuid}}
'resource_id': instance_uuid}}
self.assertEqual(expected, self.strategy.solution.actions[0])
def test_is_overloaded(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
n1 = model.get_node_from_id('Node_0')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
res = self.strategy.is_overloaded(n1, model, cc)
self.assertFalse(res)
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
res = self.strategy.is_overloaded(n1, model, cc)
self.assertFalse(res)
cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
res = self.strategy.is_overloaded(n1, model, cc)
self.assertTrue(res)
def test_vm_fits(self):
def test_instance_fits(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_1')
vm_uuid = 'VM_0'
n = model.get_node_from_id('Node_1')
instance_uuid = 'INSTANCE_0'
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.vm_fits(vm_uuid, h, model, cc)
res = self.strategy.instance_fits(instance_uuid, n, model, cc)
self.assertTrue(res)
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.vm_fits(vm_uuid, h, model, cc)
res = self.strategy.instance_fits(instance_uuid, n, model, cc)
self.assertFalse(res)
def test_add_action_enable_hypervisor(self):
def test_add_action_enable_compute_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_0')
self.strategy.add_action_enable_hypervisor(h)
n = model.get_node_from_id('Node_0')
self.strategy.add_action_enable_compute_node(n)
expected = [{'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'enabled',
'resource_id': 'Node_0'}}]
self.assertEqual(expected, self.strategy.solution.actions)
def test_add_action_disable_hypervisor(self):
def test_add_action_disable_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h = model.get_hypervisor_from_id('Node_0')
self.strategy.add_action_disable_hypervisor(h)
n = model.get_node_from_id('Node_0')
self.strategy.add_action_disable_node(n)
expected = [{'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'disabled',
'resource_id': 'Node_0'}}]
self.assertEqual(expected, self.strategy.solution.actions)
def test_disable_unused_hypervisors(self):
def test_disable_unused_nodes(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1')
vm_uuid = 'VM_0'
self.strategy.disable_unused_hypervisors(model)
n1 = model.get_node_from_id('Node_0')
n2 = model.get_node_from_id('Node_1')
instance_uuid = 'INSTANCE_0'
self.strategy.disable_unused_nodes(model)
self.assertEqual(0, len(self.strategy.solution.actions))
# Migrate VM to free the hypervisor
self.strategy.add_migration(vm_uuid, h1, h2, model)
# Migrate VM to free the node
self.strategy.add_migration(instance_uuid, n1, n2, model)
self.strategy.disable_unused_hypervisors(model)
self.strategy.disable_unused_nodes(model)
expected = {'action_type': 'change_nova_service_state',
'input_parameters': {'state': 'disabled',
'resource_id': 'Node_0'}}
@@ -213,39 +214,39 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1')
vm_uuid = 'VM_0'
n1 = model.get_node_from_id('Node_0')
n2 = model.get_node_from_id('Node_1')
instance_uuid = 'INSTANCE_0'
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.consolidation_phase(model, cc)
expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'src_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n2.uuid,
'source_node': n1.uuid,
'migration_type': 'live',
'resource_id': vm_uuid}}]
'resource_id': instance_uuid}}]
self.assertEqual(expected, self.strategy.solution.actions)
def test_strategy(self):
model = self.fake_cluster.generate_scenario_2()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
n1 = model.get_node_from_id('Node_0')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.offload_phase(model, cc)
self.strategy.consolidation_phase(model, cc)
self.strategy.optimize_solution(model)
h2 = self.strategy.solution.actions[0][
'input_parameters']['dst_hypervisor']
n2 = self.strategy.solution.actions[0][
'input_parameters']['destination_node']
expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2,
'src_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n2,
'source_node': n1.uuid,
'migration_type': 'live',
'resource_id': 'VM_3'}},
'resource_id': 'INSTANCE_3'}},
{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2,
'src_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n2,
'source_node': n1.uuid,
'migration_type': 'live',
'resource_id': 'VM_1'}}]
'resource_id': 'INSTANCE_1'}}]
self.assertEqual(expected, self.strategy.solution.actions)
@@ -253,32 +254,32 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
model = self.fake_cluster.generate_scenario_3()
self.m_model.return_value = model
self.fake_metrics.model = model
h1 = model.get_hypervisor_from_id('Node_0')
h2 = model.get_hypervisor_from_id('Node_1')
n1 = model.get_node_from_id('Node_0')
n2 = model.get_node_from_id('Node_1')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.offload_phase(model, cc)
expected = [{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live',
'resource_id': 'VM_6',
'src_hypervisor': h1.uuid}},
'resource_id': 'INSTANCE_6',
'source_node': n1.uuid}},
{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live',
'resource_id': 'VM_7',
'src_hypervisor': h1.uuid}},
'resource_id': 'INSTANCE_7',
'source_node': n1.uuid}},
{'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h2.uuid,
'input_parameters': {'destination_node': n2.uuid,
'migration_type': 'live',
'resource_id': 'VM_8',
'src_hypervisor': h1.uuid}}]
'resource_id': 'INSTANCE_8',
'source_node': n1.uuid}}]
self.assertEqual(expected, self.strategy.solution.actions)
self.strategy.consolidation_phase(model, cc)
expected.append({'action_type': 'migrate',
'input_parameters': {'dst_hypervisor': h1.uuid,
'input_parameters': {'destination_node': n1.uuid,
'migration_type': 'live',
'resource_id': 'VM_7',
'src_hypervisor': h2.uuid}})
'resource_id': 'INSTANCE_7',
'source_node': n2.uuid}})
self.assertEqual(expected, self.strategy.solution.actions)
self.strategy.optimize_solution(model)
del expected[3]

View File

@@ -22,8 +22,8 @@ import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model import resource
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies \
@@ -32,7 +32,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestWorkloadBalance(base.BaseTestCase):
class TestWorkloadBalance(base.TestCase):
def setUp(self):
super(TestWorkloadBalance, self).setUp()
@@ -59,59 +59,64 @@ class TestWorkloadBalance(base.BaseTestCase):
self.strategy = strategies.WorkloadBalance(config=mock.Mock())
self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'threshold': 25.0,
'period': 300})
'period': 300})
self.strategy.threshold = 25.0
self.strategy._period = 300
def test_calc_used_res(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
hypervisor = model.get_hypervisor_from_id('Node_0')
cap_cores = model.get_resource_from_id(resource.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(resource.ResourceType.memory)
cap_disk = model.get_resource_from_id(resource.ResourceType.disk)
node = model.get_node_from_id('Node_0')
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
cores_used, mem_used, disk_used = (
self.strategy.calculate_used_resource(
hypervisor, cap_cores, cap_mem, cap_disk))
node, cap_cores, cap_mem, cap_disk))
self.assertEqual((cores_used, mem_used, disk_used), (20, 4, 40))
def test_group_hosts_by_cpu_util(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
self.strategy.threshold = 30
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
self.assertEqual(h1[0]['hv'].uuid, 'Node_0')
self.assertEqual(h2[0]['hv'].uuid, 'Node_1')
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
self.assertEqual(n1[0]['node'].uuid, 'Node_0')
self.assertEqual(n2[0]['node'].uuid, 'Node_1')
self.assertEqual(avg, 8.0)
def test_choose_vm_to_migrate(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map)
self.assertEqual(vm_to_mig[0].uuid, 'Node_0')
self.assertEqual(vm_to_mig[1].uuid,
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
instance_to_mig = self.strategy.choose_instance_to_migrate(
n1, avg, w_map)
self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
self.assertEqual(instance_to_mig[1].uuid,
"73b09e16-35b7-4922-804e-e8f5d9b740fc")
def test_choose_vm_notfound(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
def test_choose_instance_notfound(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
vms = model.get_all_vms()
vms.clear()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map)
self.assertIsNone(vm_to_mig)
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
instances = model.get_all_instances()
instances.clear()
instance_to_mig = self.strategy.choose_instance_to_migrate(
n1, avg, w_map)
self.assertIsNone(instance_to_mig)
def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
h1, h2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
vm_to_mig = self.strategy.choose_vm_to_migrate(h1, avg, w_map)
self.strategy.ceilometer = mock.MagicMock(
statistic_aggregation=self.fake_metrics.mock_get_statistics_wb)
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
instance_to_mig = self.strategy.choose_instance_to_migrate(
n1, avg, w_map)
dest_hosts = self.strategy.filter_destination_hosts(
h2, vm_to_mig[1], avg, w_map)
n2, instance_to_mig[1], avg, w_map)
self.assertEqual(len(dest_hosts), 1)
self.assertEqual(dest_hosts[0]['hv'].uuid, 'Node_1')
self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1')
def test_exception_model(self):
self.m_model.return_value = None
@@ -137,13 +142,13 @@ class TestWorkloadBalance(base.BaseTestCase):
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self):
model = self.fake_cluster.generate_scenario_4_with_1_hypervisor_no_vm()
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
def test_execute(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
@@ -153,7 +158,7 @@ class TestWorkloadBalance(base.BaseTestCase):
self.assertEqual(num_migrations, 1)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()

View File

@@ -28,7 +28,7 @@ from watcher.tests.decision_engine.strategy.strategies \
import faker_metrics_collector
class TestWorkloadStabilization(base.BaseTestCase):
class TestWorkloadStabilization(base.TestCase):
def setUp(self):
super(TestWorkloadStabilization, self).setUp()
@@ -63,11 +63,13 @@ class TestWorkloadStabilization(base.BaseTestCase):
statistic_aggregation=self.fake_metrics.mock_get_statistics)
self.strategy = strategies.WorkloadStabilization(config=mock.Mock())
def test_get_vm_load(self):
def test_get_instance_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
vm_0_dict = {'uuid': 'VM_0', 'vcpus': 10,
'cpu_util': 7, 'memory.resident': 2}
self.assertEqual(vm_0_dict, self.strategy.get_vm_load("VM_0"))
instance_0_dict = {
'uuid': 'INSTANCE_0', 'vcpus': 10,
'cpu_util': 7, 'memory.resident': 2}
self.assertEqual(
instance_0_dict, self.strategy.get_instance_load("INSTANCE_0"))
def test_normalize_hosts_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
@@ -109,7 +111,7 @@ class TestWorkloadStabilization(base.BaseTestCase):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.assertEqual(
self.strategy.calculate_migration_case(
self.hosts_load_assert, "VM_5",
self.hosts_load_assert, "INSTANCE_5",
"Node_2", "Node_1")[-1]["Node_1"],
{'cpu_util': 2.55, 'memory.resident': 21, 'vcpus': 40})
@@ -131,20 +133,25 @@ class TestWorkloadStabilization(base.BaseTestCase):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2}
self.strategy.simulate_migrations = mock.Mock(
return_value=[{'vm': 'VM_4', 's_host': 'Node_2', 'host': 'Node_1'}]
return_value=[
{'instance': 'INSTANCE_4', 's_host': 'Node_2',
'host': 'Node_1'}]
)
with mock.patch.object(self.strategy, 'migrate') as mock_migration:
self.strategy.execute()
mock_migration.assert_called_once_with(
'VM_4', 'Node_2', 'Node_1')
'INSTANCE_4', 'Node_2', 'Node_1')
def test_execute_multiply_migrations(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.00001,
'memory.resident': 0.0001}
self.strategy.simulate_migrations = mock.Mock(
return_value=[{'vm': 'VM_4', 's_host': 'Node_2', 'host': 'Node_1'},
{'vm': 'VM_3', 's_host': 'Node_2', 'host': 'Node_3'}]
return_value=[
{'instance': 'INSTANCE_4', 's_host': 'Node_2',
'host': 'Node_1'},
{'instance': 'INSTANCE_3', 's_host': 'Node_2',
'host': 'Node_3'}]
)
with mock.patch.object(self.strategy, 'migrate') as mock_migrate:
self.strategy.execute()