vm workload consolidation: allow cold migrations

Although Watcher supports cold migrations, the VM workload
consolidation workflow only allows live migrations to be
performed.

We'll remove this unnecessary limitation so that stopped instances
can be cold migrated.

Change-Id: I4b41550f2255560febf8586722a0e02045c3a486
This commit is contained in:
Lucian Petrut
2023-10-10 15:47:14 +03:00
parent 922478fbda
commit 7336a48057
2 changed files with 35 additions and 37 deletions

View File

@@ -21,6 +21,7 @@
from oslo_log import log from oslo_log import log
from watcher._i18n import _ from watcher._i18n import _
from watcher.applier.actions import migration
from watcher.common import exception from watcher.common import exception
from watcher.decision_engine.model import element from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import base
@@ -196,12 +197,12 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
:return: None :return: None
""" """
instance_state_str = self.get_instance_state_str(instance) instance_state_str = self.get_instance_state_str(instance)
if instance_state_str not in (element.InstanceState.ACTIVE.value, if instance_state_str in (element.InstanceState.ACTIVE.value,
element.InstanceState.PAUSED.value): element.InstanceState.PAUSED.value):
# Watcher currently only supports live VM migration and block live migration_type = migration.Migrate.LIVE_MIGRATION
# VM migration which both requires migrated VM to be active. elif instance_state_str == element.InstanceState.STOPPED.value:
# When supported, the cold migration may be used as a fallback migration_type = migration.Migrate.COLD_MIGRATION
# migration mechanism to move non active VMs. else:
LOG.error( LOG.error(
'Cannot live migrate: instance_uuid=%(instance_uuid)s, ' 'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
'state=%(instance_state)s.', dict( 'state=%(instance_state)s.', dict(
@@ -209,8 +210,6 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
instance_state=instance_state_str)) instance_state=instance_state_str))
return return
migration_type = 'live'
# Here will makes repeated actions to enable the same compute node, # Here will makes repeated actions to enable the same compute node,
# when migrating VMs to the destination node which is disabled. # when migrating VMs to the destination node which is disabled.
# Whether should we remove the same actions in the solution??? # Whether should we remove the same actions in the solution???

View File

@@ -113,7 +113,8 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375} expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375}
self.assertEqual(expected_cru, cru) self.assertEqual(expected_cru, cru)
def test_add_migration_with_active_state(self): def _test_add_migration(self, instance_state, expect_migration=True,
expected_migration_type="live"):
model = self.fake_c_cluster.generate_scenario_1() model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model self.m_c_model.return_value = model
self.fake_metrics.model = model self.fake_metrics.model = model
@@ -121,38 +122,36 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
n2 = model.get_node_by_uuid('Node_1') n2 = model.get_node_by_uuid('Node_1')
instance_uuid = 'INSTANCE_0' instance_uuid = 'INSTANCE_0'
instance = model.get_instance_by_uuid(instance_uuid) instance = model.get_instance_by_uuid(instance_uuid)
instance.state = instance_state
self.strategy.add_migration(instance, n1, n2) self.strategy.add_migration(instance, n1, n2)
self.assertEqual(1, len(self.strategy.solution.actions))
expected = {'action_type': 'migrate', if expect_migration:
'input_parameters': {'destination_node': n2.hostname, self.assertEqual(1, len(self.strategy.solution.actions))
'source_node': n1.hostname,
'migration_type': 'live', expected = {'action_type': 'migrate',
'resource_id': instance.uuid, 'input_parameters': {
'resource_name': instance.name}} 'destination_node': n2.hostname,
self.assertEqual(expected, self.strategy.solution.actions[0]) 'source_node': n1.hostname,
'migration_type': expected_migration_type,
'resource_id': instance.uuid,
'resource_name': instance.name}}
self.assertEqual(expected, self.strategy.solution.actions[0])
else:
self.assertEqual(0, len(self.strategy.solution.actions))
def test_add_migration_with_active_state(self):
self._test_add_migration(element.InstanceState.ACTIVE.value)
def test_add_migration_with_paused_state(self): def test_add_migration_with_paused_state(self):
model = self.fake_c_cluster.generate_scenario_1() self._test_add_migration(element.InstanceState.PAUSED.value)
self.m_c_model.return_value = model
self.fake_metrics.model = model
n1 = model.get_node_by_uuid('Node_0')
n2 = model.get_node_by_uuid('Node_1')
instance_uuid = 'INSTANCE_0'
instance = model.get_instance_by_uuid(instance_uuid)
setattr(instance, 'state', element.InstanceState.ERROR.value)
self.strategy.add_migration(instance, n1, n2)
self.assertEqual(0, len(self.strategy.solution.actions))
setattr(instance, 'state', element.InstanceState.PAUSED.value) def test_add_migration_with_error_state(self):
self.strategy.add_migration(instance, n1, n2) self._test_add_migration(element.InstanceState.ERROR.value,
self.assertEqual(1, len(self.strategy.solution.actions)) expect_migration=False)
expected = {'action_type': 'migrate',
'input_parameters': {'destination_node': n2.hostname, def test_add_migration_with_stopped_state(self):
'source_node': n1.hostname, self._test_add_migration(element.InstanceState.STOPPED.value,
'migration_type': 'live', expected_migration_type="cold")
'resource_id': instance.uuid,
'resource_name': instance.name}}
self.assertEqual(expected, self.strategy.solution.actions[0])
def test_is_overloaded(self): def test_is_overloaded(self):
model = self.fake_c_cluster.generate_scenario_1() model = self.fake_c_cluster.generate_scenario_1()