VM workload consolidation: allow cold migrations
Although Watcher supports cold migrations, the VM workload consolidation workflow only allows live migrations to be performed. We'll remove this unnecessary limitation so that stopped instances can be cold migrated. Change-Id: I4b41550f2255560febf8586722a0e02045c3a486
This commit is contained in:
@@ -21,6 +21,7 @@
|
||||
from oslo_log import log
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.applier.actions import migration
|
||||
from watcher.common import exception
|
||||
from watcher.decision_engine.model import element
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
@@ -196,12 +197,12 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
:return: None
|
||||
"""
|
||||
instance_state_str = self.get_instance_state_str(instance)
|
||||
if instance_state_str not in (element.InstanceState.ACTIVE.value,
|
||||
element.InstanceState.PAUSED.value):
|
||||
# Watcher currently only supports live VM migration and block live
|
||||
# VM migration which both requires migrated VM to be active.
|
||||
# When supported, the cold migration may be used as a fallback
|
||||
# migration mechanism to move non active VMs.
|
||||
if instance_state_str in (element.InstanceState.ACTIVE.value,
|
||||
element.InstanceState.PAUSED.value):
|
||||
migration_type = migration.Migrate.LIVE_MIGRATION
|
||||
elif instance_state_str == element.InstanceState.STOPPED.value:
|
||||
migration_type = migration.Migrate.COLD_MIGRATION
|
||||
else:
|
||||
LOG.error(
|
||||
'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
|
||||
'state=%(instance_state)s.', dict(
|
||||
@@ -209,8 +210,6 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
instance_state=instance_state_str))
|
||||
return
|
||||
|
||||
migration_type = 'live'
|
||||
|
||||
# Here will makes repeated actions to enable the same compute node,
|
||||
# when migrating VMs to the destination node which is disabled.
|
||||
# Whether should we remove the same actions in the solution???
|
||||
|
||||
@@ -113,7 +113,8 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
||||
expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375}
|
||||
self.assertEqual(expected_cru, cru)
|
||||
|
||||
def test_add_migration_with_active_state(self):
|
||||
def _test_add_migration(self, instance_state, expect_migration=True,
|
||||
expected_migration_type="live"):
|
||||
model = self.fake_c_cluster.generate_scenario_1()
|
||||
self.m_c_model.return_value = model
|
||||
self.fake_metrics.model = model
|
||||
@@ -121,38 +122,36 @@ class TestVMWorkloadConsolidation(TestBaseStrategy):
|
||||
n2 = model.get_node_by_uuid('Node_1')
|
||||
instance_uuid = 'INSTANCE_0'
|
||||
instance = model.get_instance_by_uuid(instance_uuid)
|
||||
instance.state = instance_state
|
||||
self.strategy.add_migration(instance, n1, n2)
|
||||
self.assertEqual(1, len(self.strategy.solution.actions))
|
||||
expected = {'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'source_node': n1.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': instance.uuid,
|
||||
'resource_name': instance.name}}
|
||||
self.assertEqual(expected, self.strategy.solution.actions[0])
|
||||
|
||||
if expect_migration:
|
||||
self.assertEqual(1, len(self.strategy.solution.actions))
|
||||
|
||||
expected = {'action_type': 'migrate',
|
||||
'input_parameters': {
|
||||
'destination_node': n2.hostname,
|
||||
'source_node': n1.hostname,
|
||||
'migration_type': expected_migration_type,
|
||||
'resource_id': instance.uuid,
|
||||
'resource_name': instance.name}}
|
||||
self.assertEqual(expected, self.strategy.solution.actions[0])
|
||||
else:
|
||||
self.assertEqual(0, len(self.strategy.solution.actions))
|
||||
|
||||
def test_add_migration_with_active_state(self):
|
||||
self._test_add_migration(element.InstanceState.ACTIVE.value)
|
||||
|
||||
def test_add_migration_with_paused_state(self):
|
||||
model = self.fake_c_cluster.generate_scenario_1()
|
||||
self.m_c_model.return_value = model
|
||||
self.fake_metrics.model = model
|
||||
n1 = model.get_node_by_uuid('Node_0')
|
||||
n2 = model.get_node_by_uuid('Node_1')
|
||||
instance_uuid = 'INSTANCE_0'
|
||||
instance = model.get_instance_by_uuid(instance_uuid)
|
||||
setattr(instance, 'state', element.InstanceState.ERROR.value)
|
||||
self.strategy.add_migration(instance, n1, n2)
|
||||
self.assertEqual(0, len(self.strategy.solution.actions))
|
||||
self._test_add_migration(element.InstanceState.PAUSED.value)
|
||||
|
||||
setattr(instance, 'state', element.InstanceState.PAUSED.value)
|
||||
self.strategy.add_migration(instance, n1, n2)
|
||||
self.assertEqual(1, len(self.strategy.solution.actions))
|
||||
expected = {'action_type': 'migrate',
|
||||
'input_parameters': {'destination_node': n2.hostname,
|
||||
'source_node': n1.hostname,
|
||||
'migration_type': 'live',
|
||||
'resource_id': instance.uuid,
|
||||
'resource_name': instance.name}}
|
||||
self.assertEqual(expected, self.strategy.solution.actions[0])
|
||||
def test_add_migration_with_error_state(self):
|
||||
self._test_add_migration(element.InstanceState.ERROR.value,
|
||||
expect_migration=False)
|
||||
|
||||
def test_add_migration_with_stopped_state(self):
|
||||
self._test_add_migration(element.InstanceState.STOPPED.value,
|
||||
expected_migration_type="cold")
|
||||
|
||||
def test_is_overloaded(self):
|
||||
model = self.fake_c_cluster.generate_scenario_1()
|
||||
|
||||
Reference in New Issue
Block a user