Merge "Generalize exceptions & structure of strategies"

This commit is contained in:
Zuul
2019-03-11 12:40:24 +00:00
committed by Gerrit Code Review
35 changed files with 413 additions and 794 deletions

View File

@@ -376,18 +376,6 @@ class KeystoneFailure(WatcherException):
msg_fmt = _("Keystone API endpoint is missing")
class ClusterEmpty(WatcherException):
msg_fmt = _("The list of compute node(s) in the cluster is empty")
class ComputeClusterEmpty(WatcherException):
msg_fmt = _("The list of compute node(s) in the cluster is empty")
class StorageClusterEmpty(WatcherException):
msg_fmt = _("The list of storage node(s) in the cluster is empty")
class MetricCollectorNotDefined(WatcherException):
msg_fmt = _("The metrics resource collector is not defined")

View File

@@ -15,6 +15,7 @@
# limitations under the License.
from watcher.decision_engine.strategy.strategies import actuation
from watcher.decision_engine.strategy.strategies import base
from watcher.decision_engine.strategy.strategies import basic_consolidation
from watcher.decision_engine.strategy.strategies import dummy_strategy
from watcher.decision_engine.strategy.strategies import dummy_with_scorer
@@ -33,6 +34,7 @@ from watcher.decision_engine.strategy.strategies import zone_migration
Actuator = actuation.Actuator
BaseStrategy = base.BaseStrategy
BasicConsolidation = basic_consolidation.BasicConsolidation
OutletTempControl = outlet_temp_control.OutletTempControl
DummyStrategy = dummy_strategy.DummyStrategy
@@ -47,8 +49,9 @@ NoisyNeighbor = noisy_neighbor.NoisyNeighbor
ZoneMigration = zone_migration.ZoneMigration
HostMaintenance = host_maintenance.HostMaintenance
__all__ = ("Actuator", "BasicConsolidation", "OutletTempControl",
"DummyStrategy", "DummyWithScorer", "VMWorkloadConsolidation",
"WorkloadBalance", "WorkloadStabilization", "UniformAirflow",
"NoisyNeighbor", "SavingEnergy", "StorageCapacityBalance",
"ZoneMigration", "HostMaintenance")
__all__ = ("Actuator", "BaseStrategy", "BasicConsolidation",
"OutletTempControl", "DummyStrategy", "DummyWithScorer",
"VMWorkloadConsolidation", "WorkloadBalance",
"WorkloadStabilization", "UniformAirflow", "NoisyNeighbor",
"SavingEnergy", "StorageCapacityBalance", "ZoneMigration",
"HostMaintenance")

View File

@@ -15,13 +15,9 @@
# limitations under the License.
#
from oslo_log import log
from watcher._i18n import _
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
class Actuator(base.UnclassifiedStrategy):
"""Actuator
@@ -89,7 +85,7 @@ class Actuator(base.UnclassifiedStrategy):
return self.input_parameters.get('actions', [])
def pre_execute(self):
LOG.info("Preparing Actuator strategy...")
self._pre_execute()
def do_execute(self):
for action in self.actions:

View File

@@ -39,6 +39,8 @@ which are dynamically loaded by Watcher at launch time.
import abc
import six
from oslo_config import cfg
from oslo_log import log
from oslo_utils import strutils
from watcher.common import clients
@@ -52,6 +54,9 @@ from watcher.decision_engine.model.collector import manager
from watcher.decision_engine.solution import default
from watcher.decision_engine.strategy.common import level
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class StrategyEndpoint(object):
def __init__(self, messaging):
@@ -218,6 +223,23 @@ class BaseStrategy(loadable.Loadable):
"""
raise NotImplementedError()
def _pre_execute(self):
"""Base Pre-execution phase
This will perform basic pre execution operations most strategies
should perform.
"""
LOG.info("Initializing " + self.get_display_name() + " Strategy")
if not self.compute_model:
raise exception.ClusterStateNotDefined()
if self.compute_model.stale:
raise exception.ClusterStateStale()
LOG.debug(self.compute_model.to_string())
def execute(self):
"""Execute a strategy

View File

@@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -429,18 +428,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
return unsuccessful_migration + 1
def pre_execute(self):
LOG.info("Initializing Server Consolidation")
if not self.compute_model:
raise exception.ClusterStateNotDefined()
if len(self.get_available_compute_nodes()) == 0:
raise exception.ClusterEmpty()
if self.compute_model.stale:
raise exception.ClusterStateStale()
LOG.debug(self.compute_model.to_string())
self._pre_execute()
def do_execute(self):
unsuccessful_migration = 0

View File

@@ -49,7 +49,7 @@ class DummyStrategy(base.DummyBaseStrategy):
SLEEP = "sleep"
def pre_execute(self):
pass
self._pre_execute()
def do_execute(self):
para1 = self.input_parameters.para1

View File

@@ -46,7 +46,7 @@ class DummyWithResize(base.DummyBaseStrategy):
SLEEP = "sleep"
def pre_execute(self):
pass
self._pre_execute()
def do_execute(self):
para1 = self.input_parameters.para1

View File

@@ -76,7 +76,7 @@ class DummyWithScorer(base.DummyBaseStrategy):
metainfo['workloads'])}
def pre_execute(self):
pass
self._pre_execute()
def do_execute(self):
# Simple "hello world" from strategy

View File

@@ -21,7 +21,7 @@ from oslo_log import log
import six
from watcher._i18n import _
from watcher.common import exception as wexc
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -112,7 +112,7 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
'state=%(state)s, state_type=%(st)s.',
dict(state=instance.state,
st=type(instance.state)))
raise wexc.WatcherException
raise exception.WatcherException
def get_node_status_str(self, node):
"""Get node status in string format"""
@@ -125,7 +125,7 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
'status=%(status)s, status_type=%(st)s.',
dict(status=node.status,
st=type(node.status)))
raise wexc.WatcherException
raise exception.WatcherException
def get_node_capacity(self, node):
"""Collect cpu, ram and disk capacity of a node.
@@ -292,13 +292,7 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
self.instance_migration(instance, maintenance_node)
def pre_execute(self):
LOG.debug(self.compute_model.to_string())
if not self.compute_model:
raise wexc.ClusterStateNotDefined()
if self.compute_model.stale:
raise wexc.ClusterStateStale()
self._pre_execute()
def do_execute(self):
LOG.info(_('Executing Host Maintenance Migration Strategy'))

View File

@@ -20,7 +20,6 @@ from oslo_config import cfg
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception as wexc
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@@ -156,12 +155,7 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
return None
def group_hosts(self):
nodes = self.compute_model.get_all_compute_nodes()
size_cluster = len(nodes)
if size_cluster == 0:
raise wexc.ClusterEmpty()
hosts_need_release = {}
hosts_target = []
@@ -259,15 +253,7 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
return dest_servers
def pre_execute(self):
LOG.debug("Initializing Noisy Neighbor strategy")
if not self.compute_model:
raise wexc.ClusterStateNotDefined()
if self.compute_model.stale:
raise wexc.ClusterStateStale()
LOG.debug(self.compute_model.to_string())
self._pre_execute()
def do_execute(self):
self.cache_threshold = self.input_parameters.cache_threshold

View File

@@ -36,7 +36,7 @@ from oslo_config import cfg
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception as wexc
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -177,10 +177,6 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
def group_hosts_by_outlet_temp(self):
"""Group hosts based on outlet temp meters"""
nodes = self.get_available_compute_nodes()
size_cluster = len(nodes)
if size_cluster == 0:
raise wexc.ClusterEmpty()
hosts_need_release = []
hosts_target = []
metric_name = self.METRIC_NAMES[
@@ -231,7 +227,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
instance.uuid)
continue
return mig_source_node, instance
except wexc.InstanceNotFound as e:
except exception.InstanceNotFound as e:
LOG.exception(e)
LOG.info("Instance not found")
@@ -260,22 +256,14 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
return dest_servers
def pre_execute(self):
LOG.debug("Initializing Outlet temperature strategy")
if not self.compute_model:
raise wexc.ClusterStateNotDefined()
if self.compute_model.stale:
raise wexc.ClusterStateStale()
LOG.debug(self.compute_model.to_string())
def do_execute(self):
self._pre_execute()
# the migration plan will be triggered when the outlet temperature
# reaches threshold
self.threshold = self.input_parameters.threshold
LOG.debug("Initializing Outlet temperature strategy with threshold=%d",
self.threshold)
LOG.info("Outlet temperature strategy threshold=%d",
self.threshold)
def do_execute(self):
hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp()
if len(hosts_need_release) == 0:

View File

@@ -22,7 +22,7 @@ import random
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception as wexc
from watcher.common import exception
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@@ -179,7 +179,7 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
host_uuid = compute_service.get('host')
try:
self.compute_model.get_node_by_uuid(host_uuid)
except wexc.ComputeNodeNotFound:
except exception.ComputeNodeNotFound:
continue
if not (hypervisor_node.get('state') == 'up'):
@@ -214,28 +214,15 @@ class SavingEnergy(base.SavingEnergyBaseStrategy):
LOG.debug("power on %s", node)
def pre_execute(self):
"""Pre-execution phase
This can be used to fetch some pre-requisites or data.
"""
LOG.info("Initializing Saving Energy Strategy")
if not self.compute_model:
raise wexc.ClusterStateNotDefined()
if self.compute_model.stale:
raise wexc.ClusterStateStale()
LOG.debug(self.compute_model.to_string())
self._pre_execute()
self.free_used_percent = self.input_parameters.free_used_percent
self.min_free_hosts_num = self.input_parameters.min_free_hosts_num
def do_execute(self):
"""Strategy execution phase
This phase is where you should put the main logic of your strategy.
"""
self.free_used_percent = self.input_parameters.free_used_percent
self.min_free_hosts_num = self.input_parameters.min_free_hosts_num
self.get_hosts_pool()
self.save_energy()

View File

@@ -349,11 +349,7 @@ class StorageCapacityBalance(base.WorkloadStabilizationBaseStrategy):
return retype_dicts, migrate_dicts
def pre_execute(self):
"""Pre-execution phase
This can be used to fetch some pre-requisites or data.
"""
LOG.info("Initializing Storage Capacity Balance Strategy")
LOG.info("Initializing " + self.get_display_name() + " Strategy")
self.volume_threshold = self.input_parameters.volume_threshold
def do_execute(self, audit=None):

View File

@@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception as wexc
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -276,8 +275,6 @@ class UniformAirflow(base.BaseStrategy):
"""Group hosts based on airflow meters"""
nodes = self.get_available_compute_nodes()
if not nodes:
raise wexc.ClusterEmpty()
overload_hosts = []
nonoverload_hosts = []
for node_id in nodes:
@@ -306,14 +303,7 @@ class UniformAirflow(base.BaseStrategy):
return overload_hosts, nonoverload_hosts
def pre_execute(self):
LOG.debug("Initializing Uniform Airflow Strategy")
if not self.compute_model:
raise wexc.ClusterStateNotDefined()
if self.compute_model.stale:
raise wexc.ClusterStateStale()
self._pre_execute()
self.meter_name_airflow = self.METRIC_NAMES[
self.datasource_backend.NAME]['host_airflow']
self.meter_name_inlet_t = self.METRIC_NAMES[
@@ -321,13 +311,12 @@ class UniformAirflow(base.BaseStrategy):
self.meter_name_power = self.METRIC_NAMES[
self.datasource_backend.NAME]['host_power']
LOG.debug(self.compute_model.to_string())
def do_execute(self):
self.threshold_airflow = self.input_parameters.threshold_airflow
self.threshold_inlet_t = self.input_parameters.threshold_inlet_t
self.threshold_power = self.input_parameters.threshold_power
self._period = self.input_parameters.period
def do_execute(self):
source_nodes, target_nodes = self.group_hosts_by_airflow()
if not source_nodes:

View File

@@ -154,8 +154,9 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
def get_available_compute_nodes(self):
default_node_scope = [element.ServiceState.ENABLED.value,
element.ServiceState.DISABLED.value]
nodes = self.compute_model.get_all_compute_nodes().items()
return {uuid: cn for uuid, cn in
self.compute_model.get_all_compute_nodes().items()
nodes
if cn.state == element.ServiceState.ONLINE.value and
cn.status in default_node_scope}
@@ -554,13 +555,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
asc += 1
def pre_execute(self):
if not self.compute_model:
raise exception.ClusterStateNotDefined()
if self.compute_model.stale:
raise exception.ClusterStateStale()
LOG.debug(self.compute_model.to_string())
self._pre_execute()
def do_execute(self):
"""Execute strategy.

View File

@@ -23,7 +23,7 @@ from oslo_config import cfg
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception as wexc
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -196,7 +196,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
if 0 <= current_delta < min_delta:
min_delta = current_delta
instance_id = instance.uuid
except wexc.InstanceNotFound:
except exception.InstanceNotFound:
LOG.error("Instance not found; error: %s",
instance_id)
if instance_id:
@@ -251,8 +251,6 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
nodes = self.get_available_compute_nodes()
cluster_size = len(nodes)
if not nodes:
raise wexc.ClusterEmpty()
overload_hosts = []
nonoverload_hosts = []
# total workload of cluster
@@ -303,34 +301,24 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
else:
nonoverload_hosts.append(instance_data)
avg_workload = cluster_workload / cluster_size
avg_workload = 0
if cluster_size != 0:
avg_workload = cluster_workload / cluster_size
return overload_hosts, nonoverload_hosts, avg_workload, workload_cache
def pre_execute(self):
"""Pre-execution phase
This can be used to fetch some pre-requisites or data.
"""
LOG.info("Initializing Workload Balance Strategy")
if not self.compute_model:
raise wexc.ClusterStateNotDefined()
if self.compute_model.stale:
raise wexc.ClusterStateStale()
LOG.debug(self.compute_model.to_string())
self._pre_execute()
self.threshold = self.input_parameters.threshold
self._period = self.input_parameters.period
self._meter = self.input_parameters.metrics
self._granularity = self.input_parameters.granularity
def do_execute(self):
"""Strategy execution phase
This phase is where you should put the main logic of your strategy.
"""
self.threshold = self.input_parameters.threshold
self._period = self.input_parameters.period
self._meter = self.input_parameters.metrics
self._granularity = self.input_parameters.granularity
source_nodes, target_nodes, avg_workload, workload_cache = (
self.group_hosts_by_cpu_or_ram_util())

View File

@@ -290,8 +290,8 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
return normalized_hosts
def get_available_nodes(self):
return {node_uuid: node for node_uuid, node in
self.compute_model.get_all_compute_nodes().items()
nodes = self.compute_model.get_all_compute_nodes().items()
return {node_uuid: node for node_uuid, node in nodes
if node.state == element.ServiceState.ONLINE.value and
node.status == element.ServiceState.ENABLED.value}
@@ -506,14 +506,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
return self.solution
def pre_execute(self):
LOG.info("Initializing Workload Stabilization")
if not self.compute_model:
raise exception.ClusterStateNotDefined()
if self.compute_model.stale:
raise exception.ClusterStateStale()
self._pre_execute()
self.weights = self.input_parameters.weights
self.metrics = self.input_parameters.metrics
self.thresholds = self.input_parameters.thresholds

View File

@@ -21,7 +21,6 @@ from cinderclient.v2.volumes import Volume
from novaclient.v2.servers import Server
from watcher._i18n import _
from watcher.common import cinder_helper
from watcher.common import exception as wexc
from watcher.common import nova_helper
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -267,19 +266,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
cn.status in default_node_scope}
def pre_execute(self):
"""Pre-execution phase
This can be used to fetch some pre-requisites or data.
"""
LOG.info("Initializing zone migration Strategy")
if len(self.get_available_compute_nodes()) == 0:
raise wexc.ComputeClusterEmpty()
if len(self.get_available_storage_nodes()) == 0:
raise wexc.StorageClusterEmpty()
LOG.debug(self.compute_model.to_string())
self._pre_execute()
LOG.debug(self.storage_model.to_string())
def do_execute(self):

View File

@@ -29,6 +29,7 @@ from watcher.db.sqlalchemy import api as sq_api
from watcher.decision_engine.audit import continuous
from watcher.decision_engine.audit import oneshot
from watcher.decision_engine.model.collector import manager
from watcher.decision_engine.strategy.strategies import base as base_strategy
from watcher.decision_engine.strategy.strategies import dummy_strategy
from watcher import notifications
from watcher import objects
@@ -62,6 +63,8 @@ class TestOneShotAuditHandler(base.DbTestCase):
goal=self.goal)
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector")
@mock.patch.object(base_strategy.BaseStrategy, "compute_model",
mock.Mock(stale=False))
def test_trigger_audit_without_errors(self, m_collector):
m_collector.return_value = faker.FakerModelCollector()
audit_handler = oneshot.OneShotAuditHandler()
@@ -85,7 +88,7 @@ class TestOneShotAuditHandler(base.DbTestCase):
expected_calls,
self.m_audit_notifications.send_action_notification.call_args_list)
@mock.patch.object(dummy_strategy.DummyStrategy, "do_execute")
@mock.patch.object(base_strategy.BaseStrategy, "do_execute")
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector")
def test_trigger_audit_with_error(self, m_collector, m_do_execute):
m_collector.return_value = faker.FakerModelCollector()
@@ -107,6 +110,8 @@ class TestOneShotAuditHandler(base.DbTestCase):
self.m_audit_notifications.send_action_notification.call_args_list)
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector")
@mock.patch.object(base_strategy.BaseStrategy, "compute_model",
mock.Mock(stale=False))
def test_trigger_audit_state_succeeded(self, m_collector):
m_collector.return_value = faker.FakerModelCollector()
audit_handler = oneshot.OneShotAuditHandler()
@@ -133,6 +138,8 @@ class TestOneShotAuditHandler(base.DbTestCase):
self.m_audit_notifications.send_action_notification.call_args_list)
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector")
@mock.patch.object(base_strategy.BaseStrategy, "compute_model",
mock.Mock(stale=False))
def test_trigger_audit_send_notification(self, m_collector):
m_collector.return_value = faker.FakerModelCollector()
audit_handler = oneshot.OneShotAuditHandler()
@@ -425,23 +432,25 @@ class TestContinuousAuditHandler(base.DbTestCase):
audit_handler.launch_audits_periodically()
m_remove_job.assert_called()
@mock.patch.object(continuous.ContinuousAuditHandler, 'planner')
def test_execute_audit(self, m_planner):
@mock.patch.object(continuous.ContinuousAuditHandler, 'planner',
mock.Mock())
@mock.patch.object(base_strategy.BaseStrategy, "compute_model",
mock.Mock(stale=False))
def test_execute_audit(self):
audit_handler = continuous.ContinuousAuditHandler()
audit = self.audits[0]
audit_handler.execute_audit(audit, self.context)
audit_handler.execute_audit(self.audits[0], self.context)
expected_calls = [
mock.call(self.context, audit,
mock.call(self.context, self.audits[0],
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, audit,
mock.call(self.context, self.audits[0],
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.END),
mock.call(self.context, audit,
mock.call(self.context, self.audits[0],
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, audit,
mock.call(self.context, self.audits[0],
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.END)]

View File

@@ -23,6 +23,7 @@ from watcher.decision_engine.strategy.context import default as d_strategy_ctx
from watcher.decision_engine.strategy.selection import default as d_selector
from watcher.decision_engine.strategy import strategies
from watcher.tests.db import base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.objects import utils as obj_utils
@@ -35,14 +36,20 @@ class TestStrategyContext(base.DbTestCase):
self.context, uuid=utils.generate_uuid())
self.audit = obj_utils.create_test_audit(
self.context, audit_template_id=audit_template.id)
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_model = mock.patch.object(
strategies.DummyStrategy, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
self.m_model.return_value = self.fake_cluster.build_scenario_1()
strategy_context = d_strategy_ctx.DefaultStrategyContext()
@mock.patch.object(strategies.DummyStrategy, 'compute_model',
new_callable=mock.PropertyMock)
@mock.patch.object(d_selector.DefaultStrategySelector, 'select')
def test_execute_strategy(self, mock_call, m_model):
m_model.return_value = mock.Mock()
def test_execute_strategy(self, mock_call):
mock_call.return_value = strategies.DummyStrategy(
config=mock.Mock())
solution = self.strategy_context.execute_strategy(

View File

@@ -18,14 +18,14 @@ import mock
from watcher.common import utils
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestActuator(base.TestCase):
class TestActuator(TestBaseStrategy):
def setUp(self):
super(TestActuator, self).setUp()
# fake cluster
self.strategy = strategies.Actuator(config=mock.Mock())
def test_actuator_strategy(self):

View File

@@ -0,0 +1,70 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from watcher.common import exception
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
class TestBaseStrategy(base.TestCase):
    """Shared fixture for decision-engine strategy tests.

    Patches ``strategies.BaseStrategy.compute_model`` and
    ``strategies.BaseStrategy.audit_scope`` as property mocks so that
    subclasses can inject a fake cluster data model, then instantiates a
    ``DummyStrategy`` as the default strategy under test.

    Attributes set for subclasses:
      fake_c_cluster: faker that can build scenario compute models.
      m_c_model: PropertyMock standing in for ``compute_model``;
          tests override ``m_c_model.return_value``.
      m_audit_scope: PropertyMock standing in for ``audit_scope``.
      strategy: the strategy instance exercised by the tests.
    """

    def setUp(self):
        super(TestBaseStrategy, self).setUp()
        # fake cluster
        self.fake_c_cluster = faker_cluster_state.FakerModelCollector()
        # Patch compute_model on the *base* class so the mock applies to
        # every strategy subclass instantiated by derived test cases.
        p_c_model = mock.patch.object(
            strategies.BaseStrategy, "compute_model",
            new_callable=mock.PropertyMock)
        self.m_c_model = p_c_model.start()
        self.addCleanup(p_c_model.stop)
        p_audit_scope = mock.patch.object(
            strategies.BaseStrategy, "audit_scope",
            new_callable=mock.PropertyMock)
        self.m_audit_scope = p_audit_scope.start()
        self.addCleanup(p_audit_scope.stop)
        self.m_audit_scope.return_value = mock.Mock()
        # Default to an empty model; individual tests replace this with a
        # populated scenario from fake_c_cluster as needed.
        self.m_c_model.return_value = model_root.ModelRoot()
        self.strategy = strategies.DummyStrategy(config=mock.Mock())
class TestBaseStrategyException(TestBaseStrategy):
    """Exercise the error paths of the common strategy pre-execution.

    Verifies that executing a strategy fails fast when the compute model
    is missing or stale, as checked by ``BaseStrategy._pre_execute``.
    """

    def setUp(self):
        super(TestBaseStrategyException, self).setUp()

    def test_exception_model(self):
        # No compute model available at all -> ClusterStateNotDefined.
        self.m_c_model.return_value = None
        self.assertRaises(
            exception.ClusterStateNotDefined, self.strategy.execute)

    def test_exception_stale_cdm(self):
        # Mark the faker's cluster data model as stale and inject it.
        self.fake_c_cluster.set_cluster_data_model_as_stale()
        self.m_c_model.return_value = self.fake_c_cluster.cluster_data_model
        self.assertRaises(
            # TODO(Dantali0n) This should return ClusterStale,
            # improve set_cluster_data_model_as_stale().
            exception.ClusterStateNotDefined,
            self.strategy.execute)

View File

@@ -22,17 +22,16 @@ import mock
from watcher.applier.loading import default
from watcher.common import clients
from watcher.common import exception
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model import gnocchi_metrics
from watcher.tests.decision_engine.model import monasca_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestBasicConsolidation(base.TestCase):
class TestBasicConsolidation(TestBaseStrategy):
scenarios = [
("Ceilometer",
@@ -50,36 +49,18 @@ class TestBasicConsolidation(base.TestCase):
super(TestBasicConsolidation, self).setUp()
# fake metrics
self.fake_metrics = self.fake_datasource_cls()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_osc = mock.patch.object(
clients, "OpenStackClients")
self.m_osc = p_osc.start()
self.addCleanup(p_osc.stop)
p_model = mock.patch.object(
strategies.BasicConsolidation, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.BasicConsolidation, 'datasource_backend',
new_callable=mock.PropertyMock)
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
p_audit_scope = mock.patch.object(
strategies.BasicConsolidation, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_model.return_value = model_root.ModelRoot()
self.m_datasource.return_value = mock.Mock(
get_host_cpu_usage=self.fake_metrics.get_usage_node_cpu,
get_instance_cpu_usage=self.fake_metrics.
@@ -90,13 +71,13 @@ class TestBasicConsolidation(base.TestCase):
def test_cluster_size(self):
size_cluster = len(
self.fake_cluster.generate_scenario_1().get_all_compute_nodes())
self.fake_c_cluster.generate_scenario_1().get_all_compute_nodes())
size_cluster_assert = 5
self.assertEqual(size_cluster_assert, size_cluster)
def test_basic_consolidation_score_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_1_score = 0.023333333333333317
self.assertEqual(node_1_score, self.strategy.calculate_score_node(
model.get_node_by_uuid("Node_1")))
@@ -108,8 +89,8 @@ class TestBasicConsolidation(base.TestCase):
model.get_node_by_uuid("Node_0")))
def test_basic_consolidation_score_instance(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
instance_0_score = 0.023333333333333317
self.assertEqual(
@@ -138,8 +119,8 @@ class TestBasicConsolidation(base.TestCase):
self.strategy.calculate_score_instance(instance_7))
def test_basic_consolidation_score_instance_disk(self):
model = self.fake_cluster.generate_scenario_5_with_instance_disk_0()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_5_with_instance_disk_0()
self.m_c_model.return_value = model
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
instance_0_score = 0.023333333333333355
self.assertEqual(
@@ -147,8 +128,8 @@ class TestBasicConsolidation(base.TestCase):
self.strategy.calculate_score_instance(instance_0))
def test_basic_consolidation_weight(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
cores = 16
# 80 Go
@@ -160,19 +141,9 @@ class TestBasicConsolidation(base.TestCase):
instance_0_weight_assert,
self.strategy.calculate_weight(instance_0, cores, disk, mem))
def test_exception_model(self):
self.m_model.return_value = None
self.assertRaises(
exception.ClusterStateNotDefined, self.strategy.execute)
def test_exception_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_check_migration(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
all_instances = model.get_all_instances()
all_nodes = model.get_all_compute_nodes()
@@ -182,8 +153,8 @@ class TestBasicConsolidation(base.TestCase):
self.strategy.check_migration(node0, node0, instance0)
def test_threshold(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
all_nodes = model.get_all_compute_nodes()
node0 = all_nodes[list(all_nodes.keys())[0]]
@@ -192,16 +163,16 @@ class TestBasicConsolidation(base.TestCase):
node0, 1000, 1000, 1000))
def test_basic_consolidation_works_on_model_copy(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = copy.deepcopy(model)
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = copy.deepcopy(model)
self.assertTrue(model_root.ModelRoot.is_isomorphic(
model, self.strategy.compute_model))
self.assertIsNot(model, self.strategy.compute_model)
def test_basic_consolidation_migration(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
@@ -218,8 +189,8 @@ class TestBasicConsolidation(base.TestCase):
self.assertEqual(expected_power_state, num_node_state_change)
def test_basic_consolidation_execute_scenario_8_with_4_nodes(self):
model = self.fake_cluster.generate_scenario_8_with_4_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_8_with_4_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
@@ -240,20 +211,12 @@ class TestBasicConsolidation(base.TestCase):
self.assertEqual(expected_power_state, num_node_state_change)
self.assertEqual(expected_global_efficacy, global_efficacy_value)
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
self.m_model.return_value = self.fake_cluster.cluster_data_model
self.assertRaises(
exception.ClusterStateNotDefined,
self.strategy.execute)
# calculate_weight
def test_execute_no_workload(self):
model = (
self.fake_cluster
self.fake_c_cluster
.generate_scenario_4_with_1_node_no_instance())
self.m_model.return_value = model
self.m_c_model.return_value = model
with mock.patch.object(
strategies.BasicConsolidation, 'calculate_weight'
@@ -265,8 +228,8 @@ class TestBasicConsolidation(base.TestCase):
solution.efficacy.global_efficacy[0].get('value'))
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()
for action in solution.actions:
@@ -275,8 +238,8 @@ class TestBasicConsolidation(base.TestCase):
loaded_action.validate_parameters()
"""def test_periods(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_1 = model.get_node_by_uuid("Node_1")
p_ceilometer = mock.patch.object(
strategies.BasicConsolidation, "ceilometer")

View File

@@ -18,38 +18,15 @@ import mock
from watcher.applier.loading import default
from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestDummyStrategy(base.TestCase):
class TestDummyStrategy(TestBaseStrategy):
def setUp(self):
super(TestDummyStrategy, self).setUp()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_model = mock.patch.object(
strategies.DummyStrategy, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_audit_scope = mock.patch.object(
strategies.DummyStrategy, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_model.return_value = model_root.ModelRoot()
self.strategy = strategies.DummyStrategy(config=mock.Mock())
self.m_model.return_value = model_root.ModelRoot()
self.strategy = strategies.DummyStrategy(config=mock.Mock())
def test_dummy_strategy(self):
@@ -60,8 +37,8 @@ class TestDummyStrategy(base.TestCase):
self.assertEqual(3, len(solution.actions))
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'})
solution = self.strategy.execute()

View File

@@ -18,26 +18,15 @@ import mock
from watcher.applier.loading import default
from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestDummyWithScorer(base.TestCase):
class TestDummyWithScorer(TestBaseStrategy):
def setUp(self):
super(TestDummyWithScorer, self).setUp()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_model = mock.patch.object(
strategies.DummyWithScorer, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
self.m_model.return_value = model_root.ModelRoot()
self.strategy = strategies.DummyWithScorer(config=mock.Mock())
def test_dummy_with_scorer(self):
@@ -48,8 +37,8 @@ class TestDummyWithScorer(base.TestCase):
self.assertEqual(4, len(solution.actions))
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'})
solution = self.strategy.execute()

View File

@@ -21,46 +21,17 @@ import mock
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestHostMaintenance(base.TestCase):
class TestHostMaintenance(TestBaseStrategy):
def setUp(self):
super(TestHostMaintenance, self).setUp()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_model = mock.patch.object(
strategies.HostMaintenance, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_audit_scope = mock.patch.object(
strategies.HostMaintenance, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_model.return_value = model_root.ModelRoot()
self.strategy = strategies.HostMaintenance(config=mock.Mock())
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
self.m_model.return_value = self.fake_cluster.cluster_data_model
self.assertRaises(
exception.ClusterStateNotDefined,
self.strategy.execute)
def test_get_instance_state_str(self):
mock_instance = mock.MagicMock(state="active")
self.assertEqual("active",
@@ -92,39 +63,39 @@ class TestHostMaintenance(base.TestCase):
mock_node)
def test_get_node_capacity(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid("Node_0")
node_capacity = dict(cpu=40, ram=132, disk=250)
self.assertEqual(node_capacity,
self.strategy.get_node_capacity(node_0))
def test_get_node_used(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid("Node_0")
node_used = dict(cpu=20, ram=4, disk=40)
self.assertEqual(node_used,
self.strategy.get_node_used(node_0))
def test_get_node_free(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid("Node_0")
node_free = dict(cpu=20, ram=128, disk=210)
self.assertEqual(node_free,
self.strategy.get_node_free(node_0))
def test_host_fits(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid("Node_0")
node_1 = model.get_node_by_uuid("Node_1")
self.assertTrue(self.strategy.host_fits(node_0, node_1))
def test_add_action_enable_compute_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid('Node_0')
self.strategy.add_action_enable_compute_node(node_0)
expected = [{'action_type': 'change_nova_service_state',
@@ -134,8 +105,8 @@ class TestHostMaintenance(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions)
def test_add_action_maintain_compute_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid('Node_0')
self.strategy.add_action_maintain_compute_node(node_0)
expected = [{'action_type': 'change_nova_service_state',
@@ -146,8 +117,8 @@ class TestHostMaintenance(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions)
def test_instance_migration(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid('Node_0')
node_1 = model.get_node_by_uuid('Node_1')
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
@@ -161,8 +132,8 @@ class TestHostMaintenance(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions)
def test_instance_migration_without_dest_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid('Node_0')
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
self.strategy.instance_migration(instance_0, node_0)
@@ -174,8 +145,8 @@ class TestHostMaintenance(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions)
def test_host_migration(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid('Node_0')
node_1 = model.get_node_by_uuid('Node_1')
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
@@ -196,29 +167,34 @@ class TestHostMaintenance(base.TestCase):
self.assertIn(expected[1], self.strategy.solution.actions)
def test_safe_maintain(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid('Node_0')
node_1 = model.get_node_by_uuid('Node_1')
self.assertFalse(self.strategy.safe_maintain(node_0))
self.assertFalse(self.strategy.safe_maintain(node_1))
model = self.fake_cluster.generate_scenario_1_with_all_nodes_disable()
self.m_model.return_value = model
model = self.fake_c_cluster.\
generate_scenario_1_with_all_nodes_disable()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid('Node_0')
self.assertTrue(self.strategy.safe_maintain(node_0))
def test_try_maintain(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_1 = model.get_node_by_uuid('Node_1')
self.strategy.try_maintain(node_1)
self.assertEqual(2, len(self.strategy.solution.actions))
def test_exception_compute_node_not_found(self):
self.m_c_model.return_value = self.fake_c_cluster.build_scenario_1()
self.assertRaises(exception.ComputeNodeNotFound, self.strategy.execute)
def test_strategy(self):
model = self.fake_cluster. \
model = self.fake_c_cluster. \
generate_scenario_9_with_3_active_plus_1_disabled_nodes()
self.m_model.return_value = model
self.m_c_model.return_value = model
node_2 = model.get_node_by_uuid('Node_2')
node_3 = model.get_node_by_uuid('Node_3')
instance_4 = model.get_instance_by_uuid("INSTANCE_4")

View File

@@ -20,17 +20,15 @@ import collections
import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model import gnocchi_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestNoisyNeighbor(base.TestCase):
class TestNoisyNeighbor(TestBaseStrategy):
scenarios = [
("Ceilometer",
@@ -45,14 +43,6 @@ class TestNoisyNeighbor(base.TestCase):
super(TestNoisyNeighbor, self).setUp()
# fake metrics
self.f_metrics = self.fake_datasource_cls()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_model = mock.patch.object(
strategies.NoisyNeighbor, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.NoisyNeighbor, "datasource_backend",
@@ -60,16 +50,6 @@ class TestNoisyNeighbor(base.TestCase):
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
p_audit_scope = mock.patch.object(
strategies.NoisyNeighbor, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_model.return_value = model_root.ModelRoot()
self.m_datasource.return_value = mock.Mock(
get_instance_l3_cache_usage=self.f_metrics.mock_get_statistics_nn)
self.strategy = strategies.NoisyNeighbor(config=mock.Mock())
@@ -81,8 +61,8 @@ class TestNoisyNeighbor(base.TestCase):
self.strategy.threshold = 100
def test_calc_used_resource(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
node = model.get_node_by_uuid('Node_0')
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
node)
@@ -92,8 +72,8 @@ class TestNoisyNeighbor(base.TestCase):
def test_group_hosts(self):
self.strategy.cache_threshold = 35
self.strategy.period = 100
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
node_uuid = 'Node_1'
n1, n2 = self.strategy.group_hosts()
self.assertTrue(node_uuid in n1)
@@ -104,8 +84,8 @@ class TestNoisyNeighbor(base.TestCase):
def test_find_priority_instance(self):
self.strategy.cache_threshold = 35
self.strategy.period = 100
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
potential_prio_inst = model.get_instance_by_uuid('INSTANCE_3')
inst_res = self.strategy.find_priority_instance(potential_prio_inst)
self.assertEqual('INSTANCE_3', inst_res.uuid)
@@ -113,15 +93,15 @@ class TestNoisyNeighbor(base.TestCase):
def test_find_noisy_instance(self):
self.strategy.cache_threshold = 35
self.strategy.period = 100
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
potential_noisy_inst = model.get_instance_by_uuid('INSTANCE_4')
inst_res = self.strategy.find_noisy_instance(potential_noisy_inst)
self.assertEqual('INSTANCE_4', inst_res.uuid)
def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.cache_threshold = 35
self.strategy.period = 100
n1, n2 = self.strategy.group_hosts()
@@ -134,34 +114,12 @@ class TestNoisyNeighbor(base.TestCase):
self.assertEqual(1, len(dest_hosts))
self.assertEqual('Node_0', dest_hosts[0].uuid)
def test_exception_model(self):
self.m_model.return_value = None
self.assertRaises(
exception.ClusterStateNotDefined, self.strategy.execute)
def test_exception_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
self.m_model.return_value = self.fake_cluster.cluster_data_model
self.assertRaises(
exception.ClusterStateNotDefined,
self.strategy.execute)
def test_execute_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self):
self.strategy.cache_threshold = 35
self.strategy.period = 100
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
model = self.fake_c_cluster.\
generate_scenario_4_with_1_node_no_instance()
self.m_c_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
@@ -169,8 +127,8 @@ class TestNoisyNeighbor(base.TestCase):
def test_execute(self):
self.strategy.cache_threshold = 35
self.strategy.period = 100
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
[action.get('action_type') for action in solution.actions])
@@ -179,8 +137,8 @@ class TestNoisyNeighbor(base.TestCase):
self.assertEqual(1, num_migrations)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()
for action in solution.actions:

View File

@@ -20,17 +20,15 @@ import collections
import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model import gnocchi_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestOutletTempControl(base.TestCase):
class TestOutletTempControl(TestBaseStrategy):
scenarios = [
("Ceilometer",
@@ -46,31 +44,12 @@ class TestOutletTempControl(base.TestCase):
# fake metrics
self.fake_metrics = self.fake_datasource_cls()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_model = mock.patch.object(
strategies.OutletTempControl, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.OutletTempControl, 'datasource_backend',
new_callable=mock.PropertyMock)
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
p_audit_scope = mock.patch.object(
strategies.OutletTempControl, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_model.return_value = model_root.ModelRoot()
self.m_datasource.return_value = mock.Mock(
statistic_aggregation=self.fake_metrics.mock_get_statistics,
NAME=self.fake_metrics.NAME)
@@ -82,8 +61,8 @@ class TestOutletTempControl(base.TestCase):
self.strategy.threshold = 34.3
def test_calc_used_resource(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
node = model.get_node_by_uuid('Node_0')
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
node)
@@ -91,15 +70,15 @@ class TestOutletTempControl(base.TestCase):
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
def test_group_hosts_by_outlet_temp(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
self.assertEqual('Node_1', n1[0]['node'].uuid)
self.assertEqual('Node_0', n2[0]['node'].uuid)
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
self.assertEqual('Node_1', instance_to_mig[0].uuid)
@@ -107,47 +86,25 @@ class TestOutletTempControl(base.TestCase):
instance_to_mig[1].uuid)
def test_filter_dest_servers(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
n1, n2 = self.strategy.group_hosts_by_outlet_temp()
instance_to_mig = self.strategy.choose_instance_to_migrate(n1)
dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1])
self.assertEqual(1, len(dest_hosts))
self.assertEqual('Node_0', dest_hosts[0]['node'].uuid)
def test_exception_model(self):
self.m_model.return_value = None
self.assertRaises(
exception.ClusterStateNotDefined, self.strategy.execute)
def test_exception_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
self.m_model.return_value = self.fake_cluster.cluster_data_model
self.assertRaises(
exception.ClusterStateNotDefined,
self.strategy.execute)
def test_execute_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self):
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
model = self.fake_c_cluster.\
generate_scenario_4_with_1_node_no_instance()
self.m_c_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
def test_execute(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
[action.get('action_type') for action in solution.actions])
@@ -156,8 +113,8 @@ class TestOutletTempControl(base.TestCase):
self.assertEqual(1, num_migrations)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()
for action in solution.actions:

View File

@@ -20,11 +20,11 @@ import mock
from watcher.common import clients
from watcher.common import utils
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_and_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestSavingEnergy(base.TestCase):
class TestSavingEnergy(TestBaseStrategy):
def setUp(self):
super(TestSavingEnergy, self).setUp()
@@ -37,15 +37,6 @@ class TestSavingEnergy(base.TestCase):
'uuid': '922d4762-0bc5-4b30-9cb9-48ab644dd862'}
self.fake_nodes = [mock_node1, mock_node2]
# fake cluster
self.fake_cluster = faker_cluster_and_metrics.FakerModelCollector()
p_model = mock.patch.object(
strategies.SavingEnergy, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_ironic = mock.patch.object(
clients.OpenStackClients, 'ironic')
self.m_ironic = p_ironic.start()
@@ -56,22 +47,10 @@ class TestSavingEnergy(base.TestCase):
self.m_nova = p_nova.start()
self.addCleanup(p_nova.stop)
p_model = mock.patch.object(
strategies.SavingEnergy, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_audit_scope = mock.patch.object(
strategies.SavingEnergy, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_ironic.node.list.return_value = self.fake_nodes
self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
self.strategy = strategies.SavingEnergy(
config=mock.Mock())
self.strategy.input_parameters = utils.Struct()
@@ -102,8 +81,6 @@ class TestSavingEnergy(base.TestCase):
'running_vms': 2, 'service': {'host': 'Node_1'}, 'state': 'up'}
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.strategy.get_hosts_pool()
self.assertEqual(len(self.strategy.with_vms_node_pool), 2)
@@ -129,8 +106,6 @@ class TestSavingEnergy(base.TestCase):
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.strategy.get_hosts_pool()
self.assertEqual(len(self.strategy.with_vms_node_pool), 0)
@@ -156,8 +131,6 @@ class TestSavingEnergy(base.TestCase):
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.strategy.get_hosts_pool()
self.assertEqual(len(self.strategy.with_vms_node_pool), 0)
@@ -183,8 +156,6 @@ class TestSavingEnergy(base.TestCase):
'running_vms': 0, 'service': {'host': 'Node_10'}, 'state': 'up'}
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
self.strategy.get_hosts_pool()
self.assertEqual(len(self.strategy.with_vms_node_pool), 0)
@@ -230,8 +201,8 @@ class TestSavingEnergy(base.TestCase):
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
solution = self.strategy.execute()
self.assertEqual(len(solution.actions), 1)

View File

@@ -24,10 +24,12 @@ from watcher.common import cinder_helper
from watcher.common import clients
from watcher.common import utils
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestStorageCapacityBalance(base.TestCase):
class TestStorageCapacityBalance(TestBaseStrategy):
def setUp(self):
super(TestStorageCapacityBalance, self).setUp()
@@ -115,6 +117,8 @@ class TestStorageCapacityBalance(base.TestCase):
'type2', {'volume_backend_name': 'pool2'})
]
self.fake_c_cluster = faker_cluster_state.FakerStorageModelCollector()
osc = clients.OpenStackClients()
p_cinder = mock.patch.object(osc, 'cinder')
@@ -122,20 +126,6 @@ class TestStorageCapacityBalance(base.TestCase):
self.addCleanup(p_cinder.stop)
self.m_cinder = cinder_helper.CinderHelper(osc=osc)
p_model = mock.patch.object(
strategies.StorageCapacityBalance, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_audit_scope = mock.patch.object(
strategies.StorageCapacityBalance, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_cinder.get_storage_pool_list = mock.Mock(
return_value=self.fake_pools)
self.m_cinder.get_volume_list = mock.Mock(
@@ -144,6 +134,10 @@ class TestStorageCapacityBalance(base.TestCase):
return_value=self.fake_snap)
self.m_cinder.get_volume_type_list = mock.Mock(
return_value=self.fake_types)
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.strategy = strategies.StorageCapacityBalance(
config=mock.Mock(), osc=osc)
self.strategy._cinder = self.m_cinder
@@ -241,5 +235,6 @@ class TestStorageCapacityBalance(base.TestCase):
setattr(self.fake_pool1, 'free_capacity_gb', '60')
self.strategy.input_parameters.update(
{'volume_threshold': 60.0})
solution = self.strategy.execute()
self.assertEqual(len(solution.actions), 3)

View File

@@ -20,17 +20,15 @@ import collections
import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model import gnocchi_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestUniformAirflow(base.TestCase):
class TestUniformAirflow(TestBaseStrategy):
scenarios = [
("Ceilometer",
@@ -45,14 +43,6 @@ class TestUniformAirflow(base.TestCase):
super(TestUniformAirflow, self).setUp()
# fake metrics
self.fake_metrics = self.fake_datasource_cls()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_model = mock.patch.object(
strategies.UniformAirflow, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.UniformAirflow, 'datasource_backend',
@@ -60,16 +50,6 @@ class TestUniformAirflow(base.TestCase):
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
p_audit_scope = mock.patch.object(
strategies.UniformAirflow, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_model.return_value = model_root.ModelRoot()
self.m_datasource.return_value = mock.Mock(
statistic_aggregation=self.fake_metrics.mock_get_statistics,
NAME=self.fake_metrics.NAME)
@@ -87,16 +67,16 @@ class TestUniformAirflow(base.TestCase):
self.strategy.pre_execute()
def test_calc_used_resource(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
node = model.get_node_by_uuid('Node_0')
cores_used, mem_used, disk_used = (
self.strategy.calculate_used_resource(node))
self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40))
def test_group_hosts_by_airflow(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.threshold_airflow = 300
n1, n2 = self.strategy.group_hosts_by_airflow()
# print n1, n2, avg, w_map
@@ -104,8 +84,8 @@ class TestUniformAirflow(base.TestCase):
self.assertEqual(n2[0]['node'].uuid, 'Node_1')
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
n1, n2 = self.strategy.group_hosts_by_airflow()
@@ -118,8 +98,8 @@ class TestUniformAirflow(base.TestCase):
'73b09e16-35b7-4922-804e-e8f5d9b740fc'})
def test_choose_instance_to_migrate_all(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
n1, n2 = self.strategy.group_hosts_by_airflow()
@@ -132,8 +112,8 @@ class TestUniformAirflow(base.TestCase):
{inst.uuid for inst in instance_to_mig[1]})
def test_choose_instance_notfound(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
n1, n2 = self.strategy.group_hosts_by_airflow()
@@ -143,8 +123,8 @@ class TestUniformAirflow(base.TestCase):
self.assertIsNone(instance_to_mig)
def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 22
n1, n2 = self.strategy.group_hosts_by_airflow()
@@ -158,35 +138,13 @@ class TestUniformAirflow(base.TestCase):
{'cae81432-1631-4d4e-b29c-6f3acdcde906',
'73b09e16-35b7-4922-804e-e8f5d9b740fc'})
def test_exception_model(self):
self.m_model.return_value = None
self.assertRaises(
exception.ClusterStateNotDefined, self.strategy.execute)
def test_exception_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
self.m_model.return_value = self.fake_cluster.cluster_data_model
self.assertRaises(
exception.ClusterStateNotDefined,
self.strategy.execute)
def test_execute_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self):
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
self.strategy.threshold_power = 300
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
model = self.fake_c_cluster.\
generate_scenario_4_with_1_node_no_instance()
self.m_c_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
@@ -194,8 +152,8 @@ class TestUniformAirflow(base.TestCase):
self.strategy.threshold_airflow = 300
self.strategy.threshold_inlet_t = 25
self.strategy.threshold_power = 300
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
[action.get('action_type') for action in solution.actions])
@@ -204,8 +162,8 @@ class TestUniformAirflow(base.TestCase):
self.assertEqual(num_migrations, 2)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_7_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()
for action in solution.actions:

View File

@@ -20,16 +20,15 @@
import mock
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.solution.base import BaseSolution
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_and_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestVMWorkloadConsolidation(base.TestCase):
class TestVMWorkloadConsolidation(TestBaseStrategy):
scenarios = [
("Ceilometer",
@@ -46,13 +45,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
super(TestVMWorkloadConsolidation, self).setUp()
# fake cluster
self.fake_cluster = faker_cluster_and_metrics.FakerModelCollector()
p_model = mock.patch.object(
strategies.VMWorkloadConsolidation, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
self.fake_c_cluster = faker_cluster_and_metrics.FakerModelCollector()
p_datasource = mock.patch.object(
strategies.VMWorkloadConsolidation, 'datasource_backend',
@@ -60,20 +53,10 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
p_audit_scope = mock.patch.object(
strategies.VMWorkloadConsolidation, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
# fake metrics
self.fake_metrics = self.fake_datasource_cls(
self.m_model.return_value)
self.m_c_model.return_value)
self.m_model.return_value = model_root.ModelRoot()
self.m_datasource.return_value = mock.Mock(
get_instance_cpu_usage=(
self.fake_metrics.get_instance_cpu_util),
@@ -85,17 +68,9 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.strategy = strategies.VMWorkloadConsolidation(
config=mock.Mock(datasources=self.datasource))
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
self.m_model.return_value = self.fake_cluster.cluster_data_model
self.assertRaises(
exception.ClusterStateNotDefined,
self.strategy.execute)
def test_get_instance_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
instance_util = dict(cpu=1.0, ram=1, disk=10)
@@ -104,8 +79,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.strategy.get_instance_utilization(instance_0))
def test_get_node_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
node_0 = model.get_node_by_uuid("Node_0")
node_util = dict(cpu=1.0, ram=1, disk=10)
@@ -114,16 +89,16 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.strategy.get_node_utilization(node_0))
def test_get_node_capacity(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
node_0 = model.get_node_by_uuid("Node_0")
node_util = dict(cpu=40, ram=64, disk=250)
self.assertEqual(node_util, self.strategy.get_node_capacity(node_0))
def test_get_relative_node_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
node = model.get_node_by_uuid('Node_0')
rhu = self.strategy.get_relative_node_utilization(node)
@@ -131,16 +106,16 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertEqual(expected_rhu, rhu)
def test_get_relative_cluster_utilization(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
cru = self.strategy.get_relative_cluster_utilization()
expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375}
self.assertEqual(expected_cru, cru)
def test_add_migration_with_active_state(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n1 = model.get_node_by_uuid('Node_0')
n2 = model.get_node_by_uuid('Node_1')
@@ -156,8 +131,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions[0])
def test_add_migration_with_paused_state(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n1 = model.get_node_by_uuid('Node_0')
n2 = model.get_node_by_uuid('Node_1')
@@ -178,8 +153,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions[0])
def test_is_overloaded(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n1 = model.get_node_by_uuid('Node_0')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
@@ -195,8 +170,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertTrue(res)
def test_instance_fits(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n = model.get_node_by_uuid('Node_1')
instance0 = model.get_instance_by_uuid('INSTANCE_0')
@@ -209,8 +184,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertFalse(res)
def test_add_action_enable_compute_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n = model.get_node_by_uuid('Node_0')
self.strategy.add_action_enable_compute_node(n)
@@ -220,8 +195,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions)
def test_add_action_disable_node(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n = model.get_node_by_uuid('Node_0')
self.strategy.add_action_disable_node(n)
@@ -233,8 +208,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions)
def test_disable_unused_nodes(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n1 = model.get_node_by_uuid('Node_0')
n2 = model.get_node_by_uuid('Node_1')
@@ -256,8 +231,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions[1])
def test_offload_phase(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
self.strategy.offload_phase(cc)
@@ -265,8 +240,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions)
def test_consolidation_phase(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n1 = model.get_node_by_uuid('Node_0')
n2 = model.get_node_by_uuid('Node_1')
@@ -281,8 +256,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.assertEqual(expected, self.strategy.solution.actions)
def test_strategy(self):
model = self.fake_cluster.generate_scenario_2()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_2()
self.m_c_model.return_value = model
self.fake_metrics.model = model
result = self.strategy.pre_execute()
@@ -333,8 +308,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
)
def test_strategy2(self):
model = self.fake_cluster.generate_scenario_3()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_3()
self.m_c_model.return_value = model
self.fake_metrics.model = model
n1 = model.get_node_by_uuid('Node_0')
n2 = model.get_node_by_uuid('Node_1')

View File

@@ -20,17 +20,15 @@ import collections
import mock
from watcher.applier.loading import default
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model import gnocchi_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestWorkloadBalance(base.TestCase):
class TestWorkloadBalance(TestBaseStrategy):
scenarios = [
("Ceilometer",
@@ -45,14 +43,6 @@ class TestWorkloadBalance(base.TestCase):
super(TestWorkloadBalance, self).setUp()
# fake metrics
self.fake_metrics = self.fake_datasource_cls()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
p_model = mock.patch.object(
strategies.WorkloadBalance, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.WorkloadBalance, "datasource_backend",
@@ -60,14 +50,6 @@ class TestWorkloadBalance(base.TestCase):
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
p_audit_scope = mock.patch.object(
strategies.WorkloadBalance, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_audit_scope.return_value = mock.Mock()
self.m_datasource.return_value = mock.Mock(
statistic_aggregation=self.fake_metrics.mock_get_statistics_wb)
self.strategy = strategies.WorkloadBalance(
@@ -83,8 +65,8 @@ class TestWorkloadBalance(base.TestCase):
self.strategy._granularity = 300
def test_calc_used_resource(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
node = model.get_node_by_uuid('Node_0')
cores_used, mem_used, disk_used = (
self.strategy.calculate_used_resource(node))
@@ -92,8 +74,8 @@ class TestWorkloadBalance(base.TestCase):
self.assertEqual((cores_used, mem_used, disk_used), (20, 64, 40))
def test_group_hosts_by_cpu_util(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.threshold = 30
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
self.assertEqual(n1[0]['node'].uuid, 'Node_0')
@@ -101,8 +83,8 @@ class TestWorkloadBalance(base.TestCase):
self.assertEqual(avg, 8.0)
def test_group_hosts_by_ram_util(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
self.strategy._meter = "memory.resident"
self.strategy.threshold = 30
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
@@ -111,8 +93,8 @@ class TestWorkloadBalance(base.TestCase):
self.assertEqual(avg, 33.0)
def test_choose_instance_to_migrate(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
instance_to_mig = self.strategy.choose_instance_to_migrate(
n1, avg, w_map)
@@ -121,8 +103,8 @@ class TestWorkloadBalance(base.TestCase):
"73b09e16-35b7-4922-804e-e8f5d9b740fc")
def test_choose_instance_notfound(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
instances = model.get_all_instances()
[model.remove_instance(inst) for inst in instances.values()]
@@ -131,8 +113,8 @@ class TestWorkloadBalance(base.TestCase):
self.assertIsNone(instance_to_mig)
def test_filter_destination_hosts(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
self.strategy.datasource = mock.MagicMock(
statistic_aggregation=self.fake_metrics.mock_get_statistics_wb)
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
@@ -143,38 +125,16 @@ class TestWorkloadBalance(base.TestCase):
self.assertEqual(len(dest_hosts), 1)
self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1')
def test_exception_model(self):
self.m_model.return_value = None
self.assertRaises(
exception.ClusterStateNotDefined, self.strategy.execute)
def test_exception_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()
self.m_model.return_value = self.fake_cluster.cluster_data_model
self.assertRaises(
exception.ClusterStateNotDefined,
self.strategy.execute)
def test_execute_cluster_empty(self):
model = model_root.ModelRoot()
self.m_model.return_value = model
self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def test_execute_no_workload(self):
model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance()
self.m_model.return_value = model
model = self.fake_c_cluster.\
generate_scenario_4_with_1_node_no_instance()
self.m_c_model.return_value = model
solution = self.strategy.execute()
self.assertEqual([], solution.actions)
def test_execute(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
actions_counter = collections.Counter(
[action.get('action_type') for action in solution.actions])
@@ -183,8 +143,8 @@ class TestWorkloadBalance(base.TestCase):
self.assertEqual(num_migrations, 1)
def test_check_parameters(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
solution = self.strategy.execute()
loader = default.DefaultActionLoader()
for action in solution.actions:

View File

@@ -21,15 +21,14 @@ import mock
from watcher.common import clients
from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model import gnocchi_metrics
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
class TestWorkloadStabilization(base.TestCase):
class TestWorkloadStabilization(TestBaseStrategy):
scenarios = [
("Ceilometer",
@@ -46,9 +45,6 @@ class TestWorkloadStabilization(base.TestCase):
# fake metrics
self.fake_metrics = self.fake_datasource_cls()
# fake cluster
self.fake_cluster = faker_cluster_state.FakerModelCollector()
self.hosts_load_assert = {
'Node_0': {'cpu_util': 0.07, 'memory.resident': 7.0, 'vcpus': 40},
'Node_1': {'cpu_util': 0.07, 'memory.resident': 5, 'vcpus': 40},
@@ -61,27 +57,12 @@ class TestWorkloadStabilization(base.TestCase):
self.m_osc = p_osc.start()
self.addCleanup(p_osc.stop)
p_model = mock.patch.object(
strategies.WorkloadStabilization, "compute_model",
new_callable=mock.PropertyMock)
self.m_model = p_model.start()
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.WorkloadStabilization, "datasource_backend",
new_callable=mock.PropertyMock)
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
p_audit_scope = mock.patch.object(
strategies.WorkloadStabilization, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
self.m_model.return_value = model_root.ModelRoot()
self.m_audit_scope.return_value = mock.Mock()
self.m_datasource.return_value = mock.Mock(
statistic_aggregation=self.fake_metrics.mock_get_statistics)
@@ -113,8 +94,8 @@ class TestWorkloadStabilization(base.TestCase):
self.strategy.aggregation_method = {"instance": "mean", "node": "mean"}
def test_get_instance_load(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
instance0 = model.get_instance_by_uuid("INSTANCE_0")
instance_0_dict = {
'uuid': 'INSTANCE_0', 'vcpus': 10,
@@ -123,13 +104,14 @@ class TestWorkloadStabilization(base.TestCase):
instance_0_dict, self.strategy.get_instance_load(instance0))
def test_get_instance_load_with_no_metrics(self):
model = self.fake_cluster.generate_scenario_1_with_1_node_unavailable()
self.m_model.return_value = model
model = self.fake_c_cluster.\
generate_scenario_1_with_1_node_unavailable()
self.m_c_model.return_value = model
lost_instance = model.get_instance_by_uuid("LOST_INSTANCE")
self.assertIsNone(self.strategy.get_instance_load(lost_instance))
def test_normalize_hosts_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
fake_hosts = {'Node_0': {'cpu_util': 0.07, 'memory.resident': 7},
'Node_1': {'cpu_util': 0.05, 'memory.resident': 5}}
normalized_hosts = {'Node_0':
@@ -143,18 +125,20 @@ class TestWorkloadStabilization(base.TestCase):
self.strategy.normalize_hosts_load(fake_hosts))
def test_get_available_nodes(self):
self.m_model.return_value = self.fake_cluster. \
self.m_c_model.return_value = self.fake_c_cluster. \
generate_scenario_9_with_3_active_plus_1_disabled_nodes()
self.assertEqual(3, len(self.strategy.get_available_nodes()))
def test_get_hosts_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.m_c_model.return_value = self.fake_c_cluster.\
generate_scenario_1()
self.assertEqual(self.strategy.get_hosts_load(),
self.hosts_load_assert)
def test_get_hosts_load_with_node_missing(self):
self.m_model.return_value = \
self.fake_cluster.generate_scenario_1_with_1_node_unavailable()
self.m_c_model.return_value = \
self.fake_c_cluster.\
generate_scenario_1_with_1_node_unavailable()
self.assertEqual(self.hosts_load_assert,
self.strategy.get_hosts_load())
@@ -175,8 +159,8 @@ class TestWorkloadStabilization(base.TestCase):
self.assertEqual(self.strategy.calculate_weighted_sd(sd_case), 1.25)
def test_calculate_migration_case(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
instance = model.get_instance_by_uuid("INSTANCE_5")
src_node = model.get_node_by_uuid("Node_2")
dst_node = model.get_node_by_uuid("Node_1")
@@ -188,8 +172,8 @@ class TestWorkloadStabilization(base.TestCase):
'vcpus': 40})
def test_simulate_migrations(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
self.strategy.host_choice = 'fullsearch'
self.assertEqual(
10,
@@ -197,21 +181,22 @@ class TestWorkloadStabilization(base.TestCase):
def test_simulate_migrations_with_all_instances_exclude(self):
model = \
self.fake_cluster.generate_scenario_1_with_all_instances_exclude()
self.m_model.return_value = model
self.fake_c_cluster.\
generate_scenario_1_with_all_instances_exclude()
self.m_c_model.return_value = model
self.strategy.host_choice = 'fullsearch'
self.assertEqual(
0,
len(self.strategy.simulate_migrations(self.hosts_load_assert)))
def test_check_threshold(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2}
self.strategy.simulate_migrations = mock.Mock(return_value=True)
self.assertTrue(self.strategy.check_threshold())
def test_execute_one_migration(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2}
self.strategy.simulate_migrations = mock.Mock(
return_value=[
@@ -224,7 +209,7 @@ class TestWorkloadStabilization(base.TestCase):
'INSTANCE_4', 'Node_2', 'Node_1')
def test_execute_multiply_migrations(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.00001,
'memory.resident': 0.0001}
self.strategy.simulate_migrations = mock.Mock(
@@ -239,7 +224,7 @@ class TestWorkloadStabilization(base.TestCase):
self.assertEqual(mock_migrate.call_count, 2)
def test_execute_nothing_to_migrate(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1()
self.strategy.thresholds = {'cpu_util': 0.042,
'memory.resident': 0.0001}
self.strategy.simulate_migrations = mock.Mock(return_value=False)

View File

@@ -18,34 +18,24 @@ import cinderclient
import novaclient
from watcher.common import cinder_helper
from watcher.common import clients
from watcher.common import exception
from watcher.common import nova_helper
from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.strategy.strategies.test_base \
import TestBaseStrategy
volume_uuid_mapping = faker_cluster_state.volume_uuid_mapping
class TestZoneMigration(base.TestCase):
class TestZoneMigration(TestBaseStrategy):
def setUp(self):
super(TestZoneMigration, self).setUp()
# fake compute cluster
self.fake_c_cluster = faker_cluster_state.FakerModelCollector()
# fake storage cluster
self.fake_s_cluster = faker_cluster_state.FakerStorageModelCollector()
p_c_model = mock.patch.object(
strategies.ZoneMigration, "compute_model",
new_callable=mock.PropertyMock)
self.m_c_model = p_c_model.start()
self.addCleanup(p_c_model.stop)
p_s_model = mock.patch.object(
strategies.ZoneMigration, "storage_model",
new_callable=mock.PropertyMock)
@@ -82,13 +72,6 @@ class TestZoneMigration(base.TestCase):
self.m_parallel_per_pool = p_parallel_per_pool.start()
self.addCleanup(p_parallel_per_pool.stop)
p_audit_scope = mock.patch.object(
strategies.ZoneMigration, "audit_scope",
new_callable=mock.PropertyMock
)
self.m_audit_scope = p_audit_scope.start()
self.addCleanup(p_audit_scope.stop)
p_priority = mock.patch.object(
strategies.ZoneMigration, "priority",
new_callable=mock.PropertyMock
@@ -116,7 +99,6 @@ class TestZoneMigration(base.TestCase):
{"src_pool": "src2@back1#pool1", "dst_pool": "dst2@back2#pool1",
"src_type": "type2", "dst_type": "type3"}
]
self.m_audit_scope.return_value = mock.Mock()
self.strategy = strategies.ZoneMigration(
config=mock.Mock())
@@ -145,19 +127,6 @@ class TestZoneMigration(base.TestCase):
m_cinder_helper.start()
self.addCleanup(m_cinder_helper.stop)
def test_exception_empty_compute_model(self):
model = model_root.ModelRoot()
self.m_c_model.return_value = model
self.assertRaises(exception.ComputeClusterEmpty, self.strategy.execute)
def test_exception_empty_storage_model(self):
c_model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = c_model
s_model = model_root.StorageModelRoot()
self.m_s_model.return_value = s_model
self.assertRaises(exception.StorageClusterEmpty, self.strategy.execute)
@staticmethod
def fake_instance(**kwargs):
instance = mock.MagicMock(spec=novaclient.v2.servers.Server)