From 477b4d01e421a6fd7a1037f00539e8a79e92347e Mon Sep 17 00:00:00 2001
From: licanwei
Date: Sat, 16 Feb 2019 14:07:43 +0800
Subject: [PATCH] change config parameter from 'datasource' to 'datasources'
Change-Id: Iaf59ea25f8d62bf29562f21d846243f98d9b6997
---
.../strategies/vm_workload_consolidation.py | 61 +++++++++----------
.../model/faker_cluster_and_metrics.py | 40 +++++++-----
.../test_vm_workload_consolidation.py | 10 ++-
3 files changed, 61 insertions(+), 50 deletions(-)
diff --git a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py b/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py
index bc4df0776..5dae22849 100644
--- a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py
+++ b/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py
@@ -68,6 +68,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent'
INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util'
+ AGGREGATION = 'mean'
DATASOURCE_METRICS = ['instance_ram_allocated', 'instance_cpu_usage',
'instance_ram_usage', 'instance_root_disk_size']
@@ -140,11 +141,14 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
@classmethod
def get_config_opts(cls):
return [
- cfg.StrOpt(
- "datasource",
- help="Data source to use in order to query the needed metrics",
- default="gnocchi",
- choices=["ceilometer", "gnocchi"])
+ cfg.ListOpt(
+ "datasources",
+ help="Datasources to use in order to query the needed metrics."
+ " If one of the strategy metrics isn't available in the first"
+ " datasource, the next datasource will be chosen.",
+ item_type=cfg.types.String(choices=['gnocchi', 'ceilometer',
+ 'monasca']),
+ default=['gnocchi', 'ceilometer', 'monasca'])
]
def get_available_compute_nodes(self):
@@ -281,36 +285,29 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
if instance.uuid in self.datasource_instance_data_cache.keys():
return self.datasource_instance_data_cache.get(instance.uuid)
- cpu_util_metric = self.METRIC_NAMES[
- self.config.datasource]['cpu_util_metric']
- ram_util_metric = self.METRIC_NAMES[
- self.config.datasource]['ram_util_metric']
- ram_alloc_metric = self.METRIC_NAMES[
- self.config.datasource]['ram_alloc_metric']
- disk_alloc_metric = self.METRIC_NAMES[
- self.config.datasource]['disk_alloc_metric']
-
- instance_cpu_util = self.datasource_backend.statistic_aggregation(
- resource_id=instance.uuid,
- meter_name=cpu_util_metric,
- period=self.period,
+ instance_cpu_util = self.datasource_backend.get_instance_cpu_usage(
+ instance.uuid,
+ self.period,
+ self.AGGREGATION,
granularity=self.granularity)
- instance_ram_util = self.datasource_backend.statistic_aggregation(
- resource_id=instance.uuid,
- meter_name=ram_util_metric,
- period=self.period,
+ instance_ram_util = self.datasource_backend.get_instance_memory_usage(
+ instance.uuid,
+ self.period,
+ self.AGGREGATION,
granularity=self.granularity)
if not instance_ram_util:
- instance_ram_util = self.datasource_backend.statistic_aggregation(
- resource_id=instance.uuid,
- meter_name=ram_alloc_metric,
- period=self.period,
- granularity=self.granularity)
- instance_disk_util = self.datasource_backend.statistic_aggregation(
- resource_id=instance.uuid,
- meter_name=disk_alloc_metric,
- period=self.period,
- granularity=self.granularity)
+ instance_ram_util = (
+ self.datasource_backend.get_instance_ram_allocated(
+ instance.uuid,
+ self.period,
+ self.AGGREGATION,
+ granularity=self.granularity))
+ instance_disk_util = (
+ self.datasource_backend.get_instance_root_disk_allocated(
+ instance.uuid,
+ self.period,
+ self.AGGREGATION,
+ granularity=self.granularity))
if instance_cpu_util:
total_cpu_utilization = (
diff --git a/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py b/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py
index 595e9b667..2ad485ef4 100644
--- a/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py
+++ b/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py
@@ -88,15 +88,19 @@ class FakeCeilometerMetrics(object):
period=300, granularity=300, dimensions=None,
aggregation='avg', group_by='*'):
if meter_name == "compute.node.cpu.percent":
- return self.get_node_cpu_util(resource_id)
+ return self.get_node_cpu_util(resource_id, period,
+ aggregation, granularity)
elif meter_name == "cpu_util":
- return self.get_instance_cpu_util(resource_id)
+ return self.get_instance_cpu_util(resource_id, period,
+ aggregation, granularity)
elif meter_name == "memory.resident":
- return self.get_instance_ram_util(resource_id)
+ return self.get_instance_ram_util(resource_id, period,
+ aggregation, granularity)
elif meter_name == "disk.root.size":
- return self.get_instance_disk_root_size(resource_id)
+ return self.get_instance_disk_root_size(resource_id, period,
+ aggregation, granularity)
- def get_node_cpu_util(self, r_id):
+ def get_node_cpu_util(self, r_id, period, aggregation, granularity):
"""Calculates node utilization dynamicaly.
node CPU utilization should consider
@@ -118,7 +122,7 @@ class FakeCeilometerMetrics(object):
return util_sum * 100.0
@staticmethod
- def get_instance_cpu_util(r_id):
+ def get_instance_cpu_util(r_id, period, aggregation, granularity):
instance_cpu_util = dict()
instance_cpu_util['INSTANCE_0'] = 10
instance_cpu_util['INSTANCE_1'] = 30
@@ -133,7 +137,7 @@ class FakeCeilometerMetrics(object):
return instance_cpu_util[str(r_id)]
@staticmethod
- def get_instance_ram_util(r_id):
+ def get_instance_ram_util(r_id, period, aggregation, granularity):
instance_ram_util = dict()
instance_ram_util['INSTANCE_0'] = 1
instance_ram_util['INSTANCE_1'] = 2
@@ -148,7 +152,7 @@ class FakeCeilometerMetrics(object):
return instance_ram_util[str(r_id)]
@staticmethod
- def get_instance_disk_root_size(r_id):
+ def get_instance_disk_root_size(r_id, period, aggregation, granularity):
instance_disk_util = dict()
instance_disk_util['INSTANCE_0'] = 10
instance_disk_util['INSTANCE_1'] = 15
@@ -171,15 +175,19 @@ class FakeGnocchiMetrics(object):
period=300, granularity=300, dimensions=None,
aggregation='avg', group_by='*'):
if meter_name == "compute.node.cpu.percent":
- return self.get_node_cpu_util(resource_id)
+ return self.get_node_cpu_util(resource_id, period,
+ aggregation, granularity)
elif meter_name == "cpu_util":
- return self.get_instance_cpu_util(resource_id)
+ return self.get_instance_cpu_util(resource_id, period,
+ aggregation, granularity)
elif meter_name == "memory.resident":
- return self.get_instance_ram_util(resource_id)
+ return self.get_instance_ram_util(resource_id, period,
+ aggregation, granularity)
elif meter_name == "disk.root.size":
- return self.get_instance_disk_root_size(resource_id)
+ return self.get_instance_disk_root_size(resource_id, period,
+ aggregation, granularity)
- def get_node_cpu_util(self, r_id):
+ def get_node_cpu_util(self, r_id, period, aggregation, granularity):
"""Calculates node utilization dynamicaly.
node CPU utilization should consider
@@ -202,7 +210,7 @@ class FakeGnocchiMetrics(object):
return util_sum * 100.0
@staticmethod
- def get_instance_cpu_util(r_id):
+ def get_instance_cpu_util(r_id, period, aggregation, granularity):
instance_cpu_util = dict()
instance_cpu_util['INSTANCE_0'] = 10
instance_cpu_util['INSTANCE_1'] = 30
@@ -217,7 +225,7 @@ class FakeGnocchiMetrics(object):
return instance_cpu_util[str(r_id)]
@staticmethod
- def get_instance_ram_util(r_id):
+ def get_instance_ram_util(r_id, period, aggregation, granularity):
instance_ram_util = dict()
instance_ram_util['INSTANCE_0'] = 1
instance_ram_util['INSTANCE_1'] = 2
@@ -232,7 +240,7 @@ class FakeGnocchiMetrics(object):
return instance_ram_util[str(r_id)]
@staticmethod
- def get_instance_disk_root_size(r_id):
+ def get_instance_disk_root_size(r_id, period, aggregation, granularity):
instance_disk_util = dict()
instance_disk_util['INSTANCE_0'] = 10
instance_disk_util['INSTANCE_1'] = 15
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py b/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py
index dd75fb3d9..f2d608f1c 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py
@@ -75,9 +75,15 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.m_model.return_value = model_root.ModelRoot()
self.m_datasource.return_value = mock.Mock(
- statistic_aggregation=self.fake_metrics.mock_get_statistics)
+ get_instance_cpu_usage=(
+ self.fake_metrics.get_instance_cpu_util),
+ get_instance_memory_usage=(
+ self.fake_metrics.get_instance_ram_util),
+ get_instance_root_disk_allocated=(
+ self.fake_metrics.get_instance_disk_root_size),
+ )
self.strategy = strategies.VMWorkloadConsolidation(
- config=mock.Mock(datasource=self.datasource))
+ config=mock.Mock(datasources=self.datasource))
def test_exception_stale_cdm(self):
self.fake_cluster.set_cluster_data_model_as_stale()