formal datasource interface implementation

Changes to the baseclass for datasources so strategies can be made
compatible with every datasource. Baseclass methods clearly describe
expected values and types for both parameters and for method returns.
query_retry has been added as a base method since every current
datasource implements it.

Ceilometer is updated to work with the new baseclass. Several methods
which are not part of the baseclass and are not used by any strategies
are removed. The signatures of these methods would have had to be
changed to fit the new base class, which would have limited strategies
to working only with Ceilometer.

Gnocchi is updated to work with the new baseclass.

Gnocchi and Ceilometer will perform a transformation for the
host_airflow metric as it retrieves 1/10th of the actual CFM.

Monasca is updated to work with the new baseclass.

FakeMetrics for Gnocchi, Monasca and Ceilometer are updated to work
with the new method signatures of the baseclass.

FakeClusterAndMetrics for Ceilometer and Gnocchi are updated to work
with the new method signatures of the baseclass.

The strategies workload_balance, vm_workload_consolidation,
workload_stabilization, basic_consolidation, noisy_neighbour,
outlet_temp_control and uniform_airflow are updated to work with the
new datasource baseclass.

This patch will break compatibility with plugin strategies and
datasources due to the changes in signatures.

Depends-on: I7aa52a9b82f4aa849f2378d4d1c03453e45c0c78
Change-Id: Ie30ca3dbf01062cbb20d3be5d514ec6b5155cd7c
Implements: blueprint formal-datasource-interface
This commit is contained in:
Dantali0n
2019-05-01 10:09:58 +02:00
parent f049815cf4
commit 84cb589aa9
23 changed files with 915 additions and 1253 deletions

View File

@@ -48,23 +48,8 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
that live migration is possible on your OpenStack cluster.
"""
HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent'
INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util'
DATASOURCE_METRICS = ['host_cpu_usage', 'instance_cpu_usage']
METRIC_NAMES = dict(
ceilometer=dict(
host_cpu_usage='compute.node.cpu.percent',
instance_cpu_usage='cpu_util'),
monasca=dict(
host_cpu_usage='cpu.percent',
instance_cpu_usage='vm.cpu.utilization_perc'),
gnocchi=dict(
host_cpu_usage='compute.node.cpu.percent',
instance_cpu_usage='cpu_util'),
)
CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"
def __init__(self, config, osc=None):
@@ -111,7 +96,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
def aggregation_method(self):
return self.input_parameters.get(
'aggregation_method',
{"instance": 'mean', "node": 'mean'})
{"instance": 'mean', "compute_node": 'mean'})
@classmethod
def get_display_name(cls):
@@ -159,12 +144,12 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
"type": "string",
"default": 'mean'
},
"node": {
"compute_node": {
"type": "string",
"default": 'mean'
},
},
"default": {"instance": 'mean', "node": 'mean'}
"default": {"instance": 'mean', "compute_node": 'mean'}
},
},
}
@@ -271,16 +256,15 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
# TODO(jed): take in account weight
return (score_cores + score_disk + score_memory) / 3
def get_node_cpu_usage(self, node):
resource_id = "%s_%s" % (node.uuid, node.hostname)
def get_compute_node_cpu_usage(self, compute_node):
return self.datasource_backend.get_host_cpu_usage(
resource_id, self.period, self.aggregation_method['node'],
granularity=self.granularity)
compute_node, self.period, self.aggregation_method['compute_node'],
self.granularity)
def get_instance_cpu_usage(self, instance):
return self.datasource_backend.get_instance_cpu_usage(
instance.uuid, self.period, self.aggregation_method['instance'],
granularity=self.granularity)
instance, self.period, self.aggregation_method['instance'],
self.granularity)
def calculate_score_node(self, node):
"""Calculate the score that represent the utilization level
@@ -289,7 +273,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
:return: Score for the given compute node
:rtype: float
"""
host_avg_cpu_util = self.get_node_cpu_usage(node)
host_avg_cpu_util = self.get_compute_node_cpu_usage(node)
if host_avg_cpu_util is None:
resource_id = "%s_%s" % (node.uuid, node.hostname)
@@ -297,8 +281,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
"No values returned by %(resource_id)s "
"for %(metric_name)s", dict(
resource_id=resource_id,
metric_name=self.METRIC_NAMES[
self.datasource_backend.NAME]['host_cpu_usage']))
metric_name='host_cpu_usage'))
host_avg_cpu_util = 100
total_cores_used = node.vcpus * (host_avg_cpu_util / 100.0)
@@ -317,8 +300,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
"No values returned by %(resource_id)s "
"for %(metric_name)s", dict(
resource_id=instance.uuid,
metric_name=self.METRIC_NAMES[
self.datasource_backend.NAME]['instance_cpu_usage']))
metric_name='instance_cpu_usage'))
instance_cpu_utilization = 100
total_cores_used = instance.vcpus * (instance_cpu_utilization / 100.0)