Compare commits

..

9 Commits
1.4.1 ... 1.4.2

Author SHA1 Message Date
Ian Wienand
30d6f07ceb Replace openstack.org git:// URLs with https://
This is a mechanically generated change to replace openstack.org
git:// URLs with https:// equivalents.

This is in aid of a planned future move of the git hosting
infrastructure to a self-hosted instance of gitea (https://gitea.io),
which does not support the git wire protocol at this stage.

This update should result in no functional change.

For more information see the thread at

 http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003825.html

Change-Id: I5ffcaf509ec6901f7e221a4312cc6b0577090440
2019-03-24 20:36:25 +00:00
licanwei
343a65952a Check job before removing it
Change-Id: Ibbd4da25fac6016a0d76c8f810ac567f6fd075f1
Closes-Bug: #1782731
(cherry picked from commit 4022714f5d)
2018-10-23 11:57:16 +00:00
LiXiangyu
9af6886b0e Fix TypeError in function chunkify
This patch fixes a TypeError from range() in the function chunkify:
range() expected an integer step argument, but got a str.

Change-Id: I2acde859e014baa4c4c59caa6f4ea938c7c4c3bf
(cherry picked from commit c717be12a6)
2018-10-23 06:55:49 +00:00
Nguyen Hai
b0ef77f5d1 import zuul job settings from project-config
This is a mechanically generated patch to complete step 1 of moving
the zuul job settings out of project-config and into each project
repository.

Because there will be a separate patch on each branch, the branch
specifiers for branch-specific jobs have been removed.

Because this patch is generated by a script, there may be some
cosmetic changes to the layout of the YAML file(s) as the contents are
normalized.

See the python3-first goal document for details:
https://governance.openstack.org/tc/goals/stein/python3-first.html

Change-Id: I9ccef45c11c17c3bdda143a53b325be327b9459d
Story: #2002586
Task: #24344
2018-08-19 00:58:52 +09:00
Alexander Chadin
f5157f2894 workload_stabilization trivial fix
This fix allows comparing the metric name by value,
not by object identity.

Change-Id: I57c50ff97efa43efe4fd81875e481b25e9a18cc6
2018-02-20 14:42:04 +00:00
OpenStack Proposal Bot
13331935df Updated from global requirements
Change-Id: I941f6e5a005124a98d2860695fb9a30a77bc595c
2018-02-14 18:48:22 +00:00
Alexander Chadin
8d61c1a2b4 Fix workload_stabilization unavailable nodes and instances
This patch set excludes nodes and instances from auditing
if appropriate metrics aren't available.

Change-Id: I87c6c249e3962f45d082f92d7e6e0be04e101799
Closes-Bug: #1736982
(cherry picked from commit 701b258dc7)
2018-01-24 11:57:14 +00:00
Hidekazu Nakamura
6b4b5c2fe5 Fix gnocchiclient creation
Gnocchiclient uses keystoneauth1.adapter, so adapter_options
needs to be given.
This patch fixes gnocchiclient creation.

Change-Id: I6b5d8ee775929f4b3fd30be3321b378d19085547
Closes-Bug: #1714871
(cherry picked from commit a2fa13c8ff)
2017-11-13 08:20:55 +00:00
OpenStack Proposal Bot
62623a7f77 Updated from global requirements
Change-Id: Iede1409c379d90238b6f2ab6a9aa750b3081df94
2017-09-21 01:08:52 +00:00
17 changed files with 148 additions and 25 deletions

9
.zuul.yaml Normal file
View File

@@ -0,0 +1,9 @@
- project:
templates:
- openstack-python-jobs
- openstack-python35-jobs
- publish-openstack-sphinx-docs
- check-requirements
- release-notes-jobs
gate:
queue: watcher

View File

@@ -35,7 +35,7 @@ VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP
NOVA_INSTANCES_PATH=/opt/stack/data/instances NOVA_INSTANCES_PATH=/opt/stack/data/instances
# Enable the Ceilometer plugin for the compute agent # Enable the Ceilometer plugin for the compute agent
enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api
LOGFILE=$DEST/logs/stack.sh.log LOGFILE=$DEST/logs/stack.sh.log

View File

@@ -32,13 +32,13 @@ ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3,neutron
enable_service n-cauth enable_service n-cauth
# Enable the Watcher Dashboard plugin # Enable the Watcher Dashboard plugin
enable_plugin watcher-dashboard git://git.openstack.org/openstack/watcher-dashboard enable_plugin watcher-dashboard https://git.openstack.org/openstack/watcher-dashboard
# Enable the Watcher plugin # Enable the Watcher plugin
enable_plugin watcher git://git.openstack.org/openstack/watcher enable_plugin watcher https://git.openstack.org/openstack/watcher
# Enable the Ceilometer plugin # Enable the Ceilometer plugin
enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
# This is the controller node, so disable the ceilometer compute agent # This is the controller node, so disable the ceilometer compute agent
disable_service ceilometer-acompute disable_service ceilometer-acompute

View File

@@ -165,7 +165,7 @@ You can easily generate and update a sample configuration file
named :ref:`watcher.conf.sample <watcher_sample_configuration_files>` by using named :ref:`watcher.conf.sample <watcher_sample_configuration_files>` by using
these following commands:: these following commands::
$ git clone git://git.openstack.org/openstack/watcher $ git clone https://git.openstack.org/openstack/watcher
$ cd watcher/ $ cd watcher/
$ tox -e genconfig $ tox -e genconfig
$ vi etc/watcher/watcher.conf.sample $ vi etc/watcher/watcher.conf.sample

View File

@@ -19,7 +19,7 @@ model. To enable the Watcher plugin with DevStack, add the following to the
`[[local|localrc]]` section of your controller's `local.conf` to enable the `[[local|localrc]]` section of your controller's `local.conf` to enable the
Watcher plugin:: Watcher plugin::
enable_plugin watcher git://git.openstack.org/openstack/watcher enable_plugin watcher https://git.openstack.org/openstack/watcher
For more detailed instructions, see `Detailed DevStack Instructions`_. Check For more detailed instructions, see `Detailed DevStack Instructions`_. Check
out the `DevStack documentation`_ for more information regarding DevStack. out the `DevStack documentation`_ for more information regarding DevStack.

View File

@@ -37,7 +37,7 @@ python-keystoneclient>=3.8.0 # Apache-2.0
python-monascaclient>=1.7.0 # Apache-2.0 python-monascaclient>=1.7.0 # Apache-2.0
python-neutronclient>=6.3.0 # Apache-2.0 python-neutronclient>=6.3.0 # Apache-2.0
python-novaclient>=9.0.0 # Apache-2.0 python-novaclient>=9.0.0 # Apache-2.0
python-openstackclient!=3.10.0,>=3.3.0 # Apache-2.0 python-openstackclient>=3.11.0 # Apache-2.0
python-ironicclient>=1.14.0 # Apache-2.0 python-ironicclient>=1.14.0 # Apache-2.0
six>=1.9.0 # MIT six>=1.9.0 # MIT
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
@@ -45,5 +45,5 @@ stevedore>=1.20.0 # Apache-2.0
taskflow>=2.7.0 # Apache-2.0 taskflow>=2.7.0 # Apache-2.0
WebOb>=1.7.1 # MIT WebOb>=1.7.1 # MIT
WSME>=0.8 # MIT WSME>=0.8 # MIT
networkx>=1.10 # BSD networkx<2.0,>=1.10 # BSD

View File

@@ -110,8 +110,12 @@ class OpenStackClients(object):
'api_version') 'api_version')
gnocchiclient_interface = self._get_client_option('gnocchi', gnocchiclient_interface = self._get_client_option('gnocchi',
'endpoint_type') 'endpoint_type')
adapter_options = {
"interface": gnocchiclient_interface
}
self._gnocchi = gnclient.Client(gnocchiclient_version, self._gnocchi = gnclient.Client(gnocchiclient_version,
interface=gnocchiclient_interface, adapter_options=adapter_options,
session=self.session) session=self.session)
return self._gnocchi return self._gnocchi

View File

@@ -62,11 +62,12 @@ class ContinuousAuditHandler(base.AuditHandler):
if objects.audit.AuditStateTransitionManager().is_inactive(audit): if objects.audit.AuditStateTransitionManager().is_inactive(audit):
# if audit isn't in active states, audit's job must be removed to # if audit isn't in active states, audit's job must be removed to
# prevent using of inactive audit in future. # prevent using of inactive audit in future.
if self.scheduler.get_jobs(): jobs = [job for job in self.scheduler.get_jobs()
[job for job in self.scheduler.get_jobs() if job.name == 'execute_audit' and
if job.name == 'execute_audit' and job.args[0].uuid == audit.uuid]
job.args[0].uuid == audit.uuid][0].remove() if jobs:
return True jobs[0].remove()
return True
return False return False

View File

@@ -87,6 +87,7 @@ class WeightPlanner(base.BasePlanner):
@staticmethod @staticmethod
def chunkify(lst, n): def chunkify(lst, n):
"""Yield successive n-sized chunks from lst.""" """Yield successive n-sized chunks from lst."""
n = int(n)
if n < 1: if n < 1:
# Just to make sure the number is valid # Just to make sure the number is valid
n = 1 n = 1

View File

@@ -252,7 +252,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
"No values returned by %(resource_id)s " "No values returned by %(resource_id)s "
"for %(metric_name)s" % dict( "for %(metric_name)s" % dict(
resource_id=instance.uuid, metric_name=meter)) resource_id=instance.uuid, metric_name=meter))
avg_meter = 0 return
if meter == 'cpu_util': if meter == 'cpu_util':
avg_meter /= float(100) avg_meter /= float(100)
instance_load[meter] = avg_meter instance_load[meter] = avg_meter
@@ -308,12 +308,10 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
) )
if avg_meter is None: if avg_meter is None:
if meter_name == 'hardware.memory.used':
avg_meter = node.memory
if meter_name == 'compute.node.cpu.percent':
avg_meter = 1
LOG.warning('No values returned by node %s for %s', LOG.warning('No values returned by node %s for %s',
node_id, meter_name) node_id, meter_name)
del hosts_load[node_id]
break
else: else:
if meter_name == 'hardware.memory.used': if meter_name == 'hardware.memory.used':
avg_meter /= oslo_utils.units.Ki avg_meter /= oslo_utils.units.Ki
@@ -362,10 +360,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
migration_case = [] migration_case = []
new_hosts = copy.deepcopy(hosts) new_hosts = copy.deepcopy(hosts)
instance_load = self.get_instance_load(instance) instance_load = self.get_instance_load(instance)
if not instance_load:
return
s_host_vcpus = new_hosts[src_node.uuid]['vcpus'] s_host_vcpus = new_hosts[src_node.uuid]['vcpus']
d_host_vcpus = new_hosts[dst_node.uuid]['vcpus'] d_host_vcpus = new_hosts[dst_node.uuid]['vcpus']
for metric in self.metrics: for metric in self.metrics:
if metric is 'cpu_util': if metric == 'cpu_util':
new_hosts[src_node.uuid][metric] -= ( new_hosts[src_node.uuid][metric] -= (
self.transform_instance_cpu(instance_load, s_host_vcpus)) self.transform_instance_cpu(instance_load, s_host_vcpus))
new_hosts[dst_node.uuid][metric] += ( new_hosts[dst_node.uuid][metric] += (
@@ -408,6 +408,8 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
dst_node = self.compute_model.get_node_by_uuid(dst_host) dst_node = self.compute_model.get_node_by_uuid(dst_host)
sd_case = self.calculate_migration_case( sd_case = self.calculate_migration_case(
hosts, instance, src_node, dst_node) hosts, instance, src_node, dst_node)
if sd_case is None:
break
weighted_sd = self.calculate_weighted_sd(sd_case[:-1]) weighted_sd = self.calculate_weighted_sd(sd_case[:-1])
@@ -416,6 +418,8 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
'host': dst_node.uuid, 'value': weighted_sd, 'host': dst_node.uuid, 'value': weighted_sd,
's_host': src_node.uuid, 'instance': instance.uuid} 's_host': src_node.uuid, 'instance': instance.uuid}
instance_host_map.append(min_sd_case) instance_host_map.append(min_sd_case)
if sd_case is None:
continue
return sorted(instance_host_map, key=lambda x: x['value']) return sorted(instance_host_map, key=lambda x: x['value'])
def check_threshold(self): def check_threshold(self):
@@ -424,7 +428,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
normalized_load = self.normalize_hosts_load(hosts_load) normalized_load = self.normalize_hosts_load(hosts_load)
for metric in self.metrics: for metric in self.metrics:
metric_sd = self.get_sd(normalized_load, metric) metric_sd = self.get_sd(normalized_load, metric)
LOG.info("Standard deviation for %s is %s."
% (metric, metric_sd))
if metric_sd > float(self.thresholds[metric]): if metric_sd > float(self.thresholds[metric]):
LOG.info("Standard deviation of %s exceeds"
" appropriate threshold %s."
% (metric, metric_sd))
return self.simulate_migrations(hosts_load) return self.simulate_migrations(hosts_load)
def add_migration(self, def add_migration(self,

View File

@@ -190,7 +190,8 @@ class TestClients(base.TestCase):
osc.gnocchi() osc.gnocchi()
mock_call.assert_called_once_with( mock_call.assert_called_once_with(
CONF.gnocchi_client.api_version, CONF.gnocchi_client.api_version,
interface=CONF.gnocchi_client.endpoint_type, adapter_options={
"interface": CONF.gnocchi_client.endpoint_type},
session=mock_session) session=mock_session)
@mock.patch.object(clients.OpenStackClients, 'session') @mock.patch.object(clients.OpenStackClients, 'session')

View File

@@ -379,8 +379,27 @@ class TestContinuousAuditHandler(base.DbTestCase):
self.audits[0].next_run_time = (datetime.datetime.now() - self.audits[0].next_run_time = (datetime.datetime.now() -
datetime.timedelta(seconds=1800)) datetime.timedelta(seconds=1800))
m_is_inactive.return_value = True m_is_inactive.return_value = True
m_get_jobs.return_value = None m_get_jobs.return_value = []
audit_handler.execute_audit(self.audits[0], self.context) audit_handler.execute_audit(self.audits[0], self.context)
m_execute.assert_called_once_with(self.audits[0], self.context)
self.assertIsNotNone(self.audits[0].next_run_time) self.assertIsNotNone(self.audits[0].next_run_time)
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
def test_is_audit_inactive(self, mock_jobs):
audit_handler = continuous.ContinuousAuditHandler()
mock_jobs.return_value = mock.MagicMock()
audit_handler._scheduler = mock.MagicMock()
ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit',
func=audit_handler.execute_audit,
args=(self.audits[0], mock.MagicMock()),
kwargs={}),
]
audit_handler.update_audit_state(self.audits[1],
objects.audit.State.CANCELLED)
mock_jobs.return_value = ap_jobs
is_inactive = audit_handler._is_audit_inactive(self.audits[1])
self.assertTrue(is_inactive)
is_inactive = audit_handler._is_audit_inactive(self.audits[0])
self.assertFalse(is_inactive)

View File

@@ -176,6 +176,8 @@ class FakeCeilometerMetrics(object):
# node 3 # node 3
mock['Node_6_hostname_6'] = 8 mock['Node_6_hostname_6'] = 8
# This node doesn't send metrics
mock['LOST_NODE_hostname_7'] = None
mock['Node_19_hostname_19'] = 10 mock['Node_19_hostname_19'] = 10
# node 4 # node 4
mock['INSTANCE_7_hostname_7'] = 4 mock['INSTANCE_7_hostname_7'] = 4
@@ -190,7 +192,10 @@ class FakeCeilometerMetrics(object):
# mock[uuid] = random.randint(1, 4) # mock[uuid] = random.randint(1, 4)
mock[uuid] = 8 mock[uuid] = 8
return float(mock[str(uuid)]) if mock[str(uuid)] is not None:
return float(mock[str(uuid)])
else:
return mock[str(uuid)]
@staticmethod @staticmethod
def get_average_usage_instance_cpu_wb(uuid): def get_average_usage_instance_cpu_wb(uuid):
@@ -255,6 +260,8 @@ class FakeCeilometerMetrics(object):
# node 4 # node 4
mock['INSTANCE_7'] = 4 mock['INSTANCE_7'] = 4
mock['LOST_INSTANCE'] = None
if uuid not in mock.keys(): if uuid not in mock.keys():
# mock[uuid] = random.randint(1, 4) # mock[uuid] = random.randint(1, 4)
mock[uuid] = 8 mock[uuid] = 8

View File

@@ -0,0 +1,50 @@
<ModelRoot>
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" vcpus="40" disk="250" disk_capacity="250" memory="132">
<Instance state="active" human_id="" uuid="INSTANCE_0" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_1" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
</ComputeNode>
<ComputeNode human_id="" uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" vcpus="40" disk="250" disk_capacity="250" memory="132">
<Instance state="active" human_id="" uuid="INSTANCE_2" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
</ComputeNode>
<ComputeNode human_id="" uuid="Node_2" status="enabled" state="up" id="2" hostname="hostname_2" vcpus="40" disk="250" disk_capacity="250" memory="132">
<Instance state="active" human_id="" uuid="INSTANCE_3" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_4" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_5" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
</ComputeNode>
<ComputeNode human_id="" uuid="Node_3" status="enabled" state="up" id="3" hostname="hostname_3" vcpus="40" disk="250" disk_capacity="250" memory="132">
<Instance state="active" human_id="" uuid="INSTANCE_6" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
</ComputeNode>
<ComputeNode human_id="" uuid="Node_4" status="enabled" state="up" id="4" hostname="hostname_4" vcpus="40" disk="250" disk_capacity="250" memory="132">
<Instance state="active" human_id="" uuid="INSTANCE_7" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
</ComputeNode>
<ComputeNode human_id="" uuid="LOST_NODE" status="enabled" state="up" id="1" hostname="hostname_7" vcpus="40" disk="250" disk_capacity="250" memory="132">
<Instance state="active" human_id="" uuid="LOST_INSTANCE" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
</ComputeNode>
<Instance state="active" human_id="" uuid="INSTANCE_10" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_11" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_12" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_13" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_14" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_15" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_16" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_17" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_18" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_19" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_20" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_21" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_22" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_23" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_24" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_25" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_26" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_27" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_28" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_29" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_30" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_31" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_32" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_33" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_34" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_8" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
<Instance state="active" human_id="" uuid="INSTANCE_9" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
</ModelRoot>

View File

@@ -114,6 +114,9 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
def generate_scenario_1(self): def generate_scenario_1(self):
return self.load_model('scenario_1.xml') return self.load_model('scenario_1.xml')
def generate_scenario_1_with_1_node_unavailable(self):
return self.load_model('scenario_1_with_1_node_unavailable.xml')
def generate_scenario_3_with_2_nodes(self): def generate_scenario_3_with_2_nodes(self):
return self.load_model('scenario_3_with_2_nodes.xml') return self.load_model('scenario_3_with_2_nodes.xml')

View File

@@ -132,6 +132,8 @@ class FakeGnocchiMetrics(object):
# node 3 # node 3
mock['Node_6_hostname_6'] = 8 mock['Node_6_hostname_6'] = 8
# This node doesn't send metrics
mock['LOST_NODE_hostname_7'] = None
mock['Node_19_hostname_19'] = 10 mock['Node_19_hostname_19'] = 10
# node 4 # node 4
mock['INSTANCE_7_hostname_7'] = 4 mock['INSTANCE_7_hostname_7'] = 4
@@ -145,7 +147,10 @@ class FakeGnocchiMetrics(object):
if uuid not in mock.keys(): if uuid not in mock.keys():
mock[uuid] = 8 mock[uuid] = 8
return float(mock[str(uuid)]) if mock[str(uuid)] is not None:
return float(mock[str(uuid)])
else:
return mock[str(uuid)]
@staticmethod @staticmethod
def get_average_usage_instance_cpu(uuid): def get_average_usage_instance_cpu(uuid):
@@ -172,6 +177,8 @@ class FakeGnocchiMetrics(object):
# node 4 # node 4
mock['INSTANCE_7'] = 4 mock['INSTANCE_7'] = 4
mock['LOST_INSTANCE'] = None
if uuid not in mock.keys(): if uuid not in mock.keys():
mock[uuid] = 8 mock[uuid] = 8

View File

@@ -172,6 +172,12 @@ class TestWorkloadStabilization(base.TestCase):
granularity=300, start_time=start_time, stop_time=stop_time, granularity=300, start_time=start_time, stop_time=stop_time,
aggregation='mean') aggregation='mean')
def test_get_instance_load_with_no_metrics(self):
model = self.fake_cluster.generate_scenario_1_with_1_node_unavailable()
self.m_model.return_value = model
lost_instance = model.get_instance_by_uuid("LOST_INSTANCE")
self.assertIsNone(self.strategy.get_instance_load(lost_instance))
def test_normalize_hosts_load(self): def test_normalize_hosts_load(self):
self.m_model.return_value = self.fake_cluster.generate_scenario_1() self.m_model.return_value = self.fake_cluster.generate_scenario_1()
fake_hosts = {'Node_0': {'cpu_util': 0.07, 'memory.resident': 7}, fake_hosts = {'Node_0': {'cpu_util': 0.07, 'memory.resident': 7},
@@ -196,6 +202,12 @@ class TestWorkloadStabilization(base.TestCase):
self.assertEqual(self.strategy.get_hosts_load(), self.assertEqual(self.strategy.get_hosts_load(),
self.hosts_load_assert) self.hosts_load_assert)
def test_get_hosts_load_with_node_missing(self):
self.m_model.return_value = \
self.fake_cluster.generate_scenario_1_with_1_node_unavailable()
self.assertEqual(self.hosts_load_assert,
self.strategy.get_hosts_load())
def test_get_sd(self): def test_get_sd(self):
test_cpu_sd = 0.296 test_cpu_sd = 0.296
test_ram_sd = 9.3 test_ram_sd = 9.3