Compare commits
123 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
391bb92bd2 | ||
|
|
3912075c19 | ||
|
|
d42a89f70f | ||
|
|
6bb25d2c36 | ||
|
|
4179c3527c | ||
|
|
3b1356346a | ||
|
|
67be974861 | ||
|
|
8c916930c8 | ||
|
|
b537979e45 | ||
|
|
831e58df10 | ||
|
|
3dd03b2d45 | ||
|
|
2548f0bbba | ||
|
|
39d7ce9ee8 | ||
|
|
1f8c073cb3 | ||
|
|
0353a0ac77 | ||
|
|
921584ac4b | ||
|
|
65a09ce32d | ||
|
|
92dad3be2d | ||
|
|
d86fee294f | ||
|
|
95a01c4e12 | ||
|
|
b9456e242e | ||
|
|
4e49ad64c0 | ||
|
|
184b1b1ce6 | ||
|
|
f49d0555e7 | ||
|
|
9d8a0feab4 | ||
|
|
52a5c99fc5 | ||
|
|
cfaab0cbdc | ||
|
|
6bb0432ee7 | ||
|
|
99837d6339 | ||
|
|
3075723da9 | ||
|
|
b0bdeea7cf | ||
|
|
5eaad33709 | ||
|
|
24b6432490 | ||
|
|
ca61594511 | ||
|
|
bd57077bfe | ||
|
|
56bcba2dc0 | ||
|
|
73928412b3 | ||
|
|
29f41b7dff | ||
|
|
02f86ffe02 | ||
|
|
20c6bf1b5a | ||
|
|
083f070d17 | ||
|
|
4022b59d79 | ||
|
|
3d1cb11ea6 | ||
|
|
d0b1dacec1 | ||
|
|
45a06445f3 | ||
|
|
2f173bba56 | ||
|
|
cb497d2642 | ||
|
|
e1fd686272 | ||
|
|
8f7127a874 | ||
|
|
3a529a0f7b | ||
|
|
5c81f1bd7f | ||
|
|
e0c019002a | ||
|
|
cc24ef6e08 | ||
|
|
7e27abc5db | ||
|
|
4844baa816 | ||
|
|
e771ae9e95 | ||
|
|
a2488045ea | ||
|
|
cce5ebd3f0 | ||
|
|
a7ab77078e | ||
|
|
9af32bce5b | ||
|
|
4cf35e7e62 | ||
|
|
6f27e50cf0 | ||
|
|
bd8c5c684c | ||
|
|
1834db853b | ||
|
|
59ef0d24d1 | ||
|
|
c53817c33d | ||
|
|
b33b7a0474 | ||
|
|
033bc072c0 | ||
|
|
f32ed6bc79 | ||
|
|
707590143b | ||
|
|
b2663de513 | ||
|
|
dd210292ae | ||
|
|
abb9155eb4 | ||
|
|
f607ae8ec0 | ||
|
|
b3ded34244 | ||
|
|
bdfb074aa4 | ||
|
|
b3be5f16fc | ||
|
|
dad60fb878 | ||
|
|
fb66a9f2c3 | ||
|
|
dc9ef6f49c | ||
|
|
8e8a43ed48 | ||
|
|
5ac65b7bfc | ||
|
|
7b9b726577 | ||
|
|
c81cd675a5 | ||
|
|
ab926bf6c5 | ||
|
|
08c688ed11 | ||
|
|
e399d96661 | ||
|
|
ba54b30d4a | ||
|
|
44d9183d36 | ||
|
|
f6f3c00206 | ||
|
|
cc87b823fa | ||
|
|
ba2395f7e7 | ||
|
|
b546ce8777 | ||
|
|
0900eaa9df | ||
|
|
9fb5b2a4e7 | ||
|
|
d80edea218 | ||
|
|
26d6074689 | ||
|
|
40a653215f | ||
|
|
1492f5d8dc | ||
|
|
76263f149a | ||
|
|
028006d15d | ||
|
|
d27ba8cc2a | ||
|
|
33750ce7a9 | ||
|
|
cb8d1a98d6 | ||
|
|
f32252d510 | ||
|
|
4849f8dde9 | ||
|
|
0cafdcdee9 | ||
|
|
3a70225164 | ||
|
|
892c766ac4 | ||
|
|
63a3fd84ae | ||
|
|
287ace1dcc | ||
|
|
4b302e415e | ||
|
|
f24744c910 | ||
|
|
d9a85eda2c | ||
|
|
82c8633e42 | ||
|
|
d3f23795f5 | ||
|
|
e7f4456a80 | ||
|
|
a36a309e2e | ||
|
|
8e3affd9ac | ||
|
|
71e979cae0 | ||
|
|
6edfd34a53 | ||
|
|
0c8c32e69e | ||
|
|
9138b7bacb |
146
.zuul.yaml
146
.zuul.yaml
@@ -1,39 +1,139 @@
|
||||
- project:
|
||||
name: openstack/watcher
|
||||
check:
|
||||
jobs:
|
||||
- watcher-tempest-multinode
|
||||
- watcher-tempest-functional
|
||||
- watcher-tempest-dummy_optim
|
||||
- watcher-tempest-actuator
|
||||
- watcher-tempest-basic_optim
|
||||
- watcher-tempest-workload_balancing
|
||||
- watcherclient-tempest-functional
|
||||
- legacy-rally-dsvm-watcher-rally
|
||||
- openstack-tox-lower-constraints
|
||||
gate:
|
||||
jobs:
|
||||
- watcher-tempest-functional
|
||||
- watcher-tempest-dummy_optim
|
||||
- watcher-tempest-actuator
|
||||
- watcher-tempest-basic_optim
|
||||
- watcher-tempest-workload_balancing
|
||||
- watcherclient-tempest-functional
|
||||
- legacy-rally-dsvm-watcher-rally
|
||||
- openstack-tox-lower-constraints
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-base-multinode
|
||||
parent: legacy-dsvm-base-multinode
|
||||
run: playbooks/legacy/watcher-tempest-base-multinode/run.yaml
|
||||
post-run: playbooks/legacy/watcher-tempest-base-multinode/post.yaml
|
||||
timeout: 4200
|
||||
name: watcher-tempest-dummy_optim
|
||||
parent: watcher-tempest-multinode
|
||||
vars:
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_dummy_optim'
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-actuator
|
||||
parent: watcher-tempest-multinode
|
||||
vars:
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_actuator'
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-basic_optim
|
||||
parent: watcher-tempest-multinode
|
||||
vars:
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_basic_optim'
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-workload_balancing
|
||||
parent: watcher-tempest-multinode
|
||||
vars:
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_workload_balancing'
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-multinode
|
||||
parent: watcher-tempest-functional
|
||||
voting: false
|
||||
nodeset: openstack-two-node
|
||||
pre-run: playbooks/pre.yaml
|
||||
run: playbooks/orchestrate-tempest.yaml
|
||||
roles:
|
||||
- zuul: openstack/tempest
|
||||
group-vars:
|
||||
subnode:
|
||||
devstack_local_conf:
|
||||
post-config:
|
||||
$NOVA_CONF:
|
||||
libvirt:
|
||||
live_migration_uri: 'qemu+ssh://root@%s/system'
|
||||
devstack_services:
|
||||
watcher-api: false
|
||||
watcher-decision-engine: false
|
||||
watcher-applier: false
|
||||
# We need to add TLS support for watcher plugin
|
||||
tls-proxy: false
|
||||
ceilometer: false
|
||||
ceilometer-acompute: false
|
||||
ceilometer-acentral: false
|
||||
ceilometer-anotification: false
|
||||
watcher: false
|
||||
gnocchi-api: false
|
||||
gnocchi-metricd: false
|
||||
rabbit: false
|
||||
mysql: false
|
||||
vars:
|
||||
devstack_local_conf:
|
||||
post-config:
|
||||
$NOVA_CONF:
|
||||
libvirt:
|
||||
live_migration_uri: 'qemu+ssh://root@%s/system'
|
||||
test-config:
|
||||
$TEMPEST_CONFIG:
|
||||
compute:
|
||||
min_compute_nodes: 2
|
||||
compute-feature-enabled:
|
||||
live_migration: true
|
||||
block_migration_for_live_migration: true
|
||||
devstack_plugins:
|
||||
ceilometer: https://git.openstack.org/openstack/ceilometer
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-functional
|
||||
parent: devstack-tempest
|
||||
timeout: 7200
|
||||
required-projects:
|
||||
- openstack/ceilometer
|
||||
- openstack-infra/devstack-gate
|
||||
- openstack/python-openstackclient
|
||||
- openstack/python-watcherclient
|
||||
- openstack/watcher
|
||||
- openstack/watcher-tempest-plugin
|
||||
nodeset: legacy-ubuntu-xenial-2-node
|
||||
- openstack/tempest
|
||||
vars:
|
||||
devstack_plugins:
|
||||
watcher: https://git.openstack.org/openstack/watcher
|
||||
devstack_services:
|
||||
tls-proxy: false
|
||||
watcher-api: true
|
||||
watcher-decision-engine: true
|
||||
watcher-applier: true
|
||||
tempest: true
|
||||
s-account: false
|
||||
s-container: false
|
||||
s-object: false
|
||||
s-proxy: false
|
||||
devstack_localrc:
|
||||
TEMPEST_PLUGINS: '/opt/stack/watcher-tempest-plugin'
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.api'
|
||||
tox_envlist: all
|
||||
tox_environment:
|
||||
# Do we really need to set this? It's cargo culted
|
||||
PYTHONUNBUFFERED: 'true'
|
||||
zuul_copy_output:
|
||||
/etc/hosts: logs
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-multinode
|
||||
parent: watcher-tempest-base-multinode
|
||||
voting: false
|
||||
|
||||
- job:
|
||||
# This job is used by python-watcherclient repo
|
||||
# This job is used in python-watcherclient repo
|
||||
name: watcherclient-tempest-functional
|
||||
parent: legacy-dsvm-base
|
||||
run: playbooks/legacy/watcherclient-tempest-functional/run.yaml
|
||||
post-run: playbooks/legacy/watcherclient-tempest-functional/post.yaml
|
||||
parent: watcher-tempest-functional
|
||||
voting: false
|
||||
timeout: 4200
|
||||
required-projects:
|
||||
- openstack-dev/devstack
|
||||
- openstack-infra/devstack-gate
|
||||
- openstack/python-openstackclient
|
||||
- openstack/python-watcherclient
|
||||
- openstack/watcher
|
||||
vars:
|
||||
tempest_concurrency: 1
|
||||
devstack_localrc:
|
||||
TEMPEST_PLUGINS: '/opt/stack/python-watcherclient'
|
||||
tempest_test_regex: 'watcherclient.tests.functional'
|
||||
|
||||
@@ -8,4 +8,4 @@
|
||||
watcher Style Commandments
|
||||
==========================
|
||||
|
||||
Read the OpenStack Style Commandments https://docs.openstack.org/developer/hacking/
|
||||
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
Team and repository tags
|
||||
========================
|
||||
|
||||
.. image:: https://governance.openstack.org/badges/watcher.svg
|
||||
:target: https://governance.openstack.org/reference/tags/index.html
|
||||
.. image:: https://governance.openstack.org/tc/badges/watcher.svg
|
||||
:target: https://governance.openstack.org/tc/reference/tags/index.html
|
||||
|
||||
.. Change things from this point on
|
||||
|
||||
@@ -22,10 +22,11 @@ service for multi-tenant OpenStack-based clouds.
|
||||
Watcher provides a robust framework to realize a wide range of cloud
|
||||
optimization goals, including the reduction of data center
|
||||
operating costs, increased system performance via intelligent virtual machine
|
||||
migration, increased energy efficiency-and more!
|
||||
migration, increased energy efficiency and more!
|
||||
|
||||
* Free software: Apache license
|
||||
* Wiki: https://wiki.openstack.org/wiki/Watcher
|
||||
* Source: https://github.com/openstack/watcher
|
||||
* Source: https://github.com/openstack/watcher
|
||||
* Bugs: https://bugs.launchpad.net/watcher
|
||||
* Documentation: https://docs.openstack.org/watcher/latest/
|
||||
* Release notes: https://docs.openstack.org/releasenotes/watcher/
|
||||
|
||||
@@ -177,16 +177,20 @@ function create_watcher_conf {
|
||||
iniset $WATCHER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
|
||||
iniset $WATCHER_CONF DEFAULT control_exchange watcher
|
||||
|
||||
iniset_rpc_backend watcher $WATCHER_CONF
|
||||
|
||||
iniset $WATCHER_CONF database connection $(database_connection_url watcher)
|
||||
iniset $WATCHER_CONF api host "$WATCHER_SERVICE_HOST"
|
||||
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT"
|
||||
|
||||
if is_service_enabled tls-proxy; then
|
||||
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT_INT"
|
||||
# iniset $WATCHER_CONF api enable_ssl_api "True"
|
||||
else
|
||||
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT"
|
||||
fi
|
||||
|
||||
iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_YAML
|
||||
|
||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_userid $RABBIT_USERID
|
||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
|
||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_host $RABBIT_HOST
|
||||
|
||||
iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2"
|
||||
|
||||
iniset $NOVA_CONF oslo_messaging_notifications topics "notifications,watcher_notifications"
|
||||
@@ -297,8 +301,7 @@ function start_watcher_api {
|
||||
|
||||
# Start proxies if enabled
|
||||
if is_service_enabled tls-proxy; then
|
||||
start_tls_proxy '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT &
|
||||
start_tls_proxy '*' $EC2_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT &
|
||||
start_tls_proxy watcher '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ It is used via a single directive in the .rst file
|
||||
|
||||
"""
|
||||
|
||||
from sphinx.util.compat import Directive
|
||||
from docutils.parsers.rst import Directive
|
||||
from docutils import nodes
|
||||
|
||||
from watcher.notifications import base as notification
|
||||
|
||||
@@ -42,6 +42,7 @@ extensions = [
|
||||
'ext.versioned_notifications',
|
||||
'oslo_config.sphinxconfiggen',
|
||||
'openstackdocstheme',
|
||||
'sphinx.ext.napoleon',
|
||||
]
|
||||
|
||||
wsme_protocols = ['restjson']
|
||||
|
||||
@@ -217,7 +217,7 @@ so that the watcher service is configured for your needs.
|
||||
# The SQLAlchemy connection string used to connect to the
|
||||
# database (string value)
|
||||
#connection=<None>
|
||||
connection = mysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8
|
||||
connection = mysql+pymysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8
|
||||
|
||||
#. Configure the Watcher Service to use the RabbitMQ message broker by
|
||||
setting one or more of these options. Replace RABBIT_HOST with the
|
||||
@@ -235,21 +235,8 @@ so that the watcher service is configured for your needs.
|
||||
# option. (string value)
|
||||
control_exchange = watcher
|
||||
|
||||
...
|
||||
|
||||
[oslo_messaging_rabbit]
|
||||
|
||||
# The username used by the message broker (string value)
|
||||
rabbit_userid = RABBITMQ_USER
|
||||
|
||||
# The password of user used by the message broker (string value)
|
||||
rabbit_password = RABBITMQ_PASSWORD
|
||||
|
||||
# The host where the message brokeris installed (string value)
|
||||
rabbit_host = RABBIT_HOST
|
||||
|
||||
# The port used bythe message broker (string value)
|
||||
#rabbit_port = 5672
|
||||
# ...
|
||||
transport_url = rabbit://RABBITMQ_USER:RABBITMQ_PASSWORD@RABBIT_HOST
|
||||
|
||||
|
||||
#. Watcher API shall validate the token provided by every incoming request,
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
===================
|
||||
Configuration Guide
|
||||
===================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:maxdepth: 2
|
||||
|
||||
configuring
|
||||
watcher
|
||||
|
||||
@@ -39,7 +39,7 @@ notifications of important events.
|
||||
|
||||
* https://launchpad.net
|
||||
* https://launchpad.net/watcher
|
||||
* https://launchpad.net/~openstack
|
||||
* https://launchpad.net/openstack
|
||||
|
||||
|
||||
Project Hosting Details
|
||||
@@ -49,7 +49,7 @@ Bug tracker
|
||||
https://launchpad.net/watcher
|
||||
|
||||
Mailing list (prefix subjects with ``[watcher]`` for faster responses)
|
||||
https://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev
|
||||
http://lists.openstack.org/pipermail/openstack-dev/
|
||||
|
||||
Wiki
|
||||
https://wiki.openstack.org/Watcher
|
||||
@@ -65,7 +65,7 @@ IRC Channel
|
||||
|
||||
Weekly Meetings
|
||||
On Wednesdays at 14:00 UTC on even weeks in the ``#openstack-meeting-4``
|
||||
IRC channel, 13:00 UTC on odd weeks in the ``#openstack-meeting-alt``
|
||||
IRC channel, 08:00 UTC on odd weeks in the ``#openstack-meeting-alt``
|
||||
IRC channel (`meetings logs`_)
|
||||
|
||||
.. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/
|
||||
|
||||
@@ -123,9 +123,10 @@ You can re-activate this virtualenv for your current shell using:
|
||||
|
||||
$ workon watcher
|
||||
|
||||
For more information on virtual environments, see virtualenv_.
|
||||
For more information on virtual environments, see virtualenv_ and
|
||||
virtualenvwrapper_.
|
||||
|
||||
.. _virtualenv: https://www.virtualenv.org/
|
||||
.. _virtualenv: https://pypi.python.org/pypi/virtualenv/
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -208,7 +208,7 @@ Here below is how to register ``DummyClusterDataModelCollector`` using pbr_:
|
||||
watcher_cluster_data_model_collectors =
|
||||
dummy = thirdparty.dummy:DummyClusterDataModelCollector
|
||||
|
||||
.. _pbr: http://docs.openstack.org/pbr/latest
|
||||
.. _pbr: https://docs.openstack.org/pbr/latest/
|
||||
|
||||
|
||||
Add new notification endpoints
|
||||
|
||||
@@ -31,7 +31,7 @@ the following::
|
||||
(watcher) $ tox -e pep8
|
||||
|
||||
.. _tox: https://tox.readthedocs.org/
|
||||
.. _Gerrit: http://review.openstack.org/
|
||||
.. _Gerrit: https://review.openstack.org/
|
||||
|
||||
You may pass options to the test programs using positional arguments. To run a
|
||||
specific unit test, you can pass extra options to `os-testr`_ after putting
|
||||
|
||||
@@ -267,14 +267,14 @@ the same goal and same workload of the :ref:`Cluster <cluster_definition>`.
|
||||
Project
|
||||
=======
|
||||
|
||||
:ref:`Projects <project_definition>` represent the base unit of “ownership”
|
||||
:ref:`Projects <project_definition>` represent the base unit of "ownership"
|
||||
in OpenStack, in that all :ref:`resources <managed_resource_definition>` in
|
||||
OpenStack should be owned by a specific :ref:`project <project_definition>`.
|
||||
In OpenStack Identity, a :ref:`project <project_definition>` must be owned by a
|
||||
specific domain.
|
||||
|
||||
Please, read `the official OpenStack definition of a Project
|
||||
<http://docs.openstack.org/glossary/content/glossary.html>`_.
|
||||
<https://docs.openstack.org/doc-contrib-guide/common/glossary.html>`_.
|
||||
|
||||
.. _scoring_engine_definition:
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ metrics receiver, complex event processor and profiler, optimization processor
|
||||
and an action plan applier. This provides a robust framework to realize a wide
|
||||
range of cloud optimization goals, including the reduction of data center
|
||||
operating costs, increased system performance via intelligent virtual machine
|
||||
migration, increased energy efficiency—and more!
|
||||
migration, increased energy efficiency and more!
|
||||
|
||||
Watcher project consists of several source code repositories:
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
[keystone_authtoken]
|
||||
...
|
||||
auth_uri = http://controller:5000
|
||||
www_authenticate_uri = http://controller:5000
|
||||
auth_url = http://controller:35357
|
||||
memcached_servers = controller:11211
|
||||
auth_type = password
|
||||
|
||||
@@ -10,7 +10,7 @@ Infrastructure Optimization service
|
||||
verify.rst
|
||||
next-steps.rst
|
||||
|
||||
The Infrastructure Optimization service (watcher) provides
|
||||
The Infrastructure Optimization service (Watcher) provides
|
||||
flexible and scalable resource optimization service for
|
||||
multi-tenant OpenStack-based clouds.
|
||||
|
||||
@@ -21,17 +21,17 @@ applier. This provides a robust framework to realize a wide
|
||||
range of cloud optimization goals, including the reduction
|
||||
of data center operating costs, increased system performance
|
||||
via intelligent virtual machine migration, increased energy
|
||||
efficiency—and more!
|
||||
efficiency and more!
|
||||
|
||||
Watcher also supports a pluggable architecture by which custom
|
||||
optimization algorithms, data metrics and data profilers can be
|
||||
developed and inserted into the Watcher framework.
|
||||
|
||||
Check the documentation for watcher optimization strategies at
|
||||
https://docs.openstack.org/watcher/latest/strategies/index.html
|
||||
`Strategies <https://docs.openstack.org/watcher/latest/strategies/index.html>`_.
|
||||
|
||||
Check watcher glossary at
|
||||
https://docs.openstack.org/watcher/latest/glossary.html
|
||||
Check watcher glossary at `Glossary
|
||||
<https://docs.openstack.org/watcher/latest/glossary.html>`_.
|
||||
|
||||
|
||||
This chapter assumes a working setup of OpenStack following the
|
||||
|
||||
@@ -7,9 +7,7 @@ Service for the Watcher API
|
||||
---------------------------
|
||||
|
||||
:Author: openstack@lists.launchpad.net
|
||||
:Date:
|
||||
:Copyright: OpenStack Foundation
|
||||
:Version:
|
||||
:Manual section: 1
|
||||
:Manual group: cloud computing
|
||||
|
||||
|
||||
@@ -7,9 +7,7 @@ Service for the Watcher Applier
|
||||
-------------------------------
|
||||
|
||||
:Author: openstack@lists.launchpad.net
|
||||
:Date:
|
||||
:Copyright: OpenStack Foundation
|
||||
:Version:
|
||||
:Manual section: 1
|
||||
:Manual group: cloud computing
|
||||
|
||||
|
||||
@@ -7,9 +7,7 @@ Service for the Watcher Decision Engine
|
||||
---------------------------------------
|
||||
|
||||
:Author: openstack@lists.launchpad.net
|
||||
:Date:
|
||||
:Copyright: OpenStack Foundation
|
||||
:Version:
|
||||
:Manual section: 1
|
||||
:Manual group: cloud computing
|
||||
|
||||
|
||||
86
doc/source/strategies/actuation.rst
Normal file
86
doc/source/strategies/actuation.rst
Normal file
@@ -0,0 +1,86 @@
|
||||
=============
|
||||
Actuator
|
||||
=============
|
||||
|
||||
Synopsis
|
||||
--------
|
||||
|
||||
**display name**: ``Actuator``
|
||||
|
||||
**goal**: ``unclassified``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.actuation.Actuator
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Metrics
|
||||
*******
|
||||
|
||||
None
|
||||
|
||||
Cluster data model
|
||||
******************
|
||||
|
||||
None
|
||||
|
||||
Actions
|
||||
*******
|
||||
|
||||
Default Watcher's actions.
|
||||
|
||||
Planner
|
||||
*******
|
||||
|
||||
Default Watcher's planner:
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner
|
||||
|
||||
Configuration
|
||||
-------------
|
||||
|
||||
Strategy parameters are:
|
||||
|
||||
==================== ====== ===================== =============================
|
||||
parameter type default Value description
|
||||
==================== ====== ===================== =============================
|
||||
``actions`` array None Actions to be executed.
|
||||
==================== ====== ===================== =============================
|
||||
|
||||
The elements of actions array are:
|
||||
|
||||
==================== ====== ===================== =============================
|
||||
parameter type default Value description
|
||||
==================== ====== ===================== =============================
|
||||
``action_type`` string None Action name defined in
|
||||
setup.cfg(mandatory)
|
||||
``resource_id`` string None Resource_id of the action.
|
||||
``input_parameters`` object None Input_parameters of the
|
||||
action(mandatory).
|
||||
==================== ====== ===================== =============================
|
||||
|
||||
Efficacy Indicator
|
||||
------------------
|
||||
|
||||
None
|
||||
|
||||
Algorithm
|
||||
---------
|
||||
|
||||
This strategy create an action plan with a predefined set of actions.
|
||||
|
||||
How to use it ?
|
||||
---------------
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ openstack optimize audittemplate create \
|
||||
at1 unclassified --strategy actuator
|
||||
|
||||
$ openstack optimize audit create -a at1 \
|
||||
-p actions='[{"action_type": "migrate", "resource_id": "56a40802-6fde-4b59-957c-c84baec7eaed", "input_parameters": {"migration_type": "live", "source_node": "s01"}}]'
|
||||
|
||||
External Links
|
||||
--------------
|
||||
|
||||
None
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``server_consolidation``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation.BasicConsolidation
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,11 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``thermal_optimization``
|
||||
|
||||
Outlet (Exhaust Air) temperature is a new thermal telemetry which can be
|
||||
used to measure the host's thermal/workload status. This strategy makes
|
||||
decisions to migrate workloads to the hosts with good thermal condition
|
||||
(lowest outlet temperature) when the outlet temperature of source hosts
|
||||
reach a configurable threshold.
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.outlet_temp_control
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``saving_energy``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.saving_energy
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.saving_energy.SavingEnergy
|
||||
|
||||
Requirements
|
||||
------------
|
||||
@@ -67,13 +67,13 @@ parameter type default description
|
||||
Efficacy Indicator
|
||||
------------------
|
||||
|
||||
Energy saving strategy efficacy indicator is unclassified.
|
||||
https://github.com/openstack/watcher/blob/master/watcher/decision_engine/goal/goals.py#L215-L218
|
||||
None
|
||||
|
||||
Algorithm
|
||||
---------
|
||||
|
||||
For more information on the Energy Saving Strategy please refer to:http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
||||
For more information on the Energy Saving Strategy please refer to:
|
||||
http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
||||
|
||||
How to use it ?
|
||||
---------------
|
||||
@@ -91,10 +91,10 @@ step 2: Create audit to do optimization
|
||||
$ openstack optimize audittemplate create \
|
||||
at1 saving_energy --strategy saving_energy
|
||||
|
||||
$ openstack optimize audit create -a at1
|
||||
$ openstack optimize audit create -a at1 \
|
||||
-p free_used_percent=20.0
|
||||
|
||||
External Links
|
||||
--------------
|
||||
|
||||
*Spec URL*
|
||||
http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
||||
None
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``airflow_optimization``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow.UniformAirflow
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``vm_consolidation``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation.VMWorkloadConsolidation
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``workload_balancing``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization.WorkloadStabilization
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``workload_balancing``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance.WorkloadBalance
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
154
doc/source/strategies/zone_migration.rst
Normal file
154
doc/source/strategies/zone_migration.rst
Normal file
@@ -0,0 +1,154 @@
|
||||
==============
|
||||
Zone migration
|
||||
==============
|
||||
|
||||
Synopsis
|
||||
--------
|
||||
|
||||
**display name**: ``Zone migration``
|
||||
|
||||
**goal**: ``hardware_maintenance``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.zone_migration.ZoneMigration
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Metrics
|
||||
*******
|
||||
|
||||
None
|
||||
|
||||
Cluster data model
|
||||
******************
|
||||
|
||||
Default Watcher's Compute cluster data model:
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector
|
||||
|
||||
Storage cluster data model is also required:
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.model.collector.cinder.CinderClusterDataModelCollector
|
||||
|
||||
Actions
|
||||
*******
|
||||
|
||||
|
||||
Default Watcher's actions:
|
||||
|
||||
.. list-table::
|
||||
:widths: 30 30
|
||||
:header-rows: 1
|
||||
|
||||
* - action
|
||||
- description
|
||||
* - ``migrate``
|
||||
- .. watcher-term:: watcher.applier.actions.migration.Migrate
|
||||
* - ``volume_migrate``
|
||||
- .. watcher-term:: watcher.applier.actions.volume_migration.VolumeMigrate
|
||||
|
||||
Planner
|
||||
*******
|
||||
|
||||
Default Watcher's planner:
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner
|
||||
|
||||
Configuration
|
||||
-------------
|
||||
|
||||
Strategy parameters are:
|
||||
|
||||
======================== ======== ============= ==============================
|
||||
parameter type default Value description
|
||||
======================== ======== ============= ==============================
|
||||
``compute_nodes`` array None Compute nodes to migrate.
|
||||
``storage_pools`` array None Storage pools to migrate.
|
||||
``parallel_total`` integer 6 The number of actions to be
|
||||
run in parallel in total.
|
||||
``parallel_per_node`` integer 2 The number of actions to be
|
||||
run in parallel per compute
|
||||
node.
|
||||
``parallel_per_pool`` integer 2 The number of actions to be
|
||||
run in parallel per storage
|
||||
pool.
|
||||
``priority`` object None List prioritizes instances
|
||||
and volumes.
|
||||
``with_attached_volume`` boolean False False: Instances will migrate
|
||||
after all volumes migrate.
|
||||
True: An instance will migrate
|
||||
after the attached volumes
|
||||
migrate.
|
||||
======================== ======== ============= ==============================
|
||||
|
||||
The elements of compute_nodes array are:
|
||||
|
||||
============= ======= =============== =============================
|
||||
parameter type default Value description
|
||||
============= ======= =============== =============================
|
||||
``src_node`` string None Compute node from which
|
||||
instances migrate(mandatory).
|
||||
``dst_node`` string None Compute node to which
|
||||
instances migrate.
|
||||
============= ======= =============== =============================
|
||||
|
||||
The elements of storage_pools array are:
|
||||
|
||||
============= ======= =============== ==============================
|
||||
parameter type default Value description
|
||||
============= ======= =============== ==============================
|
||||
``src_pool`` string None Storage pool from which
|
||||
volumes migrate(mandatory).
|
||||
``dst_pool`` string None Storage pool to which
|
||||
volumes migrate.
|
||||
``src_type`` string None Source volume type(mandatory).
|
||||
``dst_type`` string None Destination volume type
|
||||
(mandatory).
|
||||
============= ======= =============== ==============================
|
||||
|
||||
The elements of priority object are:
|
||||
|
||||
================ ======= =============== ======================
|
||||
parameter type default Value description
|
||||
================ ======= =============== ======================
|
||||
``project`` array None Project names.
|
||||
``compute_node`` array None Compute node names.
|
||||
``storage_pool`` array None Storage pool names.
|
||||
``compute`` enum None Instance attributes.
|
||||
|compute|
|
||||
``storage`` enum None Volume attributes.
|
||||
|storage|
|
||||
================ ======= =============== ======================
|
||||
|
||||
.. |compute| replace:: ["vcpu_num", "mem_size", "disk_size", "created_at"]
|
||||
.. |storage| replace:: ["size", "created_at"]
|
||||
|
||||
Efficacy Indicator
|
||||
------------------
|
||||
|
||||
.. watcher-func::
|
||||
:format: literal_block
|
||||
|
||||
watcher.decision_engine.goal.efficacy.specs.HardwareMaintenance.get_global_efficacy_indicator
|
||||
|
||||
Algorithm
|
||||
---------
|
||||
|
||||
For more information on the zone migration strategy please refer
|
||||
to: http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/zone-migration-strategy.html
|
||||
|
||||
How to use it ?
|
||||
---------------
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ openstack optimize audittemplate create \
|
||||
at1 hardware_maintenance --strategy zone_migration
|
||||
|
||||
$ openstack optimize audit create -a at1 \
|
||||
-p compute_nodes='[{"src_node": "s01", "dst_node": "d01"}]'
|
||||
|
||||
External Links
|
||||
--------------
|
||||
|
||||
None
|
||||
@@ -39,6 +39,22 @@ named ``watcher``, or by using the `OpenStack CLI`_ ``openstack``.
|
||||
If you want to deploy Watcher in Horizon, please refer to the `Watcher Horizon
|
||||
plugin installation guide`_.
|
||||
|
||||
.. note::
|
||||
|
||||
Notice, that in this guide we'll use `OpenStack CLI`_ as major interface.
|
||||
Nevertheless, you can use `Watcher CLI`_ in the same way. It can be
|
||||
achieved by replacing
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ openstack optimize ...
|
||||
|
||||
with
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher ...
|
||||
|
||||
.. _`installation guide`: https://docs.openstack.org/python-watcherclient/latest
|
||||
.. _`Watcher Horizon plugin installation guide`: https://docs.openstack.org/watcher-dashboard/latest/install/installation.html
|
||||
.. _`OpenStack CLI`: https://docs.openstack.org/python-openstackclient/latest/cli/man/openstack.html
|
||||
@@ -51,10 +67,6 @@ watcher binary without options.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher help
|
||||
|
||||
or::
|
||||
|
||||
$ openstack help optimize
|
||||
|
||||
How do I run an audit of my cluster ?
|
||||
@@ -64,10 +76,6 @@ First, you need to find the :ref:`goal <goal_definition>` you want to achieve:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher goal list
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize goal list
|
||||
|
||||
.. note::
|
||||
@@ -81,10 +89,6 @@ An :ref:`audit template <audit_template_definition>` defines an optimization
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audittemplate create my_first_audit_template <your_goal>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audittemplate create my_first_audit_template <your_goal>
|
||||
|
||||
Although optional, you may want to actually set a specific strategy for your
|
||||
@@ -93,10 +97,6 @@ following command:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher strategy list --goal <your_goal_uuid_or_name>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize strategy list --goal <your_goal_uuid_or_name>
|
||||
|
||||
You can use the following command to check strategy details including which
|
||||
@@ -104,21 +104,12 @@ parameters of which format it supports:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher strategy show <your_strategy>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize strategy show <your_strategy>
|
||||
|
||||
The command to create your audit template would then be:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audittemplate create my_first_audit_template <your_goal> \
|
||||
--strategy <your_strategy>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audittemplate create my_first_audit_template <your_goal> \
|
||||
--strategy <your_strategy>
|
||||
|
||||
@@ -133,10 +124,6 @@ audit) that you want to use.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audittemplate list
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audittemplate list
|
||||
|
||||
- Start an audit based on this :ref:`audit template
|
||||
@@ -144,10 +131,6 @@ or::
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audit create -a <your_audit_template>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audit create -a <your_audit_template>
|
||||
|
||||
If your_audit_template was created by --strategy <your_strategy>, and it
|
||||
@@ -156,11 +139,6 @@ format), your can append `-p` to input required parameters:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audit create -a <your_audit_template> \
|
||||
-p <your_strategy_para1>=5.5 -p <your_strategy_para2>=hi
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audit create -a <your_audit_template> \
|
||||
-p <your_strategy_para1>=5.5 -p <your_strategy_para2>=hi
|
||||
|
||||
@@ -173,19 +151,13 @@ Input parameter could cause audit creation failure, when:
|
||||
Watcher service will compute an :ref:`Action Plan <action_plan_definition>`
|
||||
composed of a list of potential optimization :ref:`actions <action_definition>`
|
||||
(instance migration, disabling of a compute node, ...) according to the
|
||||
:ref:`goal <goal_definition>` to achieve. You can see all of the goals
|
||||
available in section ``[watcher_strategies]`` of the Watcher service
|
||||
configuration file.
|
||||
:ref:`goal <goal_definition>` to achieve.
|
||||
|
||||
- Wait until the Watcher audit has produced a new :ref:`action plan
|
||||
<action_plan_definition>`, and get it:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher actionplan list --audit <the_audit_uuid>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize actionplan list --audit <the_audit_uuid>
|
||||
|
||||
- Have a look on the list of optimization :ref:`actions <action_definition>`
|
||||
@@ -193,10 +165,6 @@ or::
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher action list --action-plan <the_action_plan_uuid>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize action list --action-plan <the_action_plan_uuid>
|
||||
|
||||
Once you have learned how to create an :ref:`Action Plan
|
||||
@@ -207,10 +175,6 @@ cluster:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher actionplan start <the_action_plan_uuid>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize actionplan start <the_action_plan_uuid>
|
||||
|
||||
You can follow the states of the :ref:`actions <action_definition>` by
|
||||
@@ -218,19 +182,11 @@ periodically calling:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher action list
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize action list
|
||||
|
||||
You can also obtain more detailed information about a specific action:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher action show <the_action_uuid>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize action show <the_action_uuid>
|
||||
|
||||
|
||||
163
lower-constraints.txt
Normal file
163
lower-constraints.txt
Normal file
@@ -0,0 +1,163 @@
|
||||
alabaster==0.7.10
|
||||
alembic==0.9.8
|
||||
amqp==2.2.2
|
||||
appdirs==1.4.3
|
||||
APScheduler==3.5.1
|
||||
asn1crypto==0.24.0
|
||||
automaton==1.14.0
|
||||
Babel==2.5.3
|
||||
bandit==1.4.0
|
||||
beautifulsoup4==4.6.0
|
||||
cachetools==2.0.1
|
||||
certifi==2018.1.18
|
||||
cffi==1.11.5
|
||||
chardet==3.0.4
|
||||
cliff==2.11.0
|
||||
cmd2==0.8.1
|
||||
contextlib2==0.5.5
|
||||
coverage==4.5.1
|
||||
croniter==0.3.20
|
||||
cryptography==2.1.4
|
||||
debtcollector==1.19.0
|
||||
decorator==4.2.1
|
||||
deprecation==2.0
|
||||
doc8==0.8.0
|
||||
docutils==0.14
|
||||
dogpile.cache==0.6.5
|
||||
dulwich==0.19.0
|
||||
enum-compat==0.0.2
|
||||
eventlet==0.20.0
|
||||
extras==1.0.0
|
||||
fasteners==0.14.1
|
||||
fixtures==3.0.0
|
||||
flake8==2.5.5
|
||||
freezegun==0.3.10
|
||||
future==0.16.0
|
||||
futurist==1.6.0
|
||||
gitdb2==2.0.3
|
||||
GitPython==2.1.8
|
||||
gnocchiclient==7.0.1
|
||||
greenlet==0.4.13
|
||||
hacking==0.12.0
|
||||
idna==2.6
|
||||
imagesize==1.0.0
|
||||
iso8601==0.1.12
|
||||
Jinja2==2.10
|
||||
jmespath==0.9.3
|
||||
jsonpatch==1.21
|
||||
jsonpointer==2.0
|
||||
jsonschema==2.6.0
|
||||
keystoneauth1==3.4.0
|
||||
keystonemiddleware==4.21.0
|
||||
kombu==4.1.0
|
||||
linecache2==1.0.0
|
||||
logutils==0.3.5
|
||||
lxml==4.1.1
|
||||
Mako==1.0.7
|
||||
MarkupSafe==1.0
|
||||
mccabe==0.2.1
|
||||
mock==2.0.0
|
||||
monotonic==1.4
|
||||
mox3==0.25.0
|
||||
msgpack==0.5.6
|
||||
munch==2.2.0
|
||||
netaddr==0.7.19
|
||||
netifaces==0.10.6
|
||||
networkx==1.11
|
||||
openstackdocstheme==1.20.0
|
||||
openstacksdk==0.12.0
|
||||
os-client-config==1.29.0
|
||||
os-service-types==1.2.0
|
||||
os-testr==1.0.0
|
||||
osc-lib==1.10.0
|
||||
oslo.cache==1.29.0
|
||||
oslo.concurrency==3.26.0
|
||||
oslo.config==5.2.0
|
||||
oslo.context==2.20.0
|
||||
oslo.db==4.35.0
|
||||
oslo.i18n==3.20.0
|
||||
oslo.log==3.37.0
|
||||
oslo.messaging==5.36.0
|
||||
oslo.middleware==3.35.0
|
||||
oslo.policy==1.34.0
|
||||
oslo.reports==1.27.0
|
||||
oslo.serialization==2.25.0
|
||||
oslo.service==1.30.0
|
||||
oslo.utils==3.36.0
|
||||
oslo.versionedobjects==1.32.0
|
||||
oslotest==3.3.0
|
||||
packaging==17.1
|
||||
Paste==2.0.3
|
||||
PasteDeploy==1.5.2
|
||||
pbr==3.1.1
|
||||
pecan==1.2.1
|
||||
pep8==1.5.7
|
||||
pika==0.10.0
|
||||
pika-pool==0.1.3
|
||||
prettytable==0.7.2
|
||||
psutil==5.4.3
|
||||
pycadf==2.7.0
|
||||
pycparser==2.18
|
||||
pyflakes==0.8.1
|
||||
Pygments==2.2.0
|
||||
pyinotify==0.9.6
|
||||
pyOpenSSL==17.5.0
|
||||
pyparsing==2.2.0
|
||||
pyperclip==1.6.0
|
||||
python-ceilometerclient==2.9.0
|
||||
python-cinderclient==3.5.0
|
||||
python-dateutil==2.7.0
|
||||
python-editor==1.0.3
|
||||
python-glanceclient==2.9.1
|
||||
python-ironicclient==2.3.0
|
||||
python-keystoneclient==3.15.0
|
||||
python-mimeparse==1.6.0
|
||||
python-monascaclient==1.10.0
|
||||
python-neutronclient==6.7.0
|
||||
python-novaclient==10.1.0
|
||||
python-openstackclient==3.14.0
|
||||
python-subunit==1.2.0
|
||||
pytz==2018.3
|
||||
PyYAML==3.12
|
||||
reno==2.7.0
|
||||
repoze.lru==0.7
|
||||
requests==2.18.4
|
||||
requestsexceptions==1.4.0
|
||||
restructuredtext-lint==1.1.3
|
||||
rfc3986==1.1.0
|
||||
Routes==2.4.1
|
||||
simplegeneric==0.8.1
|
||||
simplejson==3.13.2
|
||||
six==1.11.0
|
||||
smmap2==2.0.3
|
||||
snowballstemmer==1.2.1
|
||||
Sphinx==1.6.5
|
||||
sphinxcontrib-httpdomain==1.6.1
|
||||
sphinxcontrib-pecanwsme==0.8.0
|
||||
sphinxcontrib-websupport==1.0.1
|
||||
SQLAlchemy==1.2.5
|
||||
sqlalchemy-migrate==0.11.0
|
||||
sqlparse==0.2.4
|
||||
statsd==3.2.2
|
||||
stestr==2.0.0
|
||||
stevedore==1.28.0
|
||||
taskflow==3.1.0
|
||||
Tempita==0.5.2
|
||||
tenacity==4.9.0
|
||||
testrepository==0.0.20
|
||||
testresources==2.0.1
|
||||
testscenarios==0.5.0
|
||||
testtools==2.3.0
|
||||
traceback2==1.4.0
|
||||
tzlocal==1.5.1
|
||||
ujson==1.35
|
||||
unittest2==1.1.0
|
||||
urllib3==1.22
|
||||
vine==1.1.4
|
||||
voluptuous==0.11.1
|
||||
waitress==1.1.0
|
||||
warlock==1.3.0
|
||||
WebOb==1.7.4
|
||||
WebTest==2.0.29
|
||||
wrapt==1.10.11
|
||||
WSME==0.9.2
|
||||
@@ -1,15 +0,0 @@
|
||||
- hosts: primary
|
||||
tasks:
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=/logs/**
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
@@ -1,67 +0,0 @@
|
||||
- hosts: primary
|
||||
name: Legacy Watcher tempest base multinode
|
||||
tasks:
|
||||
|
||||
- name: Ensure legacy workspace directory
|
||||
file:
|
||||
path: '{{ ansible_user_dir }}/workspace'
|
||||
state: directory
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
cat > clonemap.yaml << EOF
|
||||
clonemap:
|
||||
- name: openstack-infra/devstack-gate
|
||||
dest: devstack-gate
|
||||
EOF
|
||||
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
|
||||
git://git.openstack.org \
|
||||
openstack-infra/devstack-gate
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
cat << 'EOF' >>"/tmp/dg-local.conf"
|
||||
[[local|localrc]]
|
||||
TEMPEST_PLUGINS='/opt/stack/new/watcher-tempest-plugin'
|
||||
enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
|
||||
# Enable watcher devstack plugin.
|
||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
||||
|
||||
EOF
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
|
||||
export DEVSTACK_SUBNODE_CONFIG=" "
|
||||
export PYTHONUNBUFFERED=true
|
||||
export DEVSTACK_GATE_TEMPEST=1
|
||||
export DEVSTACK_GATE_NEUTRON=1
|
||||
export DEVSTACK_GATE_TOPOLOGY="multinode"
|
||||
export PROJECTS="openstack/watcher $PROJECTS"
|
||||
export PROJECTS="openstack/python-watcherclient $PROJECTS"
|
||||
export PROJECTS="openstack/watcher-tempest-plugin $PROJECTS"
|
||||
|
||||
export DEVSTACK_GATE_TEMPEST_REGEX="watcher_tempest_plugin"
|
||||
|
||||
export BRANCH_OVERRIDE=default
|
||||
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
|
||||
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
|
||||
fi
|
||||
|
||||
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
|
||||
./safe-devstack-vm-gate-wrap.sh
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
@@ -1,80 +0,0 @@
|
||||
- hosts: primary
|
||||
tasks:
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=**/*nose_results.html
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=**/*testr_results.html.gz
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=/.testrepository/tmp*
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=**/*testrepository.subunit.gz
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}/tox'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=/.tox/*/log/*
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=/logs/**
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
@@ -1,64 +0,0 @@
|
||||
- hosts: all
|
||||
name: Legacy watcherclient-dsvm-functional
|
||||
tasks:
|
||||
|
||||
- name: Ensure legacy workspace directory
|
||||
file:
|
||||
path: '{{ ansible_user_dir }}/workspace'
|
||||
state: directory
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
cat > clonemap.yaml << EOF
|
||||
clonemap:
|
||||
- name: openstack-infra/devstack-gate
|
||||
dest: devstack-gate
|
||||
EOF
|
||||
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
|
||||
git://git.openstack.org \
|
||||
openstack-infra/devstack-gate
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
cat << 'EOF' >>"/tmp/dg-local.conf"
|
||||
[[local|localrc]]
|
||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
||||
|
||||
EOF
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
ENABLED_SERVICES=tempest
|
||||
ENABLED_SERVICES+=,watcher-api,watcher-decision-engine,watcher-applier
|
||||
export ENABLED_SERVICES
|
||||
|
||||
export PYTHONUNBUFFERED=true
|
||||
export BRANCH_OVERRIDE=default
|
||||
export PROJECTS="openstack/watcher $PROJECTS"
|
||||
export DEVSTACK_PROJECT_FROM_GIT=python-watcherclient
|
||||
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
|
||||
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
|
||||
fi
|
||||
function post_test_hook {
|
||||
# Configure and run functional tests
|
||||
$BASE/new/python-watcherclient/watcherclient/tests/functional/hooks/post_test_hook.sh
|
||||
}
|
||||
export -f post_test_hook
|
||||
|
||||
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
|
||||
./safe-devstack-vm-gate-wrap.sh
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
14
playbooks/orchestrate-tempest.yaml
Normal file
14
playbooks/orchestrate-tempest.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
- hosts: all
|
||||
# This is the default strategy, however since orchestrate-devstack requires
|
||||
# "linear", it is safer to enforce it in case this is running in an
|
||||
# environment configured with a different default strategy.
|
||||
strategy: linear
|
||||
roles:
|
||||
- orchestrate-devstack
|
||||
|
||||
- hosts: tempest
|
||||
roles:
|
||||
- setup-tempest-run-dir
|
||||
- setup-tempest-data-dir
|
||||
- acl-devstack-files
|
||||
- run-tempest
|
||||
3
playbooks/pre.yaml
Normal file
3
playbooks/pre.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
- hosts: all
|
||||
roles:
|
||||
- add-hostnames-to-hosts
|
||||
@@ -29,7 +29,7 @@ Useful links
|
||||
|
||||
* How to install: https://docs.openstack.org/rally/latest/install_and_upgrade/install.html
|
||||
|
||||
* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
|
||||
* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/quick_start/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
|
||||
|
||||
* More about Rally: https://docs.openstack.org/rally/latest/
|
||||
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
---
|
||||
features:
|
||||
- Audits have 'name' field now, that is more friendly to end users.
|
||||
Audit's name can't exceed 63 characters.
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
features:
|
||||
- Watcher has a whole scope of the cluster, when building
|
||||
compute CDM which includes all instances.
|
||||
It filters excluded instances when migration during the
|
||||
audit.
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
features:
|
||||
- Watcher got an ability to calculate multiple global efficacy indicators
|
||||
during audit's execution. Now global efficacy can be calculated for many
|
||||
resource types (like volumes, instances, network) if strategy supports
|
||||
efficacy indicators.
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
features:
|
||||
- Added notifications about cancelling of action plan.
|
||||
Now event based plugins know when action plan cancel
|
||||
started and completed.
|
||||
@@ -0,0 +1,14 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Instance cold migration logic is now replaced with using Nova migrate
|
||||
Server(migrate Action) API which has host option since v2.56.
|
||||
upgrade:
|
||||
- |
|
||||
Nova API version is now set to 2.56 by default. This needs the migrate
|
||||
action of migration type cold with destination_node parameter to work.
|
||||
fixes:
|
||||
- |
|
||||
The migrate action of migration type cold with destination_node parameter
|
||||
was fixed. Before fixing, it booted an instance in the service project
|
||||
as a migrated instance.
|
||||
@@ -21,6 +21,7 @@ Contents:
|
||||
:maxdepth: 1
|
||||
|
||||
unreleased
|
||||
queens
|
||||
pike
|
||||
ocata
|
||||
newton
|
||||
|
||||
@@ -4,15 +4,15 @@ msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: watcher\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"POT-Creation-Date: 2018-01-19 11:46+0000\n"
|
||||
"POT-Creation-Date: 2018-02-28 12:27+0000\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"PO-Revision-Date: 2018-01-19 07:16+0000\n"
|
||||
"PO-Revision-Date: 2018-02-16 07:20+0000\n"
|
||||
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
||||
"Language-Team: English (United Kingdom)\n"
|
||||
"Language: en-GB\n"
|
||||
"X-Generator: Zanata 3.9.6\n"
|
||||
"Language: en_GB\n"
|
||||
"X-Generator: Zanata 4.3.3\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
|
||||
|
||||
msgid "0.29.0"
|
||||
@@ -42,8 +42,8 @@ msgstr "1.5.0"
|
||||
msgid "1.6.0"
|
||||
msgstr "1.6.0"
|
||||
|
||||
msgid "1.6.0-32"
|
||||
msgstr "1.6.0-32"
|
||||
msgid "1.7.0"
|
||||
msgstr "1.7.0"
|
||||
|
||||
msgid "Add a service supervisor to watch Watcher deamons."
|
||||
msgstr "Add a service supervisor to watch Watcher daemons."
|
||||
@@ -136,6 +136,17 @@ msgstr ""
|
||||
"Added a way to add a new action without having to amend the source code of "
|
||||
"the default planner."
|
||||
|
||||
msgid ""
|
||||
"Added a way to check state of strategy before audit's execution. "
|
||||
"Administrator can use \"watcher strategy state <strategy_name>\" command to "
|
||||
"get information about metrics' availability, datasource's availability and "
|
||||
"CDM's availability."
|
||||
msgstr ""
|
||||
"Added a way to check state of strategy before audit's execution. "
|
||||
"Administrator can use \"watcher strategy state <strategy_name>\" command to "
|
||||
"get information about metrics' availability, datasource's availability and "
|
||||
"CDM's availability."
|
||||
|
||||
msgid ""
|
||||
"Added a way to compare the efficacy of different strategies for a give "
|
||||
"optimization goal."
|
||||
@@ -183,9 +194,28 @@ msgstr ""
|
||||
"Added Gnocchi support as data source for metrics. Administrator can change "
|
||||
"data source for each strategy using config file."
|
||||
|
||||
msgid ""
|
||||
"Added notifications about cancelling of action plan. Now event based plugins "
|
||||
"know when action plan cancel started and completed."
|
||||
msgstr ""
|
||||
"Added notifications about cancelling of action plan. Now event based plugins "
|
||||
"know when action plan cancel started and completed."
|
||||
|
||||
msgid "Added policies to handle user rights to access Watcher API."
|
||||
msgstr "Added policies to handle user rights to access Watcher API."
|
||||
|
||||
msgid "Added storage capacity balance strategy."
|
||||
msgstr "Added storage capacity balance strategy."
|
||||
|
||||
msgid ""
|
||||
"Added strategy \"Zone migration\" and it's goal \"Hardware maintenance\". "
|
||||
"The strategy migrates many instances and volumes efficiently with minimum "
|
||||
"downtime automatically."
|
||||
msgstr ""
|
||||
"Added strategy \"Zone migration\" and it's goal \"Hardware maintenance\". "
|
||||
"The strategy migrates many instances and volumes efficiently with minimum "
|
||||
"downtime automatically."
|
||||
|
||||
msgid ""
|
||||
"Added strategy to identify and migrate a Noisy Neighbor - a low priority VM "
|
||||
"that negatively affects peformance of a high priority VM by over utilizing "
|
||||
@@ -212,6 +242,13 @@ msgstr "Added using of JSONSchema instead of voluptuous to validate Actions."
|
||||
msgid "Added volume migrate action"
|
||||
msgstr "Added volume migrate action"
|
||||
|
||||
msgid ""
|
||||
"Adds audit scoper for storage data model, now watcher users can specify "
|
||||
"audit scope for storage CDM in the same manner as compute scope."
|
||||
msgstr ""
|
||||
"Adds audit scoper for storage data model, now watcher users can specify "
|
||||
"audit scope for storage CDM in the same manner as compute scope."
|
||||
|
||||
msgid "Adds baremetal data model in Watcher"
|
||||
msgstr "Adds baremetal data model in Watcher"
|
||||
|
||||
@@ -224,6 +261,13 @@ msgstr ""
|
||||
"threshold, to selected strategy, also strategy to provide parameters info to "
|
||||
"end user."
|
||||
|
||||
msgid ""
|
||||
"Audits have 'name' field now, that is more friendly to end users. Audit's "
|
||||
"name can't exceed 63 characters."
|
||||
msgstr ""
|
||||
"Audits have 'name' field now, that is more friendly to end users. Audit's "
|
||||
"name can't exceed 63 characters."
|
||||
|
||||
msgid "Centralize all configuration options for Watcher."
|
||||
msgstr "Centralise all configuration options for Watcher."
|
||||
|
||||
@@ -305,6 +349,9 @@ msgstr ""
|
||||
"resources will be called \"Audit scope\" and will be defined in each audit "
|
||||
"template (which contains the audit settings)."
|
||||
|
||||
msgid "Queens Series Release Notes"
|
||||
msgstr "Queens Series Release Notes"
|
||||
|
||||
msgid ""
|
||||
"The graph model describes how VMs are associated to compute hosts. This "
|
||||
"allows for seeing relationships upfront between the entities and hence can "
|
||||
@@ -348,6 +395,17 @@ msgstr ""
|
||||
msgid "Watcher database can now be upgraded thanks to Alembic."
|
||||
msgstr "Watcher database can now be upgraded thanks to Alembic."
|
||||
|
||||
msgid ""
|
||||
"Watcher got an ability to calculate multiple global efficacy indicators "
|
||||
"during audit's execution. Now global efficacy can be calculated for many "
|
||||
"resource types (like volumes, instances, network) if strategy supports "
|
||||
"efficacy indicators."
|
||||
msgstr ""
|
||||
"Watcher got an ability to calculate multiple global efficacy indicators "
|
||||
"during audit's execution. Now global efficacy can be calculated for many "
|
||||
"resource types (like volumes, instances, network) if strategy supports "
|
||||
"efficacy indicators."
|
||||
|
||||
msgid ""
|
||||
"Watcher supports multiple metrics backend and relies on Ceilometer and "
|
||||
"Monasca."
|
||||
|
||||
6
releasenotes/source/queens.rst
Normal file
6
releasenotes/source/queens.rst
Normal file
@@ -0,0 +1,6 @@
|
||||
===================================
|
||||
Queens Series Release Notes
|
||||
===================================
|
||||
|
||||
.. release-notes::
|
||||
:branch: stable/queens
|
||||
@@ -5,14 +5,14 @@
|
||||
apscheduler>=3.0.5 # MIT License
|
||||
enum34>=1.0.4;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
|
||||
jsonpatch!=1.20,>=1.16 # BSD
|
||||
keystoneauth1>=3.3.0 # Apache-2.0
|
||||
keystoneauth1>=3.4.0 # Apache-2.0
|
||||
jsonschema<3.0.0,>=2.6.0 # MIT
|
||||
keystonemiddleware>=4.17.0 # Apache-2.0
|
||||
lxml!=3.7.0,>=3.4.1 # BSD
|
||||
croniter>=0.3.4 # MIT License
|
||||
oslo.concurrency>=3.25.0 # Apache-2.0
|
||||
oslo.concurrency>=3.26.0 # Apache-2.0
|
||||
oslo.cache>=1.26.0 # Apache-2.0
|
||||
oslo.config>=5.1.0 # Apache-2.0
|
||||
oslo.config>=5.2.0 # Apache-2.0
|
||||
oslo.context>=2.19.2 # Apache-2.0
|
||||
oslo.db>=4.27.0 # Apache-2.0
|
||||
oslo.i18n>=3.15.3 # Apache-2.0
|
||||
@@ -23,7 +23,7 @@ oslo.reports>=1.18.0 # Apache-2.0
|
||||
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
|
||||
oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
|
||||
oslo.utils>=3.33.0 # Apache-2.0
|
||||
oslo.versionedobjects>=1.28.0 # Apache-2.0
|
||||
oslo.versionedobjects>=1.31.2 # Apache-2.0
|
||||
PasteDeploy>=1.5.0 # MIT
|
||||
pbr!=2.1.0,>=2.0.0 # Apache-2.0
|
||||
pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
|
||||
@@ -35,10 +35,10 @@ python-cinderclient>=3.3.0 # Apache-2.0
|
||||
python-glanceclient>=2.8.0 # Apache-2.0
|
||||
python-keystoneclient>=3.8.0 # Apache-2.0
|
||||
python-monascaclient>=1.7.0 # Apache-2.0
|
||||
python-neutronclient>=6.3.0 # Apache-2.0
|
||||
python-neutronclient>=6.7.0 # Apache-2.0
|
||||
python-novaclient>=9.1.0 # Apache-2.0
|
||||
python-openstackclient>=3.12.0 # Apache-2.0
|
||||
python-ironicclient>=1.14.0 # Apache-2.0
|
||||
python-ironicclient>=2.3.0 # Apache-2.0
|
||||
six>=1.10.0 # MIT
|
||||
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
|
||||
stevedore>=1.20.0 # Apache-2.0
|
||||
|
||||
16
roles/add-hostnames-to-hosts/tasks/main.yaml
Normal file
16
roles/add-hostnames-to-hosts/tasks/main.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
- name: Set up the list of hostnames and addresses
|
||||
set_fact:
|
||||
hostname_addresses: >
|
||||
{% set hosts = {} -%}
|
||||
{% for host, vars in hostvars.items() -%}
|
||||
{% set _ = hosts.update({vars['ansible_hostname']: vars['nodepool']['private_ipv4']}) -%}
|
||||
{% endfor -%}
|
||||
{{- hosts -}}
|
||||
- name: Add inventory hostnames to the hosts file
|
||||
become: yes
|
||||
lineinfile:
|
||||
dest: /etc/hosts
|
||||
state: present
|
||||
insertafter: EOF
|
||||
line: "{{ item.value }} {{ item.key }}"
|
||||
with_dict: "{{ hostname_addresses }}"
|
||||
@@ -15,7 +15,7 @@ testtools>=2.2.0 # MIT
|
||||
|
||||
# Doc requirements
|
||||
openstackdocstheme>=1.18.1 # Apache-2.0
|
||||
sphinx!=1.6.6,>=1.6.2 # BSD
|
||||
sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
|
||||
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
|
||||
|
||||
|
||||
|
||||
9
tox.ini
9
tox.ini
@@ -55,7 +55,7 @@ filename = *.py,app.wsgi
|
||||
show-source=True
|
||||
ignore= H105,E123,E226,N320,H202
|
||||
builtins= _
|
||||
enable-extensions = H106,H203
|
||||
enable-extensions = H106,H203,H904
|
||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes
|
||||
|
||||
[testenv:wheel]
|
||||
@@ -76,3 +76,10 @@ commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasen
|
||||
[testenv:bandit]
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
commands = bandit -r watcher -x tests -n5 -ll -s B320
|
||||
|
||||
[testenv:lower-constraints]
|
||||
basepython = python3
|
||||
deps =
|
||||
-c{toxinidir}/lower-constraints.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
-r{toxinidir}/requirements.txt
|
||||
|
||||
@@ -205,7 +205,7 @@ class ActionCollection(collection.Collection):
|
||||
collection = ActionCollection()
|
||||
collection.actions = [Action.convert_with_links(p, expand)
|
||||
for p in actions]
|
||||
|
||||
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||
return collection
|
||||
|
||||
@classmethod
|
||||
@@ -232,6 +232,10 @@ class ActionsController(rest.RestController):
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None,
|
||||
action_plan_uuid=None, audit_uuid=None):
|
||||
additional_fields = ['action_plan_uuid']
|
||||
|
||||
api_utils.validate_sort_key(sort_key, list(objects.Action.fields) +
|
||||
additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
@@ -247,7 +251,10 @@ class ActionsController(rest.RestController):
|
||||
if audit_uuid:
|
||||
filters['audit_uuid'] = audit_uuid
|
||||
|
||||
sort_db_key = sort_key
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
actions = objects.Action.list(pecan.request.context,
|
||||
limit,
|
||||
@@ -255,11 +262,15 @@ class ActionsController(rest.RestController):
|
||||
sort_dir=sort_dir,
|
||||
filters=filters)
|
||||
|
||||
return ActionCollection.convert_with_links(actions, limit,
|
||||
url=resource_url,
|
||||
expand=expand,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
actions_collection = ActionCollection.convert_with_links(
|
||||
actions, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(actions_collection.actions,
|
||||
sort_key, sort_dir)
|
||||
|
||||
return actions_collection
|
||||
|
||||
@wsme_pecan.wsexpose(ActionCollection, types.uuid, int,
|
||||
wtypes.text, wtypes.text, types.uuid,
|
||||
|
||||
@@ -305,17 +305,6 @@ class ActionPlanCollection(collection.Collection):
|
||||
ap_collection = ActionPlanCollection()
|
||||
ap_collection.action_plans = [ActionPlan.convert_with_links(
|
||||
p, expand) for p in rpc_action_plans]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'audit_uuid':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
ap_collection.action_plans = sorted(
|
||||
ap_collection.action_plans,
|
||||
key=lambda action_plan: action_plan.audit_uuid,
|
||||
reverse=reverse)
|
||||
|
||||
ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs)
|
||||
return ap_collection
|
||||
|
||||
@@ -344,7 +333,10 @@ class ActionPlansController(rest.RestController):
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None, audit_uuid=None,
|
||||
strategy=None):
|
||||
additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name']
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.ActionPlan.fields) + additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
@@ -363,10 +355,10 @@ class ActionPlansController(rest.RestController):
|
||||
else:
|
||||
filters['strategy_name'] = strategy
|
||||
|
||||
if sort_key == 'audit_uuid':
|
||||
sort_db_key = None
|
||||
else:
|
||||
sort_db_key = sort_key
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
action_plans = objects.ActionPlan.list(
|
||||
pecan.request.context,
|
||||
@@ -374,12 +366,15 @@ class ActionPlansController(rest.RestController):
|
||||
marker_obj, sort_key=sort_db_key,
|
||||
sort_dir=sort_dir, filters=filters)
|
||||
|
||||
return ActionPlanCollection.convert_with_links(
|
||||
action_plans, limit,
|
||||
url=resource_url,
|
||||
expand=expand,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
action_plans_collection = ActionPlanCollection.convert_with_links(
|
||||
action_plans, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(action_plans_collection.action_plans,
|
||||
sort_key, sort_dir)
|
||||
|
||||
return action_plans_collection
|
||||
|
||||
@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
|
||||
wtypes.text, types.uuid, wtypes.text)
|
||||
|
||||
@@ -389,17 +389,6 @@ class AuditCollection(collection.Collection):
|
||||
collection = AuditCollection()
|
||||
collection.audits = [Audit.convert_with_links(p, expand)
|
||||
for p in rpc_audits]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'goal_uuid':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
collection.audits = sorted(
|
||||
collection.audits,
|
||||
key=lambda audit: audit.goal_uuid,
|
||||
reverse=reverse)
|
||||
|
||||
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||
return collection
|
||||
|
||||
@@ -427,8 +416,14 @@ class AuditsController(rest.RestController):
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None, goal=None,
|
||||
strategy=None):
|
||||
additional_fields = ["goal_uuid", "goal_name", "strategy_uuid",
|
||||
"strategy_name"]
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.Audit.fields) + additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.Audit.get_by_uuid(pecan.request.context,
|
||||
@@ -449,23 +444,25 @@ class AuditsController(rest.RestController):
|
||||
# TODO(michaelgugino): add method to get goal by name.
|
||||
filters['strategy_name'] = strategy
|
||||
|
||||
if sort_key == 'goal_uuid':
|
||||
sort_db_key = 'goal_id'
|
||||
elif sort_key == 'strategy_uuid':
|
||||
sort_db_key = 'strategy_id'
|
||||
else:
|
||||
sort_db_key = sort_key
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
audits = objects.Audit.list(pecan.request.context,
|
||||
limit,
|
||||
marker_obj, sort_key=sort_db_key,
|
||||
sort_dir=sort_dir, filters=filters)
|
||||
|
||||
return AuditCollection.convert_with_links(audits, limit,
|
||||
url=resource_url,
|
||||
expand=expand,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
audits_collection = AuditCollection.convert_with_links(
|
||||
audits, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(audits_collection.audits, sort_key,
|
||||
sort_dir)
|
||||
|
||||
return audits_collection
|
||||
|
||||
@wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text,
|
||||
wtypes.text, wtypes.text, wtypes.text, int)
|
||||
|
||||
@@ -474,9 +474,13 @@ class AuditTemplatesController(rest.RestController):
|
||||
def _get_audit_templates_collection(self, filters, marker, limit,
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None):
|
||||
additional_fields = ["goal_uuid", "goal_name", "strategy_uuid",
|
||||
"strategy_name"]
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.AuditTemplate.fields) + additional_fields)
|
||||
api_utils.validate_search_filters(
|
||||
filters, list(objects.audit_template.AuditTemplate.fields) +
|
||||
["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"])
|
||||
filters, list(objects.AuditTemplate.fields) + additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
@@ -486,19 +490,26 @@ class AuditTemplatesController(rest.RestController):
|
||||
pecan.request.context,
|
||||
marker)
|
||||
|
||||
audit_templates = objects.AuditTemplate.list(
|
||||
pecan.request.context,
|
||||
filters,
|
||||
limit,
|
||||
marker_obj, sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
return AuditTemplateCollection.convert_with_links(audit_templates,
|
||||
limit,
|
||||
url=resource_url,
|
||||
expand=expand,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
audit_templates = objects.AuditTemplate.list(
|
||||
pecan.request.context, filters, limit, marker_obj,
|
||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||
|
||||
audit_templates_collection = \
|
||||
AuditTemplateCollection.convert_with_links(
|
||||
audit_templates, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(
|
||||
audit_templates_collection.audit_templates, sort_key,
|
||||
sort_dir)
|
||||
|
||||
return audit_templates_collection
|
||||
|
||||
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text,
|
||||
types.uuid, int, wtypes.text, wtypes.text)
|
||||
|
||||
@@ -130,17 +130,6 @@ class GoalCollection(collection.Collection):
|
||||
goal_collection = GoalCollection()
|
||||
goal_collection.goals = [
|
||||
Goal.convert_with_links(g, expand) for g in goals]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'strategy':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
goal_collection.goals = sorted(
|
||||
goal_collection.goals,
|
||||
key=lambda goal: goal.uuid,
|
||||
reverse=reverse)
|
||||
|
||||
goal_collection.next = goal_collection.get_next(
|
||||
limit, url=url, **kwargs)
|
||||
return goal_collection
|
||||
@@ -167,17 +156,19 @@ class GoalsController(rest.RestController):
|
||||
|
||||
def _get_goals_collection(self, marker, limit, sort_key, sort_dir,
|
||||
expand=False, resource_url=None):
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.Goal.fields))
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Goal.fields
|
||||
else None)
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.Goal.get_by_uuid(
|
||||
pecan.request.context, marker)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Goal.fields
|
||||
else None)
|
||||
|
||||
goals = objects.Goal.list(pecan.request.context, limit, marker_obj,
|
||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||
|
||||
|
||||
@@ -123,17 +123,6 @@ class ScoringEngineCollection(collection.Collection):
|
||||
collection = ScoringEngineCollection()
|
||||
collection.scoring_engines = [ScoringEngine.convert_with_links(
|
||||
se, expand) for se in scoring_engines]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'name':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
collection.goals = sorted(
|
||||
collection.scoring_engines,
|
||||
key=lambda se: se.name,
|
||||
reverse=reverse)
|
||||
|
||||
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||
return collection
|
||||
|
||||
@@ -160,7 +149,8 @@ class ScoringEngineController(rest.RestController):
|
||||
def _get_scoring_engines_collection(self, marker, limit,
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None):
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.ScoringEngine.fields))
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
@@ -171,7 +161,8 @@ class ScoringEngineController(rest.RestController):
|
||||
|
||||
filters = {}
|
||||
|
||||
sort_db_key = sort_key
|
||||
sort_db_key = (sort_key if sort_key in objects.ScoringEngine.fields
|
||||
else None)
|
||||
|
||||
scoring_engines = objects.ScoringEngine.list(
|
||||
context=pecan.request.context,
|
||||
|
||||
@@ -154,17 +154,6 @@ class ServiceCollection(collection.Collection):
|
||||
service_collection = ServiceCollection()
|
||||
service_collection.services = [
|
||||
Service.convert_with_links(g, expand) for g in services]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'service':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
service_collection.services = sorted(
|
||||
service_collection.services,
|
||||
key=lambda service: service.id,
|
||||
reverse=reverse)
|
||||
|
||||
service_collection.next = service_collection.get_next(
|
||||
limit, url=url, marker_field='id', **kwargs)
|
||||
return service_collection
|
||||
@@ -191,17 +180,19 @@ class ServicesController(rest.RestController):
|
||||
|
||||
def _get_services_collection(self, marker, limit, sort_key, sort_dir,
|
||||
expand=False, resource_url=None):
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.Service.fields))
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Service.fields
|
||||
else None)
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.Service.get(
|
||||
pecan.request.context, marker)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Service.fields
|
||||
else None)
|
||||
|
||||
services = objects.Service.list(
|
||||
pecan.request.context, limit, marker_obj,
|
||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||
|
||||
@@ -173,17 +173,6 @@ class StrategyCollection(collection.Collection):
|
||||
strategy_collection = StrategyCollection()
|
||||
strategy_collection.strategies = [
|
||||
Strategy.convert_with_links(g, expand) for g in strategies]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'strategy':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
strategy_collection.strategies = sorted(
|
||||
strategy_collection.strategies,
|
||||
key=lambda strategy: strategy.uuid,
|
||||
reverse=reverse)
|
||||
|
||||
strategy_collection.next = strategy_collection.get_next(
|
||||
limit, url=url, **kwargs)
|
||||
return strategy_collection
|
||||
@@ -211,28 +200,39 @@ class StrategiesController(rest.RestController):
|
||||
|
||||
def _get_strategies_collection(self, filters, marker, limit, sort_key,
|
||||
sort_dir, expand=False, resource_url=None):
|
||||
additional_fields = ["goal_uuid", "goal_name"]
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.Strategy.fields) + additional_fields)
|
||||
api_utils.validate_search_filters(
|
||||
filters, list(objects.strategy.Strategy.fields) +
|
||||
["goal_uuid", "goal_name"])
|
||||
filters, list(objects.Strategy.fields) + additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Strategy.fields
|
||||
else None)
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.Strategy.get_by_uuid(
|
||||
pecan.request.context, marker)
|
||||
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
strategies = objects.Strategy.list(
|
||||
pecan.request.context, limit, marker_obj, filters=filters,
|
||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||
|
||||
return StrategyCollection.convert_with_links(
|
||||
strategies_collection = StrategyCollection.convert_with_links(
|
||||
strategies, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(strategies_collection.strategies,
|
||||
sort_key, sort_dir)
|
||||
|
||||
return strategies_collection
|
||||
|
||||
@wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text,
|
||||
int, wtypes.text, wtypes.text)
|
||||
def get_all(self, goal=None, marker=None, limit=None,
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from operator import attrgetter
|
||||
|
||||
import jsonpatch
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import reflection
|
||||
@@ -54,6 +56,13 @@ def validate_sort_dir(sort_dir):
|
||||
"'asc' or 'desc'") % sort_dir)
|
||||
|
||||
|
||||
def validate_sort_key(sort_key, allowed_fields):
|
||||
# Very lightweight validation for now
|
||||
if sort_key not in allowed_fields:
|
||||
raise wsme.exc.ClientSideError(
|
||||
_("Invalid sort key: %s") % sort_key)
|
||||
|
||||
|
||||
def validate_search_filters(filters, allowed_fields):
|
||||
# Very lightweight validation for now
|
||||
# todo: improve this (e.g. https://www.parse.com/docs/rest/guide/#queries)
|
||||
@@ -63,6 +72,19 @@ def validate_search_filters(filters, allowed_fields):
|
||||
_("Invalid filter: %s") % filter_name)
|
||||
|
||||
|
||||
def check_need_api_sort(sort_key, additional_fields):
|
||||
return sort_key in additional_fields
|
||||
|
||||
|
||||
def make_api_sort(sorting_list, sort_key, sort_dir):
|
||||
# First sort by uuid field, than sort by sort_key
|
||||
# sort() ensures stable sorting, so we could
|
||||
# make lexicographical sort
|
||||
reverse_direction = (sort_dir == 'desc')
|
||||
sorting_list.sort(key=attrgetter('uuid'), reverse=reverse_direction)
|
||||
sorting_list.sort(key=attrgetter(sort_key), reverse=reverse_direction)
|
||||
|
||||
|
||||
def apply_jsonpatch(doc, patch):
|
||||
for p in patch:
|
||||
if p['op'] == 'add' and p['path'].count('/') == 1:
|
||||
|
||||
@@ -63,7 +63,7 @@ class ContextHook(hooks.PecanHook):
|
||||
auth_url = headers.get('X-Auth-Url')
|
||||
if auth_url is None:
|
||||
importutils.import_module('keystonemiddleware.auth_token')
|
||||
auth_url = cfg.CONF.keystone_authtoken.auth_uri
|
||||
auth_url = cfg.CONF.keystone_authtoken.www_authenticate_uri
|
||||
|
||||
state.request.context = context.make_context(
|
||||
auth_token=auth_token,
|
||||
|
||||
@@ -50,6 +50,12 @@ class Migrate(base.BaseAction):
|
||||
source and the destination compute hostname (list of available compute
|
||||
hosts is returned by this command: ``nova service-list --binary
|
||||
nova-compute``).
|
||||
|
||||
.. note::
|
||||
|
||||
Nova API version must be 2.56 or above if `destination_node` parameter
|
||||
is given.
|
||||
|
||||
"""
|
||||
|
||||
# input parameters constants
|
||||
@@ -113,8 +119,10 @@ class Migrate(base.BaseAction):
|
||||
dest_hostname=destination)
|
||||
except nova_helper.nvexceptions.ClientException as e:
|
||||
LOG.debug("Nova client exception occurred while live "
|
||||
"migrating instance %s.Exception: %s" %
|
||||
(self.instance_uuid, e))
|
||||
"migrating instance "
|
||||
"%(instance)s.Exception: %(exception)s",
|
||||
{'instance': self.instance_uuid, 'exception': e})
|
||||
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
LOG.critical("Unexpected error occurred. Migration failed for "
|
||||
|
||||
@@ -40,10 +40,10 @@ def main():
|
||||
|
||||
if host == '127.0.0.1':
|
||||
LOG.info('serving on 127.0.0.1:%(port)s, '
|
||||
'view at %(protocol)s://127.0.0.1:%(port)s' %
|
||||
'view at %(protocol)s://127.0.0.1:%(port)s',
|
||||
dict(protocol=protocol, port=port))
|
||||
else:
|
||||
LOG.info('serving on %(protocol)s://%(host)s:%(port)s' %
|
||||
LOG.info('serving on %(protocol)s://%(host)s:%(port)s',
|
||||
dict(protocol=protocol, host=host, port=port))
|
||||
|
||||
api_schedule = scheduling.APISchedulingService()
|
||||
|
||||
@@ -75,7 +75,7 @@ class CinderHelper(object):
|
||||
search_opts={'all_tenants': True})
|
||||
|
||||
def get_volume_type_by_backendname(self, backendname):
|
||||
"""Retrun a list of volume type"""
|
||||
"""Return a list of volume type"""
|
||||
volume_type_list = self.get_volume_type_list()
|
||||
|
||||
volume_type = [volume_type.name for volume_type in volume_type_list
|
||||
@@ -139,13 +139,13 @@ class CinderHelper(object):
|
||||
volume = self.get_volume(volume.id)
|
||||
time.sleep(retry_interval)
|
||||
retry -= 1
|
||||
LOG.debug("retry count: %s" % retry)
|
||||
LOG.debug("Waiting to complete deletion of volume %s" % volume.id)
|
||||
LOG.debug("retry count: %s", retry)
|
||||
LOG.debug("Waiting to complete deletion of volume %s", volume.id)
|
||||
if self._can_get_volume(volume.id):
|
||||
LOG.error("Volume deletion error: %s" % volume.id)
|
||||
LOG.error("Volume deletion error: %s", volume.id)
|
||||
return False
|
||||
|
||||
LOG.debug("Volume %s was deleted successfully." % volume.id)
|
||||
LOG.debug("Volume %s was deleted successfully.", volume.id)
|
||||
return True
|
||||
|
||||
def check_migrated(self, volume, retry_interval=10):
|
||||
@@ -179,8 +179,7 @@ class CinderHelper(object):
|
||||
LOG.error(error_msg)
|
||||
return False
|
||||
LOG.debug(
|
||||
"Volume migration succeeded : "
|
||||
"volume %s is now on host '%s'." % (
|
||||
"Volume migration succeeded : volume %s is now on host '%s'.", (
|
||||
volume.id, host_name))
|
||||
return True
|
||||
|
||||
@@ -194,8 +193,8 @@ class CinderHelper(object):
|
||||
message=(_("Volume type must be same for migrating")))
|
||||
|
||||
source_node = getattr(volume, 'os-vol-host-attr:host')
|
||||
LOG.debug("Volume %s found on host '%s'."
|
||||
% (volume.id, source_node))
|
||||
LOG.debug("Volume %s found on host '%s'.",
|
||||
(volume.id, source_node))
|
||||
|
||||
self.cinder.volumes.migrate_volume(
|
||||
volume, dest_node, False, True)
|
||||
@@ -211,8 +210,8 @@ class CinderHelper(object):
|
||||
|
||||
source_node = getattr(volume, 'os-vol-host-attr:host')
|
||||
LOG.debug(
|
||||
"Volume %s found on host '%s'." % (
|
||||
volume.id, source_node))
|
||||
"Volume %s found on host '%s'.",
|
||||
(volume.id, source_node))
|
||||
|
||||
self.cinder.volumes.retype(
|
||||
volume, dest_type, "on-demand")
|
||||
@@ -234,14 +233,14 @@ class CinderHelper(object):
|
||||
LOG.debug('Waiting volume creation of {0}'.format(new_volume))
|
||||
time.sleep(retry_interval)
|
||||
retry -= 1
|
||||
LOG.debug("retry count: %s" % retry)
|
||||
LOG.debug("retry count: %s", retry)
|
||||
|
||||
if getattr(new_volume, 'status') != 'available':
|
||||
error_msg = (_("Failed to create volume '%(volume)s. ") %
|
||||
{'volume': new_volume.id})
|
||||
raise Exception(error_msg)
|
||||
|
||||
LOG.debug("Volume %s was created successfully." % new_volume)
|
||||
LOG.debug("Volume %s was created successfully.", new_volume)
|
||||
return new_volume
|
||||
|
||||
def delete_volume(self, volume):
|
||||
|
||||
@@ -62,6 +62,7 @@ class RequestContext(context.RequestContext):
|
||||
# safely ignore this as we don't use it.
|
||||
kwargs.pop('user_identity', None)
|
||||
kwargs.pop('global_request_id', None)
|
||||
kwargs.pop('project', None)
|
||||
if kwargs:
|
||||
LOG.warning('Arguments dropped when creating context: %s',
|
||||
str(kwargs))
|
||||
|
||||
@@ -305,7 +305,7 @@ class ActionFilterCombinationProhibited(Invalid):
|
||||
|
||||
|
||||
class UnsupportedActionType(UnsupportedError):
|
||||
msg_fmt = _("Provided %(action_type) is not supported yet")
|
||||
msg_fmt = _("Provided %(action_type)s is not supported yet")
|
||||
|
||||
|
||||
class EfficacyIndicatorNotFound(ResourceNotFound):
|
||||
|
||||
@@ -17,9 +17,9 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import random
|
||||
import time
|
||||
|
||||
from novaclient import api_versions
|
||||
from oslo_log import log
|
||||
|
||||
import cinderclient.exceptions as ciexceptions
|
||||
@@ -29,9 +29,12 @@ import novaclient.exceptions as nvexceptions
|
||||
from watcher.common import clients
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils
|
||||
from watcher import conf
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
CONF = conf.CONF
|
||||
|
||||
|
||||
class NovaHelper(object):
|
||||
|
||||
@@ -52,14 +55,21 @@ class NovaHelper(object):
|
||||
return self.nova.hypervisors.get(utils.Struct(id=node_id))
|
||||
|
||||
def get_compute_node_by_hostname(self, node_hostname):
|
||||
"""Get compute node by ID (*not* UUID)"""
|
||||
# We need to pass an object with an 'id' attribute to make it work
|
||||
"""Get compute node by hostname"""
|
||||
try:
|
||||
compute_nodes = self.nova.hypervisors.search(node_hostname)
|
||||
if len(compute_nodes) != 1:
|
||||
hypervisors = [hv for hv in self.get_compute_node_list()
|
||||
if hv.service['host'] == node_hostname]
|
||||
if len(hypervisors) != 1:
|
||||
# TODO(hidekazu)
|
||||
# this may occur if VMware vCenter driver is used
|
||||
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||
else:
|
||||
compute_nodes = self.nova.hypervisors.search(
|
||||
hypervisors[0].hypervisor_hostname)
|
||||
if len(compute_nodes) != 1:
|
||||
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||
|
||||
return self.get_compute_node_by_id(compute_nodes[0].id)
|
||||
return self.get_compute_node_by_id(compute_nodes[0].id)
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||
@@ -99,7 +109,7 @@ class NovaHelper(object):
|
||||
return True
|
||||
else:
|
||||
LOG.debug("confirm resize failed for the "
|
||||
"instance %s" % instance.id)
|
||||
"instance %s", instance.id)
|
||||
return False
|
||||
|
||||
def wait_for_volume_status(self, volume, status, timeout=60,
|
||||
@@ -123,240 +133,68 @@ class NovaHelper(object):
|
||||
return volume.status == status
|
||||
|
||||
def watcher_non_live_migrate_instance(self, instance_id, dest_hostname,
|
||||
keep_original_image_name=True,
|
||||
retry=120):
|
||||
"""This method migrates a given instance
|
||||
|
||||
using an image of this instance and creating a new instance
|
||||
from this image. It saves some configuration information
|
||||
about the original instance : security group, list of networks,
|
||||
list of attached volumes, floating IP, ...
|
||||
in order to apply the same settings to the new instance.
|
||||
At the end of the process the original instance is deleted.
|
||||
This method uses the Nova built-in migrate()
|
||||
action to do a migration of a given instance.
|
||||
For migrating a given dest_hostname, Nova API version
|
||||
must be 2.56 or higher.
|
||||
|
||||
It returns True if the migration was successful,
|
||||
False otherwise.
|
||||
|
||||
if destination hostname not given, this method calls nova api
|
||||
to migrate the instance.
|
||||
|
||||
:param instance_id: the unique id of the instance to migrate.
|
||||
:param keep_original_image_name: flag indicating whether the
|
||||
image name from which the original instance was built must be
|
||||
used as the name of the intermediate image used for migration.
|
||||
If this flag is False, a temporary image name is built
|
||||
:param dest_hostname: the name of the destination compute node, if
|
||||
destination_node is None, nova scheduler choose
|
||||
the destination host
|
||||
"""
|
||||
new_image_name = ""
|
||||
LOG.debug(
|
||||
"Trying a non-live migrate of instance '%s' " % instance_id)
|
||||
"Trying a cold migrate of instance '%s' ", instance_id)
|
||||
|
||||
# Looking for the instance to migrate
|
||||
instance = self.find_instance(instance_id)
|
||||
if not instance:
|
||||
LOG.debug("Instance %s not found !" % instance_id)
|
||||
LOG.debug("Instance %s not found !", instance_id)
|
||||
return False
|
||||
else:
|
||||
# NOTE: If destination node is None call Nova API to migrate
|
||||
# instance
|
||||
host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
|
||||
LOG.debug(
|
||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
||||
"Instance %(instance)s found on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': host_name})
|
||||
|
||||
if dest_hostname is None:
|
||||
previous_status = getattr(instance, 'status')
|
||||
previous_status = getattr(instance, 'status')
|
||||
|
||||
instance.migrate()
|
||||
instance = self.nova.servers.get(instance_id)
|
||||
while (getattr(instance, 'status') not in
|
||||
["VERIFY_RESIZE", "ERROR"] and retry):
|
||||
instance = self.nova.servers.get(instance.id)
|
||||
time.sleep(2)
|
||||
retry -= 1
|
||||
new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||
if (dest_hostname and
|
||||
not self._check_nova_api_version(self.nova, "2.56")):
|
||||
LOG.error("For migrating a given dest_hostname,"
|
||||
"Nova API version must be 2.56 or higher")
|
||||
return False
|
||||
|
||||
if (host_name != new_hostname and
|
||||
instance.status == 'VERIFY_RESIZE'):
|
||||
if not self.confirm_resize(instance, previous_status):
|
||||
return False
|
||||
LOG.debug(
|
||||
"cold migration succeeded : "
|
||||
"instance %s is now on host '%s'." % (
|
||||
instance_id, new_hostname))
|
||||
return True
|
||||
else:
|
||||
LOG.debug(
|
||||
"cold migration for instance %s failed" % instance_id)
|
||||
instance.migrate(host=dest_hostname)
|
||||
instance = self.nova.servers.get(instance_id)
|
||||
|
||||
while (getattr(instance, 'status') not in
|
||||
["VERIFY_RESIZE", "ERROR"] and retry):
|
||||
instance = self.nova.servers.get(instance.id)
|
||||
time.sleep(2)
|
||||
retry -= 1
|
||||
new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||
|
||||
if (host_name != new_hostname and
|
||||
instance.status == 'VERIFY_RESIZE'):
|
||||
if not self.confirm_resize(instance, previous_status):
|
||||
return False
|
||||
|
||||
if not keep_original_image_name:
|
||||
# randrange gives you an integral value
|
||||
irand = random.randint(0, 1000)
|
||||
|
||||
# Building the temporary image name
|
||||
# which will be used for the migration
|
||||
new_image_name = "tmp-migrate-%s-%s" % (instance_id, irand)
|
||||
LOG.debug(
|
||||
"cold migration succeeded : "
|
||||
"instance %(instance)s is now on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': new_hostname})
|
||||
return True
|
||||
else:
|
||||
# Get the image name of the current instance.
|
||||
# We'll use the same name for the new instance.
|
||||
imagedict = getattr(instance, "image")
|
||||
image_id = imagedict["id"]
|
||||
image = self.glance.images.get(image_id)
|
||||
new_image_name = getattr(image, "name")
|
||||
|
||||
instance_name = getattr(instance, "name")
|
||||
flavor_name = instance.flavor.get('original_name')
|
||||
keypair_name = getattr(instance, "key_name")
|
||||
|
||||
addresses = getattr(instance, "addresses")
|
||||
|
||||
floating_ip = ""
|
||||
network_names_list = []
|
||||
|
||||
for network_name, network_conf_obj in addresses.items():
|
||||
LOG.debug(
|
||||
"Extracting network configuration for network '%s'" %
|
||||
network_name)
|
||||
|
||||
network_names_list.append(network_name)
|
||||
|
||||
for net_conf_item in network_conf_obj:
|
||||
if net_conf_item['OS-EXT-IPS:type'] == "floating":
|
||||
floating_ip = net_conf_item['addr']
|
||||
break
|
||||
|
||||
sec_groups_list = getattr(instance, "security_groups")
|
||||
sec_groups = []
|
||||
|
||||
for sec_group_dict in sec_groups_list:
|
||||
sec_groups.append(sec_group_dict['name'])
|
||||
|
||||
# Stopping the old instance properly so
|
||||
# that no new data is sent to it and to its attached volumes
|
||||
stopped_ok = self.stop_instance(instance_id)
|
||||
|
||||
if not stopped_ok:
|
||||
LOG.debug("Could not stop instance: %s" % instance_id)
|
||||
"cold migration for instance %s failed", instance_id)
|
||||
return False
|
||||
|
||||
# Building the temporary image which will be used
|
||||
# to re-build the same instance on another target host
|
||||
image_uuid = self.create_image_from_instance(instance_id,
|
||||
new_image_name)
|
||||
|
||||
if not image_uuid:
|
||||
LOG.debug(
|
||||
"Could not build temporary image of instance: %s" %
|
||||
instance_id)
|
||||
return False
|
||||
|
||||
#
|
||||
# We need to get the list of attached volumes and detach
|
||||
# them from the instance in order to attache them later
|
||||
# to the new instance
|
||||
#
|
||||
blocks = []
|
||||
|
||||
# Looks like this :
|
||||
# os-extended-volumes:volumes_attached |
|
||||
# [{u'id': u'c5c3245f-dd59-4d4f-8d3a-89d80135859a'}]
|
||||
attached_volumes = getattr(instance,
|
||||
"os-extended-volumes:volumes_attached")
|
||||
|
||||
for attached_volume in attached_volumes:
|
||||
volume_id = attached_volume['id']
|
||||
|
||||
try:
|
||||
volume = self.cinder.volumes.get(volume_id)
|
||||
|
||||
attachments_list = getattr(volume, "attachments")
|
||||
|
||||
device_name = attachments_list[0]['device']
|
||||
# When a volume is attached to an instance
|
||||
# it contains the following property :
|
||||
# attachments = [{u'device': u'/dev/vdb',
|
||||
# u'server_id': u'742cc508-a2f2-4769-a794-bcdad777e814',
|
||||
# u'id': u'f6d62785-04b8-400d-9626-88640610f65e',
|
||||
# u'host_name': None, u'volume_id':
|
||||
# u'f6d62785-04b8-400d-9626-88640610f65e'}]
|
||||
|
||||
# boot_index indicates a number
|
||||
# designating the boot order of the device.
|
||||
# Use -1 for the boot volume,
|
||||
# choose 0 for an attached volume.
|
||||
block_device_mapping_v2_item = {"device_name": device_name,
|
||||
"source_type": "volume",
|
||||
"destination_type":
|
||||
"volume",
|
||||
"uuid": volume_id,
|
||||
"boot_index": "0"}
|
||||
|
||||
blocks.append(
|
||||
block_device_mapping_v2_item)
|
||||
|
||||
LOG.debug("Detaching volume %s from instance: %s" % (
|
||||
volume_id, instance_id))
|
||||
# volume.detach()
|
||||
self.nova.volumes.delete_server_volume(instance_id,
|
||||
volume_id)
|
||||
|
||||
if not self.wait_for_volume_status(volume, "available", 5,
|
||||
10):
|
||||
LOG.debug(
|
||||
"Could not detach volume %s from instance: %s" % (
|
||||
volume_id, instance_id))
|
||||
return False
|
||||
except ciexceptions.NotFound:
|
||||
LOG.debug("Volume '%s' not found " % image_id)
|
||||
return False
|
||||
|
||||
# We create the new instance from
|
||||
# the intermediate image of the original instance
|
||||
new_instance = self. \
|
||||
create_instance(dest_hostname,
|
||||
instance_name,
|
||||
image_uuid,
|
||||
flavor_name,
|
||||
sec_groups,
|
||||
network_names_list=network_names_list,
|
||||
keypair_name=keypair_name,
|
||||
create_new_floating_ip=False,
|
||||
block_device_mapping_v2=blocks)
|
||||
|
||||
if not new_instance:
|
||||
LOG.debug(
|
||||
"Could not create new instance "
|
||||
"for non-live migration of instance %s" % instance_id)
|
||||
return False
|
||||
|
||||
try:
|
||||
LOG.debug("Detaching floating ip '%s' from instance %s" % (
|
||||
floating_ip, instance_id))
|
||||
# We detach the floating ip from the current instance
|
||||
instance.remove_floating_ip(floating_ip)
|
||||
|
||||
LOG.debug(
|
||||
"Attaching floating ip '%s' to the new instance %s" % (
|
||||
floating_ip, new_instance.id))
|
||||
|
||||
# We attach the same floating ip to the new instance
|
||||
new_instance.add_floating_ip(floating_ip)
|
||||
except Exception as e:
|
||||
LOG.debug(e)
|
||||
|
||||
new_host_name = getattr(new_instance, "OS-EXT-SRV-ATTR:host")
|
||||
|
||||
# Deleting the old instance (because no more useful)
|
||||
delete_ok = self.delete_instance(instance_id)
|
||||
if not delete_ok:
|
||||
LOG.debug("Could not delete instance: %s" % instance_id)
|
||||
return False
|
||||
|
||||
LOG.debug(
|
||||
"Instance %s has been successfully migrated "
|
||||
"to new host '%s' and its new id is %s." % (
|
||||
instance_id, new_host_name, new_instance.id))
|
||||
|
||||
return True
|
||||
|
||||
def resize_instance(self, instance_id, flavor, retry=120):
|
||||
"""This method resizes given instance with specified flavor.
|
||||
|
||||
@@ -369,8 +207,10 @@ class NovaHelper(object):
|
||||
:param instance_id: the unique id of the instance to resize.
|
||||
:param flavor: the name or ID of the flavor to resize to.
|
||||
"""
|
||||
LOG.debug("Trying a resize of instance %s to flavor '%s'" % (
|
||||
instance_id, flavor))
|
||||
LOG.debug(
|
||||
"Trying a resize of instance %(instance)s to "
|
||||
"flavor '%(flavor)s'",
|
||||
{'instance': instance_id, 'flavor': flavor})
|
||||
|
||||
# Looking for the instance to resize
|
||||
instance = self.find_instance(instance_id)
|
||||
@@ -387,17 +227,17 @@ class NovaHelper(object):
|
||||
"instance %s. Exception: %s", instance_id, e)
|
||||
|
||||
if not flavor_id:
|
||||
LOG.debug("Flavor not found: %s" % flavor)
|
||||
LOG.debug("Flavor not found: %s", flavor)
|
||||
return False
|
||||
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return False
|
||||
|
||||
instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
|
||||
LOG.debug(
|
||||
"Instance %s is in '%s' status." % (instance_id,
|
||||
instance_status))
|
||||
"Instance %(id)s is in '%(status)s' status.",
|
||||
{'id': instance_id, 'status': instance_status})
|
||||
|
||||
instance.resize(flavor=flavor_id)
|
||||
while getattr(instance,
|
||||
@@ -435,17 +275,20 @@ class NovaHelper(object):
|
||||
destination_node is None, nova scheduler choose
|
||||
the destination host
|
||||
"""
|
||||
LOG.debug("Trying to live migrate instance %s " % (instance_id))
|
||||
LOG.debug(
|
||||
"Trying a live migrate instance %(instance)s ",
|
||||
{'instance': instance_id})
|
||||
|
||||
# Looking for the instance to migrate
|
||||
instance = self.find_instance(instance_id)
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return False
|
||||
else:
|
||||
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||
LOG.debug(
|
||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
||||
"Instance %(instance)s found on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': host_name})
|
||||
|
||||
# From nova api version 2.25(Mitaka release), the default value of
|
||||
# block_migration is None which is mapped to 'auto'.
|
||||
@@ -467,7 +310,7 @@ class NovaHelper(object):
|
||||
if host_name != new_hostname and instance.status == 'ACTIVE':
|
||||
LOG.debug(
|
||||
"Live migration succeeded : "
|
||||
"instance %s is now on host '%s'." % (
|
||||
"instance %s is now on host '%s'.", (
|
||||
instance_id, new_hostname))
|
||||
return True
|
||||
else:
|
||||
@@ -478,7 +321,7 @@ class NovaHelper(object):
|
||||
and retry:
|
||||
instance = self.nova.servers.get(instance.id)
|
||||
if not getattr(instance, 'OS-EXT-STS:task_state'):
|
||||
LOG.debug("Instance task state: %s is null" % instance_id)
|
||||
LOG.debug("Instance task state: %s is null", instance_id)
|
||||
break
|
||||
LOG.debug(
|
||||
'Waiting the migration of {0} to {1}'.format(
|
||||
@@ -494,13 +337,13 @@ class NovaHelper(object):
|
||||
|
||||
LOG.debug(
|
||||
"Live migration succeeded : "
|
||||
"instance %s is now on host '%s'." % (
|
||||
instance_id, host_name))
|
||||
"instance %(instance)s is now on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': host_name})
|
||||
|
||||
return True
|
||||
|
||||
def abort_live_migrate(self, instance_id, source, destination, retry=240):
|
||||
LOG.debug("Aborting live migration of instance %s" % instance_id)
|
||||
LOG.debug("Aborting live migration of instance %s", instance_id)
|
||||
migration = self.get_running_migration(instance_id)
|
||||
if migration:
|
||||
migration_id = getattr(migration[0], "id")
|
||||
@@ -513,7 +356,7 @@ class NovaHelper(object):
|
||||
LOG.exception(e)
|
||||
else:
|
||||
LOG.debug(
|
||||
"No running migrations found for instance %s" % instance_id)
|
||||
"No running migrations found for instance %s", instance_id)
|
||||
|
||||
while retry:
|
||||
instance = self.nova.servers.get(instance_id)
|
||||
@@ -537,21 +380,31 @@ class NovaHelper(object):
|
||||
"for the instance %s" % instance_id)
|
||||
|
||||
def enable_service_nova_compute(self, hostname):
|
||||
if self.nova.services.enable(host=hostname,
|
||||
binary='nova-compute'). \
|
||||
status == 'enabled':
|
||||
return True
|
||||
if float(CONF.nova_client.api_version) < 2.53:
|
||||
status = self.nova.services.enable(
|
||||
host=hostname, binary='nova-compute').status == 'enabled'
|
||||
else:
|
||||
return False
|
||||
service_uuid = self.nova.services.list(host=hostname,
|
||||
binary='nova-compute')[0].id
|
||||
status = self.nova.services.enable(
|
||||
service_uuid=service_uuid).status == 'enabled'
|
||||
|
||||
return status
|
||||
|
||||
def disable_service_nova_compute(self, hostname, reason=None):
|
||||
if self.nova.services.disable_log_reason(host=hostname,
|
||||
binary='nova-compute',
|
||||
reason=reason). \
|
||||
status == 'disabled':
|
||||
return True
|
||||
if float(CONF.nova_client.api_version) < 2.53:
|
||||
status = self.nova.services.disable_log_reason(
|
||||
host=hostname,
|
||||
binary='nova-compute',
|
||||
reason=reason).status == 'disabled'
|
||||
else:
|
||||
return False
|
||||
service_uuid = self.nova.services.list(host=hostname,
|
||||
binary='nova-compute')[0].id
|
||||
status = self.nova.services.disable_log_reason(
|
||||
service_uuid=service_uuid,
|
||||
reason=reason).status == 'disabled'
|
||||
|
||||
return status
|
||||
|
||||
def set_host_offline(self, hostname):
|
||||
# See API on https://developer.openstack.org/api-ref/compute/
|
||||
@@ -578,7 +431,7 @@ class NovaHelper(object):
|
||||
host = self.nova.hosts.get(hostname)
|
||||
|
||||
if not host:
|
||||
LOG.debug("host not found: %s" % hostname)
|
||||
LOG.debug("host not found: %s", hostname)
|
||||
return False
|
||||
else:
|
||||
host[0].update(
|
||||
@@ -600,18 +453,19 @@ class NovaHelper(object):
|
||||
key-value pairs to associate to the image as metadata.
|
||||
"""
|
||||
LOG.debug(
|
||||
"Trying to create an image from instance %s ..." % instance_id)
|
||||
"Trying to create an image from instance %s ...", instance_id)
|
||||
|
||||
# Looking for the instance
|
||||
instance = self.find_instance(instance_id)
|
||||
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return None
|
||||
else:
|
||||
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||
LOG.debug(
|
||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
||||
"Instance %(instance)s found on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': host_name})
|
||||
|
||||
# We need to wait for an appropriate status
|
||||
# of the instance before we can build an image from it
|
||||
@@ -638,14 +492,15 @@ class NovaHelper(object):
|
||||
if not image:
|
||||
break
|
||||
status = image.status
|
||||
LOG.debug("Current image status: %s" % status)
|
||||
LOG.debug("Current image status: %s", status)
|
||||
|
||||
if not image:
|
||||
LOG.debug("Image not found: %s" % image_uuid)
|
||||
LOG.debug("Image not found: %s", image_uuid)
|
||||
else:
|
||||
LOG.debug(
|
||||
"Image %s successfully created for instance %s" % (
|
||||
image_uuid, instance_id))
|
||||
"Image %(image)s successfully created for "
|
||||
"instance %(instance)s",
|
||||
{'image': image_uuid, 'instance': instance_id})
|
||||
return image_uuid
|
||||
return None
|
||||
|
||||
@@ -654,16 +509,16 @@ class NovaHelper(object):
|
||||
|
||||
:param instance_id: the unique id of the instance to delete.
|
||||
"""
|
||||
LOG.debug("Trying to remove instance %s ..." % instance_id)
|
||||
LOG.debug("Trying to remove instance %s ...", instance_id)
|
||||
|
||||
instance = self.find_instance(instance_id)
|
||||
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return False
|
||||
else:
|
||||
self.nova.servers.delete(instance_id)
|
||||
LOG.debug("Instance %s removed." % instance_id)
|
||||
LOG.debug("Instance %s removed.", instance_id)
|
||||
return True
|
||||
|
||||
def stop_instance(self, instance_id):
|
||||
@@ -671,21 +526,21 @@ class NovaHelper(object):
|
||||
|
||||
:param instance_id: the unique id of the instance to stop.
|
||||
"""
|
||||
LOG.debug("Trying to stop instance %s ..." % instance_id)
|
||||
LOG.debug("Trying to stop instance %s ...", instance_id)
|
||||
|
||||
instance = self.find_instance(instance_id)
|
||||
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return False
|
||||
elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
|
||||
LOG.debug("Instance has been stopped: %s" % instance_id)
|
||||
LOG.debug("Instance has been stopped: %s", instance_id)
|
||||
return True
|
||||
else:
|
||||
self.nova.servers.stop(instance_id)
|
||||
|
||||
if self.wait_for_instance_state(instance, "stopped", 8, 10):
|
||||
LOG.debug("Instance %s stopped." % instance_id)
|
||||
LOG.debug("Instance %s stopped.", instance_id)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
@@ -726,11 +581,11 @@ class NovaHelper(object):
|
||||
return False
|
||||
|
||||
while instance.status not in status_list and retry:
|
||||
LOG.debug("Current instance status: %s" % instance.status)
|
||||
LOG.debug("Current instance status: %s", instance.status)
|
||||
time.sleep(sleep)
|
||||
instance = self.nova.servers.get(instance.id)
|
||||
retry -= 1
|
||||
LOG.debug("Current instance status: %s" % instance.status)
|
||||
LOG.debug("Current instance status: %s", instance.status)
|
||||
return instance.status in status_list
|
||||
|
||||
def create_instance(self, node_id, inst_name="test", image_id=None,
|
||||
@@ -746,26 +601,26 @@ class NovaHelper(object):
|
||||
It returns the unique id of the created instance.
|
||||
"""
|
||||
LOG.debug(
|
||||
"Trying to create new instance '%s' "
|
||||
"from image '%s' with flavor '%s' ..." % (
|
||||
inst_name, image_id, flavor_name))
|
||||
"Trying to create new instance '%(inst)s' "
|
||||
"from image '%(image)s' with flavor '%(flavor)s' ...",
|
||||
{'inst': inst_name, 'image': image_id, 'flavor': flavor_name})
|
||||
|
||||
try:
|
||||
self.nova.keypairs.findall(name=keypair_name)
|
||||
except nvexceptions.NotFound:
|
||||
LOG.debug("Key pair '%s' not found " % keypair_name)
|
||||
LOG.debug("Key pair '%s' not found ", keypair_name)
|
||||
return
|
||||
|
||||
try:
|
||||
image = self.glance.images.get(image_id)
|
||||
except glexceptions.NotFound:
|
||||
LOG.debug("Image '%s' not found " % image_id)
|
||||
LOG.debug("Image '%s' not found ", image_id)
|
||||
return
|
||||
|
||||
try:
|
||||
flavor = self.nova.flavors.find(name=flavor_name)
|
||||
except nvexceptions.NotFound:
|
||||
LOG.debug("Flavor '%s' not found " % flavor_name)
|
||||
LOG.debug("Flavor '%s' not found ", flavor_name)
|
||||
return
|
||||
|
||||
# Make sure all security groups exist
|
||||
@@ -773,7 +628,7 @@ class NovaHelper(object):
|
||||
group_id = self.get_security_group_id_from_name(sec_group_name)
|
||||
|
||||
if not group_id:
|
||||
LOG.debug("Security group '%s' not found " % sec_group_name)
|
||||
LOG.debug("Security group '%s' not found ", sec_group_name)
|
||||
return
|
||||
|
||||
net_list = list()
|
||||
@@ -782,7 +637,7 @@ class NovaHelper(object):
|
||||
nic_id = self.get_network_id_from_name(network_name)
|
||||
|
||||
if not nic_id:
|
||||
LOG.debug("Network '%s' not found " % network_name)
|
||||
LOG.debug("Network '%s' not found ", network_name)
|
||||
return
|
||||
net_obj = {"net-id": nic_id}
|
||||
net_list.append(net_obj)
|
||||
@@ -808,14 +663,16 @@ class NovaHelper(object):
|
||||
if create_new_floating_ip and instance.status == 'ACTIVE':
|
||||
LOG.debug(
|
||||
"Creating a new floating IP"
|
||||
" for instance '%s'" % instance.id)
|
||||
" for instance '%s'", instance.id)
|
||||
# Creating floating IP for the new instance
|
||||
floating_ip = self.nova.floating_ips.create()
|
||||
|
||||
instance.add_floating_ip(floating_ip)
|
||||
|
||||
LOG.debug("Instance %s associated to Floating IP '%s'" % (
|
||||
instance.id, floating_ip.ip))
|
||||
LOG.debug(
|
||||
"Instance %(instance)s associated to "
|
||||
"Floating IP '%(ip)s'",
|
||||
{'instance': instance.id, 'ip': floating_ip.ip})
|
||||
|
||||
return instance
|
||||
|
||||
@@ -889,7 +746,7 @@ class NovaHelper(object):
|
||||
LOG.debug('Waiting volume update to {0}'.format(new_volume))
|
||||
time.sleep(retry_interval)
|
||||
retry -= 1
|
||||
LOG.debug("retry count: %s" % retry)
|
||||
LOG.debug("retry count: %s", retry)
|
||||
if getattr(new_volume, 'status') != "in-use":
|
||||
LOG.error("Volume update retry timeout or error")
|
||||
return False
|
||||
@@ -897,5 +754,15 @@ class NovaHelper(object):
|
||||
host_name = getattr(new_volume, "os-vol-host-attr:host")
|
||||
LOG.debug(
|
||||
"Volume update succeeded : "
|
||||
"Volume %s is now on host '%s'." % (new_volume.id, host_name))
|
||||
"Volume %s is now on host '%s'.",
|
||||
(new_volume.id, host_name))
|
||||
return True
|
||||
|
||||
def _check_nova_api_version(self, client, version):
|
||||
api_version = api_versions.APIVersion(version_str=version)
|
||||
try:
|
||||
api_versions.discover_version(client, api_version)
|
||||
return True
|
||||
except nvexceptions.UnsupportedVersion as e:
|
||||
LOG.exception(e)
|
||||
return False
|
||||
|
||||
@@ -289,7 +289,7 @@ class Service(service.ServiceBase):
|
||||
return api_manager_version
|
||||
|
||||
|
||||
def launch(conf, service_, workers=1, restart_method='reload'):
|
||||
def launch(conf, service_, workers=1, restart_method='mutate'):
|
||||
return service.launch(conf, service_, workers, restart_method)
|
||||
|
||||
|
||||
|
||||
@@ -44,18 +44,21 @@ WATCHER_DECISION_ENGINE_OPTS = [
|
||||
'execute strategies'),
|
||||
cfg.IntOpt('action_plan_expiry',
|
||||
default=24,
|
||||
mutable=True,
|
||||
help='An expiry timespan(hours). Watcher invalidates any '
|
||||
'action plan for which its creation time '
|
||||
'-whose number of hours has been offset by this value-'
|
||||
' is older that the current time.'),
|
||||
cfg.IntOpt('check_periodic_interval',
|
||||
default=30 * 60,
|
||||
mutable=True,
|
||||
help='Interval (in seconds) for checking action plan expiry.')
|
||||
]
|
||||
|
||||
WATCHER_CONTINUOUS_OPTS = [
|
||||
cfg.IntOpt('continuous_audit_interval',
|
||||
default=10,
|
||||
mutable=True,
|
||||
help='Interval (in seconds) for checking newly created '
|
||||
'continuous audits.')
|
||||
]
|
||||
|
||||
@@ -32,9 +32,11 @@ GNOCCHI_CLIENT_OPTS = [
|
||||
'The default is public.'),
|
||||
cfg.IntOpt('query_max_retries',
|
||||
default=10,
|
||||
mutable=True,
|
||||
help='How many times Watcher is trying to query again'),
|
||||
cfg.IntOpt('query_timeout',
|
||||
default=1,
|
||||
mutable=True,
|
||||
help='How many seconds Watcher should wait to do query again')]
|
||||
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ nova_client = cfg.OptGroup(name='nova_client',
|
||||
|
||||
NOVA_CLIENT_OPTS = [
|
||||
cfg.StrOpt('api_version',
|
||||
default='2.53',
|
||||
default='2.56',
|
||||
help='Version of Nova API to use in novaclient.'),
|
||||
cfg.StrOpt('endpoint_type',
|
||||
default='publicURL',
|
||||
|
||||
@@ -25,6 +25,7 @@ from watcher._i18n import _
|
||||
SERVICE_OPTS = [
|
||||
cfg.IntOpt('periodic_interval',
|
||||
default=60,
|
||||
mutable=True,
|
||||
help=_('Seconds between running periodic tasks.')),
|
||||
cfg.HostAddressOpt('host',
|
||||
default=socket.gethostname(),
|
||||
|
||||
@@ -57,6 +57,12 @@ class DataSourceBase(object):
|
||||
),
|
||||
)
|
||||
|
||||
@abc.abstractmethod
|
||||
def statistic_aggregation(self, resource_id=None, meter_name=None,
|
||||
period=300, granularity=300, dimensions=None,
|
||||
aggregation='avg', group_by='*'):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def list_metrics(self):
|
||||
pass
|
||||
|
||||
@@ -145,24 +145,28 @@ class CeilometerHelper(base.DataSourceBase):
|
||||
else:
|
||||
return meters
|
||||
|
||||
def statistic_aggregation(self,
|
||||
resource_id,
|
||||
meter_name,
|
||||
period,
|
||||
aggregate='avg'):
|
||||
def statistic_aggregation(self, resource_id=None, meter_name=None,
|
||||
period=300, granularity=300, dimensions=None,
|
||||
aggregation='avg', group_by='*'):
|
||||
"""Representing a statistic aggregate by operators
|
||||
|
||||
:param resource_id: id of resource to list statistics for.
|
||||
:param meter_name: Name of meter to list statistics for.
|
||||
:param period: Period in seconds over which to group samples.
|
||||
:param aggregate: Available aggregates are: count, cardinality,
|
||||
min, max, sum, stddev, avg. Defaults to avg.
|
||||
:param granularity: frequency of marking metric point, in seconds.
|
||||
This param isn't used in Ceilometer datasource.
|
||||
:param dimensions: dimensions (dict). This param isn't used in
|
||||
Ceilometer datasource.
|
||||
:param aggregation: Available aggregates are: count, cardinality,
|
||||
min, max, sum, stddev, avg. Defaults to avg.
|
||||
:param group_by: list of columns to group the metrics to be returned.
|
||||
This param isn't used in Ceilometer datasource.
|
||||
:return: Return the latest statistical data, None if no data.
|
||||
"""
|
||||
|
||||
end_time = datetime.datetime.utcnow()
|
||||
if aggregate == 'mean':
|
||||
aggregate = 'avg'
|
||||
if aggregation == 'mean':
|
||||
aggregation = 'avg'
|
||||
start_time = end_time - datetime.timedelta(seconds=int(period))
|
||||
query = self.build_query(
|
||||
resource_id=resource_id, start_time=start_time, end_time=end_time)
|
||||
@@ -171,11 +175,11 @@ class CeilometerHelper(base.DataSourceBase):
|
||||
q=query,
|
||||
period=period,
|
||||
aggregates=[
|
||||
{'func': aggregate}])
|
||||
{'func': aggregation}])
|
||||
|
||||
item_value = None
|
||||
if statistic:
|
||||
item_value = statistic[-1]._info.get('aggregate').get(aggregate)
|
||||
item_value = statistic[-1]._info.get('aggregate').get(aggregation)
|
||||
return item_value
|
||||
|
||||
def get_last_sample_values(self, resource_id, meter_name, limit=1):
|
||||
@@ -204,64 +208,64 @@ class CeilometerHelper(base.DataSourceBase):
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('host_cpu_usage')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_instance_cpu_usage(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('instance_cpu_usage')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_host_memory_usage(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('host_memory_usage')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_instance_memory_usage(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('instance_ram_usage')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_instance_l3_cache_usage(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('instance_l3_cache_usage')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_instance_ram_allocated(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('instance_ram_allocated')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_instance_root_disk_allocated(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('instance_root_disk_size')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_host_outlet_temperature(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('host_outlet_temp')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_host_inlet_temperature(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('host_inlet_temp')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_host_airflow(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('host_airflow')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
def get_host_power(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
meter_name = self.METRIC_MAP.get('host_power')
|
||||
return self.statistic_aggregation(resource_id, meter_name, period,
|
||||
aggregate=aggregate)
|
||||
granularity, aggregate=aggregate)
|
||||
|
||||
@@ -58,32 +58,35 @@ class GnocchiHelper(base.DataSourceBase):
|
||||
return 'not available'
|
||||
return 'available'
|
||||
|
||||
def _statistic_aggregation(self,
|
||||
resource_id,
|
||||
metric,
|
||||
granularity,
|
||||
start_time=None,
|
||||
stop_time=None,
|
||||
aggregation='mean'):
|
||||
def list_metrics(self):
|
||||
"""List the user's meters."""
|
||||
try:
|
||||
response = self.query_retry(f=self.gnocchi.metric.list)
|
||||
except Exception:
|
||||
return set()
|
||||
else:
|
||||
return set([metric['name'] for metric in response])
|
||||
|
||||
def statistic_aggregation(self, resource_id=None, meter_name=None,
|
||||
period=300, granularity=300, dimensions=None,
|
||||
aggregation='avg', group_by='*'):
|
||||
"""Representing a statistic aggregate by operators
|
||||
|
||||
:param metric: metric name of which we want the statistics
|
||||
:param resource_id: id of resource to list statistics for
|
||||
:param start_time: Start datetime from which metrics will be used
|
||||
:param stop_time: End datetime from which metrics will be used
|
||||
:param granularity: frequency of marking metric point, in seconds
|
||||
:param resource_id: id of resource to list statistics for.
|
||||
:param meter_name: meter name of which we want the statistics.
|
||||
:param period: Period in seconds over which to group samples.
|
||||
:param granularity: frequency of marking metric point, in seconds.
|
||||
:param dimensions: dimensions (dict). This param isn't used in
|
||||
Gnocchi datasource.
|
||||
:param aggregation: Should be chosen in accordance with policy
|
||||
aggregations
|
||||
aggregations.
|
||||
:param group_by: list of columns to group the metrics to be returned.
|
||||
This param isn't used in Gnocchi datasource.
|
||||
:return: value of aggregated metric
|
||||
"""
|
||||
|
||||
if start_time is not None and not isinstance(start_time, datetime):
|
||||
raise exception.InvalidParameter(parameter='start_time',
|
||||
parameter_type=datetime)
|
||||
|
||||
if stop_time is not None and not isinstance(stop_time, datetime):
|
||||
raise exception.InvalidParameter(parameter='stop_time',
|
||||
parameter_type=datetime)
|
||||
stop_time = datetime.utcnow()
|
||||
start_time = stop_time - timedelta(seconds=(int(period)))
|
||||
|
||||
if not common_utils.is_uuid_like(resource_id):
|
||||
kwargs = dict(query={"=": {"original_resource_id": resource_id}},
|
||||
@@ -97,7 +100,7 @@ class GnocchiHelper(base.DataSourceBase):
|
||||
resource_id = resources[0]['id']
|
||||
|
||||
raw_kwargs = dict(
|
||||
metric=metric,
|
||||
metric=meter_name,
|
||||
start=start_time,
|
||||
stop=stop_time,
|
||||
resource_id=resource_id,
|
||||
@@ -115,27 +118,6 @@ class GnocchiHelper(base.DataSourceBase):
|
||||
# measure has structure [time, granularity, value]
|
||||
return statistics[-1][2]
|
||||
|
||||
def list_metrics(self):
|
||||
"""List the user's meters."""
|
||||
try:
|
||||
response = self.query_retry(f=self.gnocchi.metric.list)
|
||||
except Exception:
|
||||
return set()
|
||||
else:
|
||||
return set([metric['name'] for metric in response])
|
||||
|
||||
def statistic_aggregation(self, resource_id, metric, period, granularity,
|
||||
aggregation='mean'):
|
||||
stop_time = datetime.utcnow()
|
||||
start_time = stop_time - timedelta(seconds=(int(period)))
|
||||
return self._statistic_aggregation(
|
||||
resource_id=resource_id,
|
||||
metric=metric,
|
||||
granularity=granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation=aggregation)
|
||||
|
||||
def get_host_cpu_usage(self, resource_id, period, aggregate,
|
||||
granularity=300):
|
||||
meter_name = self.METRIC_MAP.get('host_cpu_usage')
|
||||
|
||||
@@ -21,6 +21,7 @@ import datetime
|
||||
from monascaclient import exc
|
||||
|
||||
from watcher.common import clients
|
||||
from watcher.common import exception
|
||||
from watcher.datasource import base
|
||||
|
||||
|
||||
@@ -97,41 +98,42 @@ class MonascaHelper(base.DataSourceBase):
|
||||
|
||||
return statistics
|
||||
|
||||
def statistic_aggregation(self,
|
||||
meter_name,
|
||||
dimensions,
|
||||
start_time=None,
|
||||
end_time=None,
|
||||
period=None,
|
||||
aggregate='avg',
|
||||
group_by='*'):
|
||||
def statistic_aggregation(self, resource_id=None, meter_name=None,
|
||||
period=300, granularity=300, dimensions=None,
|
||||
aggregation='avg', group_by='*'):
|
||||
"""Representing a statistic aggregate by operators
|
||||
|
||||
:param meter_name: meter names of which we want the statistics
|
||||
:param dimensions: dimensions (dict)
|
||||
:param start_time: Start datetime from which metrics will be used
|
||||
:param end_time: End datetime from which metrics will be used
|
||||
:param resource_id: id of resource to list statistics for.
|
||||
This param isn't used in Monasca datasource.
|
||||
:param meter_name: meter names of which we want the statistics.
|
||||
:param period: Sampling `period`: In seconds. If no period is given,
|
||||
only one aggregate statistic is returned. If given, a
|
||||
faceted result will be returned, divided into given
|
||||
periods. Periods with no data are ignored.
|
||||
:param aggregate: Should be either 'avg', 'count', 'min' or 'max'
|
||||
:param granularity: frequency of marking metric point, in seconds.
|
||||
This param isn't used in Ceilometer datasource.
|
||||
:param dimensions: dimensions (dict).
|
||||
:param aggregation: Should be either 'avg', 'count', 'min' or 'max'.
|
||||
:param group_by: list of columns to group the metrics to be returned.
|
||||
:return: A list of dict with each dict being a distinct result row
|
||||
"""
|
||||
start_timestamp, end_timestamp, period = self._format_time_params(
|
||||
start_time, end_time, period
|
||||
)
|
||||
|
||||
if aggregate == 'mean':
|
||||
aggregate = 'avg'
|
||||
if dimensions is None:
|
||||
raise exception.UnsupportedDataSource(datasource='Monasca')
|
||||
|
||||
stop_time = datetime.datetime.utcnow()
|
||||
start_time = stop_time - datetime.timedelta(seconds=(int(period)))
|
||||
|
||||
if aggregation == 'mean':
|
||||
aggregation = 'avg'
|
||||
|
||||
raw_kwargs = dict(
|
||||
name=meter_name,
|
||||
start_time=start_timestamp,
|
||||
end_time=end_timestamp,
|
||||
start_time=start_time.isoformat(),
|
||||
end_time=stop_time.isoformat(),
|
||||
dimensions=dimensions,
|
||||
period=period,
|
||||
statistics=aggregate,
|
||||
statistics=aggregation,
|
||||
group_by=group_by,
|
||||
)
|
||||
|
||||
@@ -140,45 +142,36 @@ class MonascaHelper(base.DataSourceBase):
|
||||
statistics = self.query_retry(
|
||||
f=self.monasca.metrics.list_statistics, **kwargs)
|
||||
|
||||
return statistics
|
||||
cpu_usage = None
|
||||
for stat in statistics:
|
||||
avg_col_idx = stat['columns'].index(aggregation)
|
||||
values = [r[avg_col_idx] for r in stat['statistics']]
|
||||
value = float(sum(values)) / len(values)
|
||||
cpu_usage = value
|
||||
|
||||
return cpu_usage
|
||||
|
||||
def get_host_cpu_usage(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
metric_name = self.METRIC_MAP.get('host_cpu_usage')
|
||||
node_uuid = resource_id.split('_')[0]
|
||||
statistics = self.statistic_aggregation(
|
||||
return self.statistic_aggregation(
|
||||
meter_name=metric_name,
|
||||
dimensions=dict(hostname=node_uuid),
|
||||
period=period,
|
||||
aggregate=aggregate
|
||||
aggregation=aggregate
|
||||
)
|
||||
cpu_usage = None
|
||||
for stat in statistics:
|
||||
avg_col_idx = stat['columns'].index('avg')
|
||||
values = [r[avg_col_idx] for r in stat['statistics']]
|
||||
value = float(sum(values)) / len(values)
|
||||
cpu_usage = value
|
||||
|
||||
return cpu_usage
|
||||
|
||||
def get_instance_cpu_usage(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
metric_name = self.METRIC_MAP.get('instance_cpu_usage')
|
||||
|
||||
statistics = self.statistic_aggregation(
|
||||
return self.statistic_aggregation(
|
||||
meter_name=metric_name,
|
||||
dimensions=dict(resource_id=resource_id),
|
||||
period=period,
|
||||
aggregate=aggregate
|
||||
aggregation=aggregate
|
||||
)
|
||||
cpu_usage = None
|
||||
for stat in statistics:
|
||||
avg_col_idx = stat['columns'].index('avg')
|
||||
values = [r[avg_col_idx] for r in stat['statistics']]
|
||||
value = float(sum(values)) / len(values)
|
||||
cpu_usage = value
|
||||
|
||||
return cpu_usage
|
||||
|
||||
def get_host_memory_usage(self, resource_id, period, aggregate,
|
||||
granularity=None):
|
||||
|
||||
@@ -48,7 +48,7 @@ class AuditEndpoint(object):
|
||||
self._oneshot_handler.execute(audit, context)
|
||||
|
||||
def trigger_audit(self, context, audit_uuid):
|
||||
LOG.debug("Trigger audit %s" % audit_uuid)
|
||||
LOG.debug("Trigger audit %s", audit_uuid)
|
||||
self.executor.submit(self.do_trigger_audit,
|
||||
context,
|
||||
audit_uuid)
|
||||
|
||||
@@ -337,7 +337,7 @@ class ModelBuilder(object):
|
||||
Create an instance node for the graph using nova and the
|
||||
`server` nova object.
|
||||
:param instance: Nova VM object.
|
||||
:return: A instance node for the graph.
|
||||
:return: An instance node for the graph.
|
||||
"""
|
||||
flavor = instance.flavor
|
||||
instance_attributes = {
|
||||
|
||||
@@ -29,7 +29,7 @@ class InstanceState(enum.Enum):
|
||||
STOPPED = 'stopped' # Instance is shut off, the disk image is still there.
|
||||
RESCUED = 'rescued' # A rescue image is running with the original image
|
||||
# attached.
|
||||
RESIZED = 'resized' # a Instance with the new size is active.
|
||||
RESIZED = 'resized' # an Instance with the new size is active.
|
||||
|
||||
SOFT_DELETED = 'soft-delete'
|
||||
# still available to restore.
|
||||
|
||||
@@ -74,7 +74,7 @@ class Pool(storage_resource.StorageResource):
|
||||
"free_capacity_gb": wfields.NonNegativeIntegerField(),
|
||||
"provisioned_capacity_gb": wfields.NonNegativeIntegerField(),
|
||||
"allocated_capacity_gb": wfields.NonNegativeIntegerField(),
|
||||
"virtual_free": wfields.NonNegativeIntegerField(),
|
||||
"virtual_free": wfields.NonNegativeIntegerField(default=0),
|
||||
}
|
||||
|
||||
def accept(self, visitor):
|
||||
|
||||
@@ -28,6 +28,6 @@ class StorageResource(base.Element):
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
"uuid": wfields.StringField(),
|
||||
"uuid": wfields.StringField(default=""),
|
||||
"human_id": wfields.StringField(default=""),
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
Openstack implementation of the cluster graph.
|
||||
"""
|
||||
|
||||
import ast
|
||||
from lxml import etree
|
||||
import networkx as nx
|
||||
from oslo_concurrency import lockutils
|
||||
@@ -57,7 +58,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
@lockutils.synchronized("model_root")
|
||||
def add_node(self, node):
|
||||
self.assert_node(node)
|
||||
super(ModelRoot, self).add_node(node.uuid, node)
|
||||
super(ModelRoot, self).add_node(node.uuid, attr=node)
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def remove_node(self, node):
|
||||
@@ -72,7 +73,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
def add_instance(self, instance):
|
||||
self.assert_instance(instance)
|
||||
try:
|
||||
super(ModelRoot, self).add_node(instance.uuid, instance)
|
||||
super(ModelRoot, self).add_node(instance.uuid, attr=instance)
|
||||
except nx.NetworkXError as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.InstanceNotFound(name=instance.uuid)
|
||||
@@ -137,8 +138,8 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def get_all_compute_nodes(self):
|
||||
return {uuid: cn for uuid, cn in self.nodes(data=True)
|
||||
if isinstance(cn, element.ComputeNode)}
|
||||
return {uuid: cn['attr'] for uuid, cn in self.nodes(data=True)
|
||||
if isinstance(cn['attr'], element.ComputeNode)}
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def get_node_by_uuid(self, uuid):
|
||||
@@ -156,7 +157,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
def _get_by_uuid(self, uuid):
|
||||
try:
|
||||
return self.node[uuid]
|
||||
return self.node[uuid]['attr']
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.ComputeResourceNotFound(name=uuid)
|
||||
@@ -172,8 +173,8 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def get_all_instances(self):
|
||||
return {uuid: inst for uuid, inst in self.nodes(data=True)
|
||||
if isinstance(inst, element.Instance)}
|
||||
return {uuid: inst['attr'] for uuid, inst in self.nodes(data=True)
|
||||
if isinstance(inst['attr'], element.Instance)}
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def get_node_instances(self, node):
|
||||
@@ -225,6 +226,8 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
for inst in root.findall('.//Instance'):
|
||||
instance = element.Instance(**inst.attrib)
|
||||
instance.watcher_exclude = ast.literal_eval(
|
||||
inst.attrib["watcher_exclude"])
|
||||
model.add_instance(instance)
|
||||
|
||||
parent = inst.getparent()
|
||||
@@ -239,7 +242,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
@classmethod
|
||||
def is_isomorphic(cls, G1, G2):
|
||||
def node_match(node1, node2):
|
||||
return node1.as_dict() == node2.as_dict()
|
||||
return node1['attr'].as_dict() == node2['attr'].as_dict()
|
||||
return nx.algorithms.isomorphism.isomorph.is_isomorphic(
|
||||
G1, G2, node_match=node_match)
|
||||
|
||||
@@ -277,12 +280,12 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
@lockutils.synchronized("storage_model")
|
||||
def add_node(self, node):
|
||||
self.assert_node(node)
|
||||
super(StorageModelRoot, self).add_node(node.host, node)
|
||||
super(StorageModelRoot, self).add_node(node.host, attr=node)
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def add_pool(self, pool):
|
||||
self.assert_pool(pool)
|
||||
super(StorageModelRoot, self).add_node(pool.name, pool)
|
||||
super(StorageModelRoot, self).add_node(pool.name, attr=pool)
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def remove_node(self, node):
|
||||
@@ -335,7 +338,7 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
@lockutils.synchronized("storage_model")
|
||||
def add_volume(self, volume):
|
||||
self.assert_volume(volume)
|
||||
super(StorageModelRoot, self).add_node(volume.uuid, volume)
|
||||
super(StorageModelRoot, self).add_node(volume.uuid, attr=volume)
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def remove_volume(self, volume):
|
||||
@@ -382,8 +385,8 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def get_all_storage_nodes(self):
|
||||
return {host: cn for host, cn in self.nodes(data=True)
|
||||
if isinstance(cn, element.StorageNode)}
|
||||
return {host: cn['attr'] for host, cn in self.nodes(data=True)
|
||||
if isinstance(cn['attr'], element.StorageNode)}
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def get_node_by_name(self, name):
|
||||
@@ -412,14 +415,14 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
def _get_by_uuid(self, uuid):
|
||||
try:
|
||||
return self.node[uuid]
|
||||
return self.node[uuid]['attr']
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.StorageResourceNotFound(name=uuid)
|
||||
|
||||
def _get_by_name(self, name):
|
||||
try:
|
||||
return self.node[name]
|
||||
return self.node[name]['attr']
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.StorageResourceNotFound(name=name)
|
||||
@@ -456,8 +459,8 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def get_all_volumes(self):
|
||||
return {name: vol for name, vol in self.nodes(data=True)
|
||||
if isinstance(vol, element.Volume)}
|
||||
return {name: vol['attr'] for name, vol in self.nodes(data=True)
|
||||
if isinstance(vol['attr'], element.Volume)}
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def get_pool_volumes(self, pool):
|
||||
@@ -569,7 +572,7 @@ class BaremetalModelRoot(nx.DiGraph, base.Model):
|
||||
@lockutils.synchronized("baremetal_model")
|
||||
def add_node(self, node):
|
||||
self.assert_node(node)
|
||||
super(BaremetalModelRoot, self).add_node(node.uuid, node)
|
||||
super(BaremetalModelRoot, self).add_node(node.uuid, attr=node)
|
||||
|
||||
@lockutils.synchronized("baremetal_model")
|
||||
def remove_node(self, node):
|
||||
@@ -582,8 +585,8 @@ class BaremetalModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("baremetal_model")
|
||||
def get_all_ironic_nodes(self):
|
||||
return {uuid: cn for uuid, cn in self.nodes(data=True)
|
||||
if isinstance(cn, element.IronicNode)}
|
||||
return {uuid: cn['attr'] for uuid, cn in self.nodes(data=True)
|
||||
if isinstance(cn['attr'], element.IronicNode)}
|
||||
|
||||
@lockutils.synchronized("baremetal_model")
|
||||
def get_node_by_uuid(self, uuid):
|
||||
@@ -594,7 +597,7 @@ class BaremetalModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
def _get_by_uuid(self, uuid):
|
||||
try:
|
||||
return self.node[uuid]
|
||||
return self.node[uuid]['attr']
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.BaremetalResourceNotFound(name=uuid)
|
||||
|
||||
@@ -255,7 +255,7 @@ class CapacityNotificationEndpoint(CinderNotification):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -286,7 +286,7 @@ class VolumeCreateEnd(VolumeNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -311,7 +311,7 @@ class VolumeUpdateEnd(VolumeNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -369,7 +369,7 @@ class VolumeDeleteEnd(VolumeNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
|
||||
@@ -229,7 +229,7 @@ class ServiceUpdated(VersionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -275,7 +275,7 @@ class InstanceCreated(VersionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -310,7 +310,7 @@ class InstanceUpdated(VersionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -337,7 +337,7 @@ class InstanceDeletedEnd(VersionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -372,7 +372,7 @@ class LegacyInstanceUpdated(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -399,7 +399,7 @@ class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -426,7 +426,7 @@ class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -459,7 +459,7 @@ class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -486,7 +486,7 @@ class LegacyInstanceResizeConfirmEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -513,7 +513,7 @@ class LegacyInstanceRebuildEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
|
||||
@@ -91,16 +91,16 @@ def _reload_scoring_engines(refresh=False):
|
||||
|
||||
for name in engines.keys():
|
||||
se_impl = default.DefaultScoringLoader().load(name)
|
||||
LOG.debug("Found Scoring Engine plugin: %s" % se_impl.get_name())
|
||||
LOG.debug("Found Scoring Engine plugin: %s", se_impl.get_name())
|
||||
_scoring_engine_map[se_impl.get_name()] = se_impl
|
||||
|
||||
engine_containers = \
|
||||
default.DefaultScoringContainerLoader().list_available()
|
||||
|
||||
for container_id, container_cls in engine_containers.items():
|
||||
LOG.debug("Found Scoring Engine container plugin: %s" %
|
||||
LOG.debug("Found Scoring Engine container plugin: %s",
|
||||
container_id)
|
||||
for se in container_cls.get_scoring_engine_list():
|
||||
LOG.debug("Found Scoring Engine plugin: %s" %
|
||||
LOG.debug("Found Scoring Engine plugin: %s",
|
||||
se.get_name())
|
||||
_scoring_engine_map[se.get_name()] = se
|
||||
|
||||
@@ -14,16 +14,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*Actuator*
|
||||
|
||||
This strategy allows anyone to create an action plan with a predefined set of
|
||||
actions. This strategy can be used for 2 different purposes:
|
||||
|
||||
- Test actions
|
||||
- Use this strategy based on an event trigger to perform some explicit task
|
||||
|
||||
"""
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
@@ -34,7 +24,17 @@ LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class Actuator(base.UnclassifiedStrategy):
|
||||
"""Actuator that simply executes the actions given as parameter"""
|
||||
"""Actuator
|
||||
|
||||
Actuator that simply executes the actions given as parameter
|
||||
|
||||
This strategy allows anyone to create an action plan with a predefined
|
||||
set of actions. This strategy can be used for 2 different purposes:
|
||||
|
||||
- Test actions
|
||||
- Use this strategy based on an event trigger to perform some explicit task
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
|
||||
@@ -66,7 +66,7 @@ class StrategyEndpoint(object):
|
||||
ds_metrics = datasource.list_metrics()
|
||||
if ds_metrics is None:
|
||||
raise exception.DataSourceNotAvailable(
|
||||
datasource=strategy.config.datasource)
|
||||
datasource=datasource.NAME)
|
||||
else:
|
||||
for metric in strategy.DATASOURCE_METRICS:
|
||||
original_metric_name = datasource.METRIC_MAP.get(metric)
|
||||
@@ -81,7 +81,7 @@ class StrategyEndpoint(object):
|
||||
if not datasource:
|
||||
state = "Datasource is not presented for this strategy"
|
||||
else:
|
||||
state = "%s: %s" % (strategy.config.datasource,
|
||||
state = "%s: %s" % (datasource.NAME,
|
||||
datasource.check_availability())
|
||||
return {'type': 'Datasource',
|
||||
'state': state,
|
||||
@@ -104,7 +104,7 @@ class StrategyEndpoint(object):
|
||||
try:
|
||||
is_datasources = getattr(strategy.config, 'datasources', None)
|
||||
if is_datasources:
|
||||
datasource = is_datasources[0]
|
||||
datasource = getattr(strategy, 'datasource_backend')
|
||||
else:
|
||||
datasource = getattr(strategy, strategy.config.datasource)
|
||||
except (AttributeError, IndexError):
|
||||
@@ -272,7 +272,7 @@ class BaseStrategy(loadable.Loadable):
|
||||
collector = self.collector_manager.get_cluster_model_collector(
|
||||
'storage', osc=self.osc)
|
||||
audit_scope_handler = collector.get_audit_scope_handler(
|
||||
audit_scope=self.audit.scope)
|
||||
audit_scope=self.audit_scope)
|
||||
self._storage_model = audit_scope_handler.get_scoped_model(
|
||||
collector.get_latest_cluster_data_model())
|
||||
|
||||
|
||||
@@ -16,24 +16,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*Good server consolidation strategy*
|
||||
|
||||
Consolidation of VMs is essential to achieve energy optimization in cloud
|
||||
environments such as OpenStack. As VMs are spinned up and/or moved over time,
|
||||
it becomes necessary to migrate VMs among servers to lower the costs. However,
|
||||
migration of VMs introduces runtime overheads and consumes extra energy, thus
|
||||
a good server consolidation strategy should carefully plan for migration in
|
||||
order to both minimize energy consumption and comply to the various SLAs.
|
||||
|
||||
This algorithm not only minimizes the overall number of used servers, but also
|
||||
minimizes the number of migrations.
|
||||
|
||||
It has been developed only for tests. You must have at least 2 physical compute
|
||||
nodes to run it, so you can easily run it on DevStack. It assumes that live
|
||||
migration is possible on your OpenStack cluster.
|
||||
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
@@ -47,7 +29,25 @@ LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
"""Basic offline consolidation using live migration"""
|
||||
"""Good server consolidation strategy
|
||||
|
||||
Basic offline consolidation using live migration
|
||||
|
||||
Consolidation of VMs is essential to achieve energy optimization in cloud
|
||||
environments such as OpenStack. As VMs are spinned up and/or moved over
|
||||
time, it becomes necessary to migrate VMs among servers to lower the
|
||||
costs. However, migration of VMs introduces runtime overheads and
|
||||
consumes extra energy, thus a good server consolidation strategy should
|
||||
carefully plan for migration in order to both minimize energy consumption
|
||||
and comply to the various SLAs.
|
||||
|
||||
This algorithm not only minimizes the overall number of used servers,
|
||||
but also minimizes the number of migrations.
|
||||
|
||||
It has been developed only for tests. You must have at least 2 physical
|
||||
compute nodes to run it, so you can easily run it on DevStack. It assumes
|
||||
that live migration is possible on your OpenStack cluster.
|
||||
"""
|
||||
|
||||
HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent'
|
||||
INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util'
|
||||
@@ -109,6 +109,12 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
def granularity(self):
|
||||
return self.input_parameters.get('granularity', 300)
|
||||
|
||||
@property
|
||||
def aggregation_method(self):
|
||||
return self.input_parameters.get(
|
||||
'aggregation_method',
|
||||
{"instance": 'mean', "node": 'mean'})
|
||||
|
||||
@classmethod
|
||||
def get_display_name(cls):
|
||||
return _("Basic offline consolidation")
|
||||
@@ -142,6 +148,26 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
"type": "number",
|
||||
"default": 300
|
||||
},
|
||||
"aggregation_method": {
|
||||
"description": "Function used to aggregate multiple "
|
||||
"measures into an aggregate. For example, "
|
||||
"the min aggregation method will aggregate "
|
||||
"the values of different measures to the "
|
||||
"minimum value of all the measures in the "
|
||||
"time range.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"instance": {
|
||||
"type": "string",
|
||||
"default": 'mean'
|
||||
},
|
||||
"node": {
|
||||
"type": "string",
|
||||
"default": 'mean'
|
||||
},
|
||||
},
|
||||
"default": {"instance": 'mean', "node": 'mean'}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -149,8 +175,10 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
def get_config_opts(cls):
|
||||
return [
|
||||
cfg.ListOpt(
|
||||
"datasource",
|
||||
help="Data source to use in order to query the needed metrics",
|
||||
"datasources",
|
||||
help="Datasources to use in order to query the needed metrics."
|
||||
" If one of strategy metric isn't available in the first"
|
||||
" datasource, the next datasource will be chosen.",
|
||||
item_type=cfg.types.String(choices=['gnocchi', 'ceilometer',
|
||||
'monasca']),
|
||||
default=['gnocchi', 'ceilometer', 'monasca']),
|
||||
@@ -176,7 +204,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
:param source_node: the current node of the virtual machine
|
||||
:param destination_node: the destination of the virtual machine
|
||||
:param instance_to_migrate: the instance / virtual machine
|
||||
:return: True if the there is enough place otherwise false
|
||||
:return: True if there is enough place otherwise false
|
||||
"""
|
||||
if source_node == destination_node:
|
||||
return False
|
||||
@@ -256,11 +284,13 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
def get_node_cpu_usage(self, node):
|
||||
resource_id = "%s_%s" % (node.uuid, node.hostname)
|
||||
return self.datasource_backend.get_host_cpu_usage(
|
||||
resource_id, self.period, 'mean', granularity=300)
|
||||
resource_id, self.period, self.aggregation_method['node'],
|
||||
granularity=self.granularity)
|
||||
|
||||
def get_instance_cpu_usage(self, instance):
|
||||
return self.datasource_backend.get_instance_cpu_usage(
|
||||
instance.uuid, self.period, 'mean', granularity=300)
|
||||
instance.uuid, self.period, self.aggregation_method['instance'],
|
||||
granularity=self.granularity)
|
||||
|
||||
def calculate_score_node(self, node):
|
||||
"""Calculate the score that represent the utilization level
|
||||
@@ -275,7 +305,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
resource_id = "%s_%s" % (node.uuid, node.hostname)
|
||||
LOG.error(
|
||||
"No values returned by %(resource_id)s "
|
||||
"for %(metric_name)s" % dict(
|
||||
"for %(metric_name)s", dict(
|
||||
resource_id=resource_id,
|
||||
metric_name=self.METRIC_NAMES[
|
||||
self.config.datasource]['host_cpu_usage']))
|
||||
@@ -295,7 +325,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
if instance_cpu_utilization is None:
|
||||
LOG.error(
|
||||
"No values returned by %(resource_id)s "
|
||||
"for %(metric_name)s" % dict(
|
||||
"for %(metric_name)s", dict(
|
||||
resource_id=instance.uuid,
|
||||
metric_name=self.METRIC_NAMES[
|
||||
self.config.datasource]['instance_cpu_usage']))
|
||||
@@ -370,6 +400,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
sorted_score):
|
||||
number_migrations = 0
|
||||
for mig_instance, __ in sorted_instances:
|
||||
# skip exclude instance when migrating
|
||||
if mig_instance.watcher_exclude:
|
||||
LOG.debug("Instance is excluded by scope, "
|
||||
"skipped: %s", mig_instance.uuid)
|
||||
continue
|
||||
for node_uuid, __ in sorted_score:
|
||||
mig_source_node = self.compute_model.get_node_by_uuid(
|
||||
node_to_release)
|
||||
|
||||
@@ -86,8 +86,10 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
|
||||
def get_config_opts(cls):
|
||||
return [
|
||||
cfg.ListOpt(
|
||||
"datasource",
|
||||
help="Data source to use in order to query the needed metrics",
|
||||
"datasources",
|
||||
help="Datasources to use in order to query the needed metrics."
|
||||
" If one of strategy metric isn't available in the first"
|
||||
" datasource, the next datasource will be chosen.",
|
||||
item_type=cfg.types.String(choices=['gnocchi', 'ceilometer',
|
||||
'monasca']),
|
||||
default=['gnocchi', 'ceilometer', 'monasca'])
|
||||
@@ -197,10 +199,10 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
|
||||
hosts_need_release[node.uuid] = {
|
||||
'priority_vm': potential_priority_instance,
|
||||
'noisy_vm': potential_noisy_instance}
|
||||
LOG.debug("Priority VM found: %s" % (
|
||||
potential_priority_instance.uuid))
|
||||
LOG.debug("Noisy VM found: %s" % (
|
||||
potential_noisy_instance.uuid))
|
||||
LOG.debug("Priority VM found: %s",
|
||||
potential_priority_instance.uuid)
|
||||
LOG.debug("Noisy VM found: %s",
|
||||
potential_noisy_instance.uuid)
|
||||
loop_break_flag = True
|
||||
break
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
#
|
||||
|
||||
"""
|
||||
*Good Thermal Strategy*:
|
||||
*Good Thermal Strategy*
|
||||
|
||||
Towards to software defined infrastructure, the power and thermal
|
||||
intelligences is being adopted to optimize workload, which can help
|
||||
@@ -26,17 +26,17 @@ improve efficiency, reduce power, as well as to improve datacenter PUE
|
||||
and lower down operation cost in data center.
|
||||
Outlet (Exhaust Air) Temperature is one of the important thermal
|
||||
telemetries to measure thermal/workload status of server.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
This strategy makes decisions to migrate workloads to the hosts with good
|
||||
thermal condition (lowest outlet temperature) when the outlet temperature
|
||||
of source hosts reach a configurable threshold.
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.common import exception as wexc
|
||||
from watcher.datasource import ceilometer as ceil
|
||||
from watcher.datasource import gnocchi as gnoc
|
||||
from watcher.decision_engine.model import element
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
|
||||
@@ -95,8 +95,6 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
||||
:type osc: :py:class:`~.OpenStackClients` instance, optional
|
||||
"""
|
||||
super(OutletTempControl, self).__init__(config, osc)
|
||||
self._ceilometer = None
|
||||
self._gnocchi = None
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
@@ -139,26 +137,6 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
||||
},
|
||||
}
|
||||
|
||||
@property
|
||||
def ceilometer(self):
|
||||
if self._ceilometer is None:
|
||||
self.ceilometer = ceil.CeilometerHelper(osc=self.osc)
|
||||
return self._ceilometer
|
||||
|
||||
@ceilometer.setter
|
||||
def ceilometer(self, c):
|
||||
self._ceilometer = c
|
||||
|
||||
@property
|
||||
def gnocchi(self):
|
||||
if self._gnocchi is None:
|
||||
self.gnocchi = gnoc.GnocchiHelper(osc=self.osc)
|
||||
return self._gnocchi
|
||||
|
||||
@gnocchi.setter
|
||||
def gnocchi(self, g):
|
||||
self._gnocchi = g
|
||||
|
||||
@property
|
||||
def granularity(self):
|
||||
return self.input_parameters.get('granularity', 300)
|
||||
@@ -208,31 +186,20 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
||||
resource_id = node.uuid
|
||||
outlet_temp = None
|
||||
|
||||
if self.config.datasource == "ceilometer":
|
||||
outlet_temp = self.ceilometer.statistic_aggregation(
|
||||
resource_id=resource_id,
|
||||
meter_name=metric_name,
|
||||
period=self.period,
|
||||
aggregate='avg'
|
||||
)
|
||||
elif self.config.datasource == "gnocchi":
|
||||
stop_time = datetime.datetime.utcnow()
|
||||
start_time = stop_time - datetime.timedelta(
|
||||
seconds=int(self.period))
|
||||
outlet_temp = self.gnocchi.statistic_aggregation(
|
||||
resource_id=resource_id,
|
||||
metric=metric_name,
|
||||
granularity=self.granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation='mean'
|
||||
)
|
||||
outlet_temp = self.datasource_backend.statistic_aggregation(
|
||||
resource_id=resource_id,
|
||||
meter_name=metric_name,
|
||||
period=self.period,
|
||||
granularity=self.granularity,
|
||||
)
|
||||
|
||||
# some hosts may not have outlet temp meters, remove from target
|
||||
if outlet_temp is None:
|
||||
LOG.warning("%s: no outlet temp data", resource_id)
|
||||
continue
|
||||
|
||||
LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
|
||||
LOG.debug("%(resource)s: outlet temperature %(temp)f",
|
||||
{'resource': resource_id, 'temp': outlet_temp})
|
||||
instance_data = {'node': node, 'outlet_temp': outlet_temp}
|
||||
if outlet_temp >= self.threshold:
|
||||
# mark the node to release resources
|
||||
@@ -249,6 +216,11 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
||||
mig_source_node)
|
||||
for instance in instances_of_src:
|
||||
try:
|
||||
# NOTE: skip exclude instance when migrating
|
||||
if instance.watcher_exclude:
|
||||
LOG.debug("Instance is excluded by scope, "
|
||||
"skipped: %s", instance.uuid)
|
||||
continue
|
||||
# select the first active instance to migrate
|
||||
if (instance.state !=
|
||||
element.InstanceState.ACTIVE.value):
|
||||
|
||||
@@ -14,24 +14,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*Workload balance using cinder volume migration*
|
||||
|
||||
*Description*
|
||||
|
||||
This strategy migrates volumes based on the workload of the
|
||||
cinder pools.
|
||||
It makes decision to migrate a volume whenever a pool's used
|
||||
utilization % is higher than the specified threshold. The volume
|
||||
to be moved should make the pool close to average workload of all
|
||||
cinder pools.
|
||||
|
||||
*Requirements*
|
||||
|
||||
* You must have at least 2 cinder volume pools to run
|
||||
this strategy.
|
||||
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
@@ -16,41 +16,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
[PoC]Uniform Airflow using live migration
|
||||
|
||||
*Description*
|
||||
|
||||
It is a migration strategy based on the airflow of physical
|
||||
servers. It generates solutions to move VM whenever a server's
|
||||
airflow is higher than the specified threshold.
|
||||
|
||||
*Requirements*
|
||||
|
||||
* Hardware: compute node with NodeManager 3.0 support
|
||||
* Software: Ceilometer component ceilometer-agent-compute running
|
||||
in each compute node, and Ceilometer API can report such telemetry
|
||||
"airflow, system power, inlet temperature" successfully.
|
||||
* You must have at least 2 physical compute nodes to run this strategy
|
||||
|
||||
*Limitations*
|
||||
|
||||
- This is a proof of concept that is not meant to be used in production.
|
||||
- We cannot forecast how many servers should be migrated. This is the
|
||||
reason why we only plan a single virtual machine migration at a time.
|
||||
So it's better to use this algorithm with `CONTINUOUS` audits.
|
||||
- It assumes that live migrations are possible.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.common import exception as wexc
|
||||
from watcher.datasource import ceilometer as ceil
|
||||
from watcher.datasource import gnocchi as gnoc
|
||||
from watcher.decision_engine.model import element
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
|
||||
@@ -125,30 +96,8 @@ class UniformAirflow(base.BaseStrategy):
|
||||
self.config.datasource]['host_inlet_temp']
|
||||
self.meter_name_power = self.METRIC_NAMES[
|
||||
self.config.datasource]['host_power']
|
||||
self._ceilometer = None
|
||||
self._gnocchi = None
|
||||
self._period = self.PERIOD
|
||||
|
||||
@property
|
||||
def ceilometer(self):
|
||||
if self._ceilometer is None:
|
||||
self.ceilometer = ceil.CeilometerHelper(osc=self.osc)
|
||||
return self._ceilometer
|
||||
|
||||
@ceilometer.setter
|
||||
def ceilometer(self, c):
|
||||
self._ceilometer = c
|
||||
|
||||
@property
|
||||
def gnocchi(self):
|
||||
if self._gnocchi is None:
|
||||
self.gnocchi = gnoc.GnocchiHelper(osc=self.osc)
|
||||
return self._gnocchi
|
||||
|
||||
@gnocchi.setter
|
||||
def gnocchi(self, g):
|
||||
self._gnocchi = g
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "uniform_airflow"
|
||||
@@ -247,35 +196,16 @@ class UniformAirflow(base.BaseStrategy):
|
||||
source_instances = self.compute_model.get_node_instances(
|
||||
source_node)
|
||||
if source_instances:
|
||||
if self.config.datasource == "ceilometer":
|
||||
inlet_t = self.ceilometer.statistic_aggregation(
|
||||
resource_id=source_node.uuid,
|
||||
meter_name=self.meter_name_inlet_t,
|
||||
period=self._period,
|
||||
aggregate='avg')
|
||||
power = self.ceilometer.statistic_aggregation(
|
||||
resource_id=source_node.uuid,
|
||||
meter_name=self.meter_name_power,
|
||||
period=self._period,
|
||||
aggregate='avg')
|
||||
elif self.config.datasource == "gnocchi":
|
||||
stop_time = datetime.datetime.utcnow()
|
||||
start_time = stop_time - datetime.timedelta(
|
||||
seconds=int(self._period))
|
||||
inlet_t = self.gnocchi.statistic_aggregation(
|
||||
resource_id=source_node.uuid,
|
||||
metric=self.meter_name_inlet_t,
|
||||
granularity=self.granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation='mean')
|
||||
power = self.gnocchi.statistic_aggregation(
|
||||
resource_id=source_node.uuid,
|
||||
metric=self.meter_name_power,
|
||||
granularity=self.granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation='mean')
|
||||
inlet_t = self.datasource_backend.statistic_aggregation(
|
||||
resource_id=source_node.uuid,
|
||||
meter_name=self.meter_name_inlet_t,
|
||||
period=self._period,
|
||||
granularity=self.granularity)
|
||||
power = self.datasource_backend.statistic_aggregation(
|
||||
resource_id=source_node.uuid,
|
||||
meter_name=self.meter_name_power,
|
||||
period=self._period,
|
||||
granularity=self.granularity)
|
||||
if (power < self.threshold_power and
|
||||
inlet_t < self.threshold_inlet_t):
|
||||
# hardware issue, migrate all instances from this node
|
||||
@@ -285,6 +215,11 @@ class UniformAirflow(base.BaseStrategy):
|
||||
else:
|
||||
# migrate the first active instance
|
||||
for instance in source_instances:
|
||||
# NOTE: skip exclude instance when migrating
|
||||
if instance.watcher_exclude:
|
||||
LOG.debug("Instance is excluded by scope, "
|
||||
"skipped: %s", instance.uuid)
|
||||
continue
|
||||
if (instance.state !=
|
||||
element.InstanceState.ACTIVE.value):
|
||||
LOG.info(
|
||||
@@ -353,29 +288,18 @@ class UniformAirflow(base.BaseStrategy):
|
||||
node = self.compute_model.get_node_by_uuid(
|
||||
node_id)
|
||||
resource_id = node.uuid
|
||||
if self.config.datasource == "ceilometer":
|
||||
airflow = self.ceilometer.statistic_aggregation(
|
||||
resource_id=resource_id,
|
||||
meter_name=self.meter_name_airflow,
|
||||
period=self._period,
|
||||
aggregate='avg')
|
||||
elif self.config.datasource == "gnocchi":
|
||||
stop_time = datetime.datetime.utcnow()
|
||||
start_time = stop_time - datetime.timedelta(
|
||||
seconds=int(self._period))
|
||||
airflow = self.gnocchi.statistic_aggregation(
|
||||
resource_id=resource_id,
|
||||
metric=self.meter_name_airflow,
|
||||
granularity=self.granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation='mean')
|
||||
airflow = self.datasource_backend.statistic_aggregation(
|
||||
resource_id=resource_id,
|
||||
meter_name=self.meter_name_airflow,
|
||||
period=self._period,
|
||||
granularity=self.granularity)
|
||||
# some hosts may not have airflow meter, remove from target
|
||||
if airflow is None:
|
||||
LOG.warning("%s: no airflow data", resource_id)
|
||||
continue
|
||||
|
||||
LOG.debug("%s: airflow %f" % (resource_id, airflow))
|
||||
LOG.debug("%(resource)s: airflow %(airflow)f",
|
||||
{'resource': resource_id, 'airflow': airflow})
|
||||
nodemap = {'node': node, 'airflow': airflow}
|
||||
if airflow >= self.threshold_airflow:
|
||||
# mark the node to release resources
|
||||
|
||||
@@ -17,42 +17,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*VM Workload Consolidation Strategy*
|
||||
|
||||
A load consolidation strategy based on heuristic first-fit
|
||||
algorithm which focuses on measured CPU utilization and tries to
|
||||
minimize hosts which have too much or too little load respecting
|
||||
resource capacity constraints.
|
||||
|
||||
This strategy produces a solution resulting in more efficient
|
||||
utilization of cluster resources using following four phases:
|
||||
|
||||
* Offload phase - handling over-utilized resources
|
||||
* Consolidation phase - handling under-utilized resources
|
||||
* Solution optimization - reducing number of migrations
|
||||
* Disability of unused compute nodes
|
||||
|
||||
A capacity coefficients (cc) might be used to adjust optimization
|
||||
thresholds. Different resources may require different coefficient
|
||||
values as well as setting up different coefficient values in both
|
||||
phases may lead to to more efficient consolidation in the end.
|
||||
If the cc equals 1 the full resource capacity may be used, cc
|
||||
values lower than 1 will lead to resource under utilization and
|
||||
values higher than 1 will lead to resource overbooking.
|
||||
e.g. If targeted utilization is 80 percent of a compute node capacity,
|
||||
the coefficient in the consolidation phase will be 0.8, but
|
||||
may any lower value in the offloading phase. The lower it gets
|
||||
the cluster will appear more released (distributed) for the
|
||||
following consolidation phase.
|
||||
|
||||
As this strategy leverages VM live migration to move the load
|
||||
from one compute node to another, this feature needs to be set up
|
||||
correctly on all compute nodes within the cluster.
|
||||
This strategy assumes it is possible to live migrate any VM from
|
||||
an active compute node to any other active compute node.
|
||||
"""
|
||||
import datetime
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
@@ -60,8 +24,6 @@ import six
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.common import exception
|
||||
from watcher.datasource import ceilometer as ceil
|
||||
from watcher.datasource import gnocchi as gnoc
|
||||
from watcher.decision_engine.model import element
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
|
||||
@@ -69,7 +31,40 @@ LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
"""VM Workload Consolidation Strategy"""
|
||||
"""VM Workload Consolidation Strategy
|
||||
|
||||
A load consolidation strategy based on heuristic first-fit
|
||||
algorithm which focuses on measured CPU utilization and tries to
|
||||
minimize hosts which have too much or too little load respecting
|
||||
resource capacity constraints.
|
||||
|
||||
This strategy produces a solution resulting in more efficient
|
||||
utilization of cluster resources using following four phases:
|
||||
|
||||
* Offload phase - handling over-utilized resources
|
||||
* Consolidation phase - handling under-utilized resources
|
||||
* Solution optimization - reducing number of migrations
|
||||
* Disability of unused compute nodes
|
||||
|
||||
A capacity coefficients (cc) might be used to adjust optimization
|
||||
thresholds. Different resources may require different coefficient
|
||||
values as well as setting up different coefficient values in both
|
||||
phases may lead to to more efficient consolidation in the end.
|
||||
If the cc equals 1 the full resource capacity may be used, cc
|
||||
values lower than 1 will lead to resource under utilization and
|
||||
values higher than 1 will lead to resource overbooking.
|
||||
e.g. If targeted utilization is 80 percent of a compute node capacity,
|
||||
the coefficient in the consolidation phase will be 0.8, but
|
||||
may any lower value in the offloading phase. The lower it gets
|
||||
the cluster will appear more released (distributed) for the
|
||||
following consolidation phase.
|
||||
|
||||
As this strategy leverages VM live migration to move the load
|
||||
from one compute node to another, this feature needs to be set up
|
||||
correctly on all compute nodes within the cluster.
|
||||
This strategy assumes it is possible to live migrate any VM from
|
||||
an active compute node to any other active compute node.
|
||||
"""
|
||||
|
||||
HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent'
|
||||
INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util'
|
||||
@@ -118,26 +113,6 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
def period(self):
|
||||
return self.input_parameters.get('period', 3600)
|
||||
|
||||
@property
|
||||
def ceilometer(self):
|
||||
if self._ceilometer is None:
|
||||
self.ceilometer = ceil.CeilometerHelper(osc=self.osc)
|
||||
return self._ceilometer
|
||||
|
||||
@ceilometer.setter
|
||||
def ceilometer(self, ceilometer):
|
||||
self._ceilometer = ceilometer
|
||||
|
||||
@property
|
||||
def gnocchi(self):
|
||||
if self._gnocchi is None:
|
||||
self.gnocchi = gnoc.GnocchiHelper(osc=self.osc)
|
||||
return self._gnocchi
|
||||
|
||||
@gnocchi.setter
|
||||
def gnocchi(self, gnocchi):
|
||||
self._gnocchi = gnocchi
|
||||
|
||||
@property
|
||||
def granularity(self):
|
||||
return self.input_parameters.get('granularity', 300)
|
||||
@@ -191,7 +166,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
return instance.state.value
|
||||
else:
|
||||
LOG.error('Unexpected instance state type, '
|
||||
'state=%(state)s, state_type=%(st)s.' %
|
||||
'state=%(state)s, state_type=%(st)s.',
|
||||
dict(state=instance.state,
|
||||
st=type(instance.state)))
|
||||
raise exception.WatcherException
|
||||
@@ -207,7 +182,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
return node.status.value
|
||||
else:
|
||||
LOG.error('Unexpected node status type, '
|
||||
'status=%(status)s, status_type=%(st)s.' %
|
||||
'status=%(status)s, status_type=%(st)s.',
|
||||
dict(status=node.status,
|
||||
st=type(node.status)))
|
||||
raise exception.WatcherException
|
||||
@@ -256,7 +231,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
# migration mechanism to move non active VMs.
|
||||
LOG.error(
|
||||
'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
|
||||
'state=%(instance_state)s.' % dict(
|
||||
'state=%(instance_state)s.', dict(
|
||||
instance_uuid=instance.uuid,
|
||||
instance_state=instance_state_str))
|
||||
return
|
||||
@@ -315,57 +290,28 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
disk_alloc_metric = self.METRIC_NAMES[
|
||||
self.config.datasource]['disk_alloc_metric']
|
||||
|
||||
if self.config.datasource == "ceilometer":
|
||||
instance_cpu_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=instance.uuid, meter_name=cpu_util_metric,
|
||||
period=self.period, aggregate='avg')
|
||||
instance_ram_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=instance.uuid, meter_name=ram_util_metric,
|
||||
period=self.period, aggregate='avg')
|
||||
if not instance_ram_util:
|
||||
instance_ram_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=instance.uuid, meter_name=ram_alloc_metric,
|
||||
period=self.period, aggregate='avg')
|
||||
instance_disk_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=instance.uuid, meter_name=disk_alloc_metric,
|
||||
period=self.period, aggregate='avg')
|
||||
elif self.config.datasource == "gnocchi":
|
||||
stop_time = datetime.datetime.utcnow()
|
||||
start_time = stop_time - datetime.timedelta(
|
||||
seconds=int(self.period))
|
||||
instance_cpu_util = self.gnocchi.statistic_aggregation(
|
||||
instance_cpu_util = self.datasource_backend.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
meter_name=cpu_util_metric,
|
||||
period=self.period,
|
||||
granularity=self.granularity)
|
||||
instance_ram_util = self.datasource_backend.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
meter_name=ram_util_metric,
|
||||
period=self.period,
|
||||
granularity=self.granularity)
|
||||
if not instance_ram_util:
|
||||
instance_ram_util = self.datasource_backend.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
metric=cpu_util_metric,
|
||||
granularity=self.granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation='mean'
|
||||
)
|
||||
instance_ram_util = self.gnocchi.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
metric=ram_util_metric,
|
||||
granularity=self.granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation='mean'
|
||||
)
|
||||
if not instance_ram_util:
|
||||
instance_ram_util = self.gnocchi.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
metric=ram_alloc_metric,
|
||||
granularity=self.granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation='mean'
|
||||
)
|
||||
instance_disk_util = self.gnocchi.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
metric=disk_alloc_metric,
|
||||
granularity=self.granularity,
|
||||
start_time=start_time,
|
||||
stop_time=stop_time,
|
||||
aggregation='mean'
|
||||
)
|
||||
meter_name=ram_alloc_metric,
|
||||
period=self.period,
|
||||
granularity=self.granularity)
|
||||
instance_disk_util = self.datasource_backend.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
meter_name=disk_alloc_metric,
|
||||
period=self.period,
|
||||
granularity=self.granularity)
|
||||
|
||||
if instance_cpu_util:
|
||||
total_cpu_utilization = (
|
||||
instance.vcpus * (instance_cpu_util / 100.0))
|
||||
@@ -556,6 +502,11 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
key=lambda x: self.get_instance_utilization(
|
||||
x)['cpu']
|
||||
):
|
||||
# skip exclude instance when migrating
|
||||
if instance.watcher_exclude:
|
||||
LOG.debug("Instance is excluded by scope, "
|
||||
"skipped: %s", instance.uuid)
|
||||
continue
|
||||
for destination_node in reversed(sorted_nodes):
|
||||
if self.instance_fits(
|
||||
instance, destination_node, cc):
|
||||
@@ -588,6 +539,11 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
self.compute_model.get_node_instances(node),
|
||||
key=lambda x: self.get_instance_utilization(x)['cpu'])
|
||||
for instance in reversed(instances):
|
||||
# skip exclude instance when migrating
|
||||
if instance.watcher_exclude:
|
||||
LOG.debug("Instance is excluded by scope, "
|
||||
"skipped: %s", instance.uuid)
|
||||
continue
|
||||
dsc = len(sorted_nodes) - 1
|
||||
for destination_node in reversed(sorted_nodes):
|
||||
if asc >= dsc:
|
||||
|
||||
@@ -16,35 +16,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*[PoC]Workload balance using live migration*
|
||||
|
||||
*Description*
|
||||
|
||||
This strategy migrates a VM based on the VM workload of the hosts.
|
||||
It makes decision to migrate a workload whenever a host's CPU or RAM
|
||||
utilization % is higher than the specified threshold. The VM to
|
||||
be moved should make the host close to average workload of all
|
||||
hosts nodes.
|
||||
|
||||
*Requirements*
|
||||
|
||||
* Hardware: compute node should use the same physical CPUs
|
||||
* Software: Ceilometer component ceilometer-agent-compute
|
||||
running in each compute node, and Ceilometer API can
|
||||
report such telemetry "cpu_util" and "memory.resident" successfully.
|
||||
* You must have at least 2 physical compute nodes to run
|
||||
this strategy.
|
||||
|
||||
*Limitations*
|
||||
|
||||
- This is a proof of concept that is not meant to be used in
|
||||
production.
|
||||
- We cannot forecast how many servers should be migrated.
|
||||
This is the reason why we only plan a single virtual
|
||||
machine migration at a time. So it's better to use this
|
||||
algorithm with `CONTINUOUS` audits.
|
||||
"""
|
||||
|
||||
from __future__ import division
|
||||
|
||||
@@ -76,7 +47,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
* Software: Ceilometer component ceilometer-agent-compute running
|
||||
in each compute node, and Ceilometer API can report such telemetry
|
||||
"cpu_util" and "memory.resident" successfully.
|
||||
* You must have at least 2 physical compute nodes to run this strategy
|
||||
* You must have at least 2 physical compute nodes to run this strategy.
|
||||
|
||||
*Limitations*
|
||||
|
||||
@@ -208,6 +179,11 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
instance_id = None
|
||||
for instance in source_instances:
|
||||
try:
|
||||
# NOTE: skip exclude instance when migrating
|
||||
if instance.watcher_exclude:
|
||||
LOG.debug("Instance is excluded by scope, "
|
||||
"skipped: %s", instance.uuid)
|
||||
continue
|
||||
# select the first active VM to migrate
|
||||
if (instance.state !=
|
||||
element.InstanceState.ACTIVE.value):
|
||||
@@ -290,8 +266,9 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
util = None
|
||||
try:
|
||||
util = self.datasource_backend.statistic_aggregation(
|
||||
instance.uuid, self._meter, self._period, 'mean',
|
||||
granularity=self.granularity)
|
||||
instance.uuid, self._meter, self._period,
|
||||
self._granularity, aggregation='mean',
|
||||
dimensions=dict(resource_id=instance.uuid))
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
LOG.error("Can not get %s from %s", self._meter,
|
||||
@@ -352,6 +329,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
self.threshold = self.input_parameters.threshold
|
||||
self._period = self.input_parameters.period
|
||||
self._meter = self.input_parameters.metrics
|
||||
self._granularity = self.input_parameters.granularity
|
||||
source_nodes, target_nodes, avg_workload, workload_cache = (
|
||||
self.group_hosts_by_cpu_or_ram_util())
|
||||
|
||||
|
||||
@@ -16,16 +16,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*Workload Stabilization control using live migration*
|
||||
|
||||
This is workload stabilization strategy based on standard deviation
|
||||
algorithm. The goal is to determine if there is an overload in a cluster
|
||||
and respond to it by migrating VMs to stabilize the cluster.
|
||||
|
||||
It assumes that live migrations are possible in your cluster.
|
||||
|
||||
"""
|
||||
|
||||
import copy
|
||||
import itertools
|
||||
@@ -57,7 +47,16 @@ def _set_memoize(conf):
|
||||
|
||||
|
||||
class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
"""Workload Stabilization control using live migration"""
|
||||
"""Workload Stabilization control using live migration
|
||||
|
||||
This is workload stabilization strategy based on standard deviation
|
||||
algorithm. The goal is to determine if there is an overload in a cluster
|
||||
and respond to it by migrating VMs to stabilize the cluster.
|
||||
|
||||
This strategy has been tested in a small (32 nodes) cluster.
|
||||
|
||||
It assumes that live migrations are possible in your cluster.
|
||||
"""
|
||||
|
||||
MIGRATION = "migrate"
|
||||
MEMOIZE = _set_memoize(CONF)
|
||||
@@ -80,6 +79,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
self.instance_metrics = None
|
||||
self.retry_count = None
|
||||
self.periods = None
|
||||
self.aggregation_method = None
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
@@ -104,19 +104,47 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
"metrics": {
|
||||
"description": "Metrics used as rates of cluster loads.",
|
||||
"type": "array",
|
||||
"default": ["cpu_util", "memory.resident"]
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": ["cpu_util", "memory.resident"]
|
||||
},
|
||||
"default": ["cpu_util"]
|
||||
},
|
||||
"thresholds": {
|
||||
"description": "Dict where key is a metric and value "
|
||||
"is a trigger value.",
|
||||
"type": "object",
|
||||
"default": {"cpu_util": 0.2, "memory.resident": 0.2}
|
||||
"properties": {
|
||||
"cpu_util": {
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"maximum": 1
|
||||
},
|
||||
"memory.resident": {
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"maximum": 1
|
||||
}
|
||||
},
|
||||
"default": {"cpu_util": 0.1, "memory.resident": 0.1}
|
||||
},
|
||||
"weights": {
|
||||
"description": "These weights used to calculate "
|
||||
"common standard deviation. Name of weight"
|
||||
" contains meter name and _weight suffix.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cpu_util_weight": {
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"maximum": 1
|
||||
},
|
||||
"memory.resident_weight": {
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"maximum": 1
|
||||
}
|
||||
},
|
||||
"default": {"cpu_util_weight": 1.0,
|
||||
"memory.resident_weight": 1.0}
|
||||
},
|
||||
@@ -141,6 +169,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
"retry_count": {
|
||||
"description": "Count of random returned hosts",
|
||||
"type": "number",
|
||||
"minimum": 1,
|
||||
"default": 1
|
||||
},
|
||||
"periods": {
|
||||
@@ -152,12 +181,43 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
"uses only the last period of all received"
|
||||
" ones.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"instance": {
|
||||
"type": "integer",
|
||||
"minimum": 0
|
||||
},
|
||||
"node": {
|
||||
"type": "integer",
|
||||
"minimum": 0
|
||||
},
|
||||
},
|
||||
"default": {"instance": 720, "node": 600}
|
||||
},
|
||||
"aggregation_method": {
|
||||
"description": "Function used to aggregate multiple "
|
||||
"measures into an aggregate. For example, "
|
||||
"the min aggregation method will aggregate "
|
||||
"the values of different measures to the "
|
||||
"minimum value of all the measures in the "
|
||||
"time range.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"instance": {
|
||||
"type": "string",
|
||||
"default": 'mean'
|
||||
},
|
||||
"node": {
|
||||
"type": "string",
|
||||
"default": 'mean'
|
||||
},
|
||||
},
|
||||
"default": {"instance": 'mean', "node": 'mean'}
|
||||
},
|
||||
"granularity": {
|
||||
"description": "The time between two measures in an "
|
||||
"aggregated timeseries of a metric.",
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"default": 300
|
||||
},
|
||||
}
|
||||
@@ -198,12 +258,13 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
instance_load = {'uuid': instance.uuid, 'vcpus': instance.vcpus}
|
||||
for meter in self.metrics:
|
||||
avg_meter = self.datasource_backend.statistic_aggregation(
|
||||
instance.uuid, meter, self.periods['instance'], 'mean',
|
||||
granularity=self.granularity)
|
||||
instance.uuid, meter, self.periods['instance'],
|
||||
self.granularity,
|
||||
aggregation=self.aggregation_method['instance'])
|
||||
if avg_meter is None:
|
||||
LOG.warning(
|
||||
"No values returned by %(resource_id)s "
|
||||
"for %(metric_name)s" % dict(
|
||||
"for %(metric_name)s", dict(
|
||||
resource_id=instance.uuid, metric_name=meter))
|
||||
return
|
||||
if meter == 'cpu_util':
|
||||
@@ -242,8 +303,8 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
resource_id = node_id
|
||||
avg_meter = self.datasource_backend.statistic_aggregation(
|
||||
resource_id, self.instance_metrics[metric],
|
||||
self.periods['node'], 'mean', granularity=self.granularity)
|
||||
|
||||
self.periods['node'], self.granularity,
|
||||
aggregation=self.aggregation_method['node'])
|
||||
if avg_meter is None:
|
||||
LOG.warning('No values returned by node %s for %s',
|
||||
node_id, meter_name)
|
||||
@@ -302,7 +363,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
s_host_vcpus = new_hosts[src_node.uuid]['vcpus']
|
||||
d_host_vcpus = new_hosts[dst_node.uuid]['vcpus']
|
||||
for metric in self.metrics:
|
||||
if metric is 'cpu_util':
|
||||
if metric == 'cpu_util':
|
||||
new_hosts[src_node.uuid][metric] -= (
|
||||
self.transform_instance_cpu(instance_load, s_host_vcpus))
|
||||
new_hosts[dst_node.uuid][metric] += (
|
||||
@@ -348,10 +409,15 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
c_nodes.remove(src_host)
|
||||
node_list = yield_nodes(c_nodes)
|
||||
for instance in self.compute_model.get_node_instances(src_node):
|
||||
min_sd_case = {'value': current_weighted_sd}
|
||||
# NOTE: skip exclude instance when migrating
|
||||
if instance.watcher_exclude:
|
||||
LOG.debug("Instance is excluded by scope, "
|
||||
"skipped: %s", instance.uuid)
|
||||
continue
|
||||
if instance.state not in [element.InstanceState.ACTIVE.value,
|
||||
element.InstanceState.PAUSED.value]:
|
||||
continue
|
||||
min_sd_case = {'value': current_weighted_sd}
|
||||
for dst_host in next(node_list):
|
||||
dst_node = self.compute_model.get_node_by_uuid(dst_host)
|
||||
sd_case = self.calculate_migration_case(
|
||||
@@ -376,12 +442,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
normalized_load = self.normalize_hosts_load(hosts_load)
|
||||
for metric in self.metrics:
|
||||
metric_sd = self.get_sd(normalized_load, metric)
|
||||
LOG.info("Standard deviation for %s is %s."
|
||||
% (metric, metric_sd))
|
||||
LOG.info("Standard deviation for %s is %s.",
|
||||
(metric, metric_sd))
|
||||
if metric_sd > float(self.thresholds[metric]):
|
||||
LOG.info("Standard deviation of %s exceeds"
|
||||
" appropriate threshold %s."
|
||||
% (metric, metric_sd))
|
||||
" appropriate threshold %s.",
|
||||
(metric, metric_sd))
|
||||
return self.simulate_migrations(hosts_load)
|
||||
|
||||
def add_migration(self,
|
||||
@@ -434,6 +500,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
self.instance_metrics = self.input_parameters.instance_metrics
|
||||
self.retry_count = self.input_parameters.retry_count
|
||||
self.periods = self.input_parameters.periods
|
||||
self.aggregation_method = self.input_parameters.aggregation_method
|
||||
|
||||
def do_execute(self):
|
||||
migration = self.check_threshold()
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from dateutil.parser import parse
|
||||
import six
|
||||
@@ -40,6 +41,11 @@ IN_USE = "in-use"
|
||||
|
||||
|
||||
class ZoneMigration(base.ZoneMigrationBaseStrategy):
|
||||
"""Zone migration using instance and volume migration
|
||||
|
||||
This is zone migration strategy to migrate many instances and volumes
|
||||
efficiently with minimum downtime for hardware maintenance.
|
||||
"""
|
||||
|
||||
def __init__(self, config, osc=None):
|
||||
|
||||
@@ -303,7 +309,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
|
||||
else:
|
||||
self.instances_migration(targets, action_counter)
|
||||
|
||||
LOG.debug("action total: %s, pools: %s, nodes %s " % (
|
||||
LOG.debug("action total: %s, pools: %s, nodes %s ", (
|
||||
action_counter.total_count,
|
||||
action_counter.per_pool_count,
|
||||
action_counter.per_node_count))
|
||||
@@ -371,15 +377,6 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
|
||||
:param pool: pool name
|
||||
:returns: host name
|
||||
"""
|
||||
|
||||
# TODO(hidekazu) use this
|
||||
# mapping = zonemgr.get_host_pool_mapping()
|
||||
# for host, pools in six.iteritems(mapping):
|
||||
# for _pool in pools:
|
||||
# if pool == _pool:
|
||||
# return host
|
||||
# LOG.warning(self.msg_not_exist_corresponding_host % pool)
|
||||
# return pool
|
||||
return pool.split('@')[0]
|
||||
|
||||
def get_dst_node(self, src_node):
|
||||
@@ -413,13 +410,13 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
|
||||
pool = getattr(volume, 'os-vol-host-attr:host')
|
||||
if action_counter.is_pool_max(pool):
|
||||
LOG.debug("%s has objects to be migrated, but it has"
|
||||
" reached the limit of parallelization." % pool)
|
||||
" reached the limit of parallelization.", pool)
|
||||
continue
|
||||
|
||||
src_type = volume.volume_type
|
||||
dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type)
|
||||
LOG.debug(src_type)
|
||||
LOG.debug("%s %s" % (dst_pool, dst_type))
|
||||
LOG.debug("%s %s", (dst_pool, dst_type))
|
||||
|
||||
if self.is_available(volume):
|
||||
if src_type == dst_type:
|
||||
@@ -448,7 +445,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
|
||||
|
||||
if action_counter.is_node_max(src_node):
|
||||
LOG.debug("%s has objects to be migrated, but it has"
|
||||
" reached the limit of parallelization." % src_node)
|
||||
" reached the limit of parallelization.", src_node)
|
||||
continue
|
||||
|
||||
dst_node = self.get_dst_node(src_node)
|
||||
@@ -643,7 +640,7 @@ class ActionCounter(object):
|
||||
if not self.is_total_max() and not self.is_pool_max(pool):
|
||||
self.per_pool_count[pool] += 1
|
||||
self.total_count += 1
|
||||
LOG.debug("total: %s, per_pool: %s" % (
|
||||
LOG.debug("total: %s, per_pool: %s", (
|
||||
self.total_count, self.per_pool_count))
|
||||
return True
|
||||
return False
|
||||
@@ -660,7 +657,7 @@ class ActionCounter(object):
|
||||
if not self.is_total_max() and not self.is_node_max(node):
|
||||
self.per_node_count[node] += 1
|
||||
self.total_count += 1
|
||||
LOG.debug("total: %s, per_node: %s" % (
|
||||
LOG.debug("total: %s, per_node: %s", (
|
||||
self.total_count, self.per_node_count))
|
||||
return True
|
||||
return False
|
||||
@@ -679,9 +676,9 @@ class ActionCounter(object):
|
||||
"""
|
||||
if pool not in self.per_pool_count:
|
||||
self.per_pool_count[pool] = 0
|
||||
LOG.debug("the number of parallel per pool %s is %s " %
|
||||
LOG.debug("the number of parallel per pool %s is %s ",
|
||||
(pool, self.per_pool_count[pool]))
|
||||
LOG.debug("per pool limit is %s" % self.per_pool_limit)
|
||||
LOG.debug("per pool limit is %s", self.per_pool_limit)
|
||||
return self.per_pool_count[pool] >= self.per_pool_limit
|
||||
|
||||
def is_node_max(self, node):
|
||||
@@ -724,7 +721,7 @@ class BaseFilter(object):
|
||||
for k, v in six.iteritems(targets):
|
||||
if not self.is_allowed(k):
|
||||
continue
|
||||
LOG.debug("filter:%s with the key: %s" % (cond, k))
|
||||
LOG.debug("filter:%s with the key: %s", (cond, k))
|
||||
targets[k] = self.exec_filter(v, cond)
|
||||
|
||||
LOG.debug(targets)
|
||||
@@ -778,7 +775,7 @@ class ProjectSortFilter(SortMovingToFrontFilter):
|
||||
"""
|
||||
|
||||
project_id = self.get_project_id(item)
|
||||
LOG.debug("project_id: %s, sort_key: %s" % (project_id, sort_key))
|
||||
LOG.debug("project_id: %s, sort_key: %s", (project_id, sort_key))
|
||||
return project_id == sort_key
|
||||
|
||||
def get_project_id(self, item):
|
||||
@@ -812,7 +809,7 @@ class ComputeHostSortFilter(SortMovingToFrontFilter):
|
||||
"""
|
||||
|
||||
host = self.get_host(item)
|
||||
LOG.debug("host: %s, sort_key: %s" % (host, sort_key))
|
||||
LOG.debug("host: %s, sort_key: %s", (host, sort_key))
|
||||
return host == sort_key
|
||||
|
||||
def get_host(self, item):
|
||||
@@ -840,7 +837,7 @@ class StorageHostSortFilter(SortMovingToFrontFilter):
|
||||
"""
|
||||
|
||||
host = self.get_host(item)
|
||||
LOG.debug("host: %s, sort_key: %s" % (host, sort_key))
|
||||
LOG.debug("host: %s, sort_key: %s", (host, sort_key))
|
||||
return host == sort_key
|
||||
|
||||
def get_host(self, item):
|
||||
@@ -867,7 +864,7 @@ class ComputeSpecSortFilter(BaseFilter):
|
||||
result = items
|
||||
|
||||
if sort_key not in self.accept_keys:
|
||||
LOG.warning("Invalid key is specified: %s" % sort_key)
|
||||
LOG.warning("Invalid key is specified: %s", sort_key)
|
||||
else:
|
||||
result = self.get_sorted_items(items, sort_key)
|
||||
|
||||
@@ -912,11 +909,11 @@ class ComputeSpecSortFilter(BaseFilter):
|
||||
:returns: memory size of item
|
||||
"""
|
||||
|
||||
LOG.debug("item: %s, flavors: %s" % (item, flavors))
|
||||
LOG.debug("item: %s, flavors: %s", (item, flavors))
|
||||
for flavor in flavors:
|
||||
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
|
||||
LOG.debug("item.flavor: %s, flavor: %s", (item.flavor, flavor))
|
||||
if item.flavor.get('id') == flavor.id:
|
||||
LOG.debug("flavor.ram: %s" % flavor.ram)
|
||||
LOG.debug("flavor.ram: %s", flavor.ram)
|
||||
return flavor.ram
|
||||
|
||||
def get_vcpu_num(self, item, flavors):
|
||||
@@ -927,11 +924,11 @@ class ComputeSpecSortFilter(BaseFilter):
|
||||
:returns: vcpu number of item
|
||||
"""
|
||||
|
||||
LOG.debug("item: %s, flavors: %s" % (item, flavors))
|
||||
LOG.debug("item: %s, flavors: %s", (item, flavors))
|
||||
for flavor in flavors:
|
||||
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
|
||||
LOG.debug("item.flavor: %s, flavor: %s", (item.flavor, flavor))
|
||||
if item.flavor.get('id') == flavor.id:
|
||||
LOG.debug("flavor.vcpus: %s" % flavor.vcpus)
|
||||
LOG.debug("flavor.vcpus: %s", flavor.vcpus)
|
||||
return flavor.vcpus
|
||||
|
||||
def get_disk_size(self, item, flavors):
|
||||
@@ -942,11 +939,11 @@ class ComputeSpecSortFilter(BaseFilter):
|
||||
:returns: disk size of item
|
||||
"""
|
||||
|
||||
LOG.debug("item: %s, flavors: %s" % (item, flavors))
|
||||
LOG.debug("item: %s, flavors: %s", (item, flavors))
|
||||
for flavor in flavors:
|
||||
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
|
||||
LOG.debug("item.flavor: %s, flavor: %s", (item.flavor, flavor))
|
||||
if item.flavor.get('id') == flavor.id:
|
||||
LOG.debug("flavor.disk: %s" % flavor.disk)
|
||||
LOG.debug("flavor.disk: %s", flavor.disk)
|
||||
return flavor.disk
|
||||
|
||||
|
||||
@@ -960,7 +957,7 @@ class StorageSpecSortFilter(BaseFilter):
|
||||
result = items
|
||||
|
||||
if sort_key not in self.accept_keys:
|
||||
LOG.warning("Invalid key is specified: %s" % sort_key)
|
||||
LOG.warning("Invalid key is specified: %s", sort_key)
|
||||
return result
|
||||
|
||||
if sort_key == 'created_at':
|
||||
|
||||
@@ -60,6 +60,7 @@ log_warn = re.compile(
|
||||
r"(.)*LOG\.(warn)\(\s*('|\"|_)")
|
||||
unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b")
|
||||
unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b")
|
||||
re_redundant_import_alias = re.compile(r".*import (.+) as \1$")
|
||||
|
||||
|
||||
@flake8ext
|
||||
@@ -271,6 +272,18 @@ def check_builtins_gettext(logical_line, tokens, filename, lines, noqa):
|
||||
yield (0, msg)
|
||||
|
||||
|
||||
@flake8ext
|
||||
def no_redundant_import_alias(logical_line):
|
||||
"""Checking no redundant import alias.
|
||||
|
||||
https://bugs.launchpad.net/watcher/+bug/1745527
|
||||
|
||||
N342
|
||||
"""
|
||||
if re.match(re_redundant_import_alias, logical_line):
|
||||
yield(0, "N342: No redundant import alias.")
|
||||
|
||||
|
||||
def factory(register):
|
||||
register(use_jsonutils)
|
||||
register(check_assert_called_once_with)
|
||||
@@ -286,3 +299,4 @@ def factory(register):
|
||||
register(check_log_warn_deprecated)
|
||||
register(check_oslo_i18n_wrapper)
|
||||
register(check_builtins_gettext)
|
||||
register(no_redundant_import_alias)
|
||||
|
||||
864
watcher/locale/de/LC_MESSAGES/watcher.po
Normal file
864
watcher/locale/de/LC_MESSAGES/watcher.po
Normal file
@@ -0,0 +1,864 @@
|
||||
# Frank Kloeker <eumel@arcor.de>, 2018. #zanata
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: watcher VERSION\n"
|
||||
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
|
||||
"POT-Creation-Date: 2018-03-07 13:07+0000\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"PO-Revision-Date: 2018-03-06 07:56+0000\n"
|
||||
"Last-Translator: Frank Kloeker <eumel@arcor.de>\n"
|
||||
"Language-Team: German\n"
|
||||
"Language: de\n"
|
||||
"X-Generator: Zanata 4.3.3\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
|
||||
|
||||
msgid " (may include orphans)"
|
||||
msgstr "(kann Waisen einschließen)"
|
||||
|
||||
msgid " (orphans excluded)"
|
||||
msgstr "(Waisen ausgeschlossen)"
|
||||
|
||||
#, python-format
|
||||
msgid "%(client)s connection failed. Reason: %(reason)s"
|
||||
msgstr "Die Verbindung von %(client)s ist fehlgeschlagen. Grund: %(reason)s"
|
||||
|
||||
#, python-format
|
||||
msgid "%(field)s can't be updated."
|
||||
msgstr "%(field)s kann nicht aktualisiert werden."
|
||||
|
||||
#, python-format
|
||||
msgid "%(parameter)s has to be of type %(parameter_type)s"
|
||||
msgstr "%(parameter)s muss vom Typ %(parameter_type)s sein"
|
||||
|
||||
#, python-format
|
||||
msgid "%s is not JSON serializable"
|
||||
msgstr "%s ist nicht JSON serialisierbar"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"'%(strategy)s' strategy does relate to the '%(goal)s' goal. Possible "
|
||||
"choices: %(choices)s"
|
||||
msgstr ""
|
||||
"Die Strategie '%(strategy)s' bezieht sich auf das Ziel'%(goal)s'. Mögliche "
|
||||
"Auswahlmöglichkeiten: %(choices)s"
|
||||
|
||||
#, python-format
|
||||
msgid "'%s' is a mandatory attribute and can not be removed"
|
||||
msgstr "'%s' ist ein obligatorisches Attribut und kann nicht entfernt werden"
|
||||
|
||||
#, python-format
|
||||
msgid "'%s' is an internal attribute and can not be updated"
|
||||
msgstr "'%s' ist ein internes Attribut und kann nicht aktualisiert werden"
|
||||
|
||||
msgid "'add' and 'replace' operations needs value"
|
||||
msgstr "'add' und 'replace' Operationen benötigt Wert"
|
||||
|
||||
msgid "'obj' argument type is not valid"
|
||||
msgstr "Der Argumenttyp 'obj' ist nicht gültig"
|
||||
|
||||
#, python-format
|
||||
msgid "'obj' argument type is not valid: %s"
|
||||
msgstr "Der Argumenttyp 'obj' ist nicht gültig: %s"
|
||||
|
||||
#, python-format
|
||||
msgid "A datetime.datetime is required here. Got %s"
|
||||
msgstr "Eine datetime.datetime ist hier erforderlich. Bekam %s"
|
||||
|
||||
#, python-format
|
||||
msgid "A goal with UUID %(uuid)s already exists"
|
||||
msgstr "Ein Ziel mit UUID %(uuid)s ist bereits vorhanden"
|
||||
|
||||
#, python-format
|
||||
msgid "A scoring engine with UUID %(uuid)s already exists"
|
||||
msgstr "Eine Scoring-Engine mit UUID %(uuid)s ist bereits vorhanden"
|
||||
|
||||
#, python-format
|
||||
msgid "A service with name %(name)s is already working on %(host)s."
|
||||
msgstr "Ein Dienst mit dem Namen %(name)s arbeitet bereits auf %(host)s."
|
||||
|
||||
#, python-format
|
||||
msgid "A strategy with UUID %(uuid)s already exists"
|
||||
msgstr "Eine Strategie mit UUID %(uuid)s ist bereits vorhanden"
|
||||
|
||||
msgid "A valid goal_id or audit_template_id must be provided"
|
||||
msgstr "Eine gültige goal_id oder audit_template_id muss angegeben werden"
|
||||
|
||||
#, python-format
|
||||
msgid "Action %(action)s could not be found"
|
||||
msgstr "Aktion %(action)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "Action %(action)s was not eagerly loaded"
|
||||
msgstr "Aktion %(action)s wurde nicht eifrig geladen"
|
||||
|
||||
#, python-format
|
||||
msgid "Action Plan %(action_plan)s is currently running."
|
||||
msgstr "Der Aktionsplan %(action_plan)s wird gerade ausgeführt."
|
||||
|
||||
#, python-format
|
||||
msgid "Action Plan %(action_plan)s is referenced by one or multiple actions"
|
||||
msgstr ""
|
||||
"Der Aktionsplan %(action_plan)s wird durch eine oder mehrere Aktionen "
|
||||
"referenziert"
|
||||
|
||||
#, python-format
|
||||
msgid "Action Plan with UUID %(uuid)s is cancelled by user"
|
||||
msgstr "Der Aktionsplan mit der UUID %(uuid)s wird vom Benutzer abgebrochen"
|
||||
|
||||
msgid "Action Plans"
|
||||
msgstr "Aktionspläne"
|
||||
|
||||
#, python-format
|
||||
msgid "Action plan %(action_plan)s is invalid"
|
||||
msgstr "Der Aktionsplan %(action_plan)s ist ungültig"
|
||||
|
||||
#, python-format
|
||||
msgid "Action plan %(action_plan)s is referenced by one or multiple goals"
|
||||
msgstr ""
|
||||
"Der Aktionsplan %(action_plan)s wird von einem oder mehreren Zielen "
|
||||
"referenziert"
|
||||
|
||||
#, python-format
|
||||
msgid "Action plan %(action_plan)s was not eagerly loaded"
|
||||
msgstr "Der Aktionsplan %(action_plan)s wurde nicht eifrig geladen"
|
||||
|
||||
#, python-format
|
||||
msgid "ActionPlan %(action_plan)s could not be found"
|
||||
msgstr "ActionPlan %(action_plan)s konnte nicht gefunden werden"
|
||||
|
||||
msgid "Actions"
|
||||
msgstr "Aktionen"
|
||||
|
||||
msgid "Actuator"
|
||||
msgstr "Betätiger"
|
||||
|
||||
#, python-format
|
||||
msgid "Adding a new attribute (%s) to the root of the resource is not allowed"
|
||||
msgstr ""
|
||||
"Das Hinzufügen eines neuen Attributs (%s) zum Stamm der Ressource ist nicht "
|
||||
"zulässig"
|
||||
|
||||
msgid "Airflow Optimization"
|
||||
msgstr "Luftstrom-Optimierung"
|
||||
|
||||
#, python-format
|
||||
msgid "An action description with type %(action_type)s is already exist."
|
||||
msgstr ""
|
||||
"Eine Aktionsbeschreibung vom Typ %(action_type)s ist bereits vorhanden."
|
||||
|
||||
#, python-format
|
||||
msgid "An action plan with UUID %(uuid)s already exists"
|
||||
msgstr "Ein Aktionsplan mit UUID %(uuid)s ist bereits vorhanden"
|
||||
|
||||
#, python-format
|
||||
msgid "An action with UUID %(uuid)s already exists"
|
||||
msgstr "Eine Aktion mit UUID %(uuid)s ist bereits vorhanden"
|
||||
|
||||
#, python-format
|
||||
msgid "An audit with UUID or name %(audit)s already exists"
|
||||
msgstr "Ein Audit mit UUID oder Name %(audit)s ist bereits vorhanden"
|
||||
|
||||
#, python-format
|
||||
msgid "An audit_template with UUID or name %(audit_template)s already exists"
|
||||
msgstr ""
|
||||
"Ein Audit_Template mit UUID oder Name %(audit_template)s ist bereits "
|
||||
"vorhanden"
|
||||
|
||||
msgid "An indicator value should be a number"
|
||||
msgstr "Ein Indikatorwert sollte eine Zahl sein"
|
||||
|
||||
#, python-format
|
||||
msgid "An object of class %s is required here"
|
||||
msgstr "Ein Objekt der Klasse %s ist hier erforderlich"
|
||||
|
||||
msgid "An unknown exception occurred"
|
||||
msgstr "Eine unbekannte Ausnahme ist aufgetreten"
|
||||
|
||||
msgid "At least one feature is required"
|
||||
msgstr "Mindestens eine Funktion ist erforderlich"
|
||||
|
||||
#, python-format
|
||||
msgid "Audit %(audit)s could not be found"
|
||||
msgstr "Audit %(audit)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "Audit %(audit)s is invalid"
|
||||
msgstr "Audit %(audit)s ist ungültig"
|
||||
|
||||
#, python-format
|
||||
msgid "Audit %(audit)s is referenced by one or multiple action plans"
|
||||
msgstr ""
|
||||
"Audit %(audit)s wird von einem oder mehreren Aktionsplänen referenziert"
|
||||
|
||||
#, python-format
|
||||
msgid "Audit %(audit)s was not eagerly loaded"
|
||||
msgstr "Audit %(audit)s wurde nicht eifrig geladen"
|
||||
|
||||
msgid "Audit Templates"
|
||||
msgstr "Prüfungsvorlagen"
|
||||
|
||||
#, python-format
|
||||
msgid "Audit parameter %(parameter)s are not allowed"
|
||||
msgstr "Prüfparameter %(parameter)s sind nicht erlaubt"
|
||||
|
||||
#, python-format
|
||||
msgid "Audit type %(audit_type)s could not be found"
|
||||
msgstr "Audit-Typ %(audit_type)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "AuditTemplate %(audit_template)s could not be found"
|
||||
msgstr "AuditTemplate %(audit_template)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"AuditTemplate %(audit_template)s is referenced by one or multiple audits"
|
||||
msgstr ""
|
||||
"AuditTemplate %(audit_template)s wird von einem oder mehreren Audits "
|
||||
"referenziert"
|
||||
|
||||
msgid "Audits"
|
||||
msgstr "Audits"
|
||||
|
||||
msgid "Basic offline consolidation"
|
||||
msgstr "Grundlegende Offline-Konsolidierung"
|
||||
|
||||
msgid "CDMCs"
|
||||
msgstr "CDMCs"
|
||||
|
||||
msgid "Cannot compile public API routes"
|
||||
msgstr "Öffentliche API-Routen können nicht kompiliert werden"
|
||||
|
||||
msgid "Cannot create an action directly"
|
||||
msgstr "Eine Aktion kann nicht direkt erstellt werden"
|
||||
|
||||
msgid "Cannot delete an action directly"
|
||||
msgstr "Eine Aktion kann nicht direkt gelöscht werden"
|
||||
|
||||
msgid "Cannot modify an action directly"
|
||||
msgstr "Eine Aktion kann nicht direkt geändert werden"
|
||||
|
||||
msgid "Cannot overwrite UUID for an existing Action Plan."
|
||||
msgstr ""
|
||||
"UUID für einen vorhandenen Aktionsplan kann nicht überschrieben werden."
|
||||
|
||||
msgid "Cannot overwrite UUID for an existing Action."
|
||||
msgstr "UUID kann für eine vorhandene Aktion nicht überschrieben werden."
|
||||
|
||||
msgid "Cannot overwrite UUID for an existing Audit Template."
|
||||
msgstr "UUID für eine vorhandene Auditvorlage kann nicht überschrieben werden."
|
||||
|
||||
msgid "Cannot overwrite UUID for an existing Audit."
|
||||
msgstr "UUID für ein vorhandenes Audit kann nicht überschrieben werden."
|
||||
|
||||
msgid "Cannot overwrite UUID for an existing Goal."
|
||||
msgstr "UUID für ein vorhandenes Ziel kann nicht überschrieben werden."
|
||||
|
||||
msgid "Cannot overwrite UUID for an existing Scoring Engine."
|
||||
msgstr ""
|
||||
"UUID für eine vorhandene Scoring Engine kann nicht überschrieben werden."
|
||||
|
||||
msgid "Cannot overwrite UUID for an existing Strategy."
|
||||
msgstr "UUID kann für eine vorhandene Strategie nicht überschrieben werden."
|
||||
|
||||
msgid "Cannot overwrite UUID for an existing efficacy indicator."
|
||||
msgstr ""
|
||||
"UUID kann für einen vorhandenen Wirksamkeitsindikator nicht überschrieben "
|
||||
"werden."
|
||||
|
||||
msgid "Cannot remove 'goal' attribute from an audit template"
|
||||
msgstr "Das Attribut 'goal' kann nicht aus einer Audit-Vorlage entfernt werden"
|
||||
|
||||
msgid "Conflict"
|
||||
msgstr "Konflikt"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Could not compute the global efficacy for the '%(goal)s' goal using the "
|
||||
"'%(strategy)s' strategy."
|
||||
msgstr ""
|
||||
"Die globale Wirksamkeit für das Ziel '%(goal)s' konnte nicht mit der "
|
||||
"Strategie '%(strategy)s' berechnet werden."
|
||||
|
||||
#, python-format
|
||||
msgid "Could not load any strategy for goal %(goal)s"
|
||||
msgstr "Konnte keine Strategie für Ziel %(goal)s laden"
|
||||
|
||||
#, python-format
|
||||
msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s"
|
||||
msgstr "Patch '%(patch)s' konnte nicht angewendet werden. Grund:%(reason)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Couldn't delete when state is '%(state)s'."
|
||||
msgstr "Konnte nicht gelöscht werden, wenn der Status '%(state)s' ist."
|
||||
|
||||
#, python-format
|
||||
msgid "Datasource %(datasource)s is not available."
|
||||
msgstr "Datenquelle %(datasource)s ist nicht verfügbar."
|
||||
|
||||
#, python-format
|
||||
msgid "Datasource %(datasource)s is not supported by strategy %(strategy)s"
|
||||
msgstr ""
|
||||
"Die Datenquelle %(datasource)s wird von der Strategie %(strategy)s nicht "
|
||||
"unterstützt"
|
||||
|
||||
msgid "Do you want to delete objects up to the specified maximum number? [y/N]"
|
||||
msgstr ""
|
||||
"Möchten Sie Objekte bis zur angegebenen maximalen Anzahl löschen? [J/N]"
|
||||
|
||||
#, python-format
|
||||
msgid "Domain name seems ambiguous: %s"
|
||||
msgstr "Domänenname scheint mehrdeutig: %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Domain not Found: %s"
|
||||
msgstr "Domain nicht gefunden: %s"
|
||||
|
||||
msgid "Dummy Strategy using sample Scoring Engines"
|
||||
msgstr "Dummy-Strategie mit Sample Scoring Engines"
|
||||
|
||||
msgid "Dummy goal"
|
||||
msgstr "Dummy Ziel"
|
||||
|
||||
msgid "Dummy strategy"
|
||||
msgstr "Dummy-Strategie"
|
||||
|
||||
msgid "Dummy strategy with resize"
|
||||
msgstr "Dummy-Strategie mit Größenänderung"
|
||||
|
||||
#, python-format
|
||||
msgid "Efficacy indicator %(efficacy_indicator)s could not be found"
|
||||
msgstr ""
|
||||
"Der Wirksamkeitsindikator %(efficacy_indicator)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "Error loading plugin '%(name)s'"
|
||||
msgstr "Fehler beim Laden des Plugins '%(name)s'"
|
||||
|
||||
#, python-format
|
||||
msgid "ErrorDocumentMiddleware received an invalid status %s"
|
||||
msgstr "ErrorDocumentMiddleware hat einen ungültigen Status %s erhalten"
|
||||
|
||||
#, python-format
|
||||
msgid "Expected a logical name but received %(name)s"
|
||||
msgstr "Erwartete einen logischen Namen, erhielt aber %(name)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Expected a logical name or uuid but received %(name)s"
|
||||
msgstr ""
|
||||
"Erwartete einen logischen Namen oder eine UUID, erhielt jedoch %(name)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Expected a uuid but received %(uuid)s"
|
||||
msgstr "Erwartet eine Uuid aber %(uuid)s erhalten"
|
||||
|
||||
#, python-format
|
||||
msgid "Expected a uuid or int but received %(identity)s"
|
||||
msgstr "Erwartet eine Uuid oder Int aber %(identity)s erhalten"
|
||||
|
||||
#, python-format
|
||||
msgid "Expected an interval or cron syntax but received %(name)s"
|
||||
msgstr "Erwartete eine Intervall- oder Cron-Syntax, aber erhielt %(name)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Failed to create volume '%(volume)s. "
|
||||
msgstr "Fehler beim Erstellen des Datenträgers '%(volume)s."
|
||||
|
||||
#, python-format
|
||||
msgid "Failed to delete volume '%(volume)s. "
|
||||
msgstr "Fehler beim Löschen des Datenträgers '%(volume)s."
|
||||
|
||||
#, python-format
|
||||
msgid "Filter operator is not valid: %(operator)s not in %(valid_operators)s"
|
||||
msgstr ""
|
||||
"Filter Operator ist nicht gültig: %(operator)s nicht in %(valid_operators)s"
|
||||
|
||||
msgid "Filtering actions on both audit and action-plan is prohibited"
|
||||
msgstr ""
|
||||
"Das Filtern von Aktionen sowohl im Audit- als auch im Aktionsplan ist "
|
||||
"verboten"
|
||||
|
||||
msgid "Goal"
|
||||
msgstr "Ziel"
|
||||
|
||||
#, python-format
|
||||
msgid "Goal %(goal)s could not be found"
|
||||
msgstr "Ziel %(goal)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "Goal %(goal)s is invalid"
|
||||
msgstr "Ziel %(goal)s ist ungültig"
|
||||
|
||||
msgid "Goals"
|
||||
msgstr "Ziele"
|
||||
|
||||
msgid "Hardware Maintenance"
|
||||
msgstr "Hardware-Wartung"
|
||||
|
||||
#, python-format
|
||||
msgid "Here below is a table containing the objects that can be purged%s:"
|
||||
msgstr ""
|
||||
"Hier unten ist eine Tabelle mit den Objekten, die gelöscht werden können: %s"
|
||||
|
||||
msgid "Illegal argument"
|
||||
msgstr "Illegales Argument"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Incorrect mapping: could not find associated weight for %s in weight dict."
|
||||
msgstr ""
|
||||
"Inkorrektes Mapping: Die zugehörige Gewichtung für %s im Gewicht dict konnte "
|
||||
"nicht gefunden werden."
|
||||
|
||||
#, python-format
|
||||
msgid "Interval of audit must be specified for %(audit_type)s."
|
||||
msgstr "Das Intervall der Prüfung muss für %(audit_type)s angegeben werden."
|
||||
|
||||
#, python-format
|
||||
msgid "Interval of audit must not be set for %(audit_type)s."
|
||||
msgstr ""
|
||||
"Das Intervall der Prüfung darf nicht für %(audit_type)s festgelegt werden."
|
||||
|
||||
#, python-format
|
||||
msgid "Invalid filter: %s"
|
||||
msgstr "Ungültiger Filter: %s"
|
||||
|
||||
msgid "Invalid number of features, expected 9"
|
||||
msgstr "Ungültige Anzahl der erwarteten Features 9"
|
||||
|
||||
#, python-format
|
||||
msgid "Invalid query: %(start_time)s > %(end_time)s"
|
||||
msgstr "Ungültige Abfrage: %(start_time)s > %(end_time)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'"
|
||||
msgstr "Ungültige Sortierrichtung: %s. Akzeptable Werte sind 'asc' oder 'desc'"
|
||||
|
||||
msgid "Invalid state for swapping volume"
|
||||
msgstr "Ungültiger Status für das Auslagern des Datenträgers"
|
||||
|
||||
#, python-format
|
||||
msgid "Invalid state: %(state)s"
|
||||
msgstr "Ungültiger Status: %(state)s"
|
||||
|
||||
msgid "JSON list expected in feature argument"
|
||||
msgstr "JSON-Liste in Feature-Argument erwartet"
|
||||
|
||||
msgid "Keystone API endpoint is missing"
|
||||
msgstr "Der Keystone-API-Endpunkt fehlt"
|
||||
|
||||
msgid "Limit must be positive"
|
||||
msgstr "Limit muss positiv sein"
|
||||
|
||||
msgid "Limit should be positive"
|
||||
msgstr "Limit sollte positiv sein"
|
||||
|
||||
msgid "Maximum time since last check-in for up service."
|
||||
msgstr "Maximale Zeit seit dem letzten Check-in für den Up-Service."
|
||||
|
||||
#, python-format
|
||||
msgid "Migration of type '%(migration_type)s' is not supported."
|
||||
msgstr "Die Migration vom Typ '%(migration_type)s' wird nicht unterstützt."
|
||||
|
||||
msgid ""
|
||||
"Name of this node. This can be an opaque identifier. It is not necessarily a "
|
||||
"hostname, FQDN, or IP address. However, the node name must be valid within "
|
||||
"an AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address."
|
||||
msgstr ""
|
||||
"Name dieses Knotens. Dies kann eine undurchsichtige Kennung sein. Es ist "
|
||||
"nicht unbedingt ein Hostname, FQDN oder IP-Adresse. Der Knotenname muss "
|
||||
"jedoch innerhalb eines AMQP-Schlüssels und bei Verwendung von ZeroMQ ein "
|
||||
"gültiger Hostname, FQDN oder eine gültige IP-Adresse sein."
|
||||
|
||||
#, python-format
|
||||
msgid "No %(metric)s metric for %(host)s found."
|
||||
msgstr "Keine %(metric)s Metrik für %(host)s gefunden."
|
||||
|
||||
msgid "No rows were returned"
|
||||
msgstr "Es wurden keine Zeilen zurückgegeben"
|
||||
|
||||
#, python-format
|
||||
msgid "No strategy could be found to achieve the '%(goal)s' goal."
|
||||
msgstr ""
|
||||
"Es konnte keine Strategie gefunden werden, um das Ziel '%(goal)s' zu "
|
||||
"erreichen."
|
||||
|
||||
msgid "No such metric"
|
||||
msgstr "Keine solche Metrik"
|
||||
|
||||
#, python-format
|
||||
msgid "No values returned by %(resource_id)s for %(metric_name)s."
|
||||
msgstr "Keine Werte von %(resource_id)s für %(metric_name)s zurückgegeben."
|
||||
|
||||
msgid "Noisy Neighbor"
|
||||
msgstr "Lauter Nachbar"
|
||||
|
||||
msgid "Not authorized"
|
||||
msgstr "Nicht berechtigt"
|
||||
|
||||
msgid "Not supported"
|
||||
msgstr "Nicht unterstützt"
|
||||
|
||||
msgid "Operation not permitted"
|
||||
msgstr "Operation unzulässig"
|
||||
|
||||
msgid "Outlet temperature based strategy"
|
||||
msgstr "Auslasstemperatur basierte Strategie"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Payload not populated when trying to send notification \"%(class_name)s\""
|
||||
msgstr ""
|
||||
"Payload wurde nicht ausgefüllt, wenn versucht wird, eine Benachrichtigung "
|
||||
"'%(class_name)s' zu senden"
|
||||
|
||||
msgid "Plugins"
|
||||
msgstr "Plugins"
|
||||
|
||||
#, python-format
|
||||
msgid "Policy doesn't allow %(action)s to be performed."
|
||||
msgstr "Die Richtlinie lässt nicht zu, dass %(action)s ausgeführt werden."
|
||||
|
||||
#, python-format
|
||||
msgid "Project name seems ambiguous: %s"
|
||||
msgstr "Der Projektname erscheint mehrdeutig: %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Project not Found: %s"
|
||||
msgstr "Projekt nicht gefunden: %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Provided cron is invalid: %(message)s"
|
||||
msgstr "Bereitgestellter Cron ist ungültig: %(message)s"
|
||||
|
||||
#, python-format
|
||||
msgid "Purge results summary%s:"
|
||||
msgstr "Zusammenfassung der Bereinigungsergebnisse %s:"
|
||||
|
||||
msgid ""
|
||||
"Ratio of actual attached volumes migrated to planned attached volumes "
|
||||
"migrate."
|
||||
msgstr ""
|
||||
"Das Verhältnis der tatsächlich angehängten Datenträger, die zu geplanten "
|
||||
"angehängten Datenträger migriert wurden, wird migriert."
|
||||
|
||||
msgid ""
|
||||
"Ratio of actual cold migrated instances to planned cold migrate instances."
|
||||
msgstr ""
|
||||
"Verhältnis von tatsächlichen kalt migrierten Instanzen zu geplanten kalten "
|
||||
"migrieren Instanzen."
|
||||
|
||||
msgid ""
|
||||
"Ratio of actual detached volumes migrated to planned detached volumes "
|
||||
"migrate."
|
||||
msgstr ""
|
||||
"Das Verhältnis der tatsächlich abgetrennten Datenträger, die in geplante, "
|
||||
"getrennte Datenträger migriert wurden, wird migriert."
|
||||
|
||||
msgid ""
|
||||
"Ratio of actual live migrated instances to planned live migrate instances."
|
||||
msgstr ""
|
||||
"Verhältnis von tatsächlichen migrierten Live-Instanzen zu geplanten Live-"
|
||||
"Migrationsinstanzen"
|
||||
|
||||
msgid ""
|
||||
"Ratio of released compute nodes divided by the total number of enabled "
|
||||
"compute nodes."
|
||||
msgstr ""
|
||||
"Verhältnis der freigegebenen Compute-Knoten geteilt durch die Gesamtzahl der "
|
||||
"aktivierten Compute-Knoten."
|
||||
|
||||
#, python-format
|
||||
msgid "Role name seems ambiguous: %s"
|
||||
msgstr "Der Rollenname scheint mehrdeutig: %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Role not Found: %s"
|
||||
msgstr "Rolle nicht gefunden: %s"
|
||||
|
||||
msgid "Saving Energy"
|
||||
msgstr "Energie sparen"
|
||||
|
||||
msgid "Saving Energy Strategy"
|
||||
msgstr "Energiestrategie speichern"
|
||||
|
||||
#, python-format
|
||||
msgid "Scoring Engine with name=%s not found"
|
||||
msgstr "Scoring Engine mit name=%s nicht gefunden"
|
||||
|
||||
#, python-format
|
||||
msgid "ScoringEngine %(scoring_engine)s could not be found"
|
||||
msgstr "ScoringEngine %(scoring_engine)s konnte nicht gefunden werden"
|
||||
|
||||
msgid "Seconds between running periodic tasks."
|
||||
msgstr "Sekunden zwischen dem Ausführen periodischer Aufgaben."
|
||||
|
||||
msgid "Server Consolidation"
|
||||
msgstr "Serverkonsolidierung"
|
||||
|
||||
msgid ""
|
||||
"Specifies the minimum level for which to send notifications. If not set, no "
|
||||
"notifications will be sent. The default is for this option to be at the "
|
||||
"`INFO` level."
|
||||
msgstr ""
|
||||
"Gibt die Mindeststufe an, für die Benachrichtigungen gesendet werden. Wenn "
|
||||
"nicht festgelegt, werden keine Benachrichtigungen gesendet. Standardmäßig "
|
||||
"ist diese Option auf der INFO-Ebene."
|
||||
|
||||
msgid ""
|
||||
"Specify parameters but no predefined strategy for audit, or no parameter "
|
||||
"spec in predefined strategy"
|
||||
msgstr ""
|
||||
"Geben Sie Parameter, aber keine vordefinierte Strategie für das Audit oder "
|
||||
"keine Parameterspezifikation in der vordefinierten Strategie an"
|
||||
|
||||
#, python-format
|
||||
msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)"
|
||||
msgstr "Statusübergang nicht erlaubt: (%(initial_state)s -> %(new_state)s)"
|
||||
|
||||
msgid "Storage Capacity Balance Strategy"
|
||||
msgstr "Storage Capacity Balance-Strategie"
|
||||
|
||||
msgid "Strategies"
|
||||
msgstr "Strategien"
|
||||
|
||||
#, python-format
|
||||
msgid "Strategy %(strategy)s could not be found"
|
||||
msgstr "Strategie %(strategy)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "Strategy %(strategy)s is invalid"
|
||||
msgstr "Strategie %(strategy)s ist ungültig"
|
||||
|
||||
#, python-format
|
||||
msgid "The %(name)s %(id)s could not be found"
|
||||
msgstr " Die %(name)s %(id)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "The %(name)s resource %(id)s could not be found"
|
||||
msgstr "Die %(name)s Ressource %(id)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "The %(name)s resource %(id)s is not soft deleted"
|
||||
msgstr "Die %(name)s Ressource %(id)s wird nicht weich gelöscht"
|
||||
|
||||
#, python-format
|
||||
msgid "The action %(action_id)s execution failed."
|
||||
msgstr "Die Ausführung der Aktion %(action_id)s ist fehlgeschlagen."
|
||||
|
||||
#, python-format
|
||||
msgid "The action description %(action_id)s cannot be found."
|
||||
msgstr "Die Aktionsbeschreibung %(action_id)s konnte nicht gefunden werden."
|
||||
|
||||
msgid "The audit template UUID or name specified is invalid"
|
||||
msgstr "Die UUID oder der Name der Überprüfungsvorlage ist ungültig"
|
||||
|
||||
#, python-format
|
||||
msgid "The baremetal resource '%(name)s' could not be found"
|
||||
msgstr "Die Barmetal-Ressource '%(name)s' konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "The capacity %(capacity)s is not defined for '%(resource)s'"
|
||||
msgstr "Die Kapazität %(capacity)s ist nicht definiert für '%(resource)s'"
|
||||
|
||||
#, python-format
|
||||
msgid "The cluster data model '%(cdm)s' could not be built"
|
||||
msgstr "Das Clusterdatenmodell '%(cdm)s' konnte nicht erstellt werden"
|
||||
|
||||
msgid "The cluster state is not defined"
|
||||
msgstr "Der Clusterstatus ist nicht definiert"
|
||||
|
||||
msgid "The cluster state is stale"
|
||||
msgstr "Der Clusterstatus ist veraltet"
|
||||
|
||||
#, python-format
|
||||
msgid "The compute node %(name)s could not be found"
|
||||
msgstr "Der Compute-Knoten %(name)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "The compute resource '%(name)s' could not be found"
|
||||
msgstr "Die Rechenressource '%(name)s' konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "The identifier '%(name)s' is a reserved word"
|
||||
msgstr "Der Bezeichner '%(name)s' ist ein reserviertes Wort"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"The indicator '%(name)s' with value '%(value)s' and spec type "
|
||||
"'%(spec_type)s' is invalid."
|
||||
msgstr ""
|
||||
"Das Kennzeichen '%(name)s' mit dem Wert '%(value)s' und dem "
|
||||
"Spezifikationstyp '%(spec_type)s' ist ungültig."
|
||||
|
||||
#, python-format
|
||||
msgid "The instance '%(name)s' could not be found"
|
||||
msgstr "Die Instanz '%(name)s' konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "The ironic node %(uuid)s could not be found"
|
||||
msgstr "Der Ironic Knoten %(uuid)s konnte nicht gefunden werden"
|
||||
|
||||
msgid "The list of compute node(s) in the cluster is empty"
|
||||
msgstr "Die Liste der Compute-Knoten im Cluster ist leer"
|
||||
|
||||
msgid "The list of storage node(s) in the cluster is empty"
|
||||
msgstr "Die Liste der Speicherknoten im Cluster ist leer"
|
||||
|
||||
msgid "The metrics resource collector is not defined"
|
||||
msgstr "Der Metrikressourcen-Collector ist nicht definiert"
|
||||
|
||||
msgid "The number of VM migrations to be performed."
|
||||
msgstr "Die Anzahl der VM-Migrationen, die ausgeführt werden sollen."
|
||||
|
||||
msgid "The number of attached volumes actually migrated."
|
||||
msgstr "Die Anzahl der angehängten Datenträger wurde tatsächlich migriert."
|
||||
|
||||
msgid "The number of attached volumes planned to migrate."
|
||||
msgstr "Die Anzahl der angehängten Datenträger, die migriert werden sollen."
|
||||
|
||||
msgid "The number of compute nodes to be released."
|
||||
msgstr "Die Anzahl der zu veröffentlichenden Compute-Knoten."
|
||||
|
||||
msgid "The number of detached volumes actually migrated."
|
||||
msgstr "Die Anzahl der gelösten Datenträger wurde tatsächlich migriert."
|
||||
|
||||
msgid "The number of detached volumes planned to migrate."
|
||||
msgstr "Die Anzahl der gelöschten Datenträger, die migriert werden sollen."
|
||||
|
||||
msgid "The number of instances actually cold migrated."
|
||||
msgstr "Die Anzahl der tatsächlich kalt migrierten Instanzen."
|
||||
|
||||
msgid "The number of instances actually live migrated."
|
||||
msgstr "Die Anzahl der tatsächlich live migrierten Instanzen."
|
||||
|
||||
msgid "The number of instances planned to cold migrate."
|
||||
msgstr "Die Anzahl der Instanzen, die kalt migriert werden sollen."
|
||||
|
||||
msgid "The number of instances planned to live migrate."
|
||||
msgstr "Die Anzahl der geplanten Live-Migrationen."
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"The number of objects (%(num)s) to delete from the database exceeds the "
|
||||
"maximum number of objects (%(max_number)s) specified."
|
||||
msgstr ""
|
||||
"Die Anzahl der zu löschenden Objekte (%(num)s) aus der Datenbank "
|
||||
"überschreitet die maximale Anzahl der angegebenen Objekte (%(max_number)s)."
|
||||
|
||||
#, python-format
|
||||
msgid "The pool %(name)s could not be found"
|
||||
msgstr "Der Pool %(name)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "The service %(service)s cannot be found."
|
||||
msgstr "Der Service %(service)s kann nicht gefunden werden."
|
||||
|
||||
#, python-format
|
||||
msgid "The storage node %(name)s could not be found"
|
||||
msgstr "Der Speicherknoten %(name)s konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "The storage resource '%(name)s' could not be found"
|
||||
msgstr "Die Speicherressource '%(name)s' konnte nicht gefunden werden"
|
||||
|
||||
msgid "The target state is not defined"
|
||||
msgstr "Der Zielzustand ist nicht definiert"
|
||||
|
||||
msgid "The total number of enabled compute nodes."
|
||||
msgstr "Die Gesamtzahl der aktivierten Compute-Knoten."
|
||||
|
||||
#, python-format
|
||||
msgid "The volume '%(name)s' could not be found"
|
||||
msgstr "Der Datenträger '%(name)s' konnte nicht gefunden werden"
|
||||
|
||||
#, python-format
|
||||
msgid "There are %(count)d objects set for deletion. Continue? [y/N]"
|
||||
msgstr "Es sind %(count)d Objekte zum Löschen eingestellt. Fortsetzen? [J/N]"
|
||||
|
||||
msgid "Thermal Optimization"
|
||||
msgstr "Thermische Optimierung"
|
||||
|
||||
msgid "Total"
|
||||
msgstr "Gesamt"
|
||||
|
||||
msgid "Unable to parse features: "
|
||||
msgstr "Die Analyse von Features ist nicht möglich: "
|
||||
|
||||
#, python-format
|
||||
msgid "Unable to parse features: %s"
|
||||
msgstr "Die Funktionen können nicht analysiert werden: %s"
|
||||
|
||||
msgid "Unacceptable parameters"
|
||||
msgstr "Inakzeptable Parameter"
|
||||
|
||||
msgid "Unclassified"
|
||||
msgstr "Nicht klassifiziert"
|
||||
|
||||
#, python-format
|
||||
msgid "Unexpected keystone client error occurred: %s"
|
||||
msgstr "Unerwarteter Keystone Fehler trat auf: %s"
|
||||
|
||||
msgid "Uniform airflow migration strategy"
|
||||
msgstr "Einheitliche Luftstrommigrationsstrategie"
|
||||
|
||||
#, python-format
|
||||
msgid "User name seems ambiguous: %s"
|
||||
msgstr "Der Benutzername scheint mehrdeutig zu sein: %s"
|
||||
|
||||
#, python-format
|
||||
msgid "User not Found: %s"
|
||||
msgstr "Benutzer nicht gefunden: %s"
|
||||
|
||||
msgid "VM Workload Consolidation Strategy"
|
||||
msgstr "VM-Workload-Konsolidierungsstrategie"
|
||||
|
||||
msgid "Volume type must be different for retyping"
|
||||
msgstr "Der Volume-Typ muss für die Typänderung (Retype) unterschiedlich sein"
|
||||
|
||||
msgid "Volume type must be same for migrating"
|
||||
msgstr "Der Volume-Typ muss für die Migration identisch sein"
|
||||
|
||||
msgid ""
|
||||
"Watcher database schema is already under version control; use upgrade() "
|
||||
"instead"
|
||||
msgstr ""
|
||||
"Watcher-Datenbankschema ist bereits unter Versionskontrolle; Verwenden Sie "
|
||||
"stattdessen upgrade()"
|
||||
|
||||
#, python-format
|
||||
msgid "Workflow execution error: %(error)s"
|
||||
msgstr "Workflow-Ausführungsfehler: %(error)s"
|
||||
|
||||
msgid "Workload Balance Migration Strategy"
|
||||
msgstr "Workload-Balance-Migrationsstrategie"
|
||||
|
||||
msgid "Workload Balancing"
|
||||
msgstr "Workload-Ausgleich"
|
||||
|
||||
msgid "Workload stabilization"
|
||||
msgstr "Workload-Stabilisierung"
|
||||
|
||||
#, python-format
|
||||
msgid "Wrong type. Expected '%(type)s', got '%(value)s'"
|
||||
msgstr "Falscher Typ. Erwartete '%(type)s', bekam '%(value)s'"
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"You shouldn't use any other IDs of %(resource)s if you use wildcard "
|
||||
"character."
|
||||
msgstr ""
|
||||
"Sie sollten keine anderen IDs von %(resource)s verwenden, wenn Sie "
|
||||
"Platzhalterzeichen verwenden."
|
||||
|
||||
msgid "Zone migration"
|
||||
msgstr "Zonenmigration"
|
||||
|
||||
msgid "destination type is required when migration type is swap"
|
||||
msgstr "Zieltyp ist erforderlich, wenn der Migrationstyp Swap ist"
|
||||
|
||||
msgid "host_aggregates can't be included and excluded together"
|
||||
msgstr ""
|
||||
"host_aggregates können nicht zusammen eingeschlossen und ausgeschlossen "
|
||||
"werden"
|
||||
@@ -4,15 +4,15 @@ msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: watcher VERSION\n"
|
||||
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
|
||||
"POT-Creation-Date: 2018-01-19 11:46+0000\n"
|
||||
"POT-Creation-Date: 2018-02-28 12:27+0000\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"PO-Revision-Date: 2018-01-19 08:01+0000\n"
|
||||
"PO-Revision-Date: 2018-01-27 12:51+0000\n"
|
||||
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
||||
"Language-Team: English (United Kingdom)\n"
|
||||
"Language: en-GB\n"
|
||||
"X-Generator: Zanata 3.9.6\n"
|
||||
"Language: en_GB\n"
|
||||
"X-Generator: Zanata 4.3.3\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
|
||||
|
||||
msgid " (may include orphans)"
|
||||
@@ -280,6 +280,10 @@ msgstr "Couldn't apply patch '%(patch)s'. Reason: %(reason)s"
|
||||
msgid "Couldn't delete when state is '%(state)s'."
|
||||
msgstr "Couldn't delete when state is '%(state)s'."
|
||||
|
||||
#, python-format
|
||||
msgid "Datasource %(datasource)s is not available."
|
||||
msgstr "Datasource %(datasource)s is not available."
|
||||
|
||||
#, python-format
|
||||
msgid "Datasource %(datasource)s is not supported by strategy %(strategy)s"
|
||||
msgstr "Datasource %(datasource)s is not supported by strategy %(strategy)s"
|
||||
@@ -369,6 +373,9 @@ msgstr "Goal %(goal)s is invalid"
|
||||
msgid "Goals"
|
||||
msgstr "Goals"
|
||||
|
||||
msgid "Hardware Maintenance"
|
||||
msgstr "Hardware Maintenance"
|
||||
|
||||
#, python-format
|
||||
msgid "Here below is a table containing the objects that can be purged%s:"
|
||||
msgstr "Here below is a table containing the objects that can be purged%s:"
|
||||
@@ -506,6 +513,30 @@ msgstr "Provided cron is invalid: %(message)s"
|
||||
msgid "Purge results summary%s:"
|
||||
msgstr "Purge results summary%s:"
|
||||
|
||||
msgid ""
|
||||
"Ratio of actual attached volumes migrated to planned attached volumes "
|
||||
"migrate."
|
||||
msgstr ""
|
||||
"Ratio of actual attached volumes migrated to planned attached volumes "
|
||||
"migrate."
|
||||
|
||||
msgid ""
|
||||
"Ratio of actual cold migrated instances to planned cold migrate instances."
|
||||
msgstr ""
|
||||
"Ratio of actual cold migrated instances to planned cold migrate instances."
|
||||
|
||||
msgid ""
|
||||
"Ratio of actual detached volumes migrated to planned detached volumes "
|
||||
"migrate."
|
||||
msgstr ""
|
||||
"Ratio of actual detached volumes migrated to planned detached volumes "
|
||||
"migrate."
|
||||
|
||||
msgid ""
|
||||
"Ratio of actual live migrated instances to planned live migrate instances."
|
||||
msgstr ""
|
||||
"Ratio of actual live migrated instances to planned live migrate instances."
|
||||
|
||||
msgid ""
|
||||
"Ratio of released compute nodes divided by the total number of enabled "
|
||||
"compute nodes."
|
||||
@@ -561,6 +592,9 @@ msgstr ""
|
||||
msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)"
|
||||
msgstr "State transition not allowed: (%(initial_state)s -> %(new_state)s)"
|
||||
|
||||
msgid "Storage Capacity Balance Strategy"
|
||||
msgstr "Storage Capacity Balance Strategy"
|
||||
|
||||
msgid "Strategies"
|
||||
msgstr "Strategies"
|
||||
|
||||
@@ -644,15 +678,42 @@ msgstr "The Ironic node %(uuid)s could not be found"
|
||||
msgid "The list of compute node(s) in the cluster is empty"
|
||||
msgstr "The list of compute node(s) in the cluster is empty"
|
||||
|
||||
msgid "The list of storage node(s) in the cluster is empty"
|
||||
msgstr "The list of storage node(s) in the cluster is empty"
|
||||
|
||||
msgid "The metrics resource collector is not defined"
|
||||
msgstr "The metrics resource collector is not defined"
|
||||
|
||||
msgid "The number of VM migrations to be performed."
|
||||
msgstr "The number of VM migrations to be performed."
|
||||
|
||||
msgid "The number of attached volumes actually migrated."
|
||||
msgstr "The number of attached volumes actually migrated."
|
||||
|
||||
msgid "The number of attached volumes planned to migrate."
|
||||
msgstr "The number of attached volumes planned to migrate."
|
||||
|
||||
msgid "The number of compute nodes to be released."
|
||||
msgstr "The number of compute nodes to be released."
|
||||
|
||||
msgid "The number of detached volumes actually migrated."
|
||||
msgstr "The number of detached volumes actually migrated."
|
||||
|
||||
msgid "The number of detached volumes planned to migrate."
|
||||
msgstr "The number of detached volumes planned to migrate."
|
||||
|
||||
msgid "The number of instances actually cold migrated."
|
||||
msgstr "The number of instances actually cold migrated."
|
||||
|
||||
msgid "The number of instances actually live migrated."
|
||||
msgstr "The number of instances actually live migrated."
|
||||
|
||||
msgid "The number of instances planned to cold migrate."
|
||||
msgstr "The number of instances planned to cold migrate."
|
||||
|
||||
msgid "The number of instances planned to live migrate."
|
||||
msgstr "The number of instances planned to live migrate."
|
||||
|
||||
#, python-format
|
||||
msgid ""
|
||||
"The number of objects (%(num)s) to delete from the database exceeds the "
|
||||
@@ -766,6 +827,9 @@ msgstr ""
|
||||
"You shouldn't use any other IDs of %(resource)s if you use wildcard "
|
||||
"character."
|
||||
|
||||
msgid "Zone migration"
|
||||
msgstr "Zone migration"
|
||||
|
||||
msgid "destination type is required when migration type is swap"
|
||||
msgstr "destination type is required when migration type is swap"
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ import sys
|
||||
import six
|
||||
|
||||
from watcher.notifications import base as notificationbase
|
||||
from watcher.objects import base as base
|
||||
from watcher.objects import base
|
||||
from watcher.objects import fields as wfields
|
||||
|
||||
|
||||
|
||||
@@ -222,7 +222,7 @@ class TestContextHook(base.FunctionalTest):
|
||||
user_id=headers['X-User-Id'],
|
||||
domain_id=headers['X-User-Domain-Id'],
|
||||
domain_name=headers['X-User-Domain-Name'],
|
||||
auth_url=cfg.CONF.keystone_authtoken.auth_uri,
|
||||
auth_url=cfg.CONF.keystone_authtoken.www_authenticate_uri,
|
||||
project=headers['X-Project-Name'],
|
||||
project_id=headers['X-Project-Id'],
|
||||
show_deleted=None,
|
||||
@@ -243,7 +243,7 @@ class TestContextHook(base.FunctionalTest):
|
||||
user_id=headers['X-User-Id'],
|
||||
domain_id=headers['X-User-Domain-Id'],
|
||||
domain_name=headers['X-User-Domain-Name'],
|
||||
auth_url=cfg.CONF.keystone_authtoken.auth_uri,
|
||||
auth_url=cfg.CONF.keystone_authtoken.www_authenticate_uri,
|
||||
project=headers['X-Project-Name'],
|
||||
project_id=headers['X-Project-Id'],
|
||||
show_deleted=None,
|
||||
@@ -265,7 +265,7 @@ class TestContextHook(base.FunctionalTest):
|
||||
user_id=headers['X-User-Id'],
|
||||
domain_id=headers['X-User-Domain-Id'],
|
||||
domain_name=headers['X-User-Domain-Name'],
|
||||
auth_url=cfg.CONF.keystone_authtoken.auth_uri,
|
||||
auth_url=cfg.CONF.keystone_authtoken.www_authenticate_uri,
|
||||
project=headers['X-Project-Name'],
|
||||
project_id=headers['X-Project-Id'],
|
||||
show_deleted=None,
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import itertools
|
||||
import mock
|
||||
|
||||
from oslo_config import cfg
|
||||
@@ -267,6 +268,67 @@ class TestListAction(api_base.FunctionalTest):
|
||||
response = self.get_json(url, expect_errors=True)
|
||||
self.assertEqual(400, response.status_int)
|
||||
|
||||
def test_many_with_sort_key_uuid(self):
|
||||
action_plan = obj_utils.create_test_action_plan(
|
||||
self.context,
|
||||
uuid=utils.generate_uuid(),
|
||||
audit_id=self.audit.id)
|
||||
|
||||
actions_list = []
|
||||
for id_ in range(1, 3):
|
||||
action = obj_utils.create_test_action(
|
||||
self.context, id=id_,
|
||||
action_plan_id=action_plan.id,
|
||||
uuid=utils.generate_uuid())
|
||||
actions_list.append(action)
|
||||
|
||||
response = self.get_json('/actions?sort_key=%s' % 'uuid')
|
||||
names = [s['uuid'] for s in response['actions']]
|
||||
|
||||
self.assertEqual(
|
||||
sorted([a.uuid for a in actions_list]),
|
||||
names)
|
||||
|
||||
def test_many_with_sort_key_action_plan_uuid(self):
|
||||
action_plan_1 = obj_utils.create_test_action_plan(
|
||||
self.context,
|
||||
uuid=utils.generate_uuid(),
|
||||
audit_id=self.audit.id)
|
||||
|
||||
action_plan_2 = obj_utils.create_test_action_plan(
|
||||
self.context,
|
||||
uuid=utils.generate_uuid(),
|
||||
audit_id=self.audit.id)
|
||||
|
||||
action_plans_uuid_list = []
|
||||
for id_, action_plan_id in enumerate(itertools.chain.from_iterable([
|
||||
itertools.repeat(action_plan_1.id, 3),
|
||||
itertools.repeat(action_plan_2.id, 2)]), 1):
|
||||
action = obj_utils.create_test_action(
|
||||
self.context, id=id_,
|
||||
action_plan_id=action_plan_id,
|
||||
uuid=utils.generate_uuid())
|
||||
action_plans_uuid_list.append(action.action_plan.uuid)
|
||||
|
||||
for direction in ['asc', 'desc']:
|
||||
response = self.get_json(
|
||||
'/actions?sort_key={0}&sort_dir={1}'
|
||||
.format('action_plan_uuid', direction))
|
||||
|
||||
action_plan_uuids = \
|
||||
[s['action_plan_uuid'] for s in response['actions']]
|
||||
|
||||
self.assertEqual(
|
||||
sorted(action_plans_uuid_list, reverse=(direction == 'desc')),
|
||||
action_plan_uuids,
|
||||
message='Failed on %s direction' % direction)
|
||||
|
||||
def test_sort_key_validation(self):
|
||||
response = self.get_json(
|
||||
'/actions?sort_key=%s' % 'bad_name',
|
||||
expect_errors=True)
|
||||
self.assertEqual(400, response.status_int)
|
||||
|
||||
def test_many_with_soft_deleted_action_plan_uuid(self):
|
||||
action_plan1 = obj_utils.create_test_action_plan(
|
||||
self.context,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user