Compare commits
139 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3591d9fa0a | ||
|
|
44fc7d5799 | ||
|
|
a330576eae | ||
|
|
70d05214c7 | ||
|
|
ca9644f4d8 | ||
|
|
44061cf333 | ||
|
|
18bf1f4e8d | ||
|
|
f2df0da0b2 | ||
|
|
3c83077724 | ||
|
|
d8872a743b | ||
|
|
7556d19638 | ||
|
|
58276ec79e | ||
|
|
36ad9e12da | ||
|
|
cdb1975530 | ||
|
|
6efffd6d89 | ||
|
|
95ec79626b | ||
|
|
00aa77651b | ||
|
|
7d62175b23 | ||
|
|
5107cfa30f | ||
|
|
ff57eb73f9 | ||
|
|
4c035a7cbd | ||
|
|
b5d9eb6acb | ||
|
|
904b72cf5e | ||
|
|
d23e7f0f8c | ||
|
|
55cbb15fbc | ||
|
|
3a5b42302c | ||
|
|
4fdb22cba2 | ||
|
|
431f17d999 | ||
|
|
b586612d25 | ||
|
|
ad1593bb36 | ||
|
|
bbd0ae5b16 | ||
|
|
5a30f814bf | ||
|
|
7f6a300ea0 | ||
|
|
93a8ba804f | ||
|
|
415bab4bc9 | ||
|
|
fc388d8292 | ||
|
|
5b70c28047 | ||
|
|
b290ad7368 | ||
|
|
8c8e58e7d9 | ||
|
|
391bb92bd2 | ||
|
|
171654c0ea | ||
|
|
0157fa7dad | ||
|
|
3912075c19 | ||
|
|
d42a89f70f | ||
|
|
6bb25d2c36 | ||
|
|
4179c3527c | ||
|
|
3b1356346a | ||
|
|
67be974861 | ||
|
|
8c916930c8 | ||
|
|
b537979e45 | ||
|
|
aa74817686 | ||
|
|
831e58df10 | ||
|
|
3dd03b2d45 | ||
|
|
2548f0bbba | ||
|
|
39d7ce9ee8 | ||
|
|
1f8c073cb3 | ||
|
|
0353a0ac77 | ||
|
|
921584ac4b | ||
|
|
65a09ce32d | ||
|
|
92dad3be2d | ||
|
|
d86fee294f | ||
|
|
95a01c4e12 | ||
|
|
b9456e242e | ||
|
|
4e49ad64c0 | ||
|
|
184b1b1ce6 | ||
|
|
f49d0555e7 | ||
|
|
9d8a0feab4 | ||
|
|
52a5c99fc5 | ||
|
|
cfaab0cbdc | ||
|
|
6bb0432ee7 | ||
|
|
99837d6339 | ||
|
|
3075723da9 | ||
|
|
b0bdeea7cf | ||
|
|
5eaad33709 | ||
|
|
24b6432490 | ||
|
|
ca61594511 | ||
|
|
bd57077bfe | ||
|
|
56bcba2dc0 | ||
|
|
73928412b3 | ||
|
|
29f41b7dff | ||
|
|
02f86ffe02 | ||
|
|
20c6bf1b5a | ||
|
|
083f070d17 | ||
|
|
4022b59d79 | ||
|
|
3d1cb11ea6 | ||
|
|
d0b1dacec1 | ||
|
|
45a06445f3 | ||
|
|
2f173bba56 | ||
|
|
cb497d2642 | ||
|
|
e1fd686272 | ||
|
|
8f7127a874 | ||
|
|
3a529a0f7b | ||
|
|
5c81f1bd7f | ||
|
|
e0c019002a | ||
|
|
cc24ef6e08 | ||
|
|
7e27abc5db | ||
|
|
4844baa816 | ||
|
|
e771ae9e95 | ||
|
|
a2488045ea | ||
|
|
cce5ebd3f0 | ||
|
|
a7ab77078e | ||
|
|
9af32bce5b | ||
|
|
4cf35e7e62 | ||
|
|
6f27e50cf0 | ||
|
|
bd8c5c684c | ||
|
|
1834db853b | ||
|
|
59ef0d24d1 | ||
|
|
c53817c33d | ||
|
|
b33b7a0474 | ||
|
|
033bc072c0 | ||
|
|
f32ed6bc79 | ||
|
|
707590143b | ||
|
|
b2663de513 | ||
|
|
dd210292ae | ||
|
|
abb9155eb4 | ||
|
|
f607ae8ec0 | ||
|
|
b3ded34244 | ||
|
|
bdfb074aa4 | ||
|
|
b3be5f16fc | ||
|
|
dad60fb878 | ||
|
|
fb66a9f2c3 | ||
|
|
dc9ef6f49c | ||
|
|
8e8a43ed48 | ||
|
|
5ac65b7bfc | ||
|
|
7b9b726577 | ||
|
|
c81cd675a5 | ||
|
|
ab926bf6c5 | ||
|
|
08c688ed11 | ||
|
|
e399d96661 | ||
|
|
ba54b30d4a | ||
|
|
44d9183d36 | ||
|
|
f6f3c00206 | ||
|
|
cc87b823fa | ||
|
|
ba2395f7e7 | ||
|
|
b546ce8777 | ||
|
|
0900eaa9df | ||
|
|
9fb5b2a4e7 | ||
|
|
d80edea218 | ||
|
|
26d6074689 |
145
.zuul.yaml
145
.zuul.yaml
@@ -1,38 +1,139 @@
|
||||
- project:
|
||||
check:
|
||||
jobs:
|
||||
- watcher-tempest-multinode
|
||||
- watcher-tempest-functional
|
||||
- watcher-tempest-dummy_optim
|
||||
- watcher-tempest-actuator
|
||||
- watcher-tempest-basic_optim
|
||||
- watcher-tempest-workload_balancing
|
||||
- watcherclient-tempest-functional
|
||||
- legacy-rally-dsvm-watcher-rally
|
||||
- openstack-tox-lower-constraints
|
||||
gate:
|
||||
jobs:
|
||||
- watcher-tempest-functional
|
||||
- watcher-tempest-dummy_optim
|
||||
- watcher-tempest-actuator
|
||||
- watcher-tempest-basic_optim
|
||||
- watcher-tempest-workload_balancing
|
||||
- watcherclient-tempest-functional
|
||||
- legacy-rally-dsvm-watcher-rally
|
||||
- openstack-tox-lower-constraints
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-base-multinode
|
||||
parent: legacy-dsvm-base-multinode
|
||||
run: playbooks/legacy/watcher-tempest-base-multinode/run.yaml
|
||||
post-run: playbooks/legacy/watcher-tempest-base-multinode/post.yaml
|
||||
timeout: 4200
|
||||
name: watcher-tempest-dummy_optim
|
||||
parent: watcher-tempest-multinode
|
||||
vars:
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_dummy_optim'
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-actuator
|
||||
parent: watcher-tempest-multinode
|
||||
vars:
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_actuator'
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-basic_optim
|
||||
parent: watcher-tempest-multinode
|
||||
vars:
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_basic_optim'
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-workload_balancing
|
||||
parent: watcher-tempest-multinode
|
||||
vars:
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_workload_balancing'
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-multinode
|
||||
parent: watcher-tempest-functional
|
||||
voting: false
|
||||
nodeset: openstack-two-node
|
||||
pre-run: playbooks/pre.yaml
|
||||
run: playbooks/orchestrate-tempest.yaml
|
||||
roles:
|
||||
- zuul: openstack/tempest
|
||||
group-vars:
|
||||
subnode:
|
||||
devstack_local_conf:
|
||||
post-config:
|
||||
$NOVA_CONF:
|
||||
libvirt:
|
||||
live_migration_uri: 'qemu+ssh://root@%s/system'
|
||||
devstack_services:
|
||||
watcher-api: false
|
||||
watcher-decision-engine: false
|
||||
watcher-applier: false
|
||||
# We need to add TLS support for watcher plugin
|
||||
tls-proxy: false
|
||||
ceilometer: false
|
||||
ceilometer-acompute: false
|
||||
ceilometer-acentral: false
|
||||
ceilometer-anotification: false
|
||||
watcher: false
|
||||
gnocchi-api: false
|
||||
gnocchi-metricd: false
|
||||
rabbit: false
|
||||
mysql: false
|
||||
vars:
|
||||
devstack_local_conf:
|
||||
post-config:
|
||||
$NOVA_CONF:
|
||||
libvirt:
|
||||
live_migration_uri: 'qemu+ssh://root@%s/system'
|
||||
test-config:
|
||||
$TEMPEST_CONFIG:
|
||||
compute:
|
||||
min_compute_nodes: 2
|
||||
compute-feature-enabled:
|
||||
live_migration: true
|
||||
block_migration_for_live_migration: true
|
||||
devstack_plugins:
|
||||
ceilometer: https://git.openstack.org/openstack/ceilometer
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-functional
|
||||
parent: devstack-tempest
|
||||
timeout: 7200
|
||||
required-projects:
|
||||
- openstack/ceilometer
|
||||
- openstack-infra/devstack-gate
|
||||
- openstack/python-openstackclient
|
||||
- openstack/python-watcherclient
|
||||
- openstack/watcher
|
||||
- openstack/watcher-tempest-plugin
|
||||
nodeset: legacy-ubuntu-xenial-2-node
|
||||
- openstack/tempest
|
||||
vars:
|
||||
devstack_plugins:
|
||||
watcher: https://git.openstack.org/openstack/watcher
|
||||
devstack_services:
|
||||
tls-proxy: false
|
||||
watcher-api: true
|
||||
watcher-decision-engine: true
|
||||
watcher-applier: true
|
||||
tempest: true
|
||||
s-account: false
|
||||
s-container: false
|
||||
s-object: false
|
||||
s-proxy: false
|
||||
devstack_localrc:
|
||||
TEMPEST_PLUGINS: '/opt/stack/watcher-tempest-plugin'
|
||||
tempest_test_regex: 'watcher_tempest_plugin.tests.api'
|
||||
tox_envlist: all
|
||||
tox_environment:
|
||||
# Do we really need to set this? It's cargo culted
|
||||
PYTHONUNBUFFERED: 'true'
|
||||
zuul_copy_output:
|
||||
/etc/hosts: logs
|
||||
|
||||
- job:
|
||||
name: watcher-tempest-multinode
|
||||
parent: watcher-tempest-base-multinode
|
||||
voting: false
|
||||
|
||||
- job:
|
||||
# This job is used by python-watcherclient repo
|
||||
# This job is used in python-watcherclient repo
|
||||
name: watcherclient-tempest-functional
|
||||
parent: legacy-dsvm-base
|
||||
run: playbooks/legacy/watcherclient-tempest-functional/run.yaml
|
||||
post-run: playbooks/legacy/watcherclient-tempest-functional/post.yaml
|
||||
parent: watcher-tempest-functional
|
||||
voting: false
|
||||
timeout: 4200
|
||||
required-projects:
|
||||
- openstack-dev/devstack
|
||||
- openstack-infra/devstack-gate
|
||||
- openstack/python-openstackclient
|
||||
- openstack/python-watcherclient
|
||||
- openstack/watcher
|
||||
vars:
|
||||
tempest_concurrency: 1
|
||||
devstack_localrc:
|
||||
TEMPEST_PLUGINS: '/opt/stack/python-watcherclient'
|
||||
tempest_test_regex: 'watcherclient.tests.functional'
|
||||
|
||||
@@ -8,4 +8,4 @@
|
||||
watcher Style Commandments
|
||||
==========================
|
||||
|
||||
Read the OpenStack Style Commandments https://docs.openstack.org/developer/hacking/
|
||||
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
Team and repository tags
|
||||
========================
|
||||
|
||||
.. image:: https://governance.openstack.org/badges/watcher.svg
|
||||
:target: https://governance.openstack.org/reference/tags/index.html
|
||||
.. image:: https://governance.openstack.org/tc/badges/watcher.svg
|
||||
:target: https://governance.openstack.org/tc/reference/tags/index.html
|
||||
|
||||
.. Change things from this point on
|
||||
|
||||
@@ -22,10 +22,11 @@ service for multi-tenant OpenStack-based clouds.
|
||||
Watcher provides a robust framework to realize a wide range of cloud
|
||||
optimization goals, including the reduction of data center
|
||||
operating costs, increased system performance via intelligent virtual machine
|
||||
migration, increased energy efficiency-and more!
|
||||
migration, increased energy efficiency and more!
|
||||
|
||||
* Free software: Apache license
|
||||
* Wiki: https://wiki.openstack.org/wiki/Watcher
|
||||
* Source: https://github.com/openstack/watcher
|
||||
* Source: https://github.com/openstack/watcher
|
||||
* Bugs: https://bugs.launchpad.net/watcher
|
||||
* Documentation: https://docs.openstack.org/watcher/latest/
|
||||
* Release notes: https://docs.openstack.org/releasenotes/watcher/
|
||||
|
||||
@@ -177,16 +177,20 @@ function create_watcher_conf {
|
||||
iniset $WATCHER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
|
||||
iniset $WATCHER_CONF DEFAULT control_exchange watcher
|
||||
|
||||
iniset_rpc_backend watcher $WATCHER_CONF
|
||||
|
||||
iniset $WATCHER_CONF database connection $(database_connection_url watcher)
|
||||
iniset $WATCHER_CONF api host "$WATCHER_SERVICE_HOST"
|
||||
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT"
|
||||
|
||||
if is_service_enabled tls-proxy; then
|
||||
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT_INT"
|
||||
# iniset $WATCHER_CONF api enable_ssl_api "True"
|
||||
else
|
||||
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT"
|
||||
fi
|
||||
|
||||
iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_YAML
|
||||
|
||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_userid $RABBIT_USERID
|
||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
|
||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_host $RABBIT_HOST
|
||||
|
||||
iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2"
|
||||
|
||||
iniset $NOVA_CONF oslo_messaging_notifications topics "notifications,watcher_notifications"
|
||||
@@ -297,8 +301,7 @@ function start_watcher_api {
|
||||
|
||||
# Start proxies if enabled
|
||||
if is_service_enabled tls-proxy; then
|
||||
start_tls_proxy '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT &
|
||||
start_tls_proxy '*' $EC2_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT &
|
||||
start_tls_proxy watcher '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ It is used via a single directive in the .rst file
|
||||
|
||||
"""
|
||||
|
||||
from sphinx.util.compat import Directive
|
||||
from docutils.parsers.rst import Directive
|
||||
from docutils import nodes
|
||||
|
||||
from watcher.notifications import base as notification
|
||||
|
||||
@@ -19,7 +19,7 @@ The source install instructions specifically avoid using platform specific
|
||||
packages, instead using the source for the code and the Python Package Index
|
||||
(PyPi_).
|
||||
|
||||
.. _PyPi: https://pypi.python.org/pypi
|
||||
.. _PyPi: https://pypi.org/
|
||||
|
||||
It's expected that your system already has python2.7_, latest version of pip_,
|
||||
and git_ available.
|
||||
|
||||
@@ -42,6 +42,7 @@ extensions = [
|
||||
'ext.versioned_notifications',
|
||||
'oslo_config.sphinxconfiggen',
|
||||
'openstackdocstheme',
|
||||
'sphinx.ext.napoleon',
|
||||
]
|
||||
|
||||
wsme_protocols = ['restjson']
|
||||
|
||||
@@ -129,10 +129,14 @@ Configure the Identity service for the Watcher service
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ openstack endpoint create --region YOUR_REGION watcher \
|
||||
--publicurl http://WATCHER_API_PUBLIC_IP:9322 \
|
||||
--internalurl http://WATCHER_API_INTERNAL_IP:9322 \
|
||||
--adminurl http://WATCHER_API_ADMIN_IP:9322
|
||||
$ openstack endpoint create --region YOUR_REGION
|
||||
watcher public http://WATCHER_API_PUBLIC_IP:9322
|
||||
|
||||
$ openstack endpoint create --region YOUR_REGION
|
||||
watcher internal http://WATCHER_API_INTERNAL_IP:9322
|
||||
|
||||
$ openstack endpoint create --region YOUR_REGION
|
||||
watcher admin http://WATCHER_API_ADMIN_IP:9322
|
||||
|
||||
.. _watcher-db_configuration:
|
||||
|
||||
@@ -217,7 +221,7 @@ so that the watcher service is configured for your needs.
|
||||
# The SQLAlchemy connection string used to connect to the
|
||||
# database (string value)
|
||||
#connection=<None>
|
||||
connection = mysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8
|
||||
connection = mysql+pymysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8
|
||||
|
||||
#. Configure the Watcher Service to use the RabbitMQ message broker by
|
||||
setting one or more of these options. Replace RABBIT_HOST with the
|
||||
@@ -235,21 +239,8 @@ so that the watcher service is configured for your needs.
|
||||
# option. (string value)
|
||||
control_exchange = watcher
|
||||
|
||||
...
|
||||
|
||||
[oslo_messaging_rabbit]
|
||||
|
||||
# The username used by the message broker (string value)
|
||||
rabbit_userid = RABBITMQ_USER
|
||||
|
||||
# The password of user used by the message broker (string value)
|
||||
rabbit_password = RABBITMQ_PASSWORD
|
||||
|
||||
# The host where the message brokeris installed (string value)
|
||||
rabbit_host = RABBIT_HOST
|
||||
|
||||
# The port used bythe message broker (string value)
|
||||
#rabbit_port = 5672
|
||||
# ...
|
||||
transport_url = rabbit://RABBITMQ_USER:RABBITMQ_PASSWORD@RABBIT_HOST
|
||||
|
||||
|
||||
#. Watcher API shall validate the token provided by every incoming request,
|
||||
@@ -273,7 +264,7 @@ so that the watcher service is configured for your needs.
|
||||
|
||||
# Authentication URL (unknown value)
|
||||
#auth_url = <None>
|
||||
auth_url = http://IDENTITY_IP:35357
|
||||
auth_url = http://IDENTITY_IP:5000
|
||||
|
||||
# Username (unknown value)
|
||||
# Deprecated group/name - [DEFAULT]/username
|
||||
@@ -319,7 +310,7 @@ so that the watcher service is configured for your needs.
|
||||
|
||||
# Authentication URL (unknown value)
|
||||
#auth_url = <None>
|
||||
auth_url = http://IDENTITY_IP:35357
|
||||
auth_url = http://IDENTITY_IP:5000
|
||||
|
||||
# Username (unknown value)
|
||||
# Deprecated group/name - [DEFAULT]/username
|
||||
@@ -349,7 +340,7 @@ so that the watcher service is configured for your needs.
|
||||
[nova_client]
|
||||
|
||||
# Version of Nova API to use in novaclient. (string value)
|
||||
#api_version = 2.53
|
||||
#api_version = 2.56
|
||||
api_version = 2.1
|
||||
|
||||
#. Create the Watcher Service database tables::
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
===================
|
||||
Configuration Guide
|
||||
===================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:maxdepth: 2
|
||||
|
||||
configuring
|
||||
watcher
|
||||
|
||||
@@ -39,7 +39,7 @@ notifications of important events.
|
||||
|
||||
* https://launchpad.net
|
||||
* https://launchpad.net/watcher
|
||||
* https://launchpad.net/~openstack
|
||||
* https://launchpad.net/openstack
|
||||
|
||||
|
||||
Project Hosting Details
|
||||
@@ -49,7 +49,7 @@ Bug tracker
|
||||
https://launchpad.net/watcher
|
||||
|
||||
Mailing list (prefix subjects with ``[watcher]`` for faster responses)
|
||||
https://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev
|
||||
http://lists.openstack.org/pipermail/openstack-dev/
|
||||
|
||||
Wiki
|
||||
https://wiki.openstack.org/Watcher
|
||||
@@ -65,7 +65,7 @@ IRC Channel
|
||||
|
||||
Weekly Meetings
|
||||
On Wednesdays at 14:00 UTC on even weeks in the ``#openstack-meeting-4``
|
||||
IRC channel, 13:00 UTC on odd weeks in the ``#openstack-meeting-alt``
|
||||
IRC channel, 08:00 UTC on odd weeks in the ``#openstack-meeting-alt``
|
||||
IRC channel (`meetings logs`_)
|
||||
|
||||
.. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/
|
||||
|
||||
@@ -37,7 +37,7 @@ different version of the above, please document your configuration here!
|
||||
|
||||
.. _Python: https://www.python.org/
|
||||
.. _git: https://git-scm.com/
|
||||
.. _setuptools: https://pypi.python.org/pypi/setuptools
|
||||
.. _setuptools: https://pypi.org/project/setuptools
|
||||
.. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/en/latest/install.html
|
||||
|
||||
Getting the latest code
|
||||
@@ -69,8 +69,8 @@ itself.
|
||||
|
||||
These dependencies can be installed from PyPi_ using the Python tool pip_.
|
||||
|
||||
.. _PyPi: https://pypi.python.org/
|
||||
.. _pip: https://pypi.python.org/pypi/pip
|
||||
.. _PyPi: https://pypi.org/
|
||||
.. _pip: https://pypi.org/project/pip
|
||||
|
||||
However, your system *may* need additional dependencies that `pip` (and by
|
||||
extension, PyPi) cannot satisfy. These dependencies should be installed
|
||||
@@ -123,9 +123,10 @@ You can re-activate this virtualenv for your current shell using:
|
||||
|
||||
$ workon watcher
|
||||
|
||||
For more information on virtual environments, see virtualenv_.
|
||||
For more information on virtual environments, see virtualenv_ and
|
||||
virtualenvwrapper_.
|
||||
|
||||
.. _virtualenv: https://www.virtualenv.org/
|
||||
.. _virtualenv: https://pypi.org/project/virtualenv/
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -79,7 +79,7 @@ requirements.txt file::
|
||||
|
||||
.. _cookiecutter: https://github.com/audreyr/cookiecutter
|
||||
.. _OpenStack cookiecutter: https://github.com/openstack-dev/cookiecutter
|
||||
.. _python-watcher: https://pypi.python.org/pypi/python-watcher
|
||||
.. _python-watcher: https://pypi.org/project/python-watcher
|
||||
|
||||
Implementing a plugin for Watcher
|
||||
=================================
|
||||
|
||||
@@ -208,7 +208,7 @@ Here below is how to register ``DummyClusterDataModelCollector`` using pbr_:
|
||||
watcher_cluster_data_model_collectors =
|
||||
dummy = thirdparty.dummy:DummyClusterDataModelCollector
|
||||
|
||||
.. _pbr: http://docs.openstack.org/pbr/latest
|
||||
.. _pbr: https://docs.openstack.org/pbr/latest/
|
||||
|
||||
|
||||
Add new notification endpoints
|
||||
|
||||
@@ -31,7 +31,7 @@ the following::
|
||||
(watcher) $ tox -e pep8
|
||||
|
||||
.. _tox: https://tox.readthedocs.org/
|
||||
.. _Gerrit: http://review.openstack.org/
|
||||
.. _Gerrit: https://review.openstack.org/
|
||||
|
||||
You may pass options to the test programs using positional arguments. To run a
|
||||
specific unit test, you can pass extra options to `os-testr`_ after putting
|
||||
|
||||
@@ -274,7 +274,7 @@ In OpenStack Identity, a :ref:`project <project_definition>` must be owned by a
|
||||
specific domain.
|
||||
|
||||
Please, read `the official OpenStack definition of a Project
|
||||
<http://docs.openstack.org/glossary/content/glossary.html>`_.
|
||||
<https://docs.openstack.org/doc-contrib-guide/common/glossary.html>`_.
|
||||
|
||||
.. _scoring_engine_definition:
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ metrics receiver, complex event processor and profiler, optimization processor
|
||||
and an action plan applier. This provides a robust framework to realize a wide
|
||||
range of cloud optimization goals, including the reduction of data center
|
||||
operating costs, increased system performance via intelligent virtual machine
|
||||
migration, increased energy efficiency—and more!
|
||||
migration, increased energy efficiency and more!
|
||||
|
||||
Watcher project consists of several source code repositories:
|
||||
|
||||
|
||||
@@ -26,8 +26,8 @@
|
||||
|
||||
[keystone_authtoken]
|
||||
...
|
||||
auth_uri = http://controller:5000
|
||||
auth_url = http://controller:35357
|
||||
www_authenticate_uri = http://controller:5000
|
||||
auth_url = http://controller:5000
|
||||
memcached_servers = controller:11211
|
||||
auth_type = password
|
||||
project_domain_name = default
|
||||
@@ -47,7 +47,7 @@
|
||||
[watcher_clients_auth]
|
||||
...
|
||||
auth_type = password
|
||||
auth_url = http://controller:35357
|
||||
auth_url = http://controller:5000
|
||||
username = watcher
|
||||
password = WATCHER_PASS
|
||||
project_domain_name = default
|
||||
|
||||
@@ -10,7 +10,7 @@ Infrastructure Optimization service
|
||||
verify.rst
|
||||
next-steps.rst
|
||||
|
||||
The Infrastructure Optimization service (watcher) provides
|
||||
The Infrastructure Optimization service (Watcher) provides
|
||||
flexible and scalable resource optimization service for
|
||||
multi-tenant OpenStack-based clouds.
|
||||
|
||||
@@ -21,17 +21,17 @@ applier. This provides a robust framework to realize a wide
|
||||
range of cloud optimization goals, including the reduction
|
||||
of data center operating costs, increased system performance
|
||||
via intelligent virtual machine migration, increased energy
|
||||
efficiency—and more!
|
||||
efficiency and more!
|
||||
|
||||
Watcher also supports a pluggable architecture by which custom
|
||||
optimization algorithms, data metrics and data profilers can be
|
||||
developed and inserted into the Watcher framework.
|
||||
|
||||
Check the documentation for watcher optimization strategies at
|
||||
https://docs.openstack.org/watcher/latest/strategies/index.html
|
||||
`Strategies <https://docs.openstack.org/watcher/latest/strategies/index.html>`_.
|
||||
|
||||
Check watcher glossary at
|
||||
https://docs.openstack.org/watcher/latest/glossary.html
|
||||
Check watcher glossary at `Glossary
|
||||
<https://docs.openstack.org/watcher/latest/glossary.html>`_.
|
||||
|
||||
|
||||
This chapter assumes a working setup of OpenStack following the
|
||||
|
||||
@@ -7,9 +7,7 @@ Service for the Watcher API
|
||||
---------------------------
|
||||
|
||||
:Author: openstack@lists.launchpad.net
|
||||
:Date:
|
||||
:Copyright: OpenStack Foundation
|
||||
:Version:
|
||||
:Manual section: 1
|
||||
:Manual group: cloud computing
|
||||
|
||||
|
||||
@@ -7,9 +7,7 @@ Service for the Watcher Applier
|
||||
-------------------------------
|
||||
|
||||
:Author: openstack@lists.launchpad.net
|
||||
:Date:
|
||||
:Copyright: OpenStack Foundation
|
||||
:Version:
|
||||
:Manual section: 1
|
||||
:Manual group: cloud computing
|
||||
|
||||
|
||||
@@ -7,9 +7,7 @@ Service for the Watcher Decision Engine
|
||||
---------------------------------------
|
||||
|
||||
:Author: openstack@lists.launchpad.net
|
||||
:Date:
|
||||
:Copyright: OpenStack Foundation
|
||||
:Version:
|
||||
:Manual section: 1
|
||||
:Manual group: cloud computing
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``unclassified``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.actuation
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.actuation.Actuator
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``server_consolidation``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation.BasicConsolidation
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
92
doc/source/strategies/host_maintenance.rst
Normal file
92
doc/source/strategies/host_maintenance.rst
Normal file
@@ -0,0 +1,92 @@
|
||||
===========================
|
||||
Host Maintenance Strategy
|
||||
===========================
|
||||
|
||||
Synopsis
|
||||
--------
|
||||
|
||||
**display name**: ``Host Maintenance Strategy``
|
||||
|
||||
**goal**: ``cluster_maintaining``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.host_maintenance.HostMaintenance
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
None.
|
||||
|
||||
Metrics
|
||||
*******
|
||||
|
||||
None
|
||||
|
||||
Cluster data model
|
||||
******************
|
||||
|
||||
Default Watcher's Compute cluster data model:
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector
|
||||
|
||||
Actions
|
||||
*******
|
||||
|
||||
Default Watcher's actions:
|
||||
|
||||
.. list-table::
|
||||
:widths: 30 30
|
||||
:header-rows: 1
|
||||
|
||||
* - action
|
||||
- description
|
||||
* - ``migration``
|
||||
- .. watcher-term:: watcher.applier.actions.migration.Migrate
|
||||
|
||||
Planner
|
||||
*******
|
||||
|
||||
Default Watcher's planner:
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner
|
||||
|
||||
Configuration
|
||||
-------------
|
||||
|
||||
Strategy parameters are:
|
||||
|
||||
==================== ====== ====================================
|
||||
parameter type default Value description
|
||||
==================== ====== ====================================
|
||||
``maintenance_node`` String The name of the compute node which
|
||||
need maintenance. Required.
|
||||
``backup_node`` String The name of the compute node which
|
||||
will backup the maintenance node.
|
||||
Optional.
|
||||
==================== ====== ====================================
|
||||
|
||||
Efficacy Indicator
|
||||
------------------
|
||||
|
||||
None
|
||||
|
||||
Algorithm
|
||||
---------
|
||||
|
||||
For more information on the Host Maintenance Strategy please refer
|
||||
to: https://specs.openstack.org/openstack/watcher-specs/specs/queens/approved/cluster-maintenance-strategy.html
|
||||
|
||||
How to use it ?
|
||||
---------------
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ openstack optimize audit create \
|
||||
-g cluster_maintaining -s host_maintenance \
|
||||
-p maintenance_node=compute01 \
|
||||
-p backup_node=compute02 \
|
||||
--auto-trigger
|
||||
|
||||
External Links
|
||||
--------------
|
||||
|
||||
None.
|
||||
@@ -9,11 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``thermal_optimization``
|
||||
|
||||
Outlet (Exhaust Air) temperature is a new thermal telemetry which can be
|
||||
used to measure the host's thermal/workload status. This strategy makes
|
||||
decisions to migrate workloads to the hosts with good thermal condition
|
||||
(lowest outlet temperature) when the outlet temperature of source hosts
|
||||
reach a configurable threshold.
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.outlet_temp_control
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``saving_energy``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.saving_energy
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.saving_energy.SavingEnergy
|
||||
|
||||
Requirements
|
||||
------------
|
||||
@@ -67,13 +67,13 @@ parameter type default description
|
||||
Efficacy Indicator
|
||||
------------------
|
||||
|
||||
Energy saving strategy efficacy indicator is unclassified.
|
||||
https://github.com/openstack/watcher/blob/master/watcher/decision_engine/goal/goals.py#L215-L218
|
||||
None
|
||||
|
||||
Algorithm
|
||||
---------
|
||||
|
||||
For more information on the Energy Saving Strategy please refer to:http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
||||
For more information on the Energy Saving Strategy please refer to:
|
||||
http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
||||
|
||||
How to use it ?
|
||||
---------------
|
||||
@@ -91,10 +91,10 @@ step 2: Create audit to do optimization
|
||||
$ openstack optimize audittemplate create \
|
||||
at1 saving_energy --strategy saving_energy
|
||||
|
||||
$ openstack optimize audit create -a at1
|
||||
$ openstack optimize audit create -a at1 \
|
||||
-p free_used_percent=20.0
|
||||
|
||||
External Links
|
||||
--------------
|
||||
|
||||
*Spec URL*
|
||||
http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
||||
None
|
||||
|
||||
87
doc/source/strategies/storage_capacity_balance.rst
Normal file
87
doc/source/strategies/storage_capacity_balance.rst
Normal file
@@ -0,0 +1,87 @@
|
||||
========================
|
||||
Storage capacity balance
|
||||
========================
|
||||
|
||||
Synopsis
|
||||
--------
|
||||
|
||||
**display name**: ``Storage Capacity Balance Strategy``
|
||||
|
||||
**goal**: ``workload_balancing``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.storage_capacity_balance.StorageCapacityBalance
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Metrics
|
||||
*******
|
||||
|
||||
None
|
||||
|
||||
Cluster data model
|
||||
******************
|
||||
|
||||
Storage cluster data model is required:
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.model.collector.cinder.CinderClusterDataModelCollector
|
||||
|
||||
Actions
|
||||
*******
|
||||
|
||||
Default Watcher's actions:
|
||||
|
||||
.. list-table::
|
||||
:widths: 25 35
|
||||
:header-rows: 1
|
||||
|
||||
* - action
|
||||
- description
|
||||
* - ``volume_migrate``
|
||||
- .. watcher-term:: watcher.applier.actions.volume_migration.VolumeMigrate
|
||||
|
||||
Planner
|
||||
*******
|
||||
|
||||
Default Watcher's planner:
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner
|
||||
|
||||
Configuration
|
||||
-------------
|
||||
|
||||
Strategy parameter is:
|
||||
|
||||
==================== ====== ============= =====================================
|
||||
parameter type default Value description
|
||||
==================== ====== ============= =====================================
|
||||
``volume_threshold`` Number 80.0 Volume threshold for capacity balance
|
||||
==================== ====== ============= =====================================
|
||||
|
||||
|
||||
Efficacy Indicator
|
||||
------------------
|
||||
|
||||
None
|
||||
|
||||
Algorithm
|
||||
---------
|
||||
|
||||
For more information on the zone migration strategy please refer to:
|
||||
http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/storage-capacity-balance.html
|
||||
|
||||
How to use it ?
|
||||
---------------
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ openstack optimize audittemplate create \
|
||||
at1 workload_balancing --strategy storage_capacity_balance
|
||||
|
||||
$ openstack optimize audit create -a at1 \
|
||||
-p volume_threshold=85.0
|
||||
|
||||
External Links
|
||||
--------------
|
||||
|
||||
None
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``airflow_optimization``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow.UniformAirflow
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``vm_consolidation``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation.VMWorkloadConsolidation
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``workload_balancing``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization.WorkloadStabilization
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``workload_balancing``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance.WorkloadBalance
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -9,7 +9,7 @@ Synopsis
|
||||
|
||||
**goal**: ``hardware_maintenance``
|
||||
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.zone_migration
|
||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.zone_migration.ZoneMigration
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
@@ -39,6 +39,22 @@ named ``watcher``, or by using the `OpenStack CLI`_ ``openstack``.
|
||||
If you want to deploy Watcher in Horizon, please refer to the `Watcher Horizon
|
||||
plugin installation guide`_.
|
||||
|
||||
.. note::
|
||||
|
||||
Notice, that in this guide we'll use `OpenStack CLI`_ as major interface.
|
||||
Nevertheless, you can use `Watcher CLI`_ in the same way. It can be
|
||||
achieved by replacing
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ openstack optimize ...
|
||||
|
||||
with
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher ...
|
||||
|
||||
.. _`installation guide`: https://docs.openstack.org/python-watcherclient/latest
|
||||
.. _`Watcher Horizon plugin installation guide`: https://docs.openstack.org/watcher-dashboard/latest/install/installation.html
|
||||
.. _`OpenStack CLI`: https://docs.openstack.org/python-openstackclient/latest/cli/man/openstack.html
|
||||
@@ -51,10 +67,6 @@ watcher binary without options.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher help
|
||||
|
||||
or::
|
||||
|
||||
$ openstack help optimize
|
||||
|
||||
How do I run an audit of my cluster ?
|
||||
@@ -64,10 +76,6 @@ First, you need to find the :ref:`goal <goal_definition>` you want to achieve:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher goal list
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize goal list
|
||||
|
||||
.. note::
|
||||
@@ -81,10 +89,6 @@ An :ref:`audit template <audit_template_definition>` defines an optimization
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audittemplate create my_first_audit_template <your_goal>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audittemplate create my_first_audit_template <your_goal>
|
||||
|
||||
Although optional, you may want to actually set a specific strategy for your
|
||||
@@ -93,10 +97,6 @@ following command:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher strategy list --goal <your_goal_uuid_or_name>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize strategy list --goal <your_goal_uuid_or_name>
|
||||
|
||||
You can use the following command to check strategy details including which
|
||||
@@ -104,21 +104,12 @@ parameters of which format it supports:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher strategy show <your_strategy>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize strategy show <your_strategy>
|
||||
|
||||
The command to create your audit template would then be:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audittemplate create my_first_audit_template <your_goal> \
|
||||
--strategy <your_strategy>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audittemplate create my_first_audit_template <your_goal> \
|
||||
--strategy <your_strategy>
|
||||
|
||||
@@ -133,10 +124,6 @@ audit) that you want to use.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audittemplate list
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audittemplate list
|
||||
|
||||
- Start an audit based on this :ref:`audit template
|
||||
@@ -144,10 +131,6 @@ or::
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audit create -a <your_audit_template>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audit create -a <your_audit_template>
|
||||
|
||||
If your_audit_template was created by --strategy <your_strategy>, and it
|
||||
@@ -156,11 +139,6 @@ format), your can append `-p` to input required parameters:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher audit create -a <your_audit_template> \
|
||||
-p <your_strategy_para1>=5.5 -p <your_strategy_para2>=hi
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize audit create -a <your_audit_template> \
|
||||
-p <your_strategy_para1>=5.5 -p <your_strategy_para2>=hi
|
||||
|
||||
@@ -173,19 +151,13 @@ Input parameter could cause audit creation failure, when:
|
||||
Watcher service will compute an :ref:`Action Plan <action_plan_definition>`
|
||||
composed of a list of potential optimization :ref:`actions <action_definition>`
|
||||
(instance migration, disabling of a compute node, ...) according to the
|
||||
:ref:`goal <goal_definition>` to achieve. You can see all of the goals
|
||||
available in section ``[watcher_strategies]`` of the Watcher service
|
||||
configuration file.
|
||||
:ref:`goal <goal_definition>` to achieve.
|
||||
|
||||
- Wait until the Watcher audit has produced a new :ref:`action plan
|
||||
<action_plan_definition>`, and get it:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher actionplan list --audit <the_audit_uuid>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize actionplan list --audit <the_audit_uuid>
|
||||
|
||||
- Have a look on the list of optimization :ref:`actions <action_definition>`
|
||||
@@ -193,10 +165,6 @@ or::
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher action list --action-plan <the_action_plan_uuid>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize action list --action-plan <the_action_plan_uuid>
|
||||
|
||||
Once you have learned how to create an :ref:`Action Plan
|
||||
@@ -207,10 +175,6 @@ cluster:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher actionplan start <the_action_plan_uuid>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize actionplan start <the_action_plan_uuid>
|
||||
|
||||
You can follow the states of the :ref:`actions <action_definition>` by
|
||||
@@ -218,19 +182,11 @@ periodically calling:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher action list
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize action list
|
||||
|
||||
You can also obtain more detailed information about a specific action:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
$ watcher action show <the_action_uuid>
|
||||
|
||||
or::
|
||||
|
||||
$ openstack optimize action show <the_action_uuid>
|
||||
|
||||
|
||||
165
lower-constraints.txt
Normal file
165
lower-constraints.txt
Normal file
@@ -0,0 +1,165 @@
|
||||
alabaster==0.7.10
|
||||
alembic==0.9.8
|
||||
amqp==2.2.2
|
||||
appdirs==1.4.3
|
||||
APScheduler==3.5.1
|
||||
asn1crypto==0.24.0
|
||||
automaton==1.14.0
|
||||
Babel==2.5.3
|
||||
bandit==1.4.0
|
||||
beautifulsoup4==4.6.0
|
||||
cachetools==2.0.1
|
||||
certifi==2018.1.18
|
||||
cffi==1.11.5
|
||||
chardet==3.0.4
|
||||
cliff==2.11.0
|
||||
cmd2==0.8.1
|
||||
contextlib2==0.5.5
|
||||
coverage==4.5.1
|
||||
croniter==0.3.20
|
||||
cryptography==2.1.4
|
||||
debtcollector==1.19.0
|
||||
decorator==4.2.1
|
||||
deprecation==2.0
|
||||
doc8==0.8.0
|
||||
docutils==0.14
|
||||
dogpile.cache==0.6.5
|
||||
dulwich==0.19.0
|
||||
enum34==1.1.6
|
||||
enum-compat==0.0.2
|
||||
eventlet==0.20.0
|
||||
extras==1.0.0
|
||||
fasteners==0.14.1
|
||||
fixtures==3.0.0
|
||||
flake8==2.5.5
|
||||
freezegun==0.3.10
|
||||
future==0.16.0
|
||||
futurist==1.6.0
|
||||
gitdb2==2.0.3
|
||||
GitPython==2.1.8
|
||||
gnocchiclient==7.0.1
|
||||
greenlet==0.4.13
|
||||
hacking==0.12.0
|
||||
idna==2.6
|
||||
imagesize==1.0.0
|
||||
iso8601==0.1.12
|
||||
Jinja2==2.10
|
||||
jmespath==0.9.3
|
||||
jsonpatch==1.21
|
||||
jsonpointer==2.0
|
||||
jsonschema==2.6.0
|
||||
keystoneauth1==3.4.0
|
||||
keystonemiddleware==4.21.0
|
||||
kombu==4.1.0
|
||||
linecache2==1.0.0
|
||||
logutils==0.3.5
|
||||
lxml==4.1.1
|
||||
Mako==1.0.7
|
||||
MarkupSafe==1.0
|
||||
mccabe==0.2.1
|
||||
mock==2.0.0
|
||||
monotonic==1.4
|
||||
mox3==0.25.0
|
||||
msgpack==0.5.6
|
||||
munch==2.2.0
|
||||
netaddr==0.7.19
|
||||
netifaces==0.10.6
|
||||
networkx==1.11
|
||||
openstackdocstheme==1.20.0
|
||||
openstacksdk==0.12.0
|
||||
os-api-ref===1.4.0
|
||||
os-client-config==1.29.0
|
||||
os-service-types==1.2.0
|
||||
os-testr==1.0.0
|
||||
osc-lib==1.10.0
|
||||
oslo.cache==1.29.0
|
||||
oslo.concurrency==3.26.0
|
||||
oslo.config==5.2.0
|
||||
oslo.context==2.20.0
|
||||
oslo.db==4.35.0
|
||||
oslo.i18n==3.20.0
|
||||
oslo.log==3.37.0
|
||||
oslo.messaging==5.36.0
|
||||
oslo.middleware==3.35.0
|
||||
oslo.policy==1.34.0
|
||||
oslo.reports==1.27.0
|
||||
oslo.serialization==2.25.0
|
||||
oslo.service==1.30.0
|
||||
oslo.utils==3.36.0
|
||||
oslo.versionedobjects==1.32.0
|
||||
oslotest==3.3.0
|
||||
packaging==17.1
|
||||
Paste==2.0.3
|
||||
PasteDeploy==1.5.2
|
||||
pbr==3.1.1
|
||||
pecan==1.2.1
|
||||
pep8==1.5.7
|
||||
pika==0.10.0
|
||||
pika-pool==0.1.3
|
||||
prettytable==0.7.2
|
||||
psutil==5.4.3
|
||||
pycadf==2.7.0
|
||||
pycparser==2.18
|
||||
pyflakes==0.8.1
|
||||
Pygments==2.2.0
|
||||
pyinotify==0.9.6
|
||||
pyOpenSSL==17.5.0
|
||||
pyparsing==2.2.0
|
||||
pyperclip==1.6.0
|
||||
python-ceilometerclient==2.9.0
|
||||
python-cinderclient==3.5.0
|
||||
python-dateutil==2.7.0
|
||||
python-editor==1.0.3
|
||||
python-glanceclient==2.9.1
|
||||
python-ironicclient==2.3.0
|
||||
python-keystoneclient==3.15.0
|
||||
python-mimeparse==1.6.0
|
||||
python-monascaclient==1.10.0
|
||||
python-neutronclient==6.7.0
|
||||
python-novaclient==10.1.0
|
||||
python-openstackclient==3.14.0
|
||||
python-subunit==1.2.0
|
||||
pytz==2018.3
|
||||
PyYAML==3.12
|
||||
reno==2.7.0
|
||||
repoze.lru==0.7
|
||||
requests==2.18.4
|
||||
requestsexceptions==1.4.0
|
||||
restructuredtext-lint==1.1.3
|
||||
rfc3986==1.1.0
|
||||
Routes==2.4.1
|
||||
simplegeneric==0.8.1
|
||||
simplejson==3.13.2
|
||||
six==1.11.0
|
||||
smmap2==2.0.3
|
||||
snowballstemmer==1.2.1
|
||||
Sphinx==1.6.5
|
||||
sphinxcontrib-httpdomain==1.6.1
|
||||
sphinxcontrib-pecanwsme==0.8.0
|
||||
sphinxcontrib-websupport==1.0.1
|
||||
SQLAlchemy==1.2.5
|
||||
sqlalchemy-migrate==0.11.0
|
||||
sqlparse==0.2.4
|
||||
statsd==3.2.2
|
||||
stestr==2.0.0
|
||||
stevedore==1.28.0
|
||||
taskflow==3.1.0
|
||||
Tempita==0.5.2
|
||||
tenacity==4.9.0
|
||||
testrepository==0.0.20
|
||||
testresources==2.0.1
|
||||
testscenarios==0.5.0
|
||||
testtools==2.3.0
|
||||
traceback2==1.4.0
|
||||
tzlocal==1.5.1
|
||||
ujson==1.35
|
||||
unittest2==1.1.0
|
||||
urllib3==1.22
|
||||
vine==1.1.4
|
||||
voluptuous==0.11.1
|
||||
waitress==1.1.0
|
||||
warlock==1.3.0
|
||||
WebOb==1.7.4
|
||||
WebTest==2.0.29
|
||||
wrapt==1.10.11
|
||||
WSME==0.9.2
|
||||
@@ -1,15 +0,0 @@
|
||||
- hosts: primary
|
||||
tasks:
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=/logs/**
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
@@ -1,67 +0,0 @@
|
||||
- hosts: primary
|
||||
name: Legacy Watcher tempest base multinode
|
||||
tasks:
|
||||
|
||||
- name: Ensure legacy workspace directory
|
||||
file:
|
||||
path: '{{ ansible_user_dir }}/workspace'
|
||||
state: directory
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
cat > clonemap.yaml << EOF
|
||||
clonemap:
|
||||
- name: openstack-infra/devstack-gate
|
||||
dest: devstack-gate
|
||||
EOF
|
||||
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
|
||||
git://git.openstack.org \
|
||||
openstack-infra/devstack-gate
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
cat << 'EOF' >>"/tmp/dg-local.conf"
|
||||
[[local|localrc]]
|
||||
TEMPEST_PLUGINS='/opt/stack/new/watcher-tempest-plugin'
|
||||
enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
|
||||
# Enable watcher devstack plugin.
|
||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
||||
|
||||
EOF
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
|
||||
export DEVSTACK_SUBNODE_CONFIG=" "
|
||||
export PYTHONUNBUFFERED=true
|
||||
export DEVSTACK_GATE_TEMPEST=1
|
||||
export DEVSTACK_GATE_NEUTRON=1
|
||||
export DEVSTACK_GATE_TOPOLOGY="multinode"
|
||||
export PROJECTS="openstack/watcher $PROJECTS"
|
||||
export PROJECTS="openstack/python-watcherclient $PROJECTS"
|
||||
export PROJECTS="openstack/watcher-tempest-plugin $PROJECTS"
|
||||
|
||||
export DEVSTACK_GATE_TEMPEST_REGEX="watcher_tempest_plugin"
|
||||
|
||||
export BRANCH_OVERRIDE=default
|
||||
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
|
||||
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
|
||||
fi
|
||||
|
||||
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
|
||||
./safe-devstack-vm-gate-wrap.sh
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
@@ -1,80 +0,0 @@
|
||||
- hosts: primary
|
||||
tasks:
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=**/*nose_results.html
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=**/*testr_results.html.gz
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=/.testrepository/tmp*
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=**/*testrepository.subunit.gz
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}/tox'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=/.tox/*/log/*
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
|
||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
||||
synchronize:
|
||||
src: '{{ ansible_user_dir }}/workspace/'
|
||||
dest: '{{ zuul.executor.log_root }}'
|
||||
mode: pull
|
||||
copy_links: true
|
||||
verify_host: true
|
||||
rsync_opts:
|
||||
- --include=/logs/**
|
||||
- --include=*/
|
||||
- --exclude=*
|
||||
- --prune-empty-dirs
|
||||
@@ -1,64 +0,0 @@
|
||||
- hosts: all
|
||||
name: Legacy watcherclient-dsvm-functional
|
||||
tasks:
|
||||
|
||||
- name: Ensure legacy workspace directory
|
||||
file:
|
||||
path: '{{ ansible_user_dir }}/workspace'
|
||||
state: directory
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
cat > clonemap.yaml << EOF
|
||||
clonemap:
|
||||
- name: openstack-infra/devstack-gate
|
||||
dest: devstack-gate
|
||||
EOF
|
||||
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
|
||||
git://git.openstack.org \
|
||||
openstack-infra/devstack-gate
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
cat << 'EOF' >>"/tmp/dg-local.conf"
|
||||
[[local|localrc]]
|
||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
||||
|
||||
EOF
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
|
||||
- shell:
|
||||
cmd: |
|
||||
set -e
|
||||
set -x
|
||||
ENABLED_SERVICES=tempest
|
||||
ENABLED_SERVICES+=,watcher-api,watcher-decision-engine,watcher-applier
|
||||
export ENABLED_SERVICES
|
||||
|
||||
export PYTHONUNBUFFERED=true
|
||||
export BRANCH_OVERRIDE=default
|
||||
export PROJECTS="openstack/watcher $PROJECTS"
|
||||
export DEVSTACK_PROJECT_FROM_GIT=python-watcherclient
|
||||
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
|
||||
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
|
||||
fi
|
||||
function post_test_hook {
|
||||
# Configure and run functional tests
|
||||
$BASE/new/python-watcherclient/watcherclient/tests/functional/hooks/post_test_hook.sh
|
||||
}
|
||||
export -f post_test_hook
|
||||
|
||||
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
|
||||
./safe-devstack-vm-gate-wrap.sh
|
||||
executable: /bin/bash
|
||||
chdir: '{{ ansible_user_dir }}/workspace'
|
||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
||||
14
playbooks/orchestrate-tempest.yaml
Normal file
14
playbooks/orchestrate-tempest.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
- hosts: all
|
||||
# This is the default strategy, however since orchestrate-devstack requires
|
||||
# "linear", it is safer to enforce it in case this is running in an
|
||||
# environment configured with a different default strategy.
|
||||
strategy: linear
|
||||
roles:
|
||||
- orchestrate-devstack
|
||||
|
||||
- hosts: tempest
|
||||
roles:
|
||||
- setup-tempest-run-dir
|
||||
- setup-tempest-data-dir
|
||||
- acl-devstack-files
|
||||
- run-tempest
|
||||
3
playbooks/pre.yaml
Normal file
3
playbooks/pre.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
- hosts: all
|
||||
roles:
|
||||
- add-hostnames-to-hosts
|
||||
@@ -29,7 +29,7 @@ Useful links
|
||||
|
||||
* How to install: https://docs.openstack.org/rally/latest/install_and_upgrade/install.html
|
||||
|
||||
* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
|
||||
* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/quick_start/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
|
||||
|
||||
* More about Rally: https://docs.openstack.org/rally/latest/
|
||||
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
---
|
||||
features:
|
||||
- Audits have 'name' field now, that is more friendly to end users.
|
||||
Audit's name can't exceed 63 characters.
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Feature to exclude instances from audit scope based on project_id is added.
|
||||
Now instances from particular project in OpenStack can be excluded from audit
|
||||
defining scope in audit templates.
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
features:
|
||||
- Watcher has a whole scope of the cluster, when building
|
||||
compute CDM which includes all instances.
|
||||
It filters excluded instances when migration during the
|
||||
audit.
|
||||
@@ -0,0 +1,9 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Added a strategy for one compute node maintenance,
|
||||
without having the user's application been interrupted.
|
||||
If given one backup node, the strategy will firstly
|
||||
migrate all instances from the maintenance node to
|
||||
the backup node. If the backup node is not provided,
|
||||
it will migrate all instances, relying on nova-scheduler.
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
features:
|
||||
- Watcher got an ability to calculate multiple global efficacy indicators
|
||||
during audit's execution. Now global efficacy can be calculated for many
|
||||
resource types (like volumes, instances, network) if strategy supports
|
||||
efficacy indicators.
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
features:
|
||||
- Added notifications about cancelling of action plan.
|
||||
Now event based plugins know when action plan cancel
|
||||
started and completed.
|
||||
@@ -0,0 +1,14 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Instance cold migration logic is now replaced with using Nova migrate
|
||||
Server(migrate Action) API which has host option since v2.56.
|
||||
upgrade:
|
||||
- |
|
||||
Nova API version is now set to 2.56 by default. This needs the migrate
|
||||
action of migration type cold with destination_node parameter to work.
|
||||
fixes:
|
||||
- |
|
||||
The migrate action of migration type cold with destination_node parameter
|
||||
was fixed. Before fixing, it booted an instance in the service project
|
||||
as a migrated instance.
|
||||
@@ -21,6 +21,7 @@ Contents:
|
||||
:maxdepth: 1
|
||||
|
||||
unreleased
|
||||
queens
|
||||
pike
|
||||
ocata
|
||||
newton
|
||||
|
||||
@@ -4,15 +4,15 @@ msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: watcher\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"POT-Creation-Date: 2018-01-26 00:18+0000\n"
|
||||
"POT-Creation-Date: 2018-02-28 12:27+0000\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"PO-Revision-Date: 2018-01-27 12:50+0000\n"
|
||||
"PO-Revision-Date: 2018-02-16 07:20+0000\n"
|
||||
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
||||
"Language-Team: English (United Kingdom)\n"
|
||||
"Language: en-GB\n"
|
||||
"X-Generator: Zanata 3.9.6\n"
|
||||
"Language: en_GB\n"
|
||||
"X-Generator: Zanata 4.3.3\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
|
||||
|
||||
msgid "0.29.0"
|
||||
@@ -194,6 +194,13 @@ msgstr ""
|
||||
"Added Gnocchi support as data source for metrics. Administrator can change "
|
||||
"data source for each strategy using config file."
|
||||
|
||||
msgid ""
|
||||
"Added notifications about cancelling of action plan. Now event based plugins "
|
||||
"know when action plan cancel started and completed."
|
||||
msgstr ""
|
||||
"Added notifications about cancelling of action plan. Now event based plugins "
|
||||
"know when action plan cancel started and completed."
|
||||
|
||||
msgid "Added policies to handle user rights to access Watcher API."
|
||||
msgstr "Added policies to handle user rights to access Watcher API."
|
||||
|
||||
@@ -254,6 +261,13 @@ msgstr ""
|
||||
"threshold, to selected strategy, also strategy to provide parameters info to "
|
||||
"end user."
|
||||
|
||||
msgid ""
|
||||
"Audits have 'name' field now, that is more friendly to end users. Audit's "
|
||||
"name can't exceed 63 characters."
|
||||
msgstr ""
|
||||
"Audits have 'name' field now, that is more friendly to end users. Audit's "
|
||||
"name can't exceed 63 characters."
|
||||
|
||||
msgid "Centralize all configuration options for Watcher."
|
||||
msgstr "Centralise all configuration options for Watcher."
|
||||
|
||||
@@ -335,6 +349,9 @@ msgstr ""
|
||||
"resources will be called \"Audit scope\" and will be defined in each audit "
|
||||
"template (which contains the audit settings)."
|
||||
|
||||
msgid "Queens Series Release Notes"
|
||||
msgstr "Queens Series Release Notes"
|
||||
|
||||
msgid ""
|
||||
"The graph model describes how VMs are associated to compute hosts. This "
|
||||
"allows for seeing relationships upfront between the entities and hence can "
|
||||
@@ -378,6 +395,17 @@ msgstr ""
|
||||
msgid "Watcher database can now be upgraded thanks to Alembic."
|
||||
msgstr "Watcher database can now be upgraded thanks to Alembic."
|
||||
|
||||
msgid ""
|
||||
"Watcher got an ability to calculate multiple global efficacy indicators "
|
||||
"during audit's execution. Now global efficacy can be calculated for many "
|
||||
"resource types (like volumes, instances, network) if strategy supports "
|
||||
"efficacy indicators."
|
||||
msgstr ""
|
||||
"Watcher got an ability to calculate multiple global efficacy indicators "
|
||||
"during audit's execution. Now global efficacy can be calculated for many "
|
||||
"resource types (like volumes, instances, network) if strategy supports "
|
||||
"efficacy indicators."
|
||||
|
||||
msgid ""
|
||||
"Watcher supports multiple metrics backend and relies on Ceilometer and "
|
||||
"Monasca."
|
||||
|
||||
6
releasenotes/source/queens.rst
Normal file

@@ -0,0 +1,6 @@
===================================
Queens Series Release Notes
===================================

.. release-notes::
   :branch: stable/queens
@@ -2,48 +2,48 @@
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
|
||||
apscheduler>=3.0.5 # MIT License
|
||||
enum34>=1.0.4;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
|
||||
jsonpatch!=1.20,>=1.16 # BSD
|
||||
keystoneauth1>=3.3.0 # Apache-2.0
|
||||
apscheduler>=3.5.1 # MIT License
|
||||
enum34>=1.1.6;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
|
||||
jsonpatch>=1.21 # BSD
|
||||
keystoneauth1>=3.4.0 # Apache-2.0
|
||||
jsonschema<3.0.0,>=2.6.0 # MIT
|
||||
keystonemiddleware>=4.17.0 # Apache-2.0
|
||||
lxml!=3.7.0,>=3.4.1 # BSD
|
||||
croniter>=0.3.4 # MIT License
|
||||
oslo.concurrency>=3.25.0 # Apache-2.0
|
||||
oslo.cache>=1.26.0 # Apache-2.0
|
||||
oslo.config>=5.1.0 # Apache-2.0
|
||||
oslo.context>=2.19.2 # Apache-2.0
|
||||
oslo.db>=4.27.0 # Apache-2.0
|
||||
oslo.i18n>=3.15.3 # Apache-2.0
|
||||
oslo.log>=3.36.0 # Apache-2.0
|
||||
oslo.messaging>=5.29.0 # Apache-2.0
|
||||
oslo.policy>=1.30.0 # Apache-2.0
|
||||
oslo.reports>=1.18.0 # Apache-2.0
|
||||
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
|
||||
oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
|
||||
oslo.utils>=3.33.0 # Apache-2.0
|
||||
oslo.versionedobjects>=1.31.2 # Apache-2.0
|
||||
PasteDeploy>=1.5.0 # MIT
|
||||
pbr!=2.1.0,>=2.0.0 # Apache-2.0
|
||||
pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
|
||||
PrettyTable<0.8,>=0.7.1 # BSD
|
||||
voluptuous>=0.8.9 # BSD License
|
||||
gnocchiclient>=3.3.1 # Apache-2.0
|
||||
python-ceilometerclient>=2.5.0 # Apache-2.0
|
||||
python-cinderclient>=3.3.0 # Apache-2.0
|
||||
python-glanceclient>=2.8.0 # Apache-2.0
|
||||
python-keystoneclient>=3.8.0 # Apache-2.0
|
||||
python-monascaclient>=1.7.0 # Apache-2.0
|
||||
python-neutronclient>=6.3.0 # Apache-2.0
|
||||
python-novaclient>=9.1.0 # Apache-2.0
|
||||
python-openstackclient>=3.12.0 # Apache-2.0
|
||||
python-ironicclient>=2.2.0 # Apache-2.0
|
||||
six>=1.10.0 # MIT
|
||||
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
|
||||
stevedore>=1.20.0 # Apache-2.0
|
||||
taskflow>=2.16.0 # Apache-2.0
|
||||
WebOb>=1.7.1 # MIT
|
||||
WSME>=0.8.0 # MIT
|
||||
networkx<2.0,>=1.10 # BSD
|
||||
keystonemiddleware>=4.21.0 # Apache-2.0
|
||||
lxml>=4.1.1 # BSD
|
||||
croniter>=0.3.20 # MIT License
|
||||
oslo.concurrency>=3.26.0 # Apache-2.0
|
||||
oslo.cache>=1.29.0 # Apache-2.0
|
||||
oslo.config>=5.2.0 # Apache-2.0
|
||||
oslo.context>=2.20.0 # Apache-2.0
|
||||
oslo.db>=4.35.0 # Apache-2.0
|
||||
oslo.i18n>=3.20.0 # Apache-2.0
|
||||
oslo.log>=3.37.0 # Apache-2.0
|
||||
oslo.messaging>=5.36.0 # Apache-2.0
|
||||
oslo.policy>=1.34.0 # Apache-2.0
|
||||
oslo.reports>=1.27.0 # Apache-2.0
|
||||
oslo.serialization>=2.25.0 # Apache-2.0
|
||||
oslo.service>=1.30.0 # Apache-2.0
|
||||
oslo.utils>=3.36.0 # Apache-2.0
|
||||
oslo.versionedobjects>=1.32.0 # Apache-2.0
|
||||
PasteDeploy>=1.5.2 # MIT
|
||||
pbr>=3.1.1 # Apache-2.0
|
||||
pecan>=1.2.1 # BSD
|
||||
PrettyTable<0.8,>=0.7.2 # BSD
|
||||
voluptuous>=0.11.1 # BSD License
|
||||
gnocchiclient>=7.0.1 # Apache-2.0
|
||||
python-ceilometerclient>=2.9.0 # Apache-2.0
|
||||
python-cinderclient>=3.5.0 # Apache-2.0
|
||||
python-glanceclient>=2.9.1 # Apache-2.0
|
||||
python-keystoneclient>=3.15.0 # Apache-2.0
|
||||
python-monascaclient>=1.10.0 # Apache-2.0
|
||||
python-neutronclient>=6.7.0 # Apache-2.0
|
||||
python-novaclient>=10.1.0 # Apache-2.0
|
||||
python-openstackclient>=3.14.0 # Apache-2.0
|
||||
python-ironicclient>=2.3.0 # Apache-2.0
|
||||
six>=1.11.0 # MIT
|
||||
SQLAlchemy>=1.2.5 # MIT
|
||||
stevedore>=1.28.0 # Apache-2.0
|
||||
taskflow>=3.1.0 # Apache-2.0
|
||||
WebOb>=1.7.4 # MIT
|
||||
WSME>=0.9.2 # MIT
|
||||
networkx>=1.11 # BSD
|
||||
|
||||
|
||||
16
roles/add-hostnames-to-hosts/tasks/main.yaml
Normal file

@@ -0,0 +1,16 @@
- name: Set up the list of hostnames and addresses
  set_fact:
    hostname_addresses: >
      {% set hosts = {} -%}
      {% for host, vars in hostvars.items() -%}
      {% set _ = hosts.update({vars['ansible_hostname']: vars['nodepool']['private_ipv4']}) -%}
      {% endfor -%}
      {{- hosts -}}

- name: Add inventory hostnames to the hosts file
  become: yes
  lineinfile:
    dest: /etc/hosts
    state: present
    insertafter: EOF
    line: "{{ item.value }} {{ item.key }}"
  with_dict: "{{ hostname_addresses }}"
@@ -58,6 +58,7 @@ watcher_goals =
|
||||
noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization
|
||||
saving_energy = watcher.decision_engine.goal.goals:SavingEnergy
|
||||
hardware_maintenance = watcher.decision_engine.goal.goals:HardwareMaintenance
|
||||
cluster_maintaining = watcher.decision_engine.goal.goals:ClusterMaintaining
|
||||
|
||||
watcher_scoring_engines =
|
||||
dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer
|
||||
@@ -80,6 +81,7 @@ watcher_strategies =
|
||||
noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor
|
||||
storage_capacity_balance = watcher.decision_engine.strategy.strategies.storage_capacity_balance:StorageCapacityBalance
|
||||
zone_migration = watcher.decision_engine.strategy.strategies.zone_migration:ZoneMigration
|
||||
host_maintenance = watcher.decision_engine.strategy.strategies.host_maintenance:HostMaintenance
|
||||
|
||||
watcher_actions =
|
||||
migrate = watcher.applier.actions.migration:Migrate
|
||||
|
||||
@@ -2,25 +2,27 @@
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
|
||||
coverage!=4.4,>=4.0 # Apache-2.0
|
||||
doc8>=0.6.0 # Apache-2.0
|
||||
freezegun>=0.3.6 # Apache-2.0
|
||||
coverage!=4.4 # Apache-2.0
|
||||
doc8 # Apache-2.0
|
||||
freezegun # Apache-2.0
|
||||
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
|
||||
mock>=2.0.0 # BSD
|
||||
oslotest>=3.2.0 # Apache-2.0
|
||||
os-testr>=1.0.0 # Apache-2.0
|
||||
testrepository>=0.0.18 # Apache-2.0/BSD
|
||||
testscenarios>=0.4 # Apache-2.0/BSD
|
||||
testtools>=2.2.0 # MIT
|
||||
mock # BSD
|
||||
oslotest # Apache-2.0
|
||||
os-testr # Apache-2.0
|
||||
testrepository # Apache-2.0/BSD
|
||||
testscenarios # Apache-2.0/BSD
|
||||
testtools # MIT
|
||||
|
||||
# Doc requirements
|
||||
openstackdocstheme>=1.18.1 # Apache-2.0
|
||||
sphinx!=1.6.6,>=1.6.2 # BSD
|
||||
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
|
||||
openstackdocstheme # Apache-2.0
|
||||
sphinx!=1.6.6,!=1.6.7 # BSD
|
||||
sphinxcontrib-pecanwsme # Apache-2.0
|
||||
|
||||
# api-ref
|
||||
os-api-ref # Apache-2.0
|
||||
|
||||
# releasenotes
|
||||
reno>=2.5.0 # Apache-2.0
|
||||
reno # Apache-2.0
|
||||
|
||||
# bandit
|
||||
bandit>=1.1.0 # Apache-2.0
|
||||
|
||||
9
tox.ini
@@ -55,7 +55,7 @@ filename = *.py,app.wsgi
|
||||
show-source=True
|
||||
ignore= H105,E123,E226,N320,H202
|
||||
builtins= _
|
||||
enable-extensions = H106,H203
|
||||
enable-extensions = H106,H203,H904
|
||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes
|
||||
|
||||
[testenv:wheel]
|
||||
@@ -76,3 +76,10 @@ commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasen
|
||||
[testenv:bandit]
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
commands = bandit -r watcher -x tests -n5 -ll -s B320
|
||||
|
||||
[testenv:lower-constraints]
|
||||
basepython = python3
|
||||
deps =
|
||||
-c{toxinidir}/lower-constraints.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
-r{toxinidir}/requirements.txt
|
||||
|
||||
@@ -205,7 +205,7 @@ class ActionCollection(collection.Collection):
|
||||
collection = ActionCollection()
|
||||
collection.actions = [Action.convert_with_links(p, expand)
|
||||
for p in actions]
|
||||
|
||||
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||
return collection
|
||||
|
||||
@classmethod
|
||||
@@ -232,6 +232,10 @@ class ActionsController(rest.RestController):
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None,
|
||||
action_plan_uuid=None, audit_uuid=None):
|
||||
additional_fields = ['action_plan_uuid']
|
||||
|
||||
api_utils.validate_sort_key(sort_key, list(objects.Action.fields) +
|
||||
additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
@@ -247,7 +251,10 @@ class ActionsController(rest.RestController):
|
||||
if audit_uuid:
|
||||
filters['audit_uuid'] = audit_uuid
|
||||
|
||||
sort_db_key = sort_key
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
actions = objects.Action.list(pecan.request.context,
|
||||
limit,
|
||||
@@ -255,11 +262,15 @@ class ActionsController(rest.RestController):
|
||||
sort_dir=sort_dir,
|
||||
filters=filters)
|
||||
|
||||
return ActionCollection.convert_with_links(actions, limit,
|
||||
url=resource_url,
|
||||
expand=expand,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
actions_collection = ActionCollection.convert_with_links(
|
||||
actions, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(actions_collection.actions,
|
||||
sort_key, sort_dir)
|
||||
|
||||
return actions_collection
|
||||
|
||||
@wsme_pecan.wsexpose(ActionCollection, types.uuid, int,
|
||||
wtypes.text, wtypes.text, types.uuid,
|
||||
|
||||
@@ -305,17 +305,6 @@ class ActionPlanCollection(collection.Collection):
|
||||
ap_collection = ActionPlanCollection()
|
||||
ap_collection.action_plans = [ActionPlan.convert_with_links(
|
||||
p, expand) for p in rpc_action_plans]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'audit_uuid':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
ap_collection.action_plans = sorted(
|
||||
ap_collection.action_plans,
|
||||
key=lambda action_plan: action_plan.audit_uuid,
|
||||
reverse=reverse)
|
||||
|
||||
ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs)
|
||||
return ap_collection
|
||||
|
||||
@@ -331,20 +320,25 @@ class ActionPlansController(rest.RestController):
|
||||
|
||||
def __init__(self):
|
||||
super(ActionPlansController, self).__init__()
|
||||
self.applier_client = rpcapi.ApplierAPI()
|
||||
|
||||
from_actionsPlans = False
|
||||
"""A flag to indicate if the requests to this controller are coming
|
||||
from the top-level resource ActionPlan."""
|
||||
|
||||
_custom_actions = {
|
||||
'detail': ['GET'],
|
||||
'start': ['POST'],
|
||||
'detail': ['GET']
|
||||
}
|
||||
|
||||
def _get_action_plans_collection(self, marker, limit,
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None, audit_uuid=None,
|
||||
strategy=None):
|
||||
additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name']
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.ActionPlan.fields) + additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
@@ -363,10 +357,10 @@ class ActionPlansController(rest.RestController):
|
||||
else:
|
||||
filters['strategy_name'] = strategy
|
||||
|
||||
if sort_key == 'audit_uuid':
|
||||
sort_db_key = None
|
||||
else:
|
||||
sort_db_key = sort_key
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
action_plans = objects.ActionPlan.list(
|
||||
pecan.request.context,
|
||||
@@ -374,12 +368,15 @@ class ActionPlansController(rest.RestController):
|
||||
marker_obj, sort_key=sort_db_key,
|
||||
sort_dir=sort_dir, filters=filters)
|
||||
|
||||
return ActionPlanCollection.convert_with_links(
|
||||
action_plans, limit,
|
||||
url=resource_url,
|
||||
expand=expand,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
action_plans_collection = ActionPlanCollection.convert_with_links(
|
||||
action_plans, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(action_plans_collection.action_plans,
|
||||
sort_key, sort_dir)
|
||||
|
||||
return action_plans_collection
|
||||
|
||||
@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
|
||||
wtypes.text, types.uuid, wtypes.text)
|
||||
@@ -540,7 +537,7 @@ class ActionPlansController(rest.RestController):
|
||||
if action_plan_to_update[field] != patch_val:
|
||||
action_plan_to_update[field] = patch_val
|
||||
|
||||
if (field == 'state'and
|
||||
if (field == 'state' and
|
||||
patch_val == objects.action_plan.State.PENDING):
|
||||
launch_action_plan = True
|
||||
|
||||
@@ -557,11 +554,39 @@ class ActionPlansController(rest.RestController):
|
||||
a.save()
|
||||
|
||||
if launch_action_plan:
|
||||
applier_client = rpcapi.ApplierAPI()
|
||||
applier_client.launch_action_plan(pecan.request.context,
|
||||
action_plan.uuid)
|
||||
self.applier_client.launch_action_plan(pecan.request.context,
|
||||
action_plan.uuid)
|
||||
|
||||
action_plan_to_update = objects.ActionPlan.get_by_uuid(
|
||||
pecan.request.context,
|
||||
action_plan_uuid)
|
||||
return ActionPlan.convert_with_links(action_plan_to_update)
|
||||
|
||||
@wsme_pecan.wsexpose(ActionPlan, types.uuid)
|
||||
def start(self, action_plan_uuid, **kwargs):
|
||||
"""Start an action_plan
|
||||
|
||||
:param action_plan_uuid: UUID of an action_plan.
|
||||
"""
|
||||
|
||||
action_plan_to_start = api_utils.get_resource(
|
||||
'ActionPlan', action_plan_uuid, eager=True)
|
||||
context = pecan.request.context
|
||||
|
||||
policy.enforce(context, 'action_plan:start', action_plan_to_start,
|
||||
action='action_plan:start')
|
||||
|
||||
if action_plan_to_start['state'] != \
|
||||
objects.action_plan.State.RECOMMENDED:
|
||||
raise Exception.StartError(
|
||||
state=action_plan_to_start.state)
|
||||
|
||||
action_plan_to_start['state'] = objects.action_plan.State.PENDING
|
||||
action_plan_to_start.save()
|
||||
|
||||
self.applier_client.launch_action_plan(pecan.request.context,
|
||||
action_plan_uuid)
|
||||
action_plan_to_start = objects.ActionPlan.get_by_uuid(
|
||||
pecan.request.context, action_plan_uuid)
|
||||
|
||||
return ActionPlan.convert_with_links(action_plan_to_start)
|
||||
|
||||
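The hunk above adds a dedicated `start` custom action to ActionPlansController: a plan must be in the RECOMMENDED state, is flipped to PENDING, and is then handed to the applier. A hedged client-side sketch follows; the URL layout (/v1/action_plans/{uuid}/start) and the default API port are assumptions inferred from the pecan `_custom_actions` routing, not something this diff states.

import requests

WATCHER_API = "http://controller:9322/v1"   # assumption: default Watcher API port
TOKEN = "gAAAAAB..."                        # a valid Keystone token
plan_uuid = "01234567-89ab-cdef-0123-456789abcdef"  # an existing action plan

# POST to the assumed custom-action URL; the response body is the updated
# action plan representation (the wsexpose decorator returns an ActionPlan).
resp = requests.post(
    "%s/action_plans/%s/start" % (WATCHER_API, plan_uuid),
    headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()
print(resp.json().get("state"))   # expected to have moved to PENDING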
@@ -389,17 +389,6 @@ class AuditCollection(collection.Collection):
|
||||
collection = AuditCollection()
|
||||
collection.audits = [Audit.convert_with_links(p, expand)
|
||||
for p in rpc_audits]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'goal_uuid':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
collection.audits = sorted(
|
||||
collection.audits,
|
||||
key=lambda audit: audit.goal_uuid,
|
||||
reverse=reverse)
|
||||
|
||||
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||
return collection
|
||||
|
||||
@@ -414,6 +403,7 @@ class AuditsController(rest.RestController):
|
||||
"""REST controller for Audits."""
|
||||
def __init__(self):
|
||||
super(AuditsController, self).__init__()
|
||||
self.dc_client = rpcapi.DecisionEngineAPI()
|
||||
|
||||
from_audits = False
|
||||
"""A flag to indicate if the requests to this controller are coming
|
||||
@@ -427,8 +417,14 @@ class AuditsController(rest.RestController):
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None, goal=None,
|
||||
strategy=None):
|
||||
additional_fields = ["goal_uuid", "goal_name", "strategy_uuid",
|
||||
"strategy_name"]
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.Audit.fields) + additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.Audit.get_by_uuid(pecan.request.context,
|
||||
@@ -449,23 +445,25 @@ class AuditsController(rest.RestController):
|
||||
# TODO(michaelgugino): add method to get goal by name.
|
||||
filters['strategy_name'] = strategy
|
||||
|
||||
if sort_key == 'goal_uuid':
|
||||
sort_db_key = 'goal_id'
|
||||
elif sort_key == 'strategy_uuid':
|
||||
sort_db_key = 'strategy_id'
|
||||
else:
|
||||
sort_db_key = sort_key
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
audits = objects.Audit.list(pecan.request.context,
|
||||
limit,
|
||||
marker_obj, sort_key=sort_db_key,
|
||||
sort_dir=sort_dir, filters=filters)
|
||||
|
||||
return AuditCollection.convert_with_links(audits, limit,
|
||||
url=resource_url,
|
||||
expand=expand,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
audits_collection = AuditCollection.convert_with_links(
|
||||
audits, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(audits_collection.audits, sort_key,
|
||||
sort_dir)
|
||||
|
||||
return audits_collection
|
||||
|
||||
@wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text,
|
||||
wtypes.text, wtypes.text, wtypes.text, int)
|
||||
@@ -578,8 +576,7 @@ class AuditsController(rest.RestController):
|
||||
|
||||
# trigger decision-engine to run the audit
|
||||
if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value:
|
||||
dc_client = rpcapi.DecisionEngineAPI()
|
||||
dc_client.trigger_audit(context, new_audit.uuid)
|
||||
self.dc_client.trigger_audit(context, new_audit.uuid)
|
||||
|
||||
return Audit.convert_with_links(new_audit)
|
||||
|
||||
@@ -642,8 +639,8 @@ class AuditsController(rest.RestController):
|
||||
context = pecan.request.context
|
||||
audit_to_delete = api_utils.get_resource(
|
||||
'Audit', audit, eager=True)
|
||||
policy.enforce(context, 'audit:update', audit_to_delete,
|
||||
action='audit:update')
|
||||
policy.enforce(context, 'audit:delete', audit_to_delete,
|
||||
action='audit:delete')
|
||||
|
||||
initial_state = audit_to_delete.state
|
||||
new_state = objects.audit.State.DELETED
|
||||
|
||||
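The policy fix above (enforcing `audit:delete` rather than `audit:update` when an audit is removed) only shows its effect when the two rules are granted to different roles. A small oslo.policy sketch with made-up rule strings illustrates the difference:

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF)
enforcer.register_defaults([
    # Hypothetical rules: operators may update audits, only admins may delete.
    policy.RuleDefault('audit:update', 'role:admin or role:operator'),
    policy.RuleDefault('audit:delete', 'role:admin'),
])

creds = {'roles': ['operator'], 'project_id': 'p1', 'user_id': 'u1'}
target = {'project_id': 'p1'}

# With the old check a plain operator could delete audits; with the new one
# the delete rule is evaluated and the request is refused.
assert enforcer.enforce('audit:update', target, creds)
assert not enforcer.enforce('audit:delete', target, creds)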
@@ -474,9 +474,13 @@ class AuditTemplatesController(rest.RestController):
|
||||
def _get_audit_templates_collection(self, filters, marker, limit,
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None):
|
||||
additional_fields = ["goal_uuid", "goal_name", "strategy_uuid",
|
||||
"strategy_name"]
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.AuditTemplate.fields) + additional_fields)
|
||||
api_utils.validate_search_filters(
|
||||
filters, list(objects.audit_template.AuditTemplate.fields) +
|
||||
["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"])
|
||||
filters, list(objects.AuditTemplate.fields) + additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
@@ -486,19 +490,26 @@ class AuditTemplatesController(rest.RestController):
|
||||
pecan.request.context,
|
||||
marker)
|
||||
|
||||
audit_templates = objects.AuditTemplate.list(
|
||||
pecan.request.context,
|
||||
filters,
|
||||
limit,
|
||||
marker_obj, sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
return AuditTemplateCollection.convert_with_links(audit_templates,
|
||||
limit,
|
||||
url=resource_url,
|
||||
expand=expand,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
audit_templates = objects.AuditTemplate.list(
|
||||
pecan.request.context, filters, limit, marker_obj,
|
||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||
|
||||
audit_templates_collection = \
|
||||
AuditTemplateCollection.convert_with_links(
|
||||
audit_templates, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(
|
||||
audit_templates_collection.audit_templates, sort_key,
|
||||
sort_dir)
|
||||
|
||||
return audit_templates_collection
|
||||
|
||||
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text,
|
||||
types.uuid, int, wtypes.text, wtypes.text)
|
||||
@@ -677,8 +688,8 @@ class AuditTemplatesController(rest.RestController):
|
||||
context = pecan.request.context
|
||||
audit_template_to_delete = api_utils.get_resource('AuditTemplate',
|
||||
audit_template)
|
||||
policy.enforce(context, 'audit_template:update',
|
||||
policy.enforce(context, 'audit_template:delete',
|
||||
audit_template_to_delete,
|
||||
action='audit_template:update')
|
||||
action='audit_template:delete')
|
||||
|
||||
audit_template_to_delete.soft_delete()
|
||||
|
||||
@@ -130,17 +130,6 @@ class GoalCollection(collection.Collection):
|
||||
goal_collection = GoalCollection()
|
||||
goal_collection.goals = [
|
||||
Goal.convert_with_links(g, expand) for g in goals]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'strategy':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
goal_collection.goals = sorted(
|
||||
goal_collection.goals,
|
||||
key=lambda goal: goal.uuid,
|
||||
reverse=reverse)
|
||||
|
||||
goal_collection.next = goal_collection.get_next(
|
||||
limit, url=url, **kwargs)
|
||||
return goal_collection
|
||||
@@ -167,17 +156,19 @@ class GoalsController(rest.RestController):
|
||||
|
||||
def _get_goals_collection(self, marker, limit, sort_key, sort_dir,
|
||||
expand=False, resource_url=None):
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.Goal.fields))
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Goal.fields
|
||||
else None)
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.Goal.get_by_uuid(
|
||||
pecan.request.context, marker)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Goal.fields
|
||||
else None)
|
||||
|
||||
goals = objects.Goal.list(pecan.request.context, limit, marker_obj,
|
||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||
|
||||
|
||||
@@ -123,17 +123,6 @@ class ScoringEngineCollection(collection.Collection):
|
||||
collection = ScoringEngineCollection()
|
||||
collection.scoring_engines = [ScoringEngine.convert_with_links(
|
||||
se, expand) for se in scoring_engines]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'name':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
collection.goals = sorted(
|
||||
collection.scoring_engines,
|
||||
key=lambda se: se.name,
|
||||
reverse=reverse)
|
||||
|
||||
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||
return collection
|
||||
|
||||
@@ -160,7 +149,8 @@ class ScoringEngineController(rest.RestController):
|
||||
def _get_scoring_engines_collection(self, marker, limit,
|
||||
sort_key, sort_dir, expand=False,
|
||||
resource_url=None):
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.ScoringEngine.fields))
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
@@ -171,7 +161,8 @@ class ScoringEngineController(rest.RestController):
|
||||
|
||||
filters = {}
|
||||
|
||||
sort_db_key = sort_key
|
||||
sort_db_key = (sort_key if sort_key in objects.ScoringEngine.fields
|
||||
else None)
|
||||
|
||||
scoring_engines = objects.ScoringEngine.list(
|
||||
context=pecan.request.context,
|
||||
|
||||
@@ -154,17 +154,6 @@ class ServiceCollection(collection.Collection):
|
||||
service_collection = ServiceCollection()
|
||||
service_collection.services = [
|
||||
Service.convert_with_links(g, expand) for g in services]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'service':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
service_collection.services = sorted(
|
||||
service_collection.services,
|
||||
key=lambda service: service.id,
|
||||
reverse=reverse)
|
||||
|
||||
service_collection.next = service_collection.get_next(
|
||||
limit, url=url, marker_field='id', **kwargs)
|
||||
return service_collection
|
||||
@@ -191,17 +180,19 @@ class ServicesController(rest.RestController):
|
||||
|
||||
def _get_services_collection(self, marker, limit, sort_key, sort_dir,
|
||||
expand=False, resource_url=None):
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.Service.fields))
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Service.fields
|
||||
else None)
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.Service.get(
|
||||
pecan.request.context, marker)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Service.fields
|
||||
else None)
|
||||
|
||||
services = objects.Service.list(
|
||||
pecan.request.context, limit, marker_obj,
|
||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||
|
||||
@@ -173,17 +173,6 @@ class StrategyCollection(collection.Collection):
|
||||
strategy_collection = StrategyCollection()
|
||||
strategy_collection.strategies = [
|
||||
Strategy.convert_with_links(g, expand) for g in strategies]
|
||||
|
||||
if 'sort_key' in kwargs:
|
||||
reverse = False
|
||||
if kwargs['sort_key'] == 'strategy':
|
||||
if 'sort_dir' in kwargs:
|
||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
||||
strategy_collection.strategies = sorted(
|
||||
strategy_collection.strategies,
|
||||
key=lambda strategy: strategy.uuid,
|
||||
reverse=reverse)
|
||||
|
||||
strategy_collection.next = strategy_collection.get_next(
|
||||
limit, url=url, **kwargs)
|
||||
return strategy_collection
|
||||
@@ -211,28 +200,39 @@ class StrategiesController(rest.RestController):
|
||||
|
||||
def _get_strategies_collection(self, filters, marker, limit, sort_key,
|
||||
sort_dir, expand=False, resource_url=None):
|
||||
additional_fields = ["goal_uuid", "goal_name"]
|
||||
|
||||
api_utils.validate_sort_key(
|
||||
sort_key, list(objects.Strategy.fields) + additional_fields)
|
||||
api_utils.validate_search_filters(
|
||||
filters, list(objects.strategy.Strategy.fields) +
|
||||
["goal_uuid", "goal_name"])
|
||||
filters, list(objects.Strategy.fields) + additional_fields)
|
||||
limit = api_utils.validate_limit(limit)
|
||||
api_utils.validate_sort_dir(sort_dir)
|
||||
|
||||
sort_db_key = (sort_key if sort_key in objects.Strategy.fields
|
||||
else None)
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.Strategy.get_by_uuid(
|
||||
pecan.request.context, marker)
|
||||
|
||||
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||
additional_fields)
|
||||
sort_db_key = (sort_key if not need_api_sort
|
||||
else None)
|
||||
|
||||
strategies = objects.Strategy.list(
|
||||
pecan.request.context, limit, marker_obj, filters=filters,
|
||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||
|
||||
return StrategyCollection.convert_with_links(
|
||||
strategies_collection = StrategyCollection.convert_with_links(
|
||||
strategies, limit, url=resource_url, expand=expand,
|
||||
sort_key=sort_key, sort_dir=sort_dir)
|
||||
|
||||
if need_api_sort:
|
||||
api_utils.make_api_sort(strategies_collection.strategies,
|
||||
sort_key, sort_dir)
|
||||
|
||||
return strategies_collection
|
||||
|
||||
@wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text,
|
||||
int, wtypes.text, wtypes.text)
|
||||
def get_all(self, goal=None, marker=None, limit=None,
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from operator import attrgetter
|
||||
|
||||
import jsonpatch
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import reflection
|
||||
@@ -54,6 +56,13 @@ def validate_sort_dir(sort_dir):
|
||||
"'asc' or 'desc'") % sort_dir)
|
||||
|
||||
|
||||
def validate_sort_key(sort_key, allowed_fields):
|
||||
# Very lightweight validation for now
|
||||
if sort_key not in allowed_fields:
|
||||
raise wsme.exc.ClientSideError(
|
||||
_("Invalid sort key: %s") % sort_key)
|
||||
|
||||
|
||||
def validate_search_filters(filters, allowed_fields):
|
||||
# Very lightweight validation for now
|
||||
# todo: improve this (e.g. https://www.parse.com/docs/rest/guide/#queries)
|
||||
@@ -63,6 +72,19 @@ def validate_search_filters(filters, allowed_fields):
|
||||
_("Invalid filter: %s") % filter_name)
|
||||
|
||||
|
||||
def check_need_api_sort(sort_key, additional_fields):
|
||||
return sort_key in additional_fields
|
||||
|
||||
|
||||
def make_api_sort(sorting_list, sort_key, sort_dir):
|
||||
# First sort by uuid field, than sort by sort_key
|
||||
# sort() ensures stable sorting, so we could
|
||||
# make lexicographical sort
|
||||
reverse_direction = (sort_dir == 'desc')
|
||||
sorting_list.sort(key=attrgetter('uuid'), reverse=reverse_direction)
|
||||
sorting_list.sort(key=attrgetter(sort_key), reverse=reverse_direction)
|
||||
|
||||
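check_need_api_sort() and make_api_sort() above replace the hand-rolled sorting blocks removed from the controllers earlier in this diff. A self-contained illustration of the intended behaviour (the Audit tuple here is a stand-in, not a Watcher object):

from collections import namedtuple
from operator import attrgetter

# Stand-in for an API object whose 'goal_uuid' is not a database column.
Audit = namedtuple('Audit', ['uuid', 'goal_uuid'])
audits = [Audit('b', 'g2'), Audit('a', 'g1'), Audit('c', 'g1')]

additional_fields = ['goal_uuid']
sort_key, sort_dir = 'goal_uuid', 'asc'

# check_need_api_sort(): the key is only known to the API layer, so the DB
# query runs unsorted and the result is sorted here instead.
if sort_key in additional_fields:
    reverse = (sort_dir == 'desc')
    # Stable two-pass sort: ties on sort_key keep a deterministic uuid order.
    audits.sort(key=attrgetter('uuid'), reverse=reverse)
    audits.sort(key=attrgetter(sort_key), reverse=reverse)

assert [a.uuid for a in audits] == ['a', 'c', 'b']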
|
||||
def apply_jsonpatch(doc, patch):
|
||||
for p in patch:
|
||||
if p['op'] == 'add' and p['path'].count('/') == 1:
|
||||
|
||||
@@ -63,7 +63,7 @@ class ContextHook(hooks.PecanHook):
|
||||
auth_url = headers.get('X-Auth-Url')
|
||||
if auth_url is None:
|
||||
importutils.import_module('keystonemiddleware.auth_token')
|
||||
auth_url = cfg.CONF.keystone_authtoken.auth_uri
|
||||
auth_url = cfg.CONF.keystone_authtoken.www_authenticate_uri
|
||||
|
||||
state.request.context = context.make_context(
|
||||
auth_token=auth_token,
|
||||
|
||||
@@ -50,6 +50,12 @@ class Migrate(base.BaseAction):
|
||||
source and the destination compute hostname (list of available compute
|
||||
hosts is returned by this command: ``nova service-list --binary
|
||||
nova-compute``).
|
||||
|
||||
.. note::
|
||||
|
||||
Nova API version must be 2.56 or above if `destination_node` parameter
|
||||
is given.
|
||||
|
||||
"""
|
||||
|
||||
# input parameters constants
|
||||
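The note added to the Migrate docstring above reflects a hard Nova constraint: a cold migration can only name a destination host from compute API microversion 2.56 onwards. Below is a hedged sketch of the same gate using python-novaclient directly; the session and server id are placeholders, and this is not Watcher's own code path.

from novaclient import api_versions
from novaclient import client as nova_client

def cold_migrate(session, server_id, destination=None, version="2.56"):
    """Cold-migrate a server, optionally to an explicit destination host."""
    if destination and (api_versions.APIVersion(version) <
                        api_versions.APIVersion("2.56")):
        raise ValueError("destination_node requires Nova API >= 2.56")

    nova = nova_client.Client(version, session=session)
    server = nova.servers.get(server_id)
    if destination:
        # 2.56+ lets the caller name the target compute host.
        server.migrate(host=destination)
    else:
        # Older microversions: the scheduler picks the destination.
        server.migrate()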
@@ -113,8 +119,10 @@ class Migrate(base.BaseAction):
|
||||
dest_hostname=destination)
|
||||
except nova_helper.nvexceptions.ClientException as e:
|
||||
LOG.debug("Nova client exception occurred while live "
|
||||
"migrating instance %s.Exception: %s" %
|
||||
(self.instance_uuid, e))
|
||||
"migrating instance "
|
||||
"%(instance)s.Exception: %(exception)s",
|
||||
{'instance': self.instance_uuid, 'exception': e})
|
||||
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
LOG.critical("Unexpected error occurred. Migration failed for "
|
||||
|
||||
@@ -40,10 +40,10 @@ def main():
|
||||
|
||||
if host == '127.0.0.1':
|
||||
LOG.info('serving on 127.0.0.1:%(port)s, '
|
||||
'view at %(protocol)s://127.0.0.1:%(port)s' %
|
||||
'view at %(protocol)s://127.0.0.1:%(port)s',
|
||||
dict(protocol=protocol, port=port))
|
||||
else:
|
||||
LOG.info('serving on %(protocol)s://%(host)s:%(port)s' %
|
||||
LOG.info('serving on %(protocol)s://%(host)s:%(port)s',
|
||||
dict(protocol=protocol, host=host, port=port))
|
||||
|
||||
api_schedule = scheduling.APISchedulingService()
|
||||
|
||||
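The change above is one instance of a pattern applied throughout this series: arguments are passed to the logging call instead of being pre-formatted with %, so interpolation is deferred until the record is actually emitted. This is what the H904 hacking check enabled in tox.ini in this change set enforces. A minimal stand-alone illustration:

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
protocol, port = 'http', 9322

# Old style: the message is rendered even when INFO is filtered out.
LOG.info('serving on %(protocol)s://127.0.0.1:%(port)s' %
         dict(protocol=protocol, port=port))

# New style: logging interpolates only if a handler accepts the record.
LOG.info('serving on %(protocol)s://127.0.0.1:%(port)s',
         dict(protocol=protocol, port=port))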
@@ -75,7 +75,7 @@ class CinderHelper(object):
|
||||
search_opts={'all_tenants': True})
|
||||
|
||||
def get_volume_type_by_backendname(self, backendname):
|
||||
"""Retrun a list of volume type"""
|
||||
"""Return a list of volume type"""
|
||||
volume_type_list = self.get_volume_type_list()
|
||||
|
||||
volume_type = [volume_type.name for volume_type in volume_type_list
|
||||
@@ -139,13 +139,13 @@ class CinderHelper(object):
|
||||
volume = self.get_volume(volume.id)
|
||||
time.sleep(retry_interval)
|
||||
retry -= 1
|
||||
LOG.debug("retry count: %s" % retry)
|
||||
LOG.debug("Waiting to complete deletion of volume %s" % volume.id)
|
||||
LOG.debug("retry count: %s", retry)
|
||||
LOG.debug("Waiting to complete deletion of volume %s", volume.id)
|
||||
if self._can_get_volume(volume.id):
|
||||
LOG.error("Volume deletion error: %s" % volume.id)
|
||||
LOG.error("Volume deletion error: %s", volume.id)
|
||||
return False
|
||||
|
||||
LOG.debug("Volume %s was deleted successfully." % volume.id)
|
||||
LOG.debug("Volume %s was deleted successfully.", volume.id)
|
||||
return True
|
||||
|
||||
def check_migrated(self, volume, retry_interval=10):
|
||||
@@ -179,8 +179,7 @@ class CinderHelper(object):
|
||||
LOG.error(error_msg)
|
||||
return False
|
||||
LOG.debug(
|
||||
"Volume migration succeeded : "
|
||||
"volume %s is now on host '%s'." % (
|
||||
"Volume migration succeeded : volume %s is now on host '%s'.", (
|
||||
volume.id, host_name))
|
||||
return True
|
||||
|
||||
@@ -194,8 +193,8 @@ class CinderHelper(object):
|
||||
message=(_("Volume type must be same for migrating")))
|
||||
|
||||
source_node = getattr(volume, 'os-vol-host-attr:host')
|
||||
LOG.debug("Volume %s found on host '%s'."
|
||||
% (volume.id, source_node))
|
||||
LOG.debug("Volume %s found on host '%s'.",
|
||||
(volume.id, source_node))
|
||||
|
||||
self.cinder.volumes.migrate_volume(
|
||||
volume, dest_node, False, True)
|
||||
@@ -211,8 +210,8 @@ class CinderHelper(object):
|
||||
|
||||
source_node = getattr(volume, 'os-vol-host-attr:host')
|
||||
LOG.debug(
|
||||
"Volume %s found on host '%s'." % (
|
||||
volume.id, source_node))
|
||||
"Volume %s found on host '%s'.",
|
||||
(volume.id, source_node))
|
||||
|
||||
self.cinder.volumes.retype(
|
||||
volume, dest_type, "on-demand")
|
||||
@@ -234,14 +233,14 @@ class CinderHelper(object):
|
||||
LOG.debug('Waiting volume creation of {0}'.format(new_volume))
|
||||
time.sleep(retry_interval)
|
||||
retry -= 1
|
||||
LOG.debug("retry count: %s" % retry)
|
||||
LOG.debug("retry count: %s", retry)
|
||||
|
||||
if getattr(new_volume, 'status') != 'available':
|
||||
error_msg = (_("Failed to create volume '%(volume)s. ") %
|
||||
{'volume': new_volume.id})
|
||||
raise Exception(error_msg)
|
||||
|
||||
LOG.debug("Volume %s was created successfully." % new_volume)
|
||||
LOG.debug("Volume %s was created successfully.", new_volume)
|
||||
return new_volume
|
||||
|
||||
def delete_volume(self, volume):
|
||||
|
||||
@@ -62,6 +62,7 @@ class RequestContext(context.RequestContext):
|
||||
# safely ignore this as we don't use it.
|
||||
kwargs.pop('user_identity', None)
|
||||
kwargs.pop('global_request_id', None)
|
||||
kwargs.pop('project', None)
|
||||
if kwargs:
|
||||
LOG.warning('Arguments dropped when creating context: %s',
|
||||
str(kwargs))
|
||||
|
||||
@@ -305,7 +305,7 @@ class ActionFilterCombinationProhibited(Invalid):
|
||||
|
||||
|
||||
class UnsupportedActionType(UnsupportedError):
|
||||
msg_fmt = _("Provided %(action_type) is not supported yet")
|
||||
msg_fmt = _("Provided %(action_type)s is not supported yet")
|
||||
|
||||
|
||||
class EfficacyIndicatorNotFound(ResourceNotFound):
|
||||
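The one-character fix above is behaviour-affecting rather than cosmetic: without the trailing `s`, the old format string parses the following space and `i` as an integer conversion, so building the message fails. A quick check:

old = "Provided %(action_type) is not supported yet"
new = "Provided %(action_type)s is not supported yet"

try:
    old % {'action_type': 'dummy'}
except TypeError:
    # '%(action_type) i...' is read as a %i (integer) conversion with a
    # space flag, so interpolating a string value raises TypeError.
    pass

assert new % {'action_type': 'dummy'} == "Provided dummy is not supported yet"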
@@ -336,6 +336,10 @@ class DeleteError(Invalid):
|
||||
msg_fmt = _("Couldn't delete when state is '%(state)s'.")
|
||||
|
||||
|
||||
class StartError(Invalid):
|
||||
msg_fmt = _("Couldn't start when state is '%(state)s'.")
|
||||
|
||||
|
||||
# decision engine
|
||||
|
||||
class WorkflowExecutionException(WatcherException):
|
||||
@@ -512,3 +516,7 @@ class NegativeLimitError(WatcherException):
|
||||
class NotificationPayloadError(WatcherException):
|
||||
_msg_fmt = _("Payload not populated when trying to send notification "
|
||||
"\"%(class_name)s\"")
|
||||
|
||||
|
||||
class InvalidPoolAttributeValue(Invalid):
|
||||
msg_fmt = _("The %(name)s pool %(attribute)s is not integer")
|
||||
|
||||
@@ -17,9 +17,9 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import random
|
||||
import time
|
||||
|
||||
from novaclient import api_versions
|
||||
from oslo_log import log
|
||||
|
||||
import cinderclient.exceptions as ciexceptions
|
||||
@@ -29,9 +29,12 @@ import novaclient.exceptions as nvexceptions
|
||||
from watcher.common import clients
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils
|
||||
from watcher import conf
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
CONF = conf.CONF
|
||||
|
||||
|
||||
class NovaHelper(object):
|
||||
|
||||
@@ -106,7 +109,7 @@ class NovaHelper(object):
|
||||
return True
|
||||
else:
|
||||
LOG.debug("confirm resize failed for the "
|
||||
"instance %s" % instance.id)
|
||||
"instance %s", instance.id)
|
||||
return False
|
||||
|
||||
def wait_for_volume_status(self, volume, status, timeout=60,
|
||||
@@ -130,240 +133,68 @@ class NovaHelper(object):
|
||||
return volume.status == status
|
||||
|
||||
def watcher_non_live_migrate_instance(self, instance_id, dest_hostname,
|
||||
keep_original_image_name=True,
|
||||
retry=120):
|
||||
"""This method migrates a given instance
|
||||
|
||||
using an image of this instance and creating a new instance
|
||||
from this image. It saves some configuration information
|
||||
about the original instance : security group, list of networks,
|
||||
list of attached volumes, floating IP, ...
|
||||
in order to apply the same settings to the new instance.
|
||||
At the end of the process the original instance is deleted.
|
||||
This method uses the Nova built-in migrate()
|
||||
action to do a migration of a given instance.
|
||||
For migrating a given dest_hostname, Nova API version
|
||||
must be 2.56 or higher.
|
||||
|
||||
It returns True if the migration was successful,
|
||||
False otherwise.
|
||||
|
||||
if destination hostname not given, this method calls nova api
|
||||
to migrate the instance.
|
||||
|
||||
:param instance_id: the unique id of the instance to migrate.
|
||||
:param keep_original_image_name: flag indicating whether the
|
||||
image name from which the original instance was built must be
|
||||
used as the name of the intermediate image used for migration.
|
||||
If this flag is False, a temporary image name is built
|
||||
:param dest_hostname: the name of the destination compute node, if
|
||||
destination_node is None, nova scheduler choose
|
||||
the destination host
|
||||
"""
|
||||
new_image_name = ""
|
||||
LOG.debug(
|
||||
"Trying a non-live migrate of instance '%s' " % instance_id)
|
||||
"Trying a cold migrate of instance '%s' ", instance_id)
|
||||
|
||||
# Looking for the instance to migrate
|
||||
instance = self.find_instance(instance_id)
|
||||
if not instance:
|
||||
LOG.debug("Instance %s not found !" % instance_id)
|
||||
LOG.debug("Instance %s not found !", instance_id)
|
||||
return False
|
||||
else:
|
||||
# NOTE: If destination node is None call Nova API to migrate
|
||||
# instance
|
||||
host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
|
||||
LOG.debug(
|
||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
||||
"Instance %(instance)s found on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': host_name})
|
||||
|
||||
if dest_hostname is None:
|
||||
previous_status = getattr(instance, 'status')
|
||||
previous_status = getattr(instance, 'status')
|
||||
|
||||
instance.migrate()
|
||||
instance = self.nova.servers.get(instance_id)
|
||||
while (getattr(instance, 'status') not in
|
||||
["VERIFY_RESIZE", "ERROR"] and retry):
|
||||
instance = self.nova.servers.get(instance.id)
|
||||
time.sleep(2)
|
||||
retry -= 1
|
||||
new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||
if (dest_hostname and
|
||||
not self._check_nova_api_version(self.nova, "2.56")):
|
||||
LOG.error("For migrating a given dest_hostname,"
|
||||
"Nova API version must be 2.56 or higher")
|
||||
return False
|
||||
|
||||
if (host_name != new_hostname and
|
||||
instance.status == 'VERIFY_RESIZE'):
|
||||
if not self.confirm_resize(instance, previous_status):
|
||||
return False
|
||||
LOG.debug(
|
||||
"cold migration succeeded : "
|
||||
"instance %s is now on host '%s'." % (
|
||||
instance_id, new_hostname))
|
||||
return True
|
||||
else:
|
||||
LOG.debug(
|
||||
"cold migration for instance %s failed" % instance_id)
|
||||
instance.migrate(host=dest_hostname)
|
||||
instance = self.nova.servers.get(instance_id)
|
||||
|
||||
while (getattr(instance, 'status') not in
|
||||
["VERIFY_RESIZE", "ERROR"] and retry):
|
||||
instance = self.nova.servers.get(instance.id)
|
||||
time.sleep(2)
|
||||
retry -= 1
|
||||
new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||
|
||||
if (host_name != new_hostname and
|
||||
instance.status == 'VERIFY_RESIZE'):
|
||||
if not self.confirm_resize(instance, previous_status):
|
||||
return False
|
||||
|
||||
if not keep_original_image_name:
|
||||
# randrange gives you an integral value
|
||||
irand = random.randint(0, 1000)
|
||||
|
||||
# Building the temporary image name
|
||||
# which will be used for the migration
|
||||
new_image_name = "tmp-migrate-%s-%s" % (instance_id, irand)
|
||||
LOG.debug(
|
||||
"cold migration succeeded : "
|
||||
"instance %(instance)s is now on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': new_hostname})
|
||||
return True
|
||||
else:
|
||||
# Get the image name of the current instance.
|
||||
# We'll use the same name for the new instance.
|
||||
imagedict = getattr(instance, "image")
|
||||
image_id = imagedict["id"]
|
||||
image = self.glance.images.get(image_id)
|
||||
new_image_name = getattr(image, "name")
|
||||
|
||||
instance_name = getattr(instance, "name")
|
||||
flavor_name = instance.flavor.get('original_name')
|
||||
keypair_name = getattr(instance, "key_name")
|
||||
|
||||
addresses = getattr(instance, "addresses")
|
||||
|
||||
floating_ip = ""
|
||||
network_names_list = []
|
||||
|
||||
for network_name, network_conf_obj in addresses.items():
|
||||
LOG.debug(
|
||||
"Extracting network configuration for network '%s'" %
|
||||
network_name)
|
||||
|
||||
network_names_list.append(network_name)
|
||||
|
||||
for net_conf_item in network_conf_obj:
|
||||
if net_conf_item['OS-EXT-IPS:type'] == "floating":
|
||||
floating_ip = net_conf_item['addr']
|
||||
break
|
||||
|
||||
sec_groups_list = getattr(instance, "security_groups")
|
||||
sec_groups = []
|
||||
|
||||
for sec_group_dict in sec_groups_list:
|
||||
sec_groups.append(sec_group_dict['name'])
|
||||
|
||||
# Stopping the old instance properly so
|
||||
# that no new data is sent to it and to its attached volumes
|
||||
stopped_ok = self.stop_instance(instance_id)
|
||||
|
||||
if not stopped_ok:
|
||||
LOG.debug("Could not stop instance: %s" % instance_id)
|
||||
"cold migration for instance %s failed", instance_id)
|
||||
return False
|
||||
|
||||
# Building the temporary image which will be used
|
||||
# to re-build the same instance on another target host
|
||||
image_uuid = self.create_image_from_instance(instance_id,
|
||||
new_image_name)
|
||||
|
||||
if not image_uuid:
|
||||
LOG.debug(
|
||||
"Could not build temporary image of instance: %s" %
|
||||
instance_id)
|
||||
return False
|
||||
|
||||
#
|
||||
# We need to get the list of attached volumes and detach
|
||||
# them from the instance in order to attache them later
|
||||
# to the new instance
|
||||
#
|
||||
blocks = []
|
||||
|
||||
# Looks like this :
|
||||
# os-extended-volumes:volumes_attached |
|
||||
# [{u'id': u'c5c3245f-dd59-4d4f-8d3a-89d80135859a'}]
|
||||
attached_volumes = getattr(instance,
|
||||
"os-extended-volumes:volumes_attached")
|
||||
|
||||
for attached_volume in attached_volumes:
|
||||
volume_id = attached_volume['id']
|
||||
|
||||
try:
|
||||
volume = self.cinder.volumes.get(volume_id)
|
||||
|
||||
attachments_list = getattr(volume, "attachments")
|
||||
|
||||
device_name = attachments_list[0]['device']
|
||||
# When a volume is attached to an instance
|
||||
# it contains the following property :
|
||||
# attachments = [{u'device': u'/dev/vdb',
|
||||
# u'server_id': u'742cc508-a2f2-4769-a794-bcdad777e814',
|
||||
# u'id': u'f6d62785-04b8-400d-9626-88640610f65e',
|
||||
# u'host_name': None, u'volume_id':
|
||||
# u'f6d62785-04b8-400d-9626-88640610f65e'}]
|
||||
|
||||
# boot_index indicates a number
|
||||
# designating the boot order of the device.
|
||||
# Use -1 for the boot volume,
|
||||
# choose 0 for an attached volume.
|
||||
block_device_mapping_v2_item = {"device_name": device_name,
|
||||
"source_type": "volume",
|
||||
"destination_type":
|
||||
"volume",
|
||||
"uuid": volume_id,
|
||||
"boot_index": "0"}
|
||||
|
||||
blocks.append(
|
||||
block_device_mapping_v2_item)
|
||||
|
||||
LOG.debug("Detaching volume %s from instance: %s" % (
|
||||
volume_id, instance_id))
|
||||
# volume.detach()
|
||||
self.nova.volumes.delete_server_volume(instance_id,
|
||||
volume_id)
|
||||
|
||||
if not self.wait_for_volume_status(volume, "available", 5,
|
||||
10):
|
||||
LOG.debug(
|
||||
"Could not detach volume %s from instance: %s" % (
|
||||
volume_id, instance_id))
|
||||
return False
|
||||
except ciexceptions.NotFound:
|
||||
LOG.debug("Volume '%s' not found " % image_id)
|
||||
return False
|
||||
|
||||
# We create the new instance from
|
||||
# the intermediate image of the original instance
|
||||
new_instance = self. \
|
||||
create_instance(dest_hostname,
|
||||
instance_name,
|
||||
image_uuid,
|
||||
flavor_name,
|
||||
sec_groups,
|
||||
network_names_list=network_names_list,
|
||||
keypair_name=keypair_name,
|
||||
create_new_floating_ip=False,
|
||||
block_device_mapping_v2=blocks)
|
||||
|
||||
if not new_instance:
|
||||
LOG.debug(
|
||||
"Could not create new instance "
|
||||
"for non-live migration of instance %s" % instance_id)
|
||||
return False
|
||||
|
||||
try:
|
||||
LOG.debug("Detaching floating ip '%s' from instance %s" % (
|
||||
floating_ip, instance_id))
|
||||
# We detach the floating ip from the current instance
|
||||
instance.remove_floating_ip(floating_ip)
|
||||
|
||||
LOG.debug(
|
||||
"Attaching floating ip '%s' to the new instance %s" % (
|
||||
floating_ip, new_instance.id))
|
||||
|
||||
# We attach the same floating ip to the new instance
|
||||
new_instance.add_floating_ip(floating_ip)
|
||||
except Exception as e:
|
||||
LOG.debug(e)
|
||||
|
||||
new_host_name = getattr(new_instance, "OS-EXT-SRV-ATTR:host")
|
||||
|
||||
# Deleting the old instance (because no more useful)
|
||||
delete_ok = self.delete_instance(instance_id)
|
||||
if not delete_ok:
|
||||
LOG.debug("Could not delete instance: %s" % instance_id)
|
||||
return False
|
||||
|
||||
LOG.debug(
|
||||
"Instance %s has been successfully migrated "
|
||||
"to new host '%s' and its new id is %s." % (
|
||||
instance_id, new_host_name, new_instance.id))
|
||||
|
||||
return True
|
||||
|
||||
def resize_instance(self, instance_id, flavor, retry=120):
|
||||
"""This method resizes given instance with specified flavor.
|
||||
|
||||
@@ -376,8 +207,10 @@ class NovaHelper(object):
|
||||
:param instance_id: the unique id of the instance to resize.
|
||||
:param flavor: the name or ID of the flavor to resize to.
|
||||
"""
|
||||
LOG.debug("Trying a resize of instance %s to flavor '%s'" % (
|
||||
instance_id, flavor))
|
||||
LOG.debug(
|
||||
"Trying a resize of instance %(instance)s to "
|
||||
"flavor '%(flavor)s'",
|
||||
{'instance': instance_id, 'flavor': flavor})
|
||||
|
||||
# Looking for the instance to resize
|
||||
instance = self.find_instance(instance_id)
|
||||
@@ -394,17 +227,17 @@ class NovaHelper(object):
|
||||
"instance %s. Exception: %s", instance_id, e)
|
||||
|
||||
if not flavor_id:
|
||||
LOG.debug("Flavor not found: %s" % flavor)
|
||||
LOG.debug("Flavor not found: %s", flavor)
|
||||
return False
|
||||
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return False
|
||||
|
||||
instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
|
||||
LOG.debug(
|
||||
"Instance %s is in '%s' status." % (instance_id,
|
||||
instance_status))
|
||||
"Instance %(id)s is in '%(status)s' status.",
|
||||
{'id': instance_id, 'status': instance_status})
|
||||
|
||||
instance.resize(flavor=flavor_id)
|
||||
while getattr(instance,
|
||||
@@ -442,17 +275,20 @@ class NovaHelper(object):
|
||||
destination_node is None, nova scheduler choose
|
||||
the destination host
|
||||
"""
|
||||
LOG.debug("Trying to live migrate instance %s " % (instance_id))
|
||||
LOG.debug(
|
||||
"Trying a live migrate instance %(instance)s ",
|
||||
{'instance': instance_id})
|
||||
|
||||
# Looking for the instance to migrate
|
||||
instance = self.find_instance(instance_id)
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return False
|
||||
else:
|
||||
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||
LOG.debug(
|
||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
||||
"Instance %(instance)s found on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': host_name})
|
||||
|
||||
# From nova api version 2.25(Mitaka release), the default value of
|
||||
# block_migration is None which is mapped to 'auto'.
|
||||
@@ -474,7 +310,7 @@ class NovaHelper(object):
|
||||
if host_name != new_hostname and instance.status == 'ACTIVE':
|
||||
LOG.debug(
|
||||
"Live migration succeeded : "
|
||||
"instance %s is now on host '%s'." % (
|
||||
"instance %s is now on host '%s'.", (
|
||||
instance_id, new_hostname))
|
||||
return True
|
||||
else:
|
||||
@@ -485,7 +321,7 @@ class NovaHelper(object):
|
||||
and retry:
|
||||
instance = self.nova.servers.get(instance.id)
|
||||
if not getattr(instance, 'OS-EXT-STS:task_state'):
|
||||
LOG.debug("Instance task state: %s is null" % instance_id)
|
||||
LOG.debug("Instance task state: %s is null", instance_id)
|
||||
break
|
||||
LOG.debug(
|
||||
'Waiting the migration of {0} to {1}'.format(
|
||||
@@ -501,13 +337,13 @@ class NovaHelper(object):
|
||||
|
||||
LOG.debug(
|
||||
"Live migration succeeded : "
|
||||
"instance %s is now on host '%s'." % (
|
||||
instance_id, host_name))
|
||||
"instance %(instance)s is now on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': host_name})
|
||||
|
||||
return True
|
||||
|
||||
def abort_live_migrate(self, instance_id, source, destination, retry=240):
|
||||
LOG.debug("Aborting live migration of instance %s" % instance_id)
|
||||
LOG.debug("Aborting live migration of instance %s", instance_id)
|
||||
migration = self.get_running_migration(instance_id)
|
||||
if migration:
|
||||
migration_id = getattr(migration[0], "id")
|
||||
@@ -520,7 +356,7 @@ class NovaHelper(object):
|
||||
LOG.exception(e)
|
||||
else:
|
||||
LOG.debug(
|
||||
"No running migrations found for instance %s" % instance_id)
|
||||
"No running migrations found for instance %s", instance_id)
|
||||
|
||||
while retry:
|
||||
instance = self.nova.servers.get(instance_id)
|
||||
@@ -544,21 +380,31 @@ class NovaHelper(object):
|
||||
"for the instance %s" % instance_id)
|
||||
|
||||
def enable_service_nova_compute(self, hostname):
|
||||
if self.nova.services.enable(host=hostname,
|
||||
binary='nova-compute'). \
|
||||
status == 'enabled':
|
||||
return True
|
||||
if float(CONF.nova_client.api_version) < 2.53:
|
||||
status = self.nova.services.enable(
|
||||
host=hostname, binary='nova-compute').status == 'enabled'
|
||||
else:
|
||||
return False
|
||||
service_uuid = self.nova.services.list(host=hostname,
|
||||
binary='nova-compute')[0].id
|
||||
status = self.nova.services.enable(
|
||||
service_uuid=service_uuid).status == 'enabled'
|
||||
|
||||
return status
|
||||
|
||||
def disable_service_nova_compute(self, hostname, reason=None):
|
||||
if self.nova.services.disable_log_reason(host=hostname,
|
||||
binary='nova-compute',
|
||||
reason=reason). \
|
||||
status == 'disabled':
|
||||
return True
|
||||
if float(CONF.nova_client.api_version) < 2.53:
|
||||
status = self.nova.services.disable_log_reason(
|
||||
host=hostname,
|
||||
binary='nova-compute',
|
||||
reason=reason).status == 'disabled'
|
||||
else:
|
||||
return False
|
||||
service_uuid = self.nova.services.list(host=hostname,
|
||||
binary='nova-compute')[0].id
|
||||
status = self.nova.services.disable_log_reason(
|
||||
service_uuid=service_uuid,
|
||||
reason=reason).status == 'disabled'
|
||||
|
||||
return status
|
||||
|
||||
def set_host_offline(self, hostname):
|
||||
# See API on https://developer.openstack.org/api-ref/compute/
|
||||
@@ -585,7 +431,7 @@ class NovaHelper(object):
|
||||
host = self.nova.hosts.get(hostname)
|
||||
|
||||
if not host:
|
||||
LOG.debug("host not found: %s" % hostname)
|
||||
LOG.debug("host not found: %s", hostname)
|
||||
return False
|
||||
else:
|
||||
host[0].update(
|
||||
@@ -607,18 +453,19 @@ class NovaHelper(object):
|
||||
key-value pairs to associate to the image as metadata.
|
||||
"""
|
||||
LOG.debug(
|
||||
"Trying to create an image from instance %s ..." % instance_id)
|
||||
"Trying to create an image from instance %s ...", instance_id)
|
||||
|
||||
# Looking for the instance
|
||||
instance = self.find_instance(instance_id)
|
||||
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return None
|
||||
else:
|
||||
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||
LOG.debug(
|
||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
||||
"Instance %(instance)s found on host '%(host)s'.",
|
||||
{'instance': instance_id, 'host': host_name})
|
||||
|
||||
# We need to wait for an appropriate status
|
||||
# of the instance before we can build an image from it
|
||||
@@ -645,14 +492,15 @@ class NovaHelper(object):
|
||||
if not image:
|
||||
break
|
||||
status = image.status
|
||||
LOG.debug("Current image status: %s" % status)
|
||||
LOG.debug("Current image status: %s", status)
|
||||
|
||||
if not image:
|
||||
LOG.debug("Image not found: %s" % image_uuid)
|
||||
LOG.debug("Image not found: %s", image_uuid)
|
||||
else:
|
||||
LOG.debug(
|
||||
"Image %s successfully created for instance %s" % (
|
||||
image_uuid, instance_id))
|
||||
"Image %(image)s successfully created for "
|
||||
"instance %(instance)s",
|
||||
{'image': image_uuid, 'instance': instance_id})
|
||||
return image_uuid
|
||||
return None
|
||||
|
||||
@@ -661,16 +509,16 @@ class NovaHelper(object):
|
||||
|
||||
:param instance_id: the unique id of the instance to delete.
|
||||
"""
|
||||
LOG.debug("Trying to remove instance %s ..." % instance_id)
|
||||
LOG.debug("Trying to remove instance %s ...", instance_id)
|
||||
|
||||
instance = self.find_instance(instance_id)
|
||||
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return False
|
||||
else:
|
||||
self.nova.servers.delete(instance_id)
|
||||
LOG.debug("Instance %s removed." % instance_id)
|
||||
LOG.debug("Instance %s removed.", instance_id)
|
||||
return True
|
||||
|
||||
def stop_instance(self, instance_id):
|
||||
@@ -678,21 +526,21 @@ class NovaHelper(object):
|
||||
|
||||
:param instance_id: the unique id of the instance to stop.
|
||||
"""
|
||||
LOG.debug("Trying to stop instance %s ..." % instance_id)
|
||||
LOG.debug("Trying to stop instance %s ...", instance_id)
|
||||
|
||||
instance = self.find_instance(instance_id)
|
||||
|
||||
if not instance:
|
||||
LOG.debug("Instance not found: %s" % instance_id)
|
||||
LOG.debug("Instance not found: %s", instance_id)
|
||||
return False
|
||||
elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
|
||||
LOG.debug("Instance has been stopped: %s" % instance_id)
|
||||
LOG.debug("Instance has been stopped: %s", instance_id)
|
||||
return True
|
||||
else:
|
||||
self.nova.servers.stop(instance_id)
|
||||
|
||||
if self.wait_for_instance_state(instance, "stopped", 8, 10):
|
||||
LOG.debug("Instance %s stopped." % instance_id)
|
||||
LOG.debug("Instance %s stopped.", instance_id)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
@@ -733,11 +581,11 @@ class NovaHelper(object):
|
||||
return False
|
||||
|
||||
while instance.status not in status_list and retry:
|
||||
LOG.debug("Current instance status: %s" % instance.status)
|
||||
LOG.debug("Current instance status: %s", instance.status)
|
||||
time.sleep(sleep)
|
||||
instance = self.nova.servers.get(instance.id)
|
||||
retry -= 1
|
||||
LOG.debug("Current instance status: %s" % instance.status)
|
||||
LOG.debug("Current instance status: %s", instance.status)
|
||||
return instance.status in status_list
|
||||
|
||||
def create_instance(self, node_id, inst_name="test", image_id=None,
|
||||
@@ -753,26 +601,26 @@ class NovaHelper(object):
|
||||
It returns the unique id of the created instance.
|
||||
"""
|
||||
LOG.debug(
|
||||
"Trying to create new instance '%s' "
|
||||
"from image '%s' with flavor '%s' ..." % (
|
||||
inst_name, image_id, flavor_name))
|
||||
"Trying to create new instance '%(inst)s' "
|
||||
"from image '%(image)s' with flavor '%(flavor)s' ...",
|
||||
{'inst': inst_name, 'image': image_id, 'flavor': flavor_name})
|
||||
|
||||
try:
|
||||
self.nova.keypairs.findall(name=keypair_name)
|
||||
except nvexceptions.NotFound:
|
||||
LOG.debug("Key pair '%s' not found " % keypair_name)
|
||||
LOG.debug("Key pair '%s' not found ", keypair_name)
|
||||
return
|
||||
|
||||
try:
|
||||
image = self.glance.images.get(image_id)
|
||||
except glexceptions.NotFound:
|
||||
LOG.debug("Image '%s' not found " % image_id)
|
||||
LOG.debug("Image '%s' not found ", image_id)
|
||||
return
|
||||
|
||||
try:
|
||||
flavor = self.nova.flavors.find(name=flavor_name)
|
||||
except nvexceptions.NotFound:
|
||||
LOG.debug("Flavor '%s' not found " % flavor_name)
|
||||
LOG.debug("Flavor '%s' not found ", flavor_name)
|
||||
return
|
||||
|
||||
# Make sure all security groups exist
|
||||
@@ -780,7 +628,7 @@ class NovaHelper(object):
|
||||
group_id = self.get_security_group_id_from_name(sec_group_name)
|
||||
|
||||
if not group_id:
|
||||
LOG.debug("Security group '%s' not found " % sec_group_name)
|
||||
LOG.debug("Security group '%s' not found ", sec_group_name)
|
||||
return
|
||||
|
||||
net_list = list()
|
||||
@@ -789,7 +637,7 @@ class NovaHelper(object):
|
||||
nic_id = self.get_network_id_from_name(network_name)
|
||||
|
||||
if not nic_id:
|
||||
LOG.debug("Network '%s' not found " % network_name)
|
||||
LOG.debug("Network '%s' not found ", network_name)
|
||||
return
|
||||
net_obj = {"net-id": nic_id}
|
||||
net_list.append(net_obj)
|
||||
@@ -815,14 +663,16 @@ class NovaHelper(object):
|
||||
if create_new_floating_ip and instance.status == 'ACTIVE':
|
||||
LOG.debug(
|
||||
"Creating a new floating IP"
|
||||
" for instance '%s'" % instance.id)
|
||||
" for instance '%s'", instance.id)
|
||||
# Creating floating IP for the new instance
|
||||
floating_ip = self.nova.floating_ips.create()
|
||||
|
||||
instance.add_floating_ip(floating_ip)
|
||||
|
||||
LOG.debug("Instance %s associated to Floating IP '%s'" % (
|
||||
instance.id, floating_ip.ip))
|
||||
LOG.debug(
|
||||
"Instance %(instance)s associated to "
|
||||
"Floating IP '%(ip)s'",
|
||||
{'instance': instance.id, 'ip': floating_ip.ip})
|
||||
|
||||
return instance
|
||||
|
||||
@@ -896,7 +746,7 @@ class NovaHelper(object):
|
||||
LOG.debug('Waiting volume update to {0}'.format(new_volume))
|
||||
time.sleep(retry_interval)
|
||||
retry -= 1
|
||||
LOG.debug("retry count: %s" % retry)
|
||||
LOG.debug("retry count: %s", retry)
|
||||
if getattr(new_volume, 'status') != "in-use":
|
||||
LOG.error("Volume update retry timeout or error")
|
||||
return False
|
||||
@@ -904,5 +754,15 @@ class NovaHelper(object):
|
||||
host_name = getattr(new_volume, "os-vol-host-attr:host")
|
||||
LOG.debug(
|
||||
"Volume update succeeded : "
|
||||
"Volume %s is now on host '%s'." % (new_volume.id, host_name))
|
||||
"Volume %s is now on host '%s'.",
|
||||
new_volume.id, host_name)
|
||||
return True
|
||||
|
||||
def _check_nova_api_version(self, client, version):
|
||||
api_version = api_versions.APIVersion(version_str=version)
|
||||
try:
|
||||
api_versions.discover_version(client, api_version)
|
||||
return True
|
||||
except nvexceptions.UnsupportedVersion as e:
|
||||
LOG.exception(e)
|
||||
return False
|
||||
|
||||
@@ -71,6 +71,17 @@ rules = [
|
||||
'method': 'PATCH'
|
||||
}
|
||||
]
|
||||
),
|
||||
policy.DocumentedRuleDefault(
|
||||
name=ACTION_PLAN % 'start',
|
||||
check_str=base.RULE_ADMIN_API,
|
||||
description='Start an action plan.',
|
||||
operations=[
|
||||
{
|
||||
'path': '/v1/action_plans/{action_plan_uuid}/action',
|
||||
'method': 'POST'
|
||||
}
|
||||
]
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
@@ -289,7 +289,7 @@ class Service(service.ServiceBase):
|
||||
return api_manager_version
|
||||
|
||||
|
||||
def launch(conf, service_, workers=1, restart_method='reload'):
|
||||
def launch(conf, service_, workers=1, restart_method='mutate'):
|
||||
return service.launch(conf, service_, workers, restart_method)
|
||||
|
||||
|
||||
|
||||
@@ -44,18 +44,21 @@ WATCHER_DECISION_ENGINE_OPTS = [
|
||||
'execute strategies'),
|
||||
cfg.IntOpt('action_plan_expiry',
|
||||
default=24,
|
||||
mutable=True,
|
||||
help='An expiry timespan(hours). Watcher invalidates any '
|
||||
'action plan for which its creation time '
|
||||
'-whose number of hours has been offset by this value-'
|
||||
' is older than the current time.'),
|
||||
cfg.IntOpt('check_periodic_interval',
|
||||
default=30 * 60,
|
||||
mutable=True,
|
||||
help='Interval (in seconds) for checking action plan expiry.')
|
||||
]
|
||||
|
||||
WATCHER_CONTINUOUS_OPTS = [
|
||||
cfg.IntOpt('continuous_audit_interval',
|
||||
default=10,
|
||||
mutable=True,
|
||||
help='Interval (in seconds) for checking newly created '
|
||||
'continuous audits.')
|
||||
]
|
||||
|
||||
@@ -32,9 +32,11 @@ GNOCCHI_CLIENT_OPTS = [
|
||||
'The default is public.'),
|
||||
cfg.IntOpt('query_max_retries',
|
||||
default=10,
|
||||
mutable=True,
|
||||
help='How many times Watcher is trying to query again'),
|
||||
cfg.IntOpt('query_timeout',
|
||||
default=1,
|
||||
mutable=True,
|
||||
help='How many seconds Watcher should wait to do query again')]
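The hunks above mark several options `mutable=True` and, together with the `restart_method='mutate'` change to the service launcher, allow those values to be changed at runtime: on SIGHUP the launcher re-reads the configuration and applies changes to mutable options without restarting the workers. A hedged sketch of the underlying oslo.config behaviour, with an invented option name and project:

```python
# Hedged sketch of a runtime-mutable option; the option name and project name
# are invented for illustration, not taken from Watcher.
from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([
    cfg.IntOpt('periodic_interval',
               default=60,
               mutable=True,   # value may be changed without a restart
               help='Seconds between periodic task runs.'),
])
CONF(args=[], project='example')

# oslo.service normally does this for you: a launcher created with
# restart_method='mutate' calls mutate_config_files() when it receives
# SIGHUP, re-reading the config files and applying changes to mutable
# options in place instead of restarting the workers.
CONF.mutate_config_files()
print(CONF.periodic_interval)
```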
|
||||
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ nova_client = cfg.OptGroup(name='nova_client',
|
||||
|
||||
NOVA_CLIENT_OPTS = [
|
||||
cfg.StrOpt('api_version',
|
||||
default='2.53',
|
||||
default='2.56',
|
||||
help='Version of Nova API to use in novaclient.'),
|
||||
cfg.StrOpt('endpoint_type',
|
||||
default='publicURL',
|
||||
|
||||
@@ -25,6 +25,7 @@ from watcher._i18n import _
|
||||
SERVICE_OPTS = [
|
||||
cfg.IntOpt('periodic_interval',
|
||||
default=60,
|
||||
mutable=True,
|
||||
help=_('Seconds between running periodic tasks.')),
|
||||
cfg.HostAddressOpt('host',
|
||||
default=socket.gethostname(),
|
||||
|
||||
@@ -314,6 +314,21 @@ class Connection(api.BaseConnection):
|
||||
|
||||
query.delete()
|
||||
|
||||
def _get_model_list(self, model, add_filters_func, context, filters=None,
|
||||
limit=None, marker=None, sort_key=None, sort_dir=None,
|
||||
eager=False):
|
||||
query = model_query(model)
|
||||
if eager:
|
||||
query = self._set_eager_options(model, query)
|
||||
query = add_filters_func(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter(model.deleted_at.is_(None))
|
||||
return _paginate_query(model, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
|
||||
# NOTE(erakli): _add_..._filters methods should be refactored to have same
|
||||
# content. join_fieldmap should be filled with JoinMap instead of dict
|
||||
|
||||
def _add_goals_filters(self, query, filters):
|
||||
if filters is None:
|
||||
filters = {}
|
||||
@@ -426,18 +441,42 @@ class Connection(api.BaseConnection):
|
||||
query=query, model=models.EfficacyIndicator, filters=filters,
|
||||
plain_fields=plain_fields, join_fieldmap=join_fieldmap)
|
||||
|
||||
def _add_scoring_engine_filters(self, query, filters):
|
||||
if filters is None:
|
||||
filters = {}
|
||||
|
||||
plain_fields = ['id', 'description']
|
||||
|
||||
return self._add_filters(
|
||||
query=query, model=models.ScoringEngine, filters=filters,
|
||||
plain_fields=plain_fields)
|
||||
|
||||
def _add_action_descriptions_filters(self, query, filters):
|
||||
if not filters:
|
||||
filters = {}
|
||||
|
||||
plain_fields = ['id', 'action_type']
|
||||
|
||||
return self._add_filters(
|
||||
query=query, model=models.ActionDescription, filters=filters,
|
||||
plain_fields=plain_fields)
|
||||
|
||||
def _add_services_filters(self, query, filters):
|
||||
if not filters:
|
||||
filters = {}
|
||||
|
||||
plain_fields = ['id', 'name', 'host']
|
||||
|
||||
return self._add_filters(
|
||||
query=query, model=models.Service, filters=filters,
|
||||
plain_fields=plain_fields)
|
||||
|
||||
# ### GOALS ### #
|
||||
|
||||
def get_goal_list(self, context, filters=None, limit=None, marker=None,
|
||||
sort_key=None, sort_dir=None, eager=False):
|
||||
query = model_query(models.Goal)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.Goal, query)
|
||||
query = self._add_goals_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
return _paginate_query(models.Goal, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_goal_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.Goal,
|
||||
self._add_goals_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_goal(self, values):
|
||||
# ensure defaults are present for new goals
|
||||
@@ -493,17 +532,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### STRATEGIES ### #
|
||||
|
||||
def get_strategy_list(self, context, filters=None, limit=None,
|
||||
marker=None, sort_key=None, sort_dir=None,
|
||||
eager=True):
|
||||
query = model_query(models.Strategy)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.Strategy, query)
|
||||
query = self._add_strategies_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
return _paginate_query(models.Strategy, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_strategy_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.Strategy,
|
||||
self._add_strategies_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_strategy(self, values):
|
||||
# ensure defaults are present for new strategies
|
||||
@@ -559,18 +591,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### AUDIT TEMPLATES ### #
|
||||
|
||||
def get_audit_template_list(self, context, filters=None, limit=None,
|
||||
marker=None, sort_key=None, sort_dir=None,
|
||||
eager=False):
|
||||
|
||||
query = model_query(models.AuditTemplate)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.AuditTemplate, query)
|
||||
query = self._add_audit_templates_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
return _paginate_query(models.AuditTemplate, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_audit_template_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.AuditTemplate,
|
||||
self._add_audit_templates_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_audit_template(self, values):
|
||||
# ensure defaults are present for new audit_templates
|
||||
@@ -642,17 +666,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### AUDITS ### #
|
||||
|
||||
def get_audit_list(self, context, filters=None, limit=None, marker=None,
|
||||
sort_key=None, sort_dir=None, eager=False):
|
||||
query = model_query(models.Audit)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.Audit, query)
|
||||
query = self._add_audits_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
|
||||
return _paginate_query(models.Audit, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_audit_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.Audit,
|
||||
self._add_audits_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_audit(self, values):
|
||||
# ensure defaults are present for new audits
|
||||
@@ -740,16 +757,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### ACTIONS ### #
|
||||
|
||||
def get_action_list(self, context, filters=None, limit=None, marker=None,
|
||||
sort_key=None, sort_dir=None, eager=False):
|
||||
query = model_query(models.Action)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.Action, query)
|
||||
query = self._add_actions_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
return _paginate_query(models.Action, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_action_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.Action,
|
||||
self._add_actions_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_action(self, values):
|
||||
# ensure defaults are present for new actions
|
||||
@@ -819,18 +830,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### ACTION PLANS ### #
|
||||
|
||||
def get_action_plan_list(
|
||||
self, context, filters=None, limit=None, marker=None,
|
||||
sort_key=None, sort_dir=None, eager=False):
|
||||
query = model_query(models.ActionPlan)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.ActionPlan, query)
|
||||
query = self._add_action_plans_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter(models.ActionPlan.deleted_at.is_(None))
|
||||
|
||||
return _paginate_query(models.ActionPlan, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_action_plan_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.ActionPlan,
|
||||
self._add_action_plans_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_action_plan(self, values):
|
||||
# ensure defaults are present for new audits
|
||||
@@ -912,18 +915,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### EFFICACY INDICATORS ### #
|
||||
|
||||
def get_efficacy_indicator_list(self, context, filters=None, limit=None,
|
||||
marker=None, sort_key=None, sort_dir=None,
|
||||
eager=False):
|
||||
|
||||
query = model_query(models.EfficacyIndicator)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.EfficacyIndicator, query)
|
||||
query = self._add_efficacy_indicators_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
return _paginate_query(models.EfficacyIndicator, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_efficacy_indicator_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.EfficacyIndicator,
|
||||
self._add_efficacy_indicators_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_efficacy_indicator(self, values):
|
||||
# ensure defaults are present for new efficacy indicators
|
||||
@@ -992,28 +987,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### SCORING ENGINES ### #
|
||||
|
||||
def _add_scoring_engine_filters(self, query, filters):
|
||||
if filters is None:
|
||||
filters = {}
|
||||
|
||||
plain_fields = ['id', 'description']
|
||||
|
||||
return self._add_filters(
|
||||
query=query, model=models.ScoringEngine, filters=filters,
|
||||
plain_fields=plain_fields)
|
||||
|
||||
def get_scoring_engine_list(
|
||||
self, context, columns=None, filters=None, limit=None,
|
||||
marker=None, sort_key=None, sort_dir=None, eager=False):
|
||||
query = model_query(models.ScoringEngine)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.ScoringEngine, query)
|
||||
query = self._add_scoring_engine_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
|
||||
return _paginate_query(models.ScoringEngine, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_scoring_engine_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.ScoringEngine,
|
||||
self._add_scoring_engine_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_scoring_engine(self, values):
|
||||
# ensure defaults are present for new scoring engines
|
||||
@@ -1078,26 +1055,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### SERVICES ### #
|
||||
|
||||
def _add_services_filters(self, query, filters):
|
||||
if not filters:
|
||||
filters = {}
|
||||
|
||||
plain_fields = ['id', 'name', 'host']
|
||||
|
||||
return self._add_filters(
|
||||
query=query, model=models.Service, filters=filters,
|
||||
plain_fields=plain_fields)
|
||||
|
||||
def get_service_list(self, context, filters=None, limit=None, marker=None,
|
||||
sort_key=None, sort_dir=None, eager=False):
|
||||
query = model_query(models.Service)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.Service, query)
|
||||
query = self._add_services_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
return _paginate_query(models.Service, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_service_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.Service,
|
||||
self._add_services_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_service(self, values):
|
||||
try:
|
||||
@@ -1142,27 +1103,10 @@ class Connection(api.BaseConnection):
|
||||
|
||||
# ### ACTION_DESCRIPTIONS ### #
|
||||
|
||||
def _add_action_descriptions_filters(self, query, filters):
|
||||
if not filters:
|
||||
filters = {}
|
||||
|
||||
plain_fields = ['id', 'action_type']
|
||||
|
||||
return self._add_filters(
|
||||
query=query, model=models.ActionDescription, filters=filters,
|
||||
plain_fields=plain_fields)
|
||||
|
||||
def get_action_description_list(self, context, filters=None, limit=None,
|
||||
marker=None, sort_key=None,
|
||||
sort_dir=None, eager=False):
|
||||
query = model_query(models.ActionDescription)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.ActionDescription, query)
|
||||
query = self._add_action_descriptions_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
return _paginate_query(models.ActionDescription, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
def get_action_description_list(self, *args, **kwargs):
|
||||
return self._get_model_list(models.ActionDescription,
|
||||
self._add_action_descriptions_filters,
|
||||
*args, **kwargs)
|
||||
|
||||
def create_action_description(self, values):
|
||||
try:
|
||||
|
||||
@@ -63,6 +63,7 @@ class AuditHandler(BaseAuditHandler):
|
||||
self._strategy_context = default_context.DefaultStrategyContext()
|
||||
self._planner_manager = planner_manager.PlannerManager()
|
||||
self._planner = None
|
||||
self.applier_client = rpcapi.ApplierAPI()
|
||||
|
||||
@property
|
||||
def planner(self):
|
||||
@@ -74,6 +75,13 @@ class AuditHandler(BaseAuditHandler):
|
||||
def strategy_context(self):
|
||||
return self._strategy_context
|
||||
|
||||
def do_execute(self, audit, request_context):
|
||||
# execute the strategy
|
||||
solution = self.strategy_context.execute_strategy(
|
||||
audit, request_context)
|
||||
|
||||
return solution
|
||||
|
||||
def do_schedule(self, request_context, audit, solution):
|
||||
try:
|
||||
notifications.audit.send_action_notification(
|
||||
@@ -118,9 +126,8 @@ class AuditHandler(BaseAuditHandler):
|
||||
def post_execute(self, audit, solution, request_context):
|
||||
action_plan = self.do_schedule(request_context, audit, solution)
|
||||
if audit.auto_trigger:
|
||||
applier_client = rpcapi.ApplierAPI()
|
||||
applier_client.launch_action_plan(request_context,
|
||||
action_plan.uuid)
|
||||
self.applier_client.launch_action_plan(request_context,
|
||||
action_plan.uuid)
|
||||
|
||||
def execute(self, audit, request_context):
|
||||
try:
|
||||
|
||||
@@ -71,9 +71,8 @@ class ContinuousAuditHandler(base.AuditHandler):
|
||||
return False
|
||||
|
||||
def do_execute(self, audit, request_context):
|
||||
# execute the strategy
|
||||
solution = self.strategy_context.execute_strategy(
|
||||
audit, request_context)
|
||||
solution = super(ContinuousAuditHandler, self)\
|
||||
.do_execute(audit, request_context)
|
||||
|
||||
if audit.audit_type == objects.audit.AuditType.CONTINUOUS.value:
|
||||
a_plan_filters = {'audit_uuid': audit.uuid,
|
||||
|
||||
@@ -20,13 +20,6 @@ from watcher import objects
|
||||
|
||||
class OneShotAuditHandler(base.AuditHandler):
|
||||
|
||||
def do_execute(self, audit, request_context):
|
||||
# execute the strategy
|
||||
solution = self.strategy_context.execute_strategy(
|
||||
audit, request_context)
|
||||
|
||||
return solution
|
||||
|
||||
def post_execute(self, audit, solution, request_context):
|
||||
super(OneShotAuditHandler, self).post_execute(audit, solution,
|
||||
request_context)
|
||||
|
||||
@@ -241,3 +241,28 @@ class HardwareMaintenance(base.Goal):
|
||||
def get_efficacy_specification(cls):
|
||||
"""The efficacy spec for the current goal"""
|
||||
return specs.HardwareMaintenance()
|
||||
|
||||
|
||||
class ClusterMaintaining(base.Goal):
|
||||
"""ClusterMaintenance
|
||||
|
||||
This goal is used to maintain compute nodes
|
||||
without interrupting the user's applications.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "cluster_maintaining"
|
||||
|
||||
@classmethod
|
||||
def get_display_name(cls):
|
||||
return _("Cluster Maintaining")
|
||||
|
||||
@classmethod
|
||||
def get_translatable_display_name(cls):
|
||||
return "Cluster Maintaining"
|
||||
|
||||
@classmethod
|
||||
def get_efficacy_specification(cls):
|
||||
"""The efficacy spec for the current goal"""
|
||||
return specs.Unclassified()
|
||||
|
||||
@@ -48,7 +48,7 @@ class AuditEndpoint(object):
|
||||
self._oneshot_handler.execute(audit, context)
|
||||
|
||||
def trigger_audit(self, context, audit_uuid):
|
||||
LOG.debug("Trigger audit %s" % audit_uuid)
|
||||
LOG.debug("Trigger audit %s", audit_uuid)
|
||||
self.executor.submit(self.do_trigger_audit,
|
||||
context,
|
||||
audit_uuid)
|
||||
|
||||
@@ -222,8 +222,21 @@ class ModelBuilder(object):
|
||||
|
||||
:param pool: A storage pool
|
||||
:type pool: :py:class:`~cinderlient.v2.capabilities.Capabilities`
|
||||
:raises: exception.InvalidPoolAttributeValue
|
||||
"""
|
||||
# build up the storage pool.
|
||||
|
||||
attrs = ["total_volumes", "total_capacity_gb",
|
||||
"free_capacity_gb", "provisioned_capacity_gb",
|
||||
"allocated_capacity_gb"]
|
||||
|
||||
for attr in attrs:
|
||||
try:
|
||||
int(getattr(pool, attr))
|
||||
except ValueError:
|
||||
raise exception.InvalidPoolAttributeValue(
|
||||
name=pool.name, attribute=attr)
|
||||
|
||||
node_attributes = {
|
||||
"name": pool.name,
|
||||
"total_volumes": pool.total_volumes,
|
||||
|
||||
@@ -104,6 +104,18 @@ class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector):
|
||||
"items": {
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"projects": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"uuid": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": False
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": False
|
||||
@@ -337,7 +349,7 @@ class ModelBuilder(object):
|
||||
Create an instance node for the graph using nova and the
|
||||
`server` nova object.
|
||||
:param instance: Nova VM object.
|
||||
:return: A instance node for the graph.
|
||||
:return: An instance node for the graph.
|
||||
"""
|
||||
flavor = instance.flavor
|
||||
instance_attributes = {
|
||||
@@ -348,7 +360,8 @@ class ModelBuilder(object):
|
||||
"disk_capacity": flavor["disk"],
|
||||
"vcpus": flavor["vcpus"],
|
||||
"state": getattr(instance, "OS-EXT-STS:vm_state"),
|
||||
"metadata": instance.metadata}
|
||||
"metadata": instance.metadata,
|
||||
"project_id": instance.tenant_id}
|
||||
|
||||
# node_attributes = dict()
|
||||
# node_attributes["layer"] = "virtual"
|
||||
|
||||
@@ -29,7 +29,7 @@ class InstanceState(enum.Enum):
|
||||
STOPPED = 'stopped' # Instance is shut off, the disk image is still there.
|
||||
RESCUED = 'rescued' # A rescue image is running with the original image
|
||||
# attached.
|
||||
RESIZED = 'resized' # a Instance with the new size is active.
|
||||
RESIZED = 'resized' # an Instance with the new size is active.
|
||||
|
||||
SOFT_DELETED = 'soft-delete'
|
||||
# still available to restore.
|
||||
@@ -52,6 +52,7 @@ class Instance(compute_resource.ComputeResource):
|
||||
"disk_capacity": wfields.NonNegativeIntegerField(),
|
||||
"vcpus": wfields.NonNegativeIntegerField(),
|
||||
"metadata": wfields.JsonField(),
|
||||
"project_id": wfields.UUIDField(),
|
||||
}
|
||||
|
||||
def accept(self, visitor):
|
||||
|
||||
@@ -74,7 +74,7 @@ class Pool(storage_resource.StorageResource):
|
||||
"free_capacity_gb": wfields.NonNegativeIntegerField(),
|
||||
"provisioned_capacity_gb": wfields.NonNegativeIntegerField(),
|
||||
"allocated_capacity_gb": wfields.NonNegativeIntegerField(),
|
||||
"virtual_free": wfields.NonNegativeIntegerField(),
|
||||
"virtual_free": wfields.NonNegativeIntegerField(default=0),
|
||||
}
|
||||
|
||||
def accept(self, visitor):
|
||||
|
||||
@@ -28,6 +28,6 @@ class StorageResource(base.Element):
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
"uuid": wfields.StringField(),
|
||||
"uuid": wfields.StringField(default=""),
|
||||
"human_id": wfields.StringField(default=""),
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
Openstack implementation of the cluster graph.
|
||||
"""
|
||||
|
||||
import ast
|
||||
from lxml import etree
|
||||
import networkx as nx
|
||||
from oslo_concurrency import lockutils
|
||||
@@ -57,7 +58,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
@lockutils.synchronized("model_root")
|
||||
def add_node(self, node):
|
||||
self.assert_node(node)
|
||||
super(ModelRoot, self).add_node(node.uuid, node)
|
||||
super(ModelRoot, self).add_node(node.uuid, attr=node)
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def remove_node(self, node):
|
||||
@@ -72,7 +73,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
def add_instance(self, instance):
|
||||
self.assert_instance(instance)
|
||||
try:
|
||||
super(ModelRoot, self).add_node(instance.uuid, instance)
|
||||
super(ModelRoot, self).add_node(instance.uuid, attr=instance)
|
||||
except nx.NetworkXError as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.InstanceNotFound(name=instance.uuid)
|
||||
@@ -137,8 +138,8 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def get_all_compute_nodes(self):
|
||||
return {uuid: cn for uuid, cn in self.nodes(data=True)
|
||||
if isinstance(cn, element.ComputeNode)}
|
||||
return {uuid: cn['attr'] for uuid, cn in self.nodes(data=True)
|
||||
if isinstance(cn['attr'], element.ComputeNode)}
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def get_node_by_uuid(self, uuid):
|
||||
@@ -156,7 +157,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
def _get_by_uuid(self, uuid):
|
||||
try:
|
||||
return self.node[uuid]
|
||||
return self.node[uuid]['attr']
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.ComputeResourceNotFound(name=uuid)
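The ModelRoot changes in this file follow networkx's attribute model: keyword arguments passed to `add_node()` are stored in a per-node attribute dict, and `nodes(data=True)` yields `(name, attr_dict)` pairs, so the wrapped element object is now stashed under an `attr` key and unwrapped on lookup. A small self-contained sketch (the `Element` class below is a stand-in, not a Watcher class):

```python
# Sketch of the networkx attribute model the ModelRoot changes rely on;
# the Element class is a stand-in for Watcher's model elements.
import networkx as nx


class Element(object):
    def __init__(self, uuid):
        self.uuid = uuid


graph = nx.DiGraph()
node = Element('node-1')

# Keyword arguments to add_node() land in the node's attribute dict ...
graph.add_node(node.uuid, attr=node)

# ... so data=True yields (name, attribute_dict) pairs, and the wrapped
# object is unwrapped via the 'attr' key, as _get_by_uuid() now does.
for name, data in graph.nodes(data=True):
    print(name, data['attr'].uuid)
```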
|
||||
@@ -172,8 +173,8 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def get_all_instances(self):
|
||||
return {uuid: inst for uuid, inst in self.nodes(data=True)
|
||||
if isinstance(inst, element.Instance)}
|
||||
return {uuid: inst['attr'] for uuid, inst in self.nodes(data=True)
|
||||
if isinstance(inst['attr'], element.Instance)}
|
||||
|
||||
@lockutils.synchronized("model_root")
|
||||
def get_node_instances(self, node):
|
||||
@@ -225,6 +226,8 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
for inst in root.findall('.//Instance'):
|
||||
instance = element.Instance(**inst.attrib)
|
||||
instance.watcher_exclude = ast.literal_eval(
|
||||
inst.attrib["watcher_exclude"])
|
||||
model.add_instance(instance)
|
||||
|
||||
parent = inst.getparent()
|
||||
@@ -239,7 +242,7 @@ class ModelRoot(nx.DiGraph, base.Model):
|
||||
@classmethod
|
||||
def is_isomorphic(cls, G1, G2):
|
||||
def node_match(node1, node2):
|
||||
return node1.as_dict() == node2.as_dict()
|
||||
return node1['attr'].as_dict() == node2['attr'].as_dict()
|
||||
return nx.algorithms.isomorphism.isomorph.is_isomorphic(
|
||||
G1, G2, node_match=node_match)
|
||||
|
||||
@@ -277,12 +280,12 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
@lockutils.synchronized("storage_model")
|
||||
def add_node(self, node):
|
||||
self.assert_node(node)
|
||||
super(StorageModelRoot, self).add_node(node.host, node)
|
||||
super(StorageModelRoot, self).add_node(node.host, attr=node)
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def add_pool(self, pool):
|
||||
self.assert_pool(pool)
|
||||
super(StorageModelRoot, self).add_node(pool.name, pool)
|
||||
super(StorageModelRoot, self).add_node(pool.name, attr=pool)
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def remove_node(self, node):
|
||||
@@ -335,7 +338,7 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
@lockutils.synchronized("storage_model")
|
||||
def add_volume(self, volume):
|
||||
self.assert_volume(volume)
|
||||
super(StorageModelRoot, self).add_node(volume.uuid, volume)
|
||||
super(StorageModelRoot, self).add_node(volume.uuid, attr=volume)
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def remove_volume(self, volume):
|
||||
@@ -382,8 +385,8 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def get_all_storage_nodes(self):
|
||||
return {host: cn for host, cn in self.nodes(data=True)
|
||||
if isinstance(cn, element.StorageNode)}
|
||||
return {host: cn['attr'] for host, cn in self.nodes(data=True)
|
||||
if isinstance(cn['attr'], element.StorageNode)}
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def get_node_by_name(self, name):
|
||||
@@ -412,14 +415,14 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
def _get_by_uuid(self, uuid):
|
||||
try:
|
||||
return self.node[uuid]
|
||||
return self.node[uuid]['attr']
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.StorageResourceNotFound(name=uuid)
|
||||
|
||||
def _get_by_name(self, name):
|
||||
try:
|
||||
return self.node[name]
|
||||
return self.node[name]['attr']
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.StorageResourceNotFound(name=name)
|
||||
@@ -456,8 +459,8 @@ class StorageModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def get_all_volumes(self):
|
||||
return {name: vol for name, vol in self.nodes(data=True)
|
||||
if isinstance(vol, element.Volume)}
|
||||
return {name: vol['attr'] for name, vol in self.nodes(data=True)
|
||||
if isinstance(vol['attr'], element.Volume)}
|
||||
|
||||
@lockutils.synchronized("storage_model")
|
||||
def get_pool_volumes(self, pool):
|
||||
@@ -569,7 +572,7 @@ class BaremetalModelRoot(nx.DiGraph, base.Model):
|
||||
@lockutils.synchronized("baremetal_model")
|
||||
def add_node(self, node):
|
||||
self.assert_node(node)
|
||||
super(BaremetalModelRoot, self).add_node(node.uuid, node)
|
||||
super(BaremetalModelRoot, self).add_node(node.uuid, attr=node)
|
||||
|
||||
@lockutils.synchronized("baremetal_model")
|
||||
def remove_node(self, node):
|
||||
@@ -582,8 +585,8 @@ class BaremetalModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
@lockutils.synchronized("baremetal_model")
|
||||
def get_all_ironic_nodes(self):
|
||||
return {uuid: cn for uuid, cn in self.nodes(data=True)
|
||||
if isinstance(cn, element.IronicNode)}
|
||||
return {uuid: cn['attr'] for uuid, cn in self.nodes(data=True)
|
||||
if isinstance(cn['attr'], element.IronicNode)}
|
||||
|
||||
@lockutils.synchronized("baremetal_model")
|
||||
def get_node_by_uuid(self, uuid):
|
||||
@@ -594,7 +597,7 @@ class BaremetalModelRoot(nx.DiGraph, base.Model):
|
||||
|
||||
def _get_by_uuid(self, uuid):
|
||||
try:
|
||||
return self.node[uuid]
|
||||
return self.node[uuid]['attr']
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.BaremetalResourceNotFound(name=uuid)
|
||||
|
||||
@@ -255,7 +255,7 @@ class CapacityNotificationEndpoint(CinderNotification):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -286,7 +286,7 @@ class VolumeCreateEnd(VolumeNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -311,7 +311,7 @@ class VolumeUpdateEnd(VolumeNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -369,7 +369,7 @@ class VolumeDeleteEnd(VolumeNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
|
||||
@@ -76,6 +76,7 @@ class NovaNotification(base.NotificationEndpoint):
|
||||
'disk': disk_gb,
|
||||
'disk_capacity': disk_gb,
|
||||
'metadata': instance_metadata,
|
||||
'tenant_id': instance_data['tenant_id']
|
||||
})
|
||||
|
||||
try:
|
||||
@@ -229,7 +230,7 @@ class ServiceUpdated(VersionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -275,7 +276,7 @@ class InstanceCreated(VersionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -310,7 +311,7 @@ class InstanceUpdated(VersionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -337,7 +338,7 @@ class InstanceDeletedEnd(VersionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -372,7 +373,7 @@ class LegacyInstanceUpdated(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -399,7 +400,7 @@ class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -426,7 +427,7 @@ class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -459,7 +460,7 @@ class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -486,7 +487,7 @@ class LegacyInstanceResizeConfirmEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
@@ -513,7 +514,7 @@ class LegacyInstanceRebuildEnd(UnversionedNotificationEndpoint):
|
||||
ctxt.request_id = metadata['message_id']
|
||||
ctxt.project_domain = event_type
|
||||
LOG.info("Event '%(event)s' received from %(publisher)s "
|
||||
"with metadata %(metadata)s" %
|
||||
"with metadata %(metadata)s",
|
||||
dict(event=event_type,
|
||||
publisher=publisher_id,
|
||||
metadata=metadata))
|
||||
|
||||
@@ -87,6 +87,7 @@ class ComputeScope(base.BaseScope):
|
||||
instances_to_exclude = kwargs.get('instances')
|
||||
nodes_to_exclude = kwargs.get('nodes')
|
||||
instance_metadata = kwargs.get('instance_metadata')
|
||||
projects_to_exclude = kwargs.get('projects')
|
||||
|
||||
for resource in resources:
|
||||
if 'instances' in resource:
|
||||
@@ -105,6 +106,9 @@ class ComputeScope(base.BaseScope):
|
||||
elif 'instance_metadata' in resource:
|
||||
instance_metadata.extend(
|
||||
[metadata for metadata in resource['instance_metadata']])
|
||||
elif 'projects' in resource:
|
||||
projects_to_exclude.extend(
|
||||
[project['uuid'] for project in resource['projects']])
|
||||
|
||||
def remove_nodes_from_model(self, nodes_to_remove, cluster_model):
|
||||
for node_uuid in nodes_to_remove:
|
||||
@@ -144,6 +148,13 @@ class ComputeScope(base.BaseScope):
|
||||
if str(value).lower() == str(metadata.get(key)).lower():
|
||||
instances_to_remove.add(uuid)
|
||||
|
||||
def exclude_instances_with_given_project(
|
||||
self, projects_to_exclude, cluster_model, instances_to_exclude):
|
||||
all_instances = cluster_model.get_all_instances()
|
||||
for uuid, instance in all_instances.items():
|
||||
if instance.project_id in projects_to_exclude:
|
||||
instances_to_exclude.add(uuid)
|
||||
|
||||
def get_scoped_model(self, cluster_model):
|
||||
"""Leave only nodes and instances proposed in the audit scope"""
|
||||
if not cluster_model:
|
||||
@@ -154,6 +165,7 @@ class ComputeScope(base.BaseScope):
|
||||
nodes_to_remove = set()
|
||||
instances_to_exclude = []
|
||||
instance_metadata = []
|
||||
projects_to_exclude = []
|
||||
compute_scope = []
|
||||
model_hosts = list(cluster_model.get_all_compute_nodes().keys())
|
||||
|
||||
@@ -177,7 +189,8 @@ class ComputeScope(base.BaseScope):
|
||||
self.exclude_resources(
|
||||
rule['exclude'], instances=instances_to_exclude,
|
||||
nodes=nodes_to_exclude,
|
||||
instance_metadata=instance_metadata)
|
||||
instance_metadata=instance_metadata,
|
||||
projects=projects_to_exclude)
|
||||
|
||||
instances_to_exclude = set(instances_to_exclude)
|
||||
if allowed_nodes:
|
||||
@@ -190,6 +203,10 @@ class ComputeScope(base.BaseScope):
|
||||
self.exclude_instances_with_given_metadata(
|
||||
instance_metadata, cluster_model, instances_to_exclude)
|
||||
|
||||
if projects_to_exclude:
|
||||
self.exclude_instances_with_given_project(
|
||||
projects_to_exclude, cluster_model, instances_to_exclude)
|
||||
|
||||
self.update_exclude_instance_in_model(instances_to_exclude,
|
||||
cluster_model)
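With the `projects` handling added above, an audit scope can exclude every instance owned by a given project. A hypothetical scope fragment is shown below; the exact nesting may differ, this only mirrors the JSON schema fragment added earlier in this change, and the UUID is invented:

```python
# Hypothetical audit scope fragment that excludes every instance owned by a
# given project; structure inferred from the schema above, UUID invented.
scope = [
    {"compute": [
        {"exclude": [
            {"projects": [
                {"uuid": "44f4cfbc-8fe2-4bca-b4a9-e4b9ae582c53"}
            ]}
        ]}
    ]}
]
```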
|
||||
|
||||
|
||||
@@ -91,16 +91,16 @@ def _reload_scoring_engines(refresh=False):
|
||||
|
||||
for name in engines.keys():
|
||||
se_impl = default.DefaultScoringLoader().load(name)
|
||||
LOG.debug("Found Scoring Engine plugin: %s" % se_impl.get_name())
|
||||
LOG.debug("Found Scoring Engine plugin: %s", se_impl.get_name())
|
||||
_scoring_engine_map[se_impl.get_name()] = se_impl
|
||||
|
||||
engine_containers = \
|
||||
default.DefaultScoringContainerLoader().list_available()
|
||||
|
||||
for container_id, container_cls in engine_containers.items():
|
||||
LOG.debug("Found Scoring Engine container plugin: %s" %
|
||||
LOG.debug("Found Scoring Engine container plugin: %s",
|
||||
container_id)
|
||||
for se in container_cls.get_scoring_engine_list():
|
||||
LOG.debug("Found Scoring Engine plugin: %s" %
|
||||
LOG.debug("Found Scoring Engine plugin: %s",
|
||||
se.get_name())
|
||||
_scoring_engine_map[se.get_name()] = se
|
||||
|
||||
@@ -18,6 +18,7 @@ from watcher.decision_engine.strategy.strategies import actuation
|
||||
from watcher.decision_engine.strategy.strategies import basic_consolidation
|
||||
from watcher.decision_engine.strategy.strategies import dummy_strategy
|
||||
from watcher.decision_engine.strategy.strategies import dummy_with_scorer
|
||||
from watcher.decision_engine.strategy.strategies import host_maintenance
|
||||
from watcher.decision_engine.strategy.strategies import noisy_neighbor
|
||||
from watcher.decision_engine.strategy.strategies import outlet_temp_control
|
||||
from watcher.decision_engine.strategy.strategies import saving_energy
|
||||
@@ -44,9 +45,10 @@ WorkloadStabilization = workload_stabilization.WorkloadStabilization
|
||||
UniformAirflow = uniform_airflow.UniformAirflow
|
||||
NoisyNeighbor = noisy_neighbor.NoisyNeighbor
|
||||
ZoneMigration = zone_migration.ZoneMigration
|
||||
HostMaintenance = host_maintenance.HostMaintenance
|
||||
|
||||
__all__ = ("Actuator", "BasicConsolidation", "OutletTempControl",
|
||||
"DummyStrategy", "DummyWithScorer", "VMWorkloadConsolidation",
|
||||
"WorkloadBalance", "WorkloadStabilization", "UniformAirflow",
|
||||
"NoisyNeighbor", "SavingEnergy", "StorageCapacityBalance",
|
||||
"ZoneMigration")
|
||||
"ZoneMigration", "HostMaintenance")
|
||||
|
||||
@@ -14,16 +14,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*Actuator*
|
||||
|
||||
This strategy allows anyone to create an action plan with a predefined set of
|
||||
actions. This strategy can be used for 2 different purposes:
|
||||
|
||||
- Test actions
|
||||
- Use this strategy based on an event trigger to perform some explicit task
|
||||
|
||||
"""
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
@@ -34,7 +24,17 @@ LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class Actuator(base.UnclassifiedStrategy):
|
||||
"""Actuator that simply executes the actions given as parameter"""
|
||||
"""Actuator
|
||||
|
||||
Actuator that simply executes the actions given as parameter
|
||||
|
||||
This strategy allows anyone to create an action plan with a predefined
|
||||
set of actions. This strategy can be used for 2 different purposes:
|
||||
|
||||
- Test actions
|
||||
- Use this strategy based on an event trigger to perform some explicit task
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
|
||||
10
watcher/decision_engine/strategy/strategies/base.py
Normal file → Executable file
@@ -471,3 +471,13 @@ class ZoneMigrationBaseStrategy(BaseStrategy):
|
||||
@classmethod
|
||||
def get_goal_name(cls):
|
||||
return "hardware_maintenance"
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class HostMaintenanceBaseStrategy(BaseStrategy):
|
||||
|
||||
REASON_FOR_MAINTAINING = 'watcher_maintaining'
|
||||
|
||||
@classmethod
|
||||
def get_goal_name(cls):
|
||||
return "cluster_maintaining"
|
||||
|
||||
@@ -16,24 +16,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*Good server consolidation strategy*
|
||||
|
||||
Consolidation of VMs is essential to achieve energy optimization in cloud
|
||||
environments such as OpenStack. As VMs are spinned up and/or moved over time,
|
||||
it becomes necessary to migrate VMs among servers to lower the costs. However,
|
||||
migration of VMs introduces runtime overheads and consumes extra energy, thus
|
||||
a good server consolidation strategy should carefully plan for migration in
|
||||
order to both minimize energy consumption and comply to the various SLAs.
|
||||
|
||||
This algorithm not only minimizes the overall number of used servers, but also
|
||||
minimizes the number of migrations.
|
||||
|
||||
It has been developed only for tests. You must have at least 2 physical compute
|
||||
nodes to run it, so you can easily run it on DevStack. It assumes that live
|
||||
migration is possible on your OpenStack cluster.
|
||||
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
@@ -47,7 +29,25 @@ LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
"""Basic offline consolidation using live migration"""
|
||||
"""Good server consolidation strategy
|
||||
|
||||
Basic offline consolidation using live migration
|
||||
|
||||
Consolidation of VMs is essential to achieve energy optimization in cloud
|
||||
environments such as OpenStack. As VMs are spun up and/or moved over
|
||||
time, it becomes necessary to migrate VMs among servers to lower the
|
||||
costs. However, migration of VMs introduces runtime overheads and
|
||||
consumes extra energy, thus a good server consolidation strategy should
|
||||
carefully plan for migration in order to both minimize energy consumption
|
||||
and comply to the various SLAs.
|
||||
|
||||
This algorithm not only minimizes the overall number of used servers,
|
||||
but also minimizes the number of migrations.
|
||||
|
||||
It has been developed only for tests. You must have at least 2 physical
|
||||
compute nodes to run it, so you can easily run it on DevStack. It assumes
|
||||
that live migration is possible on your OpenStack cluster.
|
||||
"""
|
||||
|
||||
HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent'
|
||||
INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util'
|
||||
@@ -109,6 +109,12 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
def granularity(self):
|
||||
return self.input_parameters.get('granularity', 300)
|
||||
|
||||
@property
|
||||
def aggregation_method(self):
|
||||
return self.input_parameters.get(
|
||||
'aggregation_method',
|
||||
{"instance": 'mean', "node": 'mean'})
|
||||
|
||||
@classmethod
|
||||
def get_display_name(cls):
|
||||
return _("Basic offline consolidation")
|
||||
@@ -142,6 +148,26 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
"type": "number",
|
||||
"default": 300
|
||||
},
|
||||
"aggregation_method": {
|
||||
"description": "Function used to aggregate multiple "
|
||||
"measures into an aggregate. For example, "
|
||||
"the min aggregation method will aggregate "
|
||||
"the values of different measures to the "
|
||||
"minimum value of all the measures in the "
|
||||
"time range.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"instance": {
|
||||
"type": "string",
|
||||
"default": 'mean'
|
||||
},
|
||||
"node": {
|
||||
"type": "string",
|
||||
"default": 'mean'
|
||||
},
|
||||
},
|
||||
"default": {"instance": 'mean', "node": 'mean'}
|
||||
},
|
||||
},
|
||||
}
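The new `aggregation_method` parameter, together with `granularity`, is passed straight through to the datasource queries in the hunk below. An illustrative set of input parameters for this strategy might look like the following; the values are examples, not recommendations:

```python
# Illustrative input parameters for this strategy; the values are examples,
# and "period" is assumed to keep its existing meaning (seconds of history).
input_parameters = {
    "period": 7200,          # look back over two hours of metrics
    "granularity": 300,      # 5-minute aggregates
    "aggregation_method": {  # aggregation function per resource kind
        "instance": "mean",
        "node": "max",
    },
}
```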
|
||||
|
||||
@@ -178,7 +204,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
:param source_node: the current node of the virtual machine
|
||||
:param destination_node: the destination of the virtual machine
|
||||
:param instance_to_migrate: the instance / virtual machine
|
||||
:return: True if the there is enough place otherwise false
|
||||
:return: True if there is enough place otherwise false
|
||||
"""
|
||||
if source_node == destination_node:
|
||||
return False
|
||||
@@ -258,11 +284,13 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
def get_node_cpu_usage(self, node):
|
||||
resource_id = "%s_%s" % (node.uuid, node.hostname)
|
||||
return self.datasource_backend.get_host_cpu_usage(
|
||||
resource_id, self.period, 'mean', granularity=300)
|
||||
resource_id, self.period, self.aggregation_method['node'],
|
||||
granularity=self.granularity)
|
||||
|
||||
def get_instance_cpu_usage(self, instance):
|
||||
return self.datasource_backend.get_instance_cpu_usage(
|
||||
instance.uuid, self.period, 'mean', granularity=300)
|
||||
instance.uuid, self.period, self.aggregation_method['instance'],
|
||||
granularity=self.granularity)
|
||||
|
||||
def calculate_score_node(self, node):
|
||||
"""Calculate the score that represent the utilization level
|
||||
@@ -277,7 +305,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
resource_id = "%s_%s" % (node.uuid, node.hostname)
|
||||
LOG.error(
|
||||
"No values returned by %(resource_id)s "
|
||||
"for %(metric_name)s" % dict(
|
||||
"for %(metric_name)s", dict(
|
||||
resource_id=resource_id,
|
||||
metric_name=self.METRIC_NAMES[
|
||||
self.config.datasource]['host_cpu_usage']))
|
||||
@@ -297,7 +325,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
if instance_cpu_utilization is None:
|
||||
LOG.error(
|
||||
"No values returned by %(resource_id)s "
|
||||
"for %(metric_name)s" % dict(
|
||||
"for %(metric_name)s", dict(
|
||||
resource_id=instance.uuid,
|
||||
metric_name=self.METRIC_NAMES[
|
||||
self.config.datasource]['instance_cpu_usage']))
|
||||
@@ -372,6 +400,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
sorted_score):
|
||||
number_migrations = 0
|
||||
for mig_instance, __ in sorted_instances:
|
||||
# skip instances excluded by the audit scope when migrating
|
||||
if mig_instance.watcher_exclude:
|
||||
LOG.debug("Instance is excluded by scope, "
|
||||
"skipped: %s", mig_instance.uuid)
|
||||
continue
|
||||
for node_uuid, __ in sorted_score:
|
||||
mig_source_node = self.compute_model.get_node_by_uuid(
|
||||
node_to_release)
|
||||
|
||||
331
watcher/decision_engine/strategy/strategies/host_maintenance.py
Normal file
@@ -0,0 +1,331 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 chinac.com
|
||||
#
|
||||
# Authors: suzhengwei<suzhengwei@chinac.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from oslo_log import log
|
||||
import six
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.common import exception as wexc
|
||||
from watcher.decision_engine.model import element
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class HostMaintenance(base.HostMaintenanceBaseStrategy):
|
||||
"""[PoC]Host Maintenance
|
||||
|
||||
*Description*
|
||||
|
||||
It is a migration strategy for one compute node maintenance,
|
||||
without interrupting the user's applications.
|
||||
If given one backup node, the strategy will firstly
|
||||
migrate all instances from the maintenance node to
|
||||
the backup node. If the backup node is not provided,
|
||||
it will migrate all instances, relying on nova-scheduler.
|
||||
|
||||
*Requirements*
|
||||
|
||||
* You must have at least 2 physical compute nodes to run this strategy.
|
||||
|
||||
*Limitations*
|
||||
|
||||
- This is a proof of concept that is not meant to be used in production
|
||||
- It migrates all instances from one host to other hosts. It's better to
|
||||
execute such a strategy when load is not heavy, and use this algorithm
|
||||
with `ONESHOT` audit.
|
||||
- It assumes that cold and live migrations are possible
|
||||
"""
|
||||
|
||||
INSTANCE_MIGRATION = "migrate"
|
||||
CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"
|
||||
REASON_FOR_DISABLE = 'watcher_disabled'
|
||||
|
||||
def __init__(self, config, osc=None):
|
||||
super(HostMaintenance, self).__init__(config, osc)
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "host_maintenance"
|
||||
|
||||
@classmethod
|
||||
def get_display_name(cls):
|
||||
return _("Host Maintenance Strategy")
|
||||
|
||||
@classmethod
|
||||
def get_translatable_display_name(cls):
|
||||
return "Host Maintenance Strategy"
|
||||
|
||||
@classmethod
|
||||
def get_schema(cls):
|
||||
return {
|
||||
"properties": {
|
||||
"maintenance_node": {
|
||||
"description": "The name of the compute node which "
|
||||
"need maintenance",
|
||||
"type": "string",
|
||||
},
|
||||
"backup_node": {
|
||||
"description": "The name of the compute node which "
|
||||
"will backup the maintenance node.",
|
||||
"type": "string",
|
||||
},
|
||||
},
|
||||
"required": ["maintenance_node"],
|
||||
}
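Per the schema above, `maintenance_node` is mandatory and `backup_node` is optional; when the backup node is omitted, placement of the migrated instances is left to nova-scheduler. A hypothetical parameter set, with invented host names:

```python
# Hypothetical strategy parameters; the host names are invented.
parameters = {
    "maintenance_node": "compute01",  # node to drain and disable
    "backup_node": "compute02",       # optional explicit migration target
}
```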
|
||||
|
||||
def get_disabled_compute_nodes_with_reason(self, reason=None):
|
||||
return {uuid: cn for uuid, cn in
|
||||
self.compute_model.get_all_compute_nodes().items()
|
||||
if cn.state == element.ServiceState.ONLINE.value and
|
||||
cn.status == element.ServiceState.DISABLED.value and
|
||||
cn.disabled_reason == reason}
|
||||
|
||||
def get_disabled_compute_nodes(self):
|
||||
return self.get_disabled_compute_nodes_with_reason(
|
||||
self.REASON_FOR_DISABLE)
|
||||
|
||||
def get_instance_state_str(self, instance):
|
||||
"""Get instance state in string format"""
|
||||
if isinstance(instance.state, six.string_types):
|
||||
return instance.state
|
||||
elif isinstance(instance.state, element.InstanceState):
|
||||
return instance.state.value
|
||||
else:
|
||||
LOG.error('Unexpected instance state type, '
|
||||
'state=%(state)s, state_type=%(st)s.',
|
||||
dict(state=instance.state,
|
||||
st=type(instance.state)))
|
||||
raise wexc.WatcherException
|
||||
|
||||
def get_node_status_str(self, node):
|
||||
"""Get node status in string format"""
|
||||
if isinstance(node.status, six.string_types):
|
||||
return node.status
|
||||
elif isinstance(node.status, element.ServiceState):
|
||||
return node.status.value
|
||||
else:
|
||||
LOG.error('Unexpected node status type, '
|
||||
'status=%(status)s, status_type=%(st)s.',
|
||||
dict(status=node.status,
|
||||
st=type(node.status)))
|
||||
raise wexc.WatcherException
|
||||
|
||||
def get_node_capacity(self, node):
|
||||
"""Collect cpu, ram and disk capacity of a node.
|
||||
|
||||
:param node: node object
|
||||
:return: dict(cpu(cores), ram(MB), disk(B))
|
||||
"""
|
||||
return dict(cpu=node.vcpus,
|
||||
ram=node.memory,
|
||||
disk=node.disk_capacity)
|
||||
|
||||
    def get_node_used(self, node):
        """Collect cpu, ram and disk used of a node.

        :param node: node object
        :return: dict(cpu(cores), ram(MB), disk(B))
        """
        vcpus_used = 0
        memory_used = 0
        disk_used = 0
        for instance in self.compute_model.get_node_instances(node):
            vcpus_used += instance.vcpus
            memory_used += instance.memory
            disk_used += instance.disk

        return dict(cpu=vcpus_used,
                    ram=memory_used,
                    disk=disk_used)

    def get_node_free(self, node):
        """Collect cpu, ram and disk free of a node.

        :param node: node object
        :return: dict(cpu(cores), ram(MB), disk(B))
        """
        node_capacity = self.get_node_capacity(node)
        node_used = self.get_node_used(node)
        return dict(cpu=node_capacity['cpu'] - node_used['cpu'],
                    ram=node_capacity['ram'] - node_used['ram'],
                    disk=node_capacity['disk'] - node_used['disk'],
                    )

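get_node_capacity(), get_node_used() and get_node_free() reduce a node to three numbers each. A small standalone sketch of the same arithmetic, with made-up figures (names and values are illustrative only):

# Hypothetical node: 32 vCPUs, 131072 MB RAM, 2 TB disk, running two instances.
capacity = dict(cpu=32, ram=131072, disk=2 * 10**12)
used = dict(cpu=12, ram=49152, disk=6 * 10**11)  # summed over the node's instances
free = {m: capacity[m] - used[m] for m in ('cpu', 'ram', 'disk')}
print(free)  # {'cpu': 20, 'ram': 81920, 'disk': 1400000000000}
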
    def host_fits(self, source_node, destination_node):
        """Check whether the destination host can absorb the source host.

        Return True if all VMs on source_node could be migrated at once
        to destination_node.
        """
        source_node_used = self.get_node_used(source_node)
        destination_node_free = self.get_node_free(destination_node)
        metrics = ['cpu', 'ram']
        for m in metrics:
            if source_node_used[m] > destination_node_free[m]:
                return False
        return True

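Note that host_fits() compares only the cpu and ram dimensions; disk is not part of the check. A toy illustration of the same comparison, using plain dicts in place of node objects:

source_used = dict(cpu=12, ram=49152, disk=6 * 10**11)
destination_free = dict(cpu=20, ram=32768, disk=10**12)

fits = all(source_used[m] <= destination_free[m] for m in ('cpu', 'ram'))
print(fits)  # False: 49152 MB of RAM is needed but only 32768 MB is free
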
    def add_action_enable_compute_node(self, node):
        """Add an action for enabling a compute node into the solution."""
        params = {'state': element.ServiceState.ENABLED.value}
        self.solution.add_action(
            action_type=self.CHANGE_NOVA_SERVICE_STATE,
            resource_id=node.uuid,
            input_parameters=params)

    def add_action_maintain_compute_node(self, node):
        """Add an action for node maintenance into the solution."""
        params = {'state': element.ServiceState.DISABLED.value,
                  'disabled_reason': self.REASON_FOR_MAINTAINING}
        self.solution.add_action(
            action_type=self.CHANGE_NOVA_SERVICE_STATE,
            resource_id=node.uuid,
            input_parameters=params)

    def enable_compute_node_if_disabled(self, node):
        node_status_str = self.get_node_status_str(node)
        if node_status_str != element.ServiceState.ENABLED.value:
            self.add_action_enable_compute_node(node)

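Both helpers emit change_nova_service_state actions; only the input parameters differ. Assuming the solution stores each action roughly as an action-type / resource-id / parameters triple, and that ServiceState.ENABLED/DISABLED serialize to the strings 'enabled'/'disabled' (the internal representation is not shown in this diff), maintaining one node and re-enabling its backup would add entries along these lines:

# Illustrative shapes only; field names mirror the add_action() calls above.
maintain_action = {
    'action_type': 'change_nova_service_state',
    'resource_id': 'uuid-of-node-under-maintenance',
    'input_parameters': {'state': 'disabled',
                         'disabled_reason': 'watcher_maintaining'},
}
enable_action = {
    'action_type': 'change_nova_service_state',
    'resource_id': 'uuid-of-backup-node',
    'input_parameters': {'state': 'enabled'},
}
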
    def instance_migration(self, instance, src_node, des_node=None):
        """Add an action for instance migration into the solution.

        :param instance: instance object
        :param src_node: node object
        :param des_node: node object. if None, the instance will be
            migrated relying on nova-scheduler
        :return: None
        """
        instance_state_str = self.get_instance_state_str(instance)
        if instance_state_str == element.InstanceState.ACTIVE.value:
            migration_type = 'live'
        else:
            migration_type = 'cold'

        params = {'migration_type': migration_type,
                  'source_node': src_node.uuid}
        if des_node:
            params['destination_node'] = des_node.uuid
        self.solution.add_action(action_type=self.INSTANCE_MIGRATION,
                                 resource_id=instance.uuid,
                                 input_parameters=params)

    def host_migration(self, source_node, destination_node):
        """Migrate all instances from source_node to destination_node.

        Active instances are live-migrated; all other instances are
        cold-migrated.
        """
        instances = self.compute_model.get_node_instances(source_node)
        for instance in instances:
            self.instance_migration(instance, source_node, destination_node)

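instance_migration() picks the migration type from the instance state, and host_migration() simply applies it to every instance on the source node. A standalone sketch of the same selection rule, with hypothetical instances (assuming the ACTIVE state serializes to the string 'active'):

# Hypothetical instance records: (uuid, state) pairs.
instances = [('vm-1', 'active'), ('vm-2', 'stopped'), ('vm-3', 'active')]

for uuid, state in instances:
    migration_type = 'live' if state == 'active' else 'cold'
    print(uuid, '->', migration_type)
# vm-1 -> live
# vm-2 -> cold
# vm-3 -> live
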
    def safe_maintain(self, maintenance_node, backup_node=None):
        """Safely maintain one compute node.

        Migrate all instances of the maintenance_node to the backup
        host. If the user didn't give a backup host, one unused node is
        selected to back up the maintained node.

        It compares the resources of the backup node and of the
        maintained node to evaluate whether the migrations can succeed.
        If all instances of the maintained node can be migrated to the
        backup node, the maintained node is put into the
        'watcher_maintaining' status and the migrations are added to the
        solution.
        """
        # If the user gives a backup node with the required capacity, migrate
        # all instances from the maintenance node to the backup node.
        if backup_node:
            if self.host_fits(maintenance_node, backup_node):
                self.enable_compute_node_if_disabled(backup_node)
                self.add_action_maintain_compute_node(maintenance_node)
                self.host_migration(maintenance_node, backup_node)
                return True

        # If the user didn't give a backup host, select one unused node
        # with the required capacity, then migrate all instances
        # from the maintenance node to it.
        nodes = sorted(
            self.get_disabled_compute_nodes().values(),
            key=lambda x: self.get_node_capacity(x)['cpu'])
        if maintenance_node in nodes:
            nodes.remove(maintenance_node)

        for node in nodes:
            if self.host_fits(maintenance_node, node):
                self.enable_compute_node_if_disabled(node)
                self.add_action_maintain_compute_node(maintenance_node)
                self.host_migration(maintenance_node, node)
                return True

        return False

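When no backup node is given, safe_maintain() only considers nodes that are online but disabled with the watcher_disabled reason, tries them in order of vCPU capacity, and takes the first one the instances fit on. A toy sketch of that selection, with dictionaries standing in for node objects (all names and figures are made up):

# Hypothetical candidate pool: disabled-by-watcher nodes keyed by name.
candidates = {
    'n1': dict(cpu=8,  free_cpu=8,  free_ram=16384),
    'n2': dict(cpu=32, free_cpu=32, free_ram=131072),
}
needed = dict(cpu=12, ram=49152)  # resources used on the maintenance node

# Smallest-capacity nodes are tried first, mirroring the sort key above.
for name, node in sorted(candidates.items(), key=lambda kv: kv[1]['cpu']):
    if needed['cpu'] <= node['free_cpu'] and needed['ram'] <= node['free_ram']:
        print('backup node:', name)  # backup node: n2
        break
else:
    print('no suitable backup node; fall back to try_maintain()')
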
    def try_maintain(self, maintenance_node):
        """Try to maintain one compute node.

        It first puts the maintenance_node into the 'watcher_maintaining'
        status. Then it tries to migrate all instances of the maintenance
        node, relying on nova-scheduler for placement.
        """
        self.add_action_maintain_compute_node(maintenance_node)
        instances = self.compute_model.get_node_instances(maintenance_node)
        for instance in instances:
            self.instance_migration(instance, maintenance_node)

    def pre_execute(self):
        if not self.compute_model:
            raise wexc.ClusterStateNotDefined()

        if self.compute_model.stale:
            raise wexc.ClusterStateStale()

        # Only dump the model once we know it exists and is fresh; logging it
        # first would raise AttributeError when the model is undefined.
        LOG.debug(self.compute_model.to_string())

    def do_execute(self):
        LOG.info(_('Executing Host Maintenance Migration Strategy'))

        maintenance_node = self.input_parameters.get('maintenance_node')
        backup_node = self.input_parameters.get('backup_node')

        # if no VMs in the maintenance_node, just maintain the compute node
        src_node = self.compute_model.get_node_by_uuid(maintenance_node)
        if len(self.compute_model.get_node_instances(src_node)) == 0:
            if (src_node.disabled_reason !=
                    self.REASON_FOR_MAINTAINING):
                self.add_action_maintain_compute_node(src_node)
            return

        if backup_node:
            des_node = self.compute_model.get_node_by_uuid(backup_node)
        else:
            des_node = None

        if not self.safe_maintain(src_node, des_node):
            self.try_maintain(src_node)

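Putting the pieces together: for a maintenance node that still hosts instances and a backup node with enough headroom, the code above would add an optional enable action for the backup node, a disable action carrying the 'watcher_maintaining' reason for the maintenance node, and one migrate action per instance. A hypothetical parameter set matching the schema (node names are illustrative, not from the source):

# Hypothetical audit input for this strategy.
input_parameters = {
    'maintenance_node': 'compute01',
    'backup_node': 'compute02',
}
# With these parameters do_execute() resolves compute01 and either
# safe_maintain(compute01, compute02) succeeds, or try_maintain(compute01)
# falls back to nova-scheduler placement for every instance.
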
    def post_execute(self):
        """Post-execution phase

        This can be used to compute the global efficacy
        """
        LOG.debug(self.solution.actions)
        LOG.debug(self.compute_model.to_string())

@@ -199,10 +199,10 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
                     hosts_need_release[node.uuid] = {
                         'priority_vm': potential_priority_instance,
                         'noisy_vm': potential_noisy_instance}
-                    LOG.debug("Priority VM found: %s" % (
-                        potential_priority_instance.uuid))
-                    LOG.debug("Noisy VM found: %s" % (
-                        potential_noisy_instance.uuid))
+                    LOG.debug("Priority VM found: %s",
+                              potential_priority_instance.uuid)
+                    LOG.debug("Noisy VM found: %s",
+                              potential_noisy_instance.uuid)
                     loop_break_flag = True
                     break
||||