Compare commits
114 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
64b5a7c3e4 | ||
|
|
40bb92f749 | ||
|
|
92bd06cf94 | ||
|
|
c9e0dfd3f5 | ||
|
|
446fe1307a | ||
|
|
2836f460e3 | ||
|
|
cb9bb7301b | ||
|
|
cb644fcef9 | ||
|
|
0a7c87eebf | ||
|
|
d7f4f42772 | ||
|
|
bdc0eb196a | ||
|
|
59427eb0d9 | ||
|
|
b6801b192a | ||
|
|
0a6c2c16a4 | ||
|
|
9a44941c66 | ||
|
|
a6508a0013 | ||
|
|
c3db66ca09 | ||
|
|
5d0fe553c4 | ||
|
|
8b8239c3d8 | ||
|
|
920bd502ec | ||
|
|
c68d33f341 | ||
|
|
8e8fdbd809 | ||
|
|
681536c8c7 | ||
|
|
083b170083 | ||
|
|
de7b0129a1 | ||
|
|
323fd01a85 | ||
|
|
c0133e6585 | ||
|
|
63fffeacd8 | ||
|
|
bc791f0e75 | ||
|
|
6ed417e6a7 | ||
|
|
4afefa3dfb | ||
|
|
9fadfbe40a | ||
|
|
f278874a93 | ||
|
|
1c5b247300 | ||
|
|
547bf5f87e | ||
|
|
96c0ac0ca8 | ||
|
|
a80fd2a51e | ||
|
|
58ea85c852 | ||
|
|
78f122f241 | ||
|
|
43eb997edb | ||
|
|
c440cdd69f | ||
|
|
b5bccba169 | ||
|
|
e058437ae0 | ||
|
|
1acacaa812 | ||
|
|
5bb1b6cbf0 | ||
|
|
de058d7ed1 | ||
|
|
98a65efb16 | ||
|
|
338539ec53 | ||
|
|
02f0f8e70a | ||
|
|
7f1bd20a09 | ||
|
|
d3d2a5ef8c | ||
|
|
3a6ae820c0 | ||
|
|
5a8860419e | ||
|
|
4aa1c7558b | ||
|
|
a8dab52376 | ||
|
|
5615d0523d | ||
|
|
10823ce133 | ||
|
|
18c098c4c1 | ||
|
|
db649d86b6 | ||
|
|
6e380b685b | ||
|
|
8dfff0e8e6 | ||
|
|
fbc7da755a | ||
|
|
b947c30910 | ||
|
|
9af96114af | ||
|
|
1ddf69a68f | ||
|
|
379ac791a8 | ||
|
|
81ea37de41 | ||
|
|
1c963fdc96 | ||
|
|
f32995228b | ||
|
|
0ec3d68994 | ||
|
|
3503e11506 | ||
|
|
c7f0ef37d0 | ||
|
|
d93b1ffe9f | ||
|
|
4e71a0c655 | ||
|
|
37dd713ed5 | ||
|
|
55aeb783e3 | ||
|
|
fe3f6e73be | ||
|
|
5baff7dc3e | ||
|
|
e3198d25a5 | ||
|
|
33ee575936 | ||
|
|
259f2562e6 | ||
|
|
1629247413 | ||
|
|
58d84aca6d | ||
|
|
236879490d | ||
|
|
79850cc89c | ||
|
|
b958214db8 | ||
|
|
a0b5f5aa1d | ||
|
|
a4a009a2c6 | ||
|
|
0ba8a35ade | ||
|
|
8bcc1b2097 | ||
|
|
b440f5c69a | ||
|
|
ad40c61ea9 | ||
|
|
858bbbf126 | ||
|
|
1d74f7e3bc | ||
|
|
b7641a9311 | ||
|
|
376d669af6 | ||
|
|
25d27f0288 | ||
|
|
3f4686ce79 | ||
|
|
86c1a9d77f | ||
|
|
9a6811ae6b | ||
|
|
e520f5f452 | ||
|
|
6a25bd983c | ||
|
|
c175ef2170 | ||
|
|
28733a5f30 | ||
|
|
7f8fec1bca | ||
|
|
278b1819d6 | ||
|
|
978bb11d4a | ||
|
|
3027b28942 | ||
|
|
2f0c1c12cf | ||
|
|
e122c61840 | ||
|
|
8f6eac819f | ||
|
|
de307e536e | ||
|
|
7406a1e713 | ||
|
|
982410dd3e |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -44,6 +44,8 @@ output/*/index.html
|
|||||||
# Sphinx
|
# Sphinx
|
||||||
doc/build
|
doc/build
|
||||||
doc/source/api
|
doc/source/api
|
||||||
|
doc/source/samples
|
||||||
|
doc/source/watcher.conf.sample
|
||||||
|
|
||||||
# pbr generates these
|
# pbr generates these
|
||||||
AUTHORS
|
AUTHORS
|
||||||
|
|||||||
@@ -10,12 +10,12 @@ Watcher
|
|||||||
|
|
||||||
OpenStack Watcher provides a flexible and scalable resource optimization
|
OpenStack Watcher provides a flexible and scalable resource optimization
|
||||||
service for multi-tenant OpenStack-based clouds.
|
service for multi-tenant OpenStack-based clouds.
|
||||||
Watcher provides a complete optimization loop—including everything from a
|
Watcher provides a complete optimization loop-including everything from a
|
||||||
metrics receiver, complex event processor and profiler, optimization processor
|
metrics receiver, complex event processor and profiler, optimization processor
|
||||||
and an action plan applier. This provides a robust framework to realize a wide
|
and an action plan applier. This provides a robust framework to realize a wide
|
||||||
range of cloud optimization goals, including the reduction of data center
|
range of cloud optimization goals, including the reduction of data center
|
||||||
operating costs, increased system performance via intelligent virtual machine
|
operating costs, increased system performance via intelligent virtual machine
|
||||||
migration, increased energy efficiency—and more!
|
migration, increased energy efficiency-and more!
|
||||||
|
|
||||||
* Free software: Apache license
|
* Free software: Apache license
|
||||||
* Wiki: http://wiki.openstack.org/wiki/Watcher
|
* Wiki: http://wiki.openstack.org/wiki/Watcher
|
||||||
|
|||||||
@@ -80,10 +80,7 @@ function cleanup_watcher {
|
|||||||
# configure_watcher() - Set config files, create data dirs, etc
|
# configure_watcher() - Set config files, create data dirs, etc
|
||||||
function configure_watcher {
|
function configure_watcher {
|
||||||
# Put config files in ``/etc/watcher`` for everyone to find
|
# Put config files in ``/etc/watcher`` for everyone to find
|
||||||
if [[ ! -d $WATCHER_CONF_DIR ]]; then
|
sudo install -d -o $STACK_USER $WATCHER_CONF_DIR
|
||||||
sudo mkdir -p $WATCHER_CONF_DIR
|
|
||||||
sudo chown $STACK_USER $WATCHER_CONF_DIR
|
|
||||||
fi
|
|
||||||
|
|
||||||
install_default_policy watcher
|
install_default_policy watcher
|
||||||
|
|
||||||
@@ -99,15 +96,13 @@ function configure_watcher {
|
|||||||
function create_watcher_accounts {
|
function create_watcher_accounts {
|
||||||
create_service_user "watcher" "admin"
|
create_service_user "watcher" "admin"
|
||||||
|
|
||||||
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
|
local watcher_service=$(get_or_create_service "watcher" \
|
||||||
local watcher_service=$(get_or_create_service "watcher" \
|
"infra-optim" "Watcher Infrastructure Optimization Service")
|
||||||
"infra-optim" "Watcher Infrastructure Optimization Service")
|
get_or_create_endpoint $watcher_service \
|
||||||
get_or_create_endpoint $watcher_service \
|
"$REGION_NAME" \
|
||||||
"$REGION_NAME" \
|
"$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" \
|
||||||
"$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" \
|
"$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" \
|
||||||
"$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" \
|
"$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT"
|
||||||
"$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT"
|
|
||||||
fi
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# create_watcher_conf() - Create a new watcher.conf file
|
# create_watcher_conf() - Create a new watcher.conf file
|
||||||
@@ -128,14 +123,8 @@ function create_watcher_conf {
|
|||||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
|
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
|
||||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_host $RABBIT_HOST
|
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_host $RABBIT_HOST
|
||||||
|
|
||||||
iniset $WATCHER_CONF keystone_authtoken admin_user watcher
|
|
||||||
iniset $WATCHER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
|
|
||||||
iniset $WATCHER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
|
|
||||||
|
|
||||||
configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR
|
configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR
|
||||||
|
configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR "watcher_clients_auth"
|
||||||
iniset $WATCHER_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v3
|
|
||||||
iniset $WATCHER_CONF keystone_authtoken auth_version v3
|
|
||||||
|
|
||||||
if is_fedora || is_suse; then
|
if is_fedora || is_suse; then
|
||||||
# watcher defaults to /usr/local/bin, but fedora and suse pip like to
|
# watcher defaults to /usr/local/bin, but fedora and suse pip like to
|
||||||
@@ -178,9 +167,8 @@ function create_watcher_conf {
|
|||||||
# create_watcher_cache_dir() - Part of the init_watcher() process
|
# create_watcher_cache_dir() - Part of the init_watcher() process
|
||||||
function create_watcher_cache_dir {
|
function create_watcher_cache_dir {
|
||||||
# Create cache dir
|
# Create cache dir
|
||||||
sudo mkdir -p $WATCHER_AUTH_CACHE_DIR
|
sudo install -d -o $STACK_USER $WATCHER_AUTH_CACHE_DIR
|
||||||
sudo chown $STACK_USER $WATCHER_AUTH_CACHE_DIR
|
rm -rf $WATCHER_AUTH_CACHE_DIR/*
|
||||||
rm -f $WATCHER_AUTH_CACHE_DIR/*
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# init_watcher() - Initialize databases, etc.
|
# init_watcher() - Initialize databases, etc.
|
||||||
|
|||||||
@@ -127,7 +127,19 @@ Watcher CLI
|
|||||||
The watcher command-line interface (CLI) can be used to interact with the
|
The watcher command-line interface (CLI) can be used to interact with the
|
||||||
Watcher system in order to control it or to know its current status.
|
Watcher system in order to control it or to know its current status.
|
||||||
|
|
||||||
Please, read `the detailed documentation about Watcher CLI <https://factory.b-com.com/www/watcher/doc/python-watcherclient/>`_
|
Please, read `the detailed documentation about Watcher CLI
|
||||||
|
<https://factory.b-com.com/www/watcher/doc/python-watcherclient/>`_.
|
||||||
|
|
||||||
|
.. _archi_watcher_dashboard_definition:
|
||||||
|
|
||||||
|
Watcher Dashboard
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
The Watcher Dashboard can be used to interact with the Watcher system through
|
||||||
|
Horizon in order to control it or to know its current status.
|
||||||
|
|
||||||
|
Please, read `the detailed documentation about Watcher Dashboard
|
||||||
|
<https://factory.b-com.com/www/watcher/doc/watcher-dashboard/>`_.
|
||||||
|
|
||||||
.. _archi_watcher_database_definition:
|
.. _archi_watcher_database_definition:
|
||||||
|
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ from watcher import version as watcher_version
|
|||||||
# Add any Sphinx extension module names here, as strings. They can be
|
# Add any Sphinx extension module names here, as strings. They can be
|
||||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||||
extensions = [
|
extensions = [
|
||||||
|
'oslo_config.sphinxconfiggen',
|
||||||
'sphinx.ext.autodoc',
|
'sphinx.ext.autodoc',
|
||||||
'sphinx.ext.viewcode',
|
'sphinx.ext.viewcode',
|
||||||
'sphinxcontrib.httpdomain',
|
'sphinxcontrib.httpdomain',
|
||||||
@@ -28,7 +29,8 @@ extensions = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
wsme_protocols = ['restjson']
|
wsme_protocols = ['restjson']
|
||||||
|
config_generator_config_file = '../../etc/watcher/watcher-config-generator.conf'
|
||||||
|
sample_config_basename = 'watcher'
|
||||||
|
|
||||||
# autodoc generation is a bit aggressive and a nuisance when doing heavy
|
# autodoc generation is a bit aggressive and a nuisance when doing heavy
|
||||||
# text edit cycles.
|
# text edit cycles.
|
||||||
|
|||||||
1
doc/source/config-generator.conf
Normal file
1
doc/source/config-generator.conf
Normal file
@@ -0,0 +1 @@
|
|||||||
|
../../etc/watcher/watcher-config-generator.conf
|
||||||
14
doc/source/deploy/conf-files.rst
Normal file
14
doc/source/deploy/conf-files.rst
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
.. _watcher_sample_configuration_files:
|
||||||
|
|
||||||
|
==================================
|
||||||
|
Watcher sample configuration files
|
||||||
|
==================================
|
||||||
|
|
||||||
|
watcher.conf
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
The ``watcher.conf`` file contains most of the options to configure the
|
||||||
|
Watcher services.
|
||||||
|
|
||||||
|
.. literalinclude:: ../watcher.conf.sample
|
||||||
|
:language: ini
|
||||||
@@ -34,6 +34,8 @@ The Watcher service includes the following components:
|
|||||||
- ``watcher-applier``: applies the action plan.
|
- ``watcher-applier``: applies the action plan.
|
||||||
- `python-watcherclient`_: A command-line interface (CLI) for interacting with
|
- `python-watcherclient`_: A command-line interface (CLI) for interacting with
|
||||||
the Watcher service.
|
the Watcher service.
|
||||||
|
- `watcher-dashboard`_: An Horizon plugin for interacting with the Watcher
|
||||||
|
service.
|
||||||
|
|
||||||
Additionally, the Bare Metal service has certain external dependencies, which
|
Additionally, the Bare Metal service has certain external dependencies, which
|
||||||
are very similar to other OpenStack services:
|
are very similar to other OpenStack services:
|
||||||
@@ -52,6 +54,7 @@ additional functionality:
|
|||||||
.. _`ceilometer`: https://github.com/openstack/ceilometer
|
.. _`ceilometer`: https://github.com/openstack/ceilometer
|
||||||
.. _`nova`: https://github.com/openstack/nova
|
.. _`nova`: https://github.com/openstack/nova
|
||||||
.. _`python-watcherclient`: https://github.com/openstack/python-watcherclient
|
.. _`python-watcherclient`: https://github.com/openstack/python-watcherclient
|
||||||
|
.. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard
|
||||||
.. _`watcher metering`: https://github.com/b-com/watcher-metering
|
.. _`watcher metering`: https://github.com/b-com/watcher-metering
|
||||||
.. _`RabbitMQ`: https://www.rabbitmq.com/
|
.. _`RabbitMQ`: https://www.rabbitmq.com/
|
||||||
|
|
||||||
@@ -160,17 +163,33 @@ Configure the Watcher service
|
|||||||
The Watcher service is configured via its configuration file. This file
|
The Watcher service is configured via its configuration file. This file
|
||||||
is typically located at ``/etc/watcher/watcher.conf``.
|
is typically located at ``/etc/watcher/watcher.conf``.
|
||||||
|
|
||||||
|
You can easily generate and update a sample configuration file
|
||||||
|
named :ref:`watcher.conf.sample <watcher_sample_configuration_files>` by using
|
||||||
|
these following commands::
|
||||||
|
|
||||||
|
$ git clone git://git.openstack.org/openstack/watcher
|
||||||
|
$ cd watcher/
|
||||||
|
$ tox -econfig
|
||||||
|
$ vi etc/watcher/watcher.conf.sample
|
||||||
|
|
||||||
|
|
||||||
The configuration file is organized into the following sections:
|
The configuration file is organized into the following sections:
|
||||||
|
|
||||||
* ``[DEFAULT]`` - General configuration
|
* ``[DEFAULT]`` - General configuration
|
||||||
* ``[api]`` - API server configuration
|
* ``[api]`` - API server configuration
|
||||||
* ``[database]`` - SQL driver configuration
|
* ``[database]`` - SQL driver configuration
|
||||||
* ``[keystone_authtoken]`` - Keystone Authentication plugin configuration
|
* ``[keystone_authtoken]`` - Keystone Authentication plugin configuration
|
||||||
|
* ``[watcher_clients_auth]`` - Keystone auth configuration for clients
|
||||||
* ``[watcher_applier]`` - Watcher Applier module configuration
|
* ``[watcher_applier]`` - Watcher Applier module configuration
|
||||||
* ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration
|
* ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration
|
||||||
* ``[watcher_goals]`` - Goals mapping configuration
|
* ``[watcher_goals]`` - Goals mapping configuration
|
||||||
* ``[watcher_strategies]`` - Strategy configuration
|
* ``[watcher_strategies]`` - Strategy configuration
|
||||||
* ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration
|
* ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration
|
||||||
|
* ``[ceilometer_client]`` - Ceilometer client configuration
|
||||||
|
* ``[cinder_client]`` - Cinder client configuration
|
||||||
|
* ``[glance_client]`` - Glance client configuration
|
||||||
|
* ``[nova_client]`` - Nova client configuration
|
||||||
|
* ``[neutron_client]`` - Neutron client configuration
|
||||||
|
|
||||||
The Watcher configuration file is expected to be named
|
The Watcher configuration file is expected to be named
|
||||||
``watcher.conf``. When starting Watcher, you can specify a different
|
``watcher.conf``. When starting Watcher, you can specify a different
|
||||||
@@ -237,39 +256,105 @@ so that the watcher service is configured for your needs.
|
|||||||
#rabbit_port = 5672
|
#rabbit_port = 5672
|
||||||
|
|
||||||
|
|
||||||
#. Configure the Watcher Service to use these credentials with the Identity
|
#. Watcher API shall validate the token provided by every incoming request,
|
||||||
Service. Replace IDENTITY_IP with the IP of the Identity server, and
|
via keystonemiddleware, which requires the Watcher service to be configured
|
||||||
replace WATCHER_PASSWORD with the password you chose for the ``watcher``
|
with the right credentials for the Identity service.
|
||||||
user in the Identity Service::
|
|
||||||
|
|
||||||
[keystone_authtoken]
|
In the configuration section here below:
|
||||||
|
|
||||||
# Complete public Identity API endpoint (string value)
|
* replace IDENTITY_IP with the IP of the Identity server
|
||||||
#auth_uri=<None>
|
* replace WATCHER_PASSWORD with the password you chose for the ``watcher``
|
||||||
auth_uri=http://IDENTITY_IP:5000/v3
|
user
|
||||||
|
* replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created
|
||||||
|
for OpenStack services (e.g. ``service``) ::
|
||||||
|
|
||||||
# Complete admin Identity API endpoint. This should specify the
|
[keystone_authtoken]
|
||||||
# unversioned root endpoint e.g. https://localhost:35357/ (string
|
|
||||||
# value)
|
|
||||||
#identity_uri = <None>
|
|
||||||
identity_uri = http://IDENTITY_IP:5000
|
|
||||||
|
|
||||||
# Keystone account username (string value)
|
# Authentication type to load (unknown value)
|
||||||
#admin_user=<None>
|
# Deprecated group/name - [DEFAULT]/auth_plugin
|
||||||
admin_user=watcher
|
#auth_type = <None>
|
||||||
|
auth_type = password
|
||||||
|
|
||||||
# Keystone account password (string value)
|
# Authentication URL (unknown value)
|
||||||
#admin_password=<None>
|
#auth_url = <None>
|
||||||
admin_password=WATCHER_DBPASSWORD
|
auth_url = http://IDENTITY_IP:35357
|
||||||
|
|
||||||
# Keystone service account tenant name to validate user tokens
|
# Username (unknown value)
|
||||||
# (string value)
|
# Deprecated group/name - [DEFAULT]/username
|
||||||
#admin_tenant_name=admin
|
#username = <None>
|
||||||
admin_tenant_name=KEYSTONE_SERVICE_PROJECT_NAME
|
username=watcher
|
||||||
|
|
||||||
# Directory used to cache files related to PKI tokens (string
|
# User's password (unknown value)
|
||||||
# value)
|
#password = <None>
|
||||||
#signing_dir=<None>
|
password = WATCHER_PASSWORD
|
||||||
|
|
||||||
|
# Domain ID containing project (unknown value)
|
||||||
|
#project_domain_id = <None>
|
||||||
|
project_domain_id = default
|
||||||
|
|
||||||
|
# User's domain id (unknown value)
|
||||||
|
#user_domain_id = <None>
|
||||||
|
user_domain_id = default
|
||||||
|
|
||||||
|
# Project name to scope to (unknown value)
|
||||||
|
# Deprecated group/name - [DEFAULT]/tenant-name
|
||||||
|
#project_name = <None>
|
||||||
|
project_name = KEYSTONE_SERVICE_PROJECT_NAME
|
||||||
|
|
||||||
|
#. Watcher's decision engine and applier interact with other OpenStack
|
||||||
|
projects through those projects' clients. In order to instantiate these
|
||||||
|
clients, Watcher needs to request a new session from the Identity service
|
||||||
|
using the right credentials.
|
||||||
|
|
||||||
|
In the configuration section here below:
|
||||||
|
|
||||||
|
* replace IDENTITY_IP with the IP of the Identity server
|
||||||
|
* replace WATCHER_PASSWORD with the password you chose for the ``watcher``
|
||||||
|
user
|
||||||
|
* replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created
|
||||||
|
for OpenStack services (e.g. ``service``) ::
|
||||||
|
|
||||||
|
[watcher_clients_auth]
|
||||||
|
|
||||||
|
# Authentication type to load (unknown value)
|
||||||
|
# Deprecated group/name - [DEFAULT]/auth_plugin
|
||||||
|
#auth_type = <None>
|
||||||
|
auth_type = password
|
||||||
|
|
||||||
|
# Authentication URL (unknown value)
|
||||||
|
#auth_url = <None>
|
||||||
|
auth_url = http://IDENTITY_IP:35357
|
||||||
|
|
||||||
|
# Username (unknown value)
|
||||||
|
# Deprecated group/name - [DEFAULT]/username
|
||||||
|
#username = <None>
|
||||||
|
username=watcher
|
||||||
|
|
||||||
|
# User's password (unknown value)
|
||||||
|
#password = <None>
|
||||||
|
password = WATCHER_PASSWORD
|
||||||
|
|
||||||
|
# Domain ID containing project (unknown value)
|
||||||
|
#project_domain_id = <None>
|
||||||
|
project_domain_id = default
|
||||||
|
|
||||||
|
# User's domain id (unknown value)
|
||||||
|
#user_domain_id = <None>
|
||||||
|
user_domain_id = default
|
||||||
|
|
||||||
|
# Project name to scope to (unknown value)
|
||||||
|
# Deprecated group/name - [DEFAULT]/tenant-name
|
||||||
|
#project_name = <None>
|
||||||
|
project_name = KEYSTONE_SERVICE_PROJECT_NAME
|
||||||
|
|
||||||
|
#. Configure the clients to use a specific version if desired. For example, to
|
||||||
|
configure Watcher to use a Nova client with version 2.1, use::
|
||||||
|
|
||||||
|
[nova_client]
|
||||||
|
|
||||||
|
# Version of Nova API to use in novaclient. (string value)
|
||||||
|
#api_version = 2
|
||||||
|
api_version = 2.1
|
||||||
|
|
||||||
#. Create the Watcher Service database tables::
|
#. Create the Watcher Service database tables::
|
||||||
|
|
||||||
|
|||||||
@@ -38,7 +38,11 @@ If you need help on a specific command, you can use:
|
|||||||
|
|
||||||
$ watcher help COMMAND
|
$ watcher help COMMAND
|
||||||
|
|
||||||
|
If you want to deploy Watcher in Horizon, please refer to the `Watcher Horizon
|
||||||
|
plugin installation guide`_.
|
||||||
|
|
||||||
.. _`installation guide`: https://factory.b-com.com/www/watcher/doc/python-watcherclient
|
.. _`installation guide`: https://factory.b-com.com/www/watcher/doc/python-watcherclient
|
||||||
|
.. _`Watcher Horizon plugin installation guide`: https://factory.b-com.com/www/watcher/doc/watcher-dashboard/deploy/installation.html
|
||||||
|
|
||||||
Seeing what the Watcher CLI can do ?
|
Seeing what the Watcher CLI can do ?
|
||||||
------------------------------------
|
------------------------------------
|
||||||
@@ -60,7 +64,7 @@ This goal should be declared in the Watcher service configuration file
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher audit-template-create my_first_audit SERVERS_CONSOLIDATION
|
$ watcher audit-template-create my_first_audit DUMMY
|
||||||
|
|
||||||
If you get "*You must provide a username via either --os-username or via
|
If you get "*You must provide a username via either --os-username or via
|
||||||
env[OS_USERNAME]*" you may have to verify your credentials.
|
env[OS_USERNAME]*" you may have to verify your credentials.
|
||||||
|
|||||||
@@ -60,3 +60,12 @@ Code Hosting
|
|||||||
Code Review
|
Code Review
|
||||||
https://review.openstack.org/#/q/status:open+project:openstack/watcher,n,z
|
https://review.openstack.org/#/q/status:open+project:openstack/watcher,n,z
|
||||||
|
|
||||||
|
IRC Channel
|
||||||
|
``#openstack-watcher`` (changelog_)
|
||||||
|
|
||||||
|
Weekly Meetings
|
||||||
|
on Wednesdays at 14:00 UTC in the ``#openstack-meeting-4`` IRC
|
||||||
|
channel (`meetings logs`_)
|
||||||
|
|
||||||
|
.. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/
|
||||||
|
.. _meetings logs: http://eavesdrop.openstack.org/meetings/watcher/
|
||||||
|
|||||||
@@ -9,35 +9,97 @@ Set up a development environment via DevStack
|
|||||||
=============================================
|
=============================================
|
||||||
|
|
||||||
Watcher is currently able to optimize compute resources - specifically Nova
|
Watcher is currently able to optimize compute resources - specifically Nova
|
||||||
compute hosts - via operations such as live migrations. In order for you to
|
compute hosts - via operations such as live migrations. In order for you to
|
||||||
fully be able to exercise what Watcher can do, it is necessary to have a
|
fully be able to exercise what Watcher can do, it is necessary to have a
|
||||||
multinode environment to use. If you have no experience with DevStack, you
|
multinode environment to use.
|
||||||
should check out the `DevStack documentation`_ and be comfortable with the
|
|
||||||
basics of DevStack before attempting to get a multinode DevStack setup with
|
|
||||||
the Watcher plugin.
|
|
||||||
|
|
||||||
You can set up the Watcher services quickly and easily using a Watcher
|
You can set up the Watcher services quickly and easily using a Watcher
|
||||||
DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin
|
DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin
|
||||||
model.
|
model. To enable the Watcher plugin with DevStack, add the following to the
|
||||||
|
|
||||||
.. _DevStack documentation: http://docs.openstack.org/developer/devstack/
|
|
||||||
.. _PluginModelDocs: http://docs.openstack.org/developer/devstack/plugins.html
|
|
||||||
|
|
||||||
It is recommended that you build off of the provided example local.conf files
|
|
||||||
(`local.conf.controller`_, `local.conf.compute`_). You'll likely want to
|
|
||||||
configure something to obtain metrics, such as Ceilometer. Ceilometer is used
|
|
||||||
in the example local.conf files.
|
|
||||||
|
|
||||||
To configure the Watcher services with DevStack, add the following to the
|
|
||||||
`[[local|localrc]]` section of your controller's `local.conf` to enable the
|
`[[local|localrc]]` section of your controller's `local.conf` to enable the
|
||||||
Watcher plugin::
|
Watcher plugin::
|
||||||
|
|
||||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
||||||
|
|
||||||
Then run devstack normally::
|
For more detailed instructions, see `Detailed DevStack Instructions`_. Check
|
||||||
|
out the `DevStack documentation`_ for more information regarding DevStack.
|
||||||
|
|
||||||
cd /opt/stack/devstack
|
.. _PluginModelDocs: http://docs.openstack.org/developer/devstack/plugins.html
|
||||||
./stack.sh
|
.. _DevStack documentation: http://docs.openstack.org/developer/devstack/
|
||||||
|
|
||||||
|
Detailed DevStack Instructions
|
||||||
|
==============================
|
||||||
|
|
||||||
|
#. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack).
|
||||||
|
One of these servers will be the controller node while the others will be
|
||||||
|
compute nodes. N is preferably >= 3 so that you have at least 2 compute
|
||||||
|
nodes, but in order to stand up the Watcher services only 1 server is
|
||||||
|
needed (i.e., no computes are needed if you want to just experiment with
|
||||||
|
the Watcher services). These servers can be VMs running on your local
|
||||||
|
machine via VirtualBox if you prefer. DevStack currently recommends that
|
||||||
|
you use Ubuntu 14.04 LTS. The servers should also have connections to the
|
||||||
|
same network such that they are all able to communicate with one another.
|
||||||
|
|
||||||
|
#. For each server, clone the DevStack repository and create the stack user::
|
||||||
|
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install git
|
||||||
|
git clone https://git.openstack.org/openstack-dev/devstack
|
||||||
|
sudo ./devstack/tools/create-stack-user.sh
|
||||||
|
|
||||||
|
Now you have a stack user that is used to run the DevStack processes. You
|
||||||
|
may want to give your stack user a password to allow SSH via a password::
|
||||||
|
|
||||||
|
sudo passwd stack
|
||||||
|
|
||||||
|
#. Switch to the stack user and clone the DevStack repo again::
|
||||||
|
|
||||||
|
sudo su stack
|
||||||
|
cd ~
|
||||||
|
git clone https://git.openstack.org/openstack-dev/devstack
|
||||||
|
|
||||||
|
#. For each compute node, copy the provided `local.conf.compute`_ example file
|
||||||
|
to the compute node's system at ~/devstack/local.conf. Make sure the
|
||||||
|
HOST_IP and SERVICE_HOST values are changed appropriately - i.e., HOST_IP
|
||||||
|
is set to the IP address of the compute node and SERVICE_HOST is set to the
|
||||||
|
IP address of the controller node.
|
||||||
|
|
||||||
|
If you need specific metrics collected (or want to use something other
|
||||||
|
than Ceilometer), be sure to configure it. For example, in the
|
||||||
|
`local.conf.compute`_ example file, the appropriate ceilometer plugins and
|
||||||
|
services are enabled and disabled. If you were using something other than
|
||||||
|
Ceilometer, then you would likely want to configure it likewise. The
|
||||||
|
example file also sets the compute monitors nova configuration option to
|
||||||
|
use the CPU virt driver. If you needed other metrics, it may be necessary
|
||||||
|
to configure similar configuration options for the projects providing those
|
||||||
|
metrics.
|
||||||
|
|
||||||
|
#. For the controller node, copy the provided `local.conf.controller`_ example
|
||||||
|
file to the controller node's system at ~/devstack/local.conf. Make sure
|
||||||
|
the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP
|
||||||
|
address of the controller node.
|
||||||
|
|
||||||
|
Note: if you want to use another Watcher git repository (such as a local
|
||||||
|
one), then change the enable plugin line::
|
||||||
|
|
||||||
|
enable_plugin watcher <your_local_git_repo> [optional_branch]
|
||||||
|
|
||||||
|
If you do this, then the Watcher DevStack plugin will try to pull the
|
||||||
|
python-watcherclient repo from <your_local_git_repo>/../, so either make
|
||||||
|
sure that is also available or specify WATCHERCLIENT_REPO in the local.conf
|
||||||
|
file.
|
||||||
|
|
||||||
|
Note: if you want to use a specific branch, specify WATCHER_BRANCH in the
|
||||||
|
local.conf file. By default it will use the master branch.
|
||||||
|
|
||||||
|
#. Start stacking from the controller node::
|
||||||
|
|
||||||
|
./devstack/stack.sh
|
||||||
|
|
||||||
|
#. Start stacking on each of the compute nodes using the same command.
|
||||||
|
|
||||||
|
#. Configure the environment for live migration via NFS. See the
|
||||||
|
`Multi-Node DevStack Environment`_ section for more details.
|
||||||
|
|
||||||
.. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller
|
.. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller
|
||||||
.. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute
|
.. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute
|
||||||
@@ -104,7 +166,6 @@ Restart the libvirt service::
|
|||||||
|
|
||||||
sudo service libvirt-bin restart
|
sudo service libvirt-bin restart
|
||||||
|
|
||||||
|
|
||||||
Setting up SSH keys between compute nodes to enable live migration
|
Setting up SSH keys between compute nodes to enable live migration
|
||||||
------------------------------------------------------------------
|
------------------------------------------------------------------
|
||||||
|
|
||||||
@@ -113,8 +174,8 @@ each compute node:
|
|||||||
|
|
||||||
1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub)
|
1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub)
|
||||||
needs to be in the DESTINATION stack user's authorized_keys file
|
needs to be in the DESTINATION stack user's authorized_keys file
|
||||||
(~stack/.ssh/authorized_keys). This can be accomplished by manually
|
(~stack/.ssh/authorized_keys). This can be accomplished by manually
|
||||||
copying the contents from the file on the SOURCE to the DESTINATION. If
|
copying the contents from the file on the SOURCE to the DESTINATION. If
|
||||||
you have a password configured for the stack user, then you can use the
|
you have a password configured for the stack user, then you can use the
|
||||||
following command to accomplish the same thing::
|
following command to accomplish the same thing::
|
||||||
|
|
||||||
@@ -122,7 +183,7 @@ each compute node:
|
|||||||
|
|
||||||
2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub)
|
2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub)
|
||||||
needs to be in the SOURCE root user's known_hosts file
|
needs to be in the SOURCE root user's known_hosts file
|
||||||
(/root/.ssh/known_hosts). This can be accomplished by running the
|
(/root/.ssh/known_hosts). This can be accomplished by running the
|
||||||
following on the SOURCE machine (hostname must be used)::
|
following on the SOURCE machine (hostname must be used)::
|
||||||
|
|
||||||
ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts
|
ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts
|
||||||
@@ -131,3 +192,13 @@ In essence, this means that every compute node's root user's public RSA key
|
|||||||
must exist in every other compute node's stack user's authorized_keys file and
|
must exist in every other compute node's stack user's authorized_keys file and
|
||||||
every compute node's public ECDSA key needs to be in every other compute
|
every compute node's public ECDSA key needs to be in every other compute
|
||||||
node's root user's known_hosts file.
|
node's root user's known_hosts file.
|
||||||
|
|
||||||
|
|
||||||
|
Environment final checkup
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
If you are willing to make sure everything is in order in your DevStack
|
||||||
|
environment, you can run the Watcher Tempest tests which will validate its API
|
||||||
|
but also that you can perform the typical Watcher workflows. To do so, have a
|
||||||
|
look at the :ref:`Tempest tests <tempest_tests>` section which will explain to
|
||||||
|
you how to run them.
|
||||||
|
|||||||
@@ -4,6 +4,8 @@
|
|||||||
|
|
||||||
https://creativecommons.org/licenses/by/3.0/
|
https://creativecommons.org/licenses/by/3.0/
|
||||||
|
|
||||||
|
.. _watcher_developement_environment:
|
||||||
|
|
||||||
=========================================
|
=========================================
|
||||||
Set up a development environment manually
|
Set up a development environment manually
|
||||||
=========================================
|
=========================================
|
||||||
@@ -143,34 +145,13 @@ You should then be able to `import watcher` using Python without issue:
|
|||||||
|
|
||||||
If you can import watcher without a traceback, you should be ready to develop.
|
If you can import watcher without a traceback, you should be ready to develop.
|
||||||
|
|
||||||
Run Watcher unit tests
|
Run Watcher tests
|
||||||
======================
|
=================
|
||||||
|
|
||||||
All unit tests should be run using tox. To run the unit tests under py27 and
|
Watcher provides both :ref:`unit tests <unit_tests>` and
|
||||||
also run the pep8 tests:
|
:ref:`functional/tempest tests <tempest_tests>`. Please refer to :doc:`testing`
|
||||||
|
to understand how to run them.
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ workon watcher
|
|
||||||
(watcher) $ pip install tox
|
|
||||||
|
|
||||||
(watcher) $ cd watcher
|
|
||||||
(watcher) $ tox -epep8 -epy27
|
|
||||||
|
|
||||||
You may pass options to the test programs using positional arguments. To run a
|
|
||||||
specific unit test, this passes the -r option and desired test (regex string)
|
|
||||||
to os-testr:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ workon watcher
|
|
||||||
(watcher) $ tox -epy27 -- tests.api
|
|
||||||
|
|
||||||
When you're done, deactivate the virtualenv:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ deactivate
|
|
||||||
|
|
||||||
Build the Watcher documentation
|
Build the Watcher documentation
|
||||||
===============================
|
===============================
|
||||||
@@ -269,6 +250,11 @@ interface.
|
|||||||
|
|
||||||
.. _`python-watcherclient`: https://github.com/openstack/python-watcherclient
|
.. _`python-watcherclient`: https://github.com/openstack/python-watcherclient
|
||||||
|
|
||||||
|
There is also an Horizon plugin for Watcher `watcher-dashboard`_ which
|
||||||
|
allows to interact with Watcher through a web-based interface.
|
||||||
|
|
||||||
|
.. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard
|
||||||
|
|
||||||
|
|
||||||
Exercising the Watcher Services locally
|
Exercising the Watcher Services locally
|
||||||
=======================================
|
=======================================
|
||||||
|
|||||||
171
doc/source/dev/plugin/action-plugin.rst
Normal file
171
doc/source/dev/plugin/action-plugin.rst
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
..
|
||||||
|
Except where otherwise noted, this document is licensed under Creative
|
||||||
|
Commons Attribution 3.0 License. You can view the license at:
|
||||||
|
|
||||||
|
https://creativecommons.org/licenses/by/3.0/
|
||||||
|
|
||||||
|
==================
|
||||||
|
Build a new action
|
||||||
|
==================
|
||||||
|
|
||||||
|
Watcher Applier has an external :ref:`action <action_definition>` plugin
|
||||||
|
interface which gives anyone the ability to integrate an external
|
||||||
|
:ref:`action <action_definition>` in order to extend the initial set of actions
|
||||||
|
Watcher provides.
|
||||||
|
|
||||||
|
This section gives some guidelines on how to implement and integrate custom
|
||||||
|
actions with Watcher.
|
||||||
|
|
||||||
|
|
||||||
|
Creating a new plugin
|
||||||
|
=====================
|
||||||
|
|
||||||
|
First of all you have to extend the base :py:class:`BaseAction` class which
|
||||||
|
defines a set of abstract methods and/or properties that you will have to
|
||||||
|
implement:
|
||||||
|
|
||||||
|
- The :py:attr:`~.BaseAction.schema` is an abstract property that you have to
|
||||||
|
implement. This is the first function to be called by the
|
||||||
|
:ref:`applier <watcher_applier_definition>` before any further processing
|
||||||
|
and its role is to validate the input parameters that were provided to it.
|
||||||
|
- The :py:meth:`~.BaseAction.precondition` is called before the execution of
|
||||||
|
an action. This method is a hook that can be used to perform some
|
||||||
|
initializations or to make some more advanced validation on its input
|
||||||
|
parameters. If you wish to block the execution based on this factor, you
|
||||||
|
simply have to ``raise`` an exception.
|
||||||
|
- The :py:meth:`~.BaseAction.postcondition` is called after the execution of
|
||||||
|
an action. As this function is called regardless of whether an action
|
||||||
|
succeeded or not, this can prove itself useful to perform cleanup
|
||||||
|
operations.
|
||||||
|
- The :py:meth:`~.BaseAction.execute` is the main component of an action.
|
||||||
|
This is where you should implement the logic of your action.
|
||||||
|
- The :py:meth:`~.BaseAction.revert` allows you to roll back the targeted
|
||||||
|
resource to its original state following a faulty execution. Indeed, this
|
||||||
|
method is called by the workflow engine whenever an action raises an
|
||||||
|
exception.
|
||||||
|
|
||||||
|
Here is an example showing how you can write a plugin called ``DummyAction``:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
# Filepath = <PROJECT_DIR>/thirdparty/dummy.py
|
||||||
|
# Import path = thirdparty.dummy
|
||||||
|
import voluptuous
|
||||||
|
|
||||||
|
from watcher.applier.actions import base
|
||||||
|
|
||||||
|
|
||||||
|
    class DummyAction(base.BaseAction):
|
||||||
|
|
||||||
|
@property
|
||||||
|
def schema(self):
|
||||||
|
return voluptuous.Schema({})
|
||||||
|
|
||||||
|
def execute(self):
|
||||||
|
# Does nothing
|
||||||
|
pass # Only returning False is considered as a failure
|
||||||
|
|
||||||
|
def revert(self):
|
||||||
|
# Does nothing
|
||||||
|
pass
|
||||||
|
|
||||||
|
def precondition(self):
|
||||||
|
# No pre-checks are done here
|
||||||
|
pass
|
||||||
|
|
||||||
|
def postcondition(self):
|
||||||
|
# Nothing done here
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
This implementation is the most basic one. So if you want to have more advanced
|
||||||
|
examples, have a look at the implementation of the actions already provided
|
||||||
|
    by Watcher.
|
||||||
|
To get a better understanding on how to implement a more advanced action,
|
||||||
|
have a look at the :py:class:`~watcher.applier.actions.migration.Migrate`
|
||||||
|
class.
|
||||||
|
|
||||||
|
Input validation
|
||||||
|
----------------
|
||||||
|
|
||||||
|
As you can see in the previous example, we are using `Voluptuous`_ to validate
|
||||||
|
the input parameters of an action. So if you want to learn more about how to
|
||||||
|
work with `Voluptuous`_, you can have a look at their `documentation`_ here:
|
||||||
|
|
||||||
|
.. _Voluptuous: https://github.com/alecthomas/voluptuous
|
||||||
|
.. _documentation: https://github.com/alecthomas/voluptuous/blob/master/README.md
|
||||||
|
|
||||||
|
Abstract Plugin Class
|
||||||
|
=====================
|
||||||
|
|
||||||
|
Here below is the abstract ``BaseAction`` class that every single action
|
||||||
|
should implement:
|
||||||
|
|
||||||
|
.. autoclass:: watcher.applier.actions.base.BaseAction
|
||||||
|
:members:
|
||||||
|
:noindex:
|
||||||
|
|
||||||
|
.. py:attribute:: schema
|
||||||
|
|
||||||
|
Defines a Schema that the input parameters shall comply to
|
||||||
|
|
||||||
|
:returns: A schema declaring the input parameters this action should be
|
||||||
|
provided along with their respective constraints
|
||||||
|
(e.g. type, value range, ...)
|
||||||
|
:rtype: :py:class:`voluptuous.Schema` instance
|
||||||
|
|
||||||
|
|
||||||
|
Register a new entry point
|
||||||
|
==========================
|
||||||
|
|
||||||
|
In order for the Watcher Applier to load your new action, the
|
||||||
|
action must be registered as a named entry point under the
|
||||||
|
``watcher_actions`` entry point of your ``setup.py`` file. If you are using
|
||||||
|
pbr_, this entry point should be placed in your ``setup.cfg`` file.
|
||||||
|
|
||||||
|
The name you give to your entry point has to be unique.
|
||||||
|
|
||||||
|
Here below is how you would proceed to register ``DummyAction`` using pbr_:
|
||||||
|
|
||||||
|
.. code-block:: ini
|
||||||
|
|
||||||
|
[entry_points]
|
||||||
|
watcher_actions =
|
||||||
|
dummy = thirdparty.dummy:DummyAction
|
||||||
|
|
||||||
|
.. _pbr: http://docs.openstack.org/developer/pbr/
|
||||||
|
|
||||||
|
|
||||||
|
Using action plugins
|
||||||
|
====================
|
||||||
|
|
||||||
|
The Watcher Applier service will automatically discover any installed plugins
|
||||||
|
when it is restarted. If a Python package containing a custom plugin is
|
||||||
|
installed within the same environment as Watcher, Watcher will automatically
|
||||||
|
make that plugin available for use.
|
||||||
|
|
||||||
|
At this point, you can use your new action plugin in your :ref:`strategy plugin
|
||||||
|
<implement_strategy_plugin>` if you reference it via the use of the
|
||||||
|
:py:meth:`~.Solution.add_action` method:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
# [...]
|
||||||
|
self.solution.add_action(
|
||||||
|
action_type="dummy", # Name of the entry point we registered earlier
|
||||||
|
applies_to="",
|
||||||
|
input_parameters={})
|
||||||
|
|
||||||
|
By doing so, your action will be saved within the Watcher Database, ready to be
|
||||||
|
processed by the planner for creating an action plan which can then be executed
|
||||||
|
by the Watcher Applier via its workflow engine.
|
||||||
|
|
||||||
|
|
||||||
|
Scheduling of an action plugin
|
||||||
|
==============================
|
||||||
|
|
||||||
|
Watcher provides a basic built-in :ref:`planner <watcher_planner_definition>`
|
||||||
|
which is only able to process the Watcher built-in actions. Therefore, you will
|
||||||
|
either have to use an existing third-party planner or :ref:`implement another
|
||||||
|
planner <implement_planner_plugin>` that will be able to take into account your
|
||||||
|
new action plugin.
|
||||||
90
doc/source/dev/plugin/base-setup.rst
Normal file
90
doc/source/dev/plugin/base-setup.rst
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
..
|
||||||
|
Except where otherwise noted, this document is licensed under Creative
|
||||||
|
Commons Attribution 3.0 License. You can view the license at:
|
||||||
|
|
||||||
|
https://creativecommons.org/licenses/by/3.0/
|
||||||
|
|
||||||
|
.. _plugin-base_setup:
|
||||||
|
|
||||||
|
=======================================
|
||||||
|
Create a third-party plugin for Watcher
|
||||||
|
=======================================
|
||||||
|
|
||||||
|
Watcher provides a plugin architecture which allows anyone to extend the
|
||||||
|
existing functionalities by implementing third-party plugins. This process can
|
||||||
|
be cumbersome so this documentation is there to help you get going as quickly
|
||||||
|
as possible.
|
||||||
|
|
||||||
|
|
||||||
|
Pre-requisites
|
||||||
|
==============
|
||||||
|
|
||||||
|
We assume that you have set up a working Watcher development environment. So if
|
||||||
|
    this is not already the case, you can check out our documentation which explains
|
||||||
|
how to set up a :ref:`development environment
|
||||||
|
<watcher_developement_environment>`.
|
||||||
|
|
||||||
|
.. _development environment:
|
||||||
|
|
||||||
|
Third party project scaffolding
|
||||||
|
===============================
|
||||||
|
|
||||||
|
First off, we need to create the project structure. To do so, we can use
|
||||||
|
`cookiecutter`_ and the `OpenStack cookiecutter`_ project scaffolder to
|
||||||
|
generate the skeleton of our project::
|
||||||
|
|
||||||
|
$ virtualenv thirdparty
|
||||||
|
$ source thirdparty/bin/activate
|
||||||
|
$ pip install cookiecutter
|
||||||
|
$ cookiecutter https://github.com/openstack-dev/cookiecutter
|
||||||
|
|
||||||
|
    The last command will ask you for a lot of information, and if you set
|
||||||
|
``module_name`` and ``repo_name`` as ``thirdparty``, you should end up with a
|
||||||
|
structure that looks like this::
|
||||||
|
|
||||||
|
$ cd thirdparty
|
||||||
|
$ tree .
|
||||||
|
.
|
||||||
|
├── babel.cfg
|
||||||
|
├── CONTRIBUTING.rst
|
||||||
|
├── doc
|
||||||
|
│ └── source
|
||||||
|
│ ├── conf.py
|
||||||
|
│ ├── contributing.rst
|
||||||
|
│ ├── index.rst
|
||||||
|
│ ├── installation.rst
|
||||||
|
│ ├── readme.rst
|
||||||
|
│ └── usage.rst
|
||||||
|
├── HACKING.rst
|
||||||
|
├── LICENSE
|
||||||
|
├── MANIFEST.in
|
||||||
|
├── README.rst
|
||||||
|
├── requirements.txt
|
||||||
|
├── setup.cfg
|
||||||
|
├── setup.py
|
||||||
|
├── test-requirements.txt
|
||||||
|
├── thirdparty
|
||||||
|
│ ├── __init__.py
|
||||||
|
│ └── tests
|
||||||
|
│ ├── base.py
|
||||||
|
│ ├── __init__.py
|
||||||
|
│ └── test_thirdparty.py
|
||||||
|
└── tox.ini
|
||||||
|
|
||||||
|
.. _cookiecutter: https://github.com/audreyr/cookiecutter
|
||||||
|
.. _OpenStack cookiecutter: https://github.com/openstack-dev/cookiecutter
|
||||||
|
|
||||||
|
Implementing a plugin for Watcher
|
||||||
|
=================================
|
||||||
|
|
||||||
|
Now that the project skeleton has been created, you can start the
|
||||||
|
implementation of your plugin. As of now, you can implement the following
|
||||||
|
plugins for Watcher:
|
||||||
|
|
||||||
|
- A :ref:`strategy plugin <implement_strategy_plugin>`
|
||||||
|
- A :ref:`planner plugin <implement_planner_plugin>`
|
||||||
|
- An :ref:`action plugin <implement_strategy_plugin>`
|
||||||
|
- A :ref:`workflow engine plugin <implement_workflow_engine_plugin>`
|
||||||
|
|
||||||
|
If you want to learn more on how to implement them, you can refer to their
|
||||||
|
dedicated documentation.
|
||||||
127
doc/source/dev/plugin/planner-plugin.rst
Normal file
127
doc/source/dev/plugin/planner-plugin.rst
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
..
|
||||||
|
Except where otherwise noted, this document is licensed under Creative
|
||||||
|
Commons Attribution 3.0 License. You can view the license at:
|
||||||
|
|
||||||
|
https://creativecommons.org/licenses/by/3.0/
|
||||||
|
|
||||||
|
.. _implement_planner_plugin:
|
||||||
|
|
||||||
|
===================
|
||||||
|
Build a new planner
|
||||||
|
===================
|
||||||
|
|
||||||
|
Watcher :ref:`Decision Engine <watcher_decision_engine_definition>` has an
|
||||||
|
external :ref:`planner <planner_definition>` plugin interface which gives
|
||||||
|
anyone the ability to integrate an external :ref:`planner <planner_definition>`
|
||||||
|
in order to extend the initial set of planners Watcher provides.
|
||||||
|
|
||||||
|
This section gives some guidelines on how to implement and integrate custom
|
||||||
|
planners with Watcher.
|
||||||
|
|
||||||
|
.. _Decision Engine: watcher_decision_engine_definition
|
||||||
|
|
||||||
|
Creating a new plugin
|
||||||
|
=====================
|
||||||
|
|
||||||
|
First of all you have to extend the base :py:class:`~.BasePlanner` class which
|
||||||
|
defines an abstract method that you will have to implement. The
|
||||||
|
:py:meth:`~.BasePlanner.schedule` is the method being called by the Decision
|
||||||
|
Engine to schedule a given solution (:py:class:`~.BaseSolution`) into an
|
||||||
|
:ref:`action plan <action_plan_definition>` by ordering/sequencing an unordered
|
||||||
|
set of actions contained in the proposed solution (for more details, see
|
||||||
|
:ref:`definition of a solution <solution_definition>`).
|
||||||
|
|
||||||
|
Here is an example showing how you can write a planner plugin called
|
||||||
|
``DummyPlanner``:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
# Filepath = third-party/third_party/dummy.py
|
||||||
|
# Import path = third_party.dummy
|
||||||
|
import uuid
|
||||||
|
from watcher.decision_engine.planner import base
|
||||||
|
|
||||||
|
|
||||||
|
class DummyPlanner(base.BasePlanner):
|
||||||
|
|
||||||
|
def _create_action_plan(self, context, audit_id):
|
||||||
|
action_plan_dict = {
|
||||||
|
'uuid': uuid.uuid4(),
|
||||||
|
'audit_id': audit_id,
|
||||||
|
'first_action_id': None,
|
||||||
|
'state': objects.action_plan.State.RECOMMENDED
|
||||||
|
}
|
||||||
|
|
||||||
|
new_action_plan = objects.ActionPlan(context, **action_plan_dict)
|
||||||
|
new_action_plan.create(context)
|
||||||
|
new_action_plan.save()
|
||||||
|
return new_action_plan
|
||||||
|
|
||||||
|
def schedule(self, context, audit_id, solution):
|
||||||
|
# Empty action plan
|
||||||
|
action_plan = self._create_action_plan(context, audit_id)
|
||||||
|
# todo: You need to create the workflow of actions here
|
||||||
|
# and attach it to the action plan
|
||||||
|
return action_plan
|
||||||
|
|
||||||
|
This implementation is the most basic one. So if you want to have more advanced
|
||||||
|
examples, have a look at the implementation of planners already provided by
|
||||||
|
Watcher like :py:class:`~.DefaultPlanner`. A list with all available planner
|
||||||
|
plugins can be found :ref:`here <watcher_planners>`.
|
||||||
|
|
||||||
|
Abstract Plugin Class
|
||||||
|
=====================
|
||||||
|
|
||||||
|
Here below is the abstract ``BasePlanner`` class that every single planner
|
||||||
|
should implement:
|
||||||
|
|
||||||
|
.. autoclass:: watcher.decision_engine.planner.base.BasePlanner
|
||||||
|
:members:
|
||||||
|
:noindex:
|
||||||
|
|
||||||
|
|
||||||
|
Register a new entry point
|
||||||
|
==========================
|
||||||
|
|
||||||
|
In order for the Watcher Decision Engine to load your new planner, the
|
||||||
|
latter must be registered as a new entry point under the
|
||||||
|
``watcher_planners`` entry point namespace of your ``setup.py`` file. If you
|
||||||
|
are using pbr_, this entry point should be placed in your ``setup.cfg`` file.
|
||||||
|
|
||||||
|
The name you give to your entry point has to be unique.
|
||||||
|
|
||||||
|
Here below is how you would proceed to register ``DummyPlanner`` using pbr_:
|
||||||
|
|
||||||
|
.. code-block:: ini
|
||||||
|
|
||||||
|
[entry_points]
|
||||||
|
watcher_planners =
|
||||||
|
dummy = third_party.dummy:DummyPlanner
|
||||||
|
|
||||||
|
.. _pbr: http://docs.openstack.org/developer/pbr/
|
||||||
|
|
||||||
|
|
||||||
|
Using planner plugins
|
||||||
|
=====================
|
||||||
|
|
||||||
|
The :ref:`Watcher Decision Engine <watcher_decision_engine_definition>` service
|
||||||
|
will automatically discover any installed plugins when it is started. This
|
||||||
|
means that if Watcher is already running when you install your plugin, you will
|
||||||
|
have to restart the related Watcher services. If a Python package containing a
|
||||||
|
custom plugin is installed within the same environment as Watcher, Watcher will
|
||||||
|
automatically make that plugin available for use.
|
||||||
|
|
||||||
|
At this point, Watcher will use your new planner if you referenced it in the
|
||||||
|
``planner`` option under the ``[watcher_planner]`` section of your
|
||||||
|
``watcher.conf`` configuration file when you started it. For example, if you
|
||||||
|
want to use the ``dummy`` planner you just installed, you would have to
|
||||||
|
    select it as follows:
|
||||||
|
|
||||||
|
.. code-block:: ini
|
||||||
|
|
||||||
|
[watcher_planner]
|
||||||
|
planner = dummy
|
||||||
|
|
||||||
|
As you may have noticed, only a single planner implementation can be activated
|
||||||
|
at a time, so make sure it is generic enough to support all your strategies
|
||||||
|
and actions.
|
||||||
@@ -4,17 +4,19 @@
|
|||||||
|
|
||||||
https://creativecommons.org/licenses/by/3.0/
|
https://creativecommons.org/licenses/by/3.0/
|
||||||
|
|
||||||
|
.. _implement_strategy_plugin:
|
||||||
|
|
||||||
=================================
|
=================================
|
||||||
Build a new optimization strategy
|
Build a new optimization strategy
|
||||||
=================================
|
=================================
|
||||||
|
|
||||||
Watcher Decision Engine has an external :ref:`strategy <strategy_definition>`
|
Watcher Decision Engine has an external :ref:`strategy <strategy_definition>`
|
||||||
plugin interface which gives anyone the ability to integrate an external
|
plugin interface which gives anyone the ability to integrate an external
|
||||||
:ref:`strategy <strategy_definition>` in order to make use of placement
|
strategy in order to make use of placement algorithms.
|
||||||
algorithms.
|
|
||||||
|
|
||||||
This section gives some guidelines on how to implement and integrate custom
|
This section gives some guidelines on how to implement and integrate custom
|
||||||
Stategies with Watcher.
|
strategies with Watcher.
|
||||||
|
|
||||||
|
|
||||||
Pre-requisites
|
Pre-requisites
|
||||||
==============
|
==============
|
||||||
@@ -29,15 +31,14 @@ Creating a new plugin
|
|||||||
|
|
||||||
First of all you have to:
|
First of all you have to:
|
||||||
|
|
||||||
- Extend the base ``BaseStrategy`` class
|
- Extend :py:class:`~.BaseStrategy`
|
||||||
- Implement its ``execute`` method
|
- Implement its :py:meth:`~.BaseStrategy.execute` method
|
||||||
|
|
||||||
Here is an example showing how you can write a plugin called ``DummyStrategy``:
|
Here is an example showing how you can write a plugin called ``DummyStrategy``:
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
# Filepath = third-party/third_party/dummy.py
|
import uuid
|
||||||
# Import path = third_party.dummy
|
|
||||||
|
|
||||||
class DummyStrategy(BaseStrategy):
|
class DummyStrategy(BaseStrategy):
|
||||||
|
|
||||||
@@ -48,14 +49,25 @@ Here is an example showing how you can write a plugin called ``DummyStrategy``:
|
|||||||
super(DummyStrategy, self).__init__(name, description)
|
super(DummyStrategy, self).__init__(name, description)
|
||||||
|
|
||||||
def execute(self, model):
|
def execute(self, model):
|
||||||
self.solution.add_change_request(
|
migration_type = 'live'
|
||||||
Migrate(vm=my_vm, src_hypervisor=src, dest_hypervisor=dest)
|
src_hypervisor = 'compute-host-1'
|
||||||
)
|
dst_hypervisor = 'compute-host-2'
|
||||||
|
instance_id = uuid.uuid4()
|
||||||
|
parameters = {'migration_type': migration_type,
|
||||||
|
'src_hypervisor': src_hypervisor,
|
||||||
|
'dst_hypervisor': dst_hypervisor}
|
||||||
|
self.solution.add_action(action_type="migration",
|
||||||
|
resource_id=instance_id,
|
||||||
|
input_parameters=parameters)
|
||||||
# Do some more stuff here ...
|
# Do some more stuff here ...
|
||||||
return self.solution
|
return self.solution
|
||||||
|
|
||||||
As you can see in the above example, the ``execute()`` method returns a
|
As you can see in the above example, the :py:meth:`~.BaseStrategy.execute`
|
||||||
solution as required.
|
method returns a :py:class:`~.BaseSolution` instance as required. This solution
|
||||||
|
is what wraps the abstract set of actions the strategy recommends to you. This
|
||||||
|
solution is then processed by a :ref:`planner <planner_definition>` to produce
|
||||||
|
an action plan which shall contain the sequenced flow of actions to be
|
||||||
|
executed by the :ref:`Watcher Applier <watcher_applier_definition>`.
|
||||||
|
|
||||||
Please note that your strategy class will be instantiated without any
|
Please note that your strategy class will be instantiated without any
|
||||||
parameter. Therefore, you should make sure not to make any of them required in
|
parameter. Therefore, you should make sure not to make any of them required in
|
||||||
@@ -65,13 +77,10 @@ your ``__init__`` method.
|
|||||||
Abstract Plugin Class
|
Abstract Plugin Class
|
||||||
=====================
|
=====================
|
||||||
|
|
||||||
Here below is the abstract ``BaseStrategy`` class that every single strategy
|
Here below is the abstract :py:class:`~.BaseStrategy` class that every single
|
||||||
should implement:
|
strategy should implement:
|
||||||
|
|
||||||
.. automodule:: watcher.decision_engine.strategy.strategies.base
|
.. autoclass:: watcher.decision_engine.strategy.strategies.base.BaseStrategy
|
||||||
:noindex:
|
|
||||||
|
|
||||||
.. autoclass:: BaseStrategy
|
|
||||||
:members:
|
:members:
|
||||||
:noindex:
|
:noindex:
|
||||||
|
|
||||||
@@ -92,11 +101,11 @@ Here below is how you would proceed to register ``DummyStrategy`` using pbr_:
|
|||||||
|
|
||||||
[entry_points]
|
[entry_points]
|
||||||
watcher_strategies =
|
watcher_strategies =
|
||||||
dummy = third_party.dummy:DummyStrategy
|
dummy = thirdparty.dummy:DummyStrategy
|
||||||
|
|
||||||
|
|
||||||
To get a better understanding on how to implement a more advanced strategy,
|
To get a better understanding on how to implement a more advanced strategy,
|
||||||
have a look at the :py:class:`BasicConsolidation` class.
|
have a look at the :py:class:`~.BasicConsolidation` class.
|
||||||
|
|
||||||
.. _pbr: http://docs.openstack.org/developer/pbr/
|
.. _pbr: http://docs.openstack.org/developer/pbr/
|
||||||
|
|
||||||
@@ -104,12 +113,12 @@ Using strategy plugins
|
|||||||
======================
|
======================
|
||||||
|
|
||||||
The Watcher Decision Engine service will automatically discover any installed
|
The Watcher Decision Engine service will automatically discover any installed
|
||||||
plugins when it is run. If a Python package containing a custom plugin is
|
plugins when it is restarted. If a Python package containing a custom plugin is
|
||||||
installed within the same environment as Watcher, Watcher will automatically
|
installed within the same environment as Watcher, Watcher will automatically
|
||||||
make that plugin available for use.
|
make that plugin available for use.
|
||||||
|
|
||||||
At this point, the way Watcher will use your new strategy if you reference it
|
At this point, Watcher will use your new strategy if you reference it in the
|
||||||
in the ``goals`` under the ``[watcher_goals]`` section of your ``watcher.conf``
|
``goals`` under the ``[watcher_goals]`` section of your ``watcher.conf``
|
||||||
configuration file. For example, if you want to use a ``dummy`` strategy you
|
configuration file. For example, if you want to use a ``dummy`` strategy you
|
||||||
just installed, you would have to associate it to a goal like this:
|
just installed, you would have to associate it to a goal like this:
|
||||||
|
|
||||||
@@ -143,13 +152,13 @@ pluggable backend.
|
|||||||
Finally, if your strategy requires new metrics not covered by Ceilometer, you
|
Finally, if your strategy requires new metrics not covered by Ceilometer, you
|
||||||
can add them through a Ceilometer `plugin`_.
|
can add them through a Ceilometer `plugin`_.
|
||||||
|
|
||||||
|
|
||||||
.. _`Helper`: https://github.com/openstack/watcher/blob/master/watcher/metrics_engine/cluster_history/ceilometer.py#L31
|
.. _`Helper`: https://github.com/openstack/watcher/blob/master/watcher/metrics_engine/cluster_history/ceilometer.py#L31
|
||||||
.. _`Ceilometer developer guide`: http://docs.openstack.org/developer/ceilometer/architecture.html#storing-the-data
|
.. _`Ceilometer developer guide`: http://docs.openstack.org/developer/ceilometer/architecture.html#storing-the-data
|
||||||
.. _`here`: http://docs.openstack.org/developer/ceilometer/install/dbreco.html#choosing-a-database-backend
|
.. _`here`: http://docs.openstack.org/developer/ceilometer/install/dbreco.html#choosing-a-database-backend
|
||||||
.. _`plugin`: http://docs.openstack.org/developer/ceilometer/plugins.html
|
.. _`plugin`: http://docs.openstack.org/developer/ceilometer/plugins.html
|
||||||
.. _`Ceilosca`: https://github.com/openstack/monasca-ceilometer/blob/master/ceilosca/ceilometer/storage/impl_monasca.py
|
.. _`Ceilosca`: https://github.com/openstack/monasca-ceilometer/blob/master/ceilosca/ceilometer/storage/impl_monasca.py
|
||||||
|
|
||||||
|
|
||||||
Read usage metrics using the Python binding
|
Read usage metrics using the Python binding
|
||||||
-------------------------------------------
|
-------------------------------------------
|
||||||
|
|
||||||
@@ -157,39 +166,43 @@ You can find the information about the Ceilometer Python binding on the
|
|||||||
OpenStack `ceilometer client python API documentation
|
OpenStack `ceilometer client python API documentation
|
||||||
<http://docs.openstack.org/developer/python-ceilometerclient/api.html>`_
|
<http://docs.openstack.org/developer/python-ceilometerclient/api.html>`_
|
||||||
|
|
||||||
The first step is to authenticate against the Ceilometer service
|
To facilitate the process, Watcher provides the ``osc`` attribute to every
|
||||||
(assuming that you already imported the Ceilometer client for Python)
|
strategy which includes clients to major OpenStack services, including
|
||||||
with this call:
|
Ceilometer. So to access it within your strategy, you can do the following:
|
||||||
|
|
||||||
.. code-block:: py
|
.. code-block:: py
|
||||||
|
|
||||||
cclient = ceilometerclient.client.get_client(VERSION, os_username=USERNAME,
|
# Within your strategy "execute()"
|
||||||
os_password=PASSWORD, os_tenant_name=PROJECT_NAME, os_auth_url=AUTH_URL)
|
cclient = self.osc.ceilometer
|
||||||
|
# TODO: Do something here
|
||||||
|
|
||||||
Using that you can now query the values for that specific metric:
|
Using that you can now query the values for that specific metric:
|
||||||
|
|
||||||
.. code-block:: py
|
.. code-block:: py
|
||||||
|
|
||||||
value_cpu = cclient.samples.list(meter_name='cpu_util', limit=10, q=query)
|
query = None # e.g. [{'field': 'foo', 'op': 'le', 'value': 34},]
|
||||||
|
value_cpu = cclient.samples.list(
|
||||||
|
meter_name='cpu_util',
|
||||||
|
limit=10, q=query)
|
||||||
|
|
||||||
|
|
||||||
Read usage metrics using the Watcher Cluster History Helper
|
Read usage metrics using the Watcher Cluster History Helper
|
||||||
-----------------------------------------------------------
|
-----------------------------------------------------------
|
||||||
|
|
||||||
Here below is the abstract ``BaseClusterHistory`` class of the Helper.
|
Here below is the abstract ``BaseClusterHistory`` class of the Helper.
|
||||||
|
|
||||||
.. automodule:: watcher.metrics_engine.cluster_history.api
|
.. autoclass:: watcher.metrics_engine.cluster_history.api.BaseClusterHistory
|
||||||
:noindex:
|
|
||||||
|
|
||||||
.. autoclass:: BaseClusterHistory
|
|
||||||
:members:
|
:members:
|
||||||
:noindex:
|
:noindex:
|
||||||
|
|
||||||
|
|
||||||
The following snippet code shows how to create a Cluster History class:
|
The following code snippet shows how to create a Cluster History class:
|
||||||
|
|
||||||
.. code-block:: py
|
.. code-block:: py
|
||||||
|
|
||||||
query_history = CeilometerClusterHistory()
|
from watcher.metrics_engine.cluster_history import ceilometer as ceil
|
||||||
|
|
||||||
|
query_history = ceil.CeilometerClusterHistory()
|
||||||
|
|
||||||
Using that you can now query the values for that specific metric:
|
Using that you can now query the values for that specific metric:
|
||||||
|
|
||||||
@@ -200,4 +213,3 @@ Using that you can now query the values for that specific metric:
|
|||||||
period="7200",
|
period="7200",
|
||||||
aggregate='avg'
|
aggregate='avg'
|
||||||
)
|
)
|
||||||
|
|
||||||
38
doc/source/dev/plugins.rst
Normal file
38
doc/source/dev/plugins.rst
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
..
|
||||||
|
Except where otherwise noted, this document is licensed under Creative
|
||||||
|
Commons Attribution 3.0 License. You can view the license at:
|
||||||
|
|
||||||
|
https://creativecommons.org/licenses/by/3.0/
|
||||||
|
|
||||||
|
|
||||||
|
=================
|
||||||
|
Available Plugins
|
||||||
|
=================
|
||||||
|
|
||||||
|
.. _watcher_strategies:
|
||||||
|
|
||||||
|
Strategies
|
||||||
|
==========
|
||||||
|
|
||||||
|
.. drivers-doc:: watcher_strategies
|
||||||
|
|
||||||
|
.. _watcher_actions:
|
||||||
|
|
||||||
|
Actions
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. drivers-doc:: watcher_actions
|
||||||
|
|
||||||
|
.. _watcher_workflow_engines:
|
||||||
|
|
||||||
|
Workflow Engines
|
||||||
|
================
|
||||||
|
|
||||||
|
.. drivers-doc:: watcher_workflow_engines
|
||||||
|
|
||||||
|
.. _watcher_planners:
|
||||||
|
|
||||||
|
Planners
|
||||||
|
========
|
||||||
|
|
||||||
|
.. drivers-doc:: watcher_planners
|
||||||
50
doc/source/dev/testing.rst
Normal file
50
doc/source/dev/testing.rst
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
..
|
||||||
|
Except where otherwise noted, this document is licensed under Creative
|
||||||
|
Commons Attribution 3.0 License. You can view the license at:
|
||||||
|
|
||||||
|
https://creativecommons.org/licenses/by/3.0/
|
||||||
|
|
||||||
|
=======
|
||||||
|
Testing
|
||||||
|
=======
|
||||||
|
|
||||||
|
.. _unit_tests:
|
||||||
|
|
||||||
|
Unit tests
|
||||||
|
==========
|
||||||
|
|
||||||
|
All unit tests should be run using `tox`_. To run the same unit tests that are
|
||||||
|
executing onto `Gerrit`_ which includes ``py34``, ``py27`` and ``pep8``, you
|
||||||
|
can issue the following command::
|
||||||
|
|
||||||
|
$ workon watcher
|
||||||
|
(watcher) $ pip install tox
|
||||||
|
(watcher) $ cd watcher
|
||||||
|
(watcher) $ tox
|
||||||
|
|
||||||
|
If you want to only run one of the aforementioned, you can then issue one of
|
||||||
|
the following::
|
||||||
|
|
||||||
|
$ workon watcher
|
||||||
|
(watcher) $ tox -e py34
|
||||||
|
(watcher) $ tox -e py27
|
||||||
|
(watcher) $ tox -e pep8
|
||||||
|
|
||||||
|
.. _tox: https://tox.readthedocs.org/
|
||||||
|
.. _Gerrit: http://review.openstack.org/
|
||||||
|
|
||||||
|
You may pass options to the test programs using positional arguments. To run a
|
||||||
|
specific unit test, you can pass extra options to `os-testr`_ after putting
|
||||||
|
the ``--`` separator. So using the ``-r`` option followed by a regex string,
|
||||||
|
you can run the desired test::
|
||||||
|
|
||||||
|
$ workon watcher
|
||||||
|
(watcher) $ tox -e py27 -- -r watcher.tests.api
|
||||||
|
|
||||||
|
.. _os-testr: http://docs.openstack.org/developer/os-testr/
|
||||||
|
|
||||||
|
When you're done, deactivate the virtualenv::
|
||||||
|
|
||||||
|
$ deactivate
|
||||||
|
|
||||||
|
.. include:: ../../../watcher_tempest_plugin/README.rst
|
||||||
@@ -213,27 +213,27 @@ Here are some examples of
|
|||||||
|
|
||||||
It can be any of the `the official list of available resource types defined in OpenStack for HEAT <http://docs.openstack.org/developer/heat/template_guide/openstack.html>`_.
|
It can be any of the `the official list of available resource types defined in OpenStack for HEAT <http://docs.openstack.org/developer/heat/template_guide/openstack.html>`_.
|
||||||
|
|
||||||
.. _efficiency_definition:
|
.. _efficacy_definition:
|
||||||
|
|
||||||
Optimization Efficiency
|
Optimization Efficacy
|
||||||
=======================
|
=====================
|
||||||
|
|
||||||
The :ref:`Optimization Efficiency <efficiency_definition>` is the objective
|
The :ref:`Optimization Efficacy <efficacy_definition>` is the objective
|
||||||
measure of how much of the :ref:`Goal <goal_definition>` has been achieved in
|
measure of how much of the :ref:`Goal <goal_definition>` has been achieved in
|
||||||
respect with constraints and :ref:`SLAs <sla_definition>` defined by the
|
respect with constraints and :ref:`SLAs <sla_definition>` defined by the
|
||||||
:ref:`Customer <customer_definition>`.
|
:ref:`Customer <customer_definition>`.
|
||||||
|
|
||||||
The way efficiency is evaluated will depend on the
|
The way efficacy is evaluated will depend on the
|
||||||
:ref:`Goal <goal_definition>` to achieve.
|
:ref:`Goal <goal_definition>` to achieve.
|
||||||
|
|
||||||
Of course, the efficiency will be relevant only as long as the
|
Of course, the efficacy will be relevant only as long as the
|
||||||
:ref:`Action Plan <action_plan_definition>` is relevant
|
:ref:`Action Plan <action_plan_definition>` is relevant
|
||||||
(i.e., the current state of the :ref:`Cluster <cluster_definition>`
|
(i.e., the current state of the :ref:`Cluster <cluster_definition>`
|
||||||
has not changed in a way that a new :ref:`Audit <audit_definition>` would need
|
has not changed in a way that a new :ref:`Audit <audit_definition>` would need
|
||||||
to be launched).
|
to be launched).
|
||||||
|
|
||||||
For example, if the :ref:`Goal <goal_definition>` is to lower the energy
|
For example, if the :ref:`Goal <goal_definition>` is to lower the energy
|
||||||
consumption, the :ref:`Efficiency <efficiency_definition>` will be computed
|
consumption, the :ref:`Efficacy <efficacy_definition>` will be computed
|
||||||
using several indicators (KPIs):
|
using several indicators (KPIs):
|
||||||
|
|
||||||
- the percentage of energy gain (which must be the highest possible)
|
- the percentage of energy gain (which must be the highest possible)
|
||||||
@@ -244,7 +244,7 @@ using several indicators (KPIs):
|
|||||||
All those indicators (KPIs) are computed within a given timeframe, which is the
|
All those indicators (KPIs) are computed within a given timeframe, which is the
|
||||||
time taken to execute the whole :ref:`Action Plan <action_plan_definition>`.
|
time taken to execute the whole :ref:`Action Plan <action_plan_definition>`.
|
||||||
|
|
||||||
The efficiency also enables the :ref:`Administrator <administrator_definition>`
|
The efficacy also enables the :ref:`Administrator <administrator_definition>`
|
||||||
to objectively compare different :ref:`Strategies <strategy_definition>` for
|
to objectively compare different :ref:`Strategies <strategy_definition>` for
|
||||||
the same goal and same workload of the :ref:`Cluster <cluster_definition>`.
|
the same goal and same workload of the :ref:`Cluster <cluster_definition>`.
|
||||||
|
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
@startuml
|
@startuml
|
||||||
|
|
||||||
[*] --> RECOMMENDED: The Watcher Planner\ncreates the Action Plan
|
[*] --> RECOMMENDED: The Watcher Planner\ncreates the Action Plan
|
||||||
RECOMMENDED --> TRIGGERED: Administrator launches\nthe Action Plan
|
RECOMMENDED --> PENDING: Adminisrator launches\nthe Action Plan
|
||||||
TRIGGERED --> ONGOING: The Watcher Applier receives the request\nto launch the Action Plan
|
PENDING --> ONGOING: The Watcher Applier receives the request\nto launch the Action Plan
|
||||||
ONGOING --> FAILED: Something failed while executing\nthe Action Plan in the Watcher Applier
|
ONGOING --> FAILED: Something failed while executing\nthe Action Plan in the Watcher Applier
|
||||||
ONGOING --> SUCCEEDED: The Watcher Applier executed\nthe Action Plan successfully
|
ONGOING --> SUCCEEDED: The Watcher Applier executed\nthe Action Plan successfully
|
||||||
FAILED --> DELETED : Administrator removes\nAction Plan
|
FAILED --> DELETED : Administrator removes\nAction Plan
|
||||||
SUCCEEDED --> DELETED : Administrator removes\nAction Plan
|
SUCCEEDED --> DELETED : Administrator removes\nAction Plan
|
||||||
ONGOING --> CANCELLED : Administrator cancels\nAction Plan
|
ONGOING --> CANCELLED : Administrator cancels\nAction Plan
|
||||||
RECOMMENDED --> CANCELLED : Administrator cancels\nAction Plan
|
RECOMMENDED --> CANCELLED : Administrator cancels\nAction Plan
|
||||||
TRIGGERED --> CANCELLED : Administrator cancels\nAction Plan
|
PENDING --> CANCELLED : Administrator cancels\nAction Plan
|
||||||
CANCELLED --> DELETED
|
CANCELLED --> DELETED
|
||||||
DELETED --> [*]
|
DELETED --> [*]
|
||||||
|
|
||||||
|
|||||||
Binary file not shown.
|
Before Width: | Height: | Size: 47 KiB After Width: | Height: | Size: 48 KiB |
@@ -22,6 +22,7 @@ Watcher project consists of several source code repositories:
|
|||||||
* `watcher`_ - is the main repository. It contains code for Watcher API server,
|
* `watcher`_ - is the main repository. It contains code for Watcher API server,
|
||||||
Watcher Decision Engine and Watcher Applier.
|
Watcher Decision Engine and Watcher Applier.
|
||||||
* `python-watcherclient`_ - Client library and CLI client for Watcher.
|
* `python-watcherclient`_ - Client library and CLI client for Watcher.
|
||||||
|
* `watcher-dashboard`_ - Watcher Horizon plugin.
|
||||||
|
|
||||||
The documentation provided here is continually kept up-to-date based
|
The documentation provided here is continually kept up-to-date based
|
||||||
on the latest code, and may not represent the state of the project at any
|
on the latest code, and may not represent the state of the project at any
|
||||||
@@ -29,6 +30,7 @@ specific prior release.
|
|||||||
|
|
||||||
.. _watcher: https://git.openstack.org/cgit/openstack/watcher/
|
.. _watcher: https://git.openstack.org/cgit/openstack/watcher/
|
||||||
.. _python-watcherclient: https://git.openstack.org/cgit/openstack/python-watcherclient/
|
.. _python-watcherclient: https://git.openstack.org/cgit/openstack/python-watcherclient/
|
||||||
|
.. _watcher-dashboard: https://git.openstack.org/cgit/openstack/watcher-dashboard/
|
||||||
|
|
||||||
Developer Guide
|
Developer Guide
|
||||||
===============
|
===============
|
||||||
@@ -53,7 +55,8 @@ Getting Started
|
|||||||
dev/environment
|
dev/environment
|
||||||
dev/devstack
|
dev/devstack
|
||||||
deploy/configuration
|
deploy/configuration
|
||||||
|
deploy/conf-files
|
||||||
|
dev/testing
|
||||||
|
|
||||||
API References
|
API References
|
||||||
--------------
|
--------------
|
||||||
@@ -69,7 +72,11 @@ Plugins
|
|||||||
.. toctree::
|
.. toctree::
|
||||||
:maxdepth: 1
|
:maxdepth: 1
|
||||||
|
|
||||||
dev/strategy-plugin
|
dev/plugin/base-setup
|
||||||
|
dev/plugin/strategy-plugin
|
||||||
|
dev/plugin/action-plugin
|
||||||
|
dev/plugin/planner-plugin
|
||||||
|
dev/plugins
|
||||||
|
|
||||||
|
|
||||||
Admin Guide
|
Admin Guide
|
||||||
|
|||||||
@@ -52,7 +52,7 @@ run the following::
|
|||||||
|
|
||||||
Show the program's version number and exit.
|
Show the program's version number and exit.
|
||||||
|
|
||||||
.. option:: upgrade, downgrade, stamp, revision, version, create_schema
|
.. option:: upgrade, downgrade, stamp, revision, version, create_schema, purge
|
||||||
|
|
||||||
The :ref:`command <db-manage_cmds>` to run.
|
The :ref:`command <db-manage_cmds>` to run.
|
||||||
|
|
||||||
@@ -219,3 +219,42 @@ version
|
|||||||
Show help for version and exit.
|
Show help for version and exit.
|
||||||
|
|
||||||
This command will output the current database version.
|
This command will output the current database version.
|
||||||
|
|
||||||
|
purge
|
||||||
|
-----
|
||||||
|
|
||||||
|
.. program:: purge
|
||||||
|
|
||||||
|
.. option:: -h, --help
|
||||||
|
|
||||||
|
Show help for purge and exit.
|
||||||
|
|
||||||
|
.. option:: -d, --age-in-days
|
||||||
|
|
||||||
|
The number of days (starting from today) before which we consider soft
|
||||||
|
deleted objects as expired and should hence be erased. By default, all
|
||||||
|
objects soft deleted are considered expired. This can be useful as removing
|
||||||
|
a significant amount of objects may cause a performance issues.
|
||||||
|
|
||||||
|
.. option:: -n, --max-number
|
||||||
|
|
||||||
|
The maximum number of database objects we expect to be deleted. If exceeded,
|
||||||
|
this will prevent any deletion.
|
||||||
|
|
||||||
|
.. option:: -t, --audit-template
|
||||||
|
|
||||||
|
Either the UUID or name of the soft deleted audit template to purge. This
|
||||||
|
will also include any related objects with it.
|
||||||
|
|
||||||
|
.. option:: -e, --exclude-orphans
|
||||||
|
|
||||||
|
This is a flag to indicate when we want to exclude orphan objects from
|
||||||
|
deletion.
|
||||||
|
|
||||||
|
.. option:: --dry-run
|
||||||
|
|
||||||
|
This is a flag to indicate when we want to perform a dry run. This will show
|
||||||
|
the objects that would be deleted instead of actually deleting them.
|
||||||
|
|
||||||
|
This command will purge the current database by removing both its soft deleted
|
||||||
|
and orphan objects.
|
||||||
|
|||||||
@@ -8,6 +8,19 @@
|
|||||||
RESTful Web API (v1)
|
RESTful Web API (v1)
|
||||||
====================
|
====================
|
||||||
|
|
||||||
|
Goals
|
||||||
|
=====
|
||||||
|
|
||||||
|
.. rest-controller:: watcher.api.controllers.v1.goal:GoalsController
|
||||||
|
:webprefix: /v1/goal
|
||||||
|
|
||||||
|
.. autotype:: watcher.api.controllers.v1.goal.GoalCollection
|
||||||
|
:members:
|
||||||
|
|
||||||
|
.. autotype:: watcher.api.controllers.v1.goal.Goal
|
||||||
|
:members:
|
||||||
|
|
||||||
|
|
||||||
Audit Templates
|
Audit Templates
|
||||||
===============
|
===============
|
||||||
|
|
||||||
|
|||||||
4
etc/watcher/README-watcher.conf.txt
Normal file
4
etc/watcher/README-watcher.conf.txt
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
To generate the sample watcher.conf file, run the following
|
||||||
|
command from the top level of the watcher directory:
|
||||||
|
|
||||||
|
tox -econfig
|
||||||
9
etc/watcher/watcher-config-generator.conf
Normal file
9
etc/watcher/watcher-config-generator.conf
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
output_file = etc/watcher/watcher.conf.sample
|
||||||
|
wrap_width = 79
|
||||||
|
|
||||||
|
namespace = watcher
|
||||||
|
namespace = keystonemiddleware.auth_token
|
||||||
|
namespace = oslo.log
|
||||||
|
namespace = oslo.db
|
||||||
|
namespace = oslo.messaging
|
||||||
@@ -1,775 +0,0 @@
|
|||||||
[DEFAULT]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.log
|
|
||||||
#
|
|
||||||
|
|
||||||
# Use syslog for logging. Existing syslog format is DEPRECATED and
|
|
||||||
# will be changed later to honor RFC5424. This option is ignored if
|
|
||||||
# log_config_append is set. (boolean value)
|
|
||||||
#use_syslog = false
|
|
||||||
|
|
||||||
# Enables or disables syslog rfc5424 format for logging. If enabled,
|
|
||||||
# prefixes the MSG part of the syslog message with APP-NAME (RFC5424).
|
|
||||||
# The format without the APP-NAME is deprecated in Kilo, and will be
|
|
||||||
# removed in Mitaka, along with this option. This option is ignored if
|
|
||||||
# log_config_append is set. (boolean value)
|
|
||||||
# This option is deprecated for removal.
|
|
||||||
# Its value may be silently ignored in the future.
|
|
||||||
#use_syslog_rfc_format = true
|
|
||||||
|
|
||||||
# (Optional) The base directory used for relative log_file paths.
|
|
||||||
# This option is ignored if log_config_append is set. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/logdir
|
|
||||||
#log_dir = <None>
|
|
||||||
|
|
||||||
# Syslog facility to receive log lines. This option is ignored if
|
|
||||||
# log_config_append is set. (string value)
|
|
||||||
#syslog_log_facility = LOG_USER
|
|
||||||
|
|
||||||
# Log output to standard error. This option is ignored if
|
|
||||||
# log_config_append is set. (boolean value)
|
|
||||||
#use_stderr = true
|
|
||||||
|
|
||||||
# Format string to use for log messages with context. (string value)
|
|
||||||
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
|
|
||||||
|
|
||||||
# Format string to use for log messages when context is undefined.
|
|
||||||
# (string value)
|
|
||||||
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
|
|
||||||
|
|
||||||
# Additional data to append to log message when logging level for the
|
|
||||||
# message is DEBUG. (string value)
|
|
||||||
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
|
|
||||||
|
|
||||||
# If set to true, the logging level will be set to DEBUG instead of
|
|
||||||
# the default INFO level. (boolean value)
|
|
||||||
#debug = false
|
|
||||||
|
|
||||||
# Prefix each line of exception output with this format. (string
|
|
||||||
# value)
|
|
||||||
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
|
|
||||||
|
|
||||||
# If set to false, the logging level will be set to WARNING instead of
|
|
||||||
# the default INFO level. (boolean value)
|
|
||||||
# This option is deprecated for removal.
|
|
||||||
# Its value may be silently ignored in the future.
|
|
||||||
#verbose = true
|
|
||||||
|
|
||||||
# Defines the format string for %(user_identity)s that is used in
|
|
||||||
# logging_context_format_string. (string value)
|
|
||||||
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
|
|
||||||
|
|
||||||
# The name of a logging configuration file. This file is appended to
|
|
||||||
# any existing logging configuration files. For details about logging
|
|
||||||
# configuration files, see the Python logging module documentation.
|
|
||||||
# Note that when logging configuration files are used all logging
|
|
||||||
# configuration is defined in the configuration file and other logging
|
|
||||||
# configuration options are ignored (for example, log_format). (string
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/log_config
|
|
||||||
#log_config_append = <None>
|
|
||||||
|
|
||||||
# List of package logging levels in logger=LEVEL pairs. This option is
|
|
||||||
# ignored if log_config_append is set. (list value)
|
|
||||||
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN
|
|
||||||
|
|
||||||
# DEPRECATED. A logging.Formatter log message format string which may
|
|
||||||
# use any of the available logging.LogRecord attributes. This option
|
|
||||||
# is deprecated. Please use logging_context_format_string and
|
|
||||||
# logging_default_format_string instead. This option is ignored if
|
|
||||||
# log_config_append is set. (string value)
|
|
||||||
#log_format = <None>
|
|
||||||
|
|
||||||
# Enables or disables publication of error events. (boolean value)
|
|
||||||
#publish_errors = false
|
|
||||||
|
|
||||||
# Defines the format string for %%(asctime)s in log records. Default:
|
|
||||||
# %(default)s . This option is ignored if log_config_append is set.
|
|
||||||
# (string value)
|
|
||||||
#log_date_format = %Y-%m-%d %H:%M:%S
|
|
||||||
|
|
||||||
# The format for an instance that is passed with the log message.
|
|
||||||
# (string value)
|
|
||||||
#instance_format = "[instance: %(uuid)s] "
|
|
||||||
|
|
||||||
# (Optional) Name of log file to send logging output to. If no default
|
|
||||||
# is set, logging will go to stderr as defined by use_stderr. This
|
|
||||||
# option is ignored if log_config_append is set. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/logfile
|
|
||||||
#log_file = <None>
|
|
||||||
|
|
||||||
# The format for an instance UUID that is passed with the log message.
|
|
||||||
# (string value)
|
|
||||||
#instance_uuid_format = "[instance: %(uuid)s] "
|
|
||||||
|
|
||||||
# Enables or disables fatal status of deprecations. (boolean value)
|
|
||||||
#fatal_deprecations = false
|
|
||||||
|
|
||||||
# Uses logging handler designed to watch file system. When log file is
|
|
||||||
# moved or removed this handler will open a new log file with
|
|
||||||
# specified path instantaneously. It makes sense only if log_file
|
|
||||||
# option is specified and Linux platform is used. This option is
|
|
||||||
# ignored if log_config_append is set. (boolean value)
|
|
||||||
#watch_log_file = false
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.messaging
|
|
||||||
#
|
|
||||||
|
|
||||||
# Directory for holding IPC sockets. (string value)
|
|
||||||
#rpc_zmq_ipc_dir = /var/run/openstack
|
|
||||||
|
|
||||||
# Number of retries to find free port number before fail with
|
|
||||||
# ZMQBindError. (integer value)
|
|
||||||
#rpc_zmq_bind_port_retries = 100
|
|
||||||
|
|
||||||
# AMQP topic used for OpenStack notifications. (list value)
|
|
||||||
# Deprecated group/name - [rpc_notifier2]/topics
|
|
||||||
# Deprecated group/name - [DEFAULT]/notification_topics
|
|
||||||
#topics = notifications
|
|
||||||
|
|
||||||
# Name of this node. Must be a valid hostname, FQDN, or IP address.
|
|
||||||
# Must match "host" option, if running Nova. (string value)
|
|
||||||
#rpc_zmq_host = localhost
|
|
||||||
|
|
||||||
# The messaging driver to use, defaults to rabbit. Other drivers
|
|
||||||
# include amqp and zmq. (string value)
|
|
||||||
#rpc_backend = rabbit
|
|
||||||
|
|
||||||
# Host to locate redis. (string value)
|
|
||||||
#host = 127.0.0.1
|
|
||||||
|
|
||||||
# Seconds to wait before a cast expires (TTL). Only supported by
|
|
||||||
# impl_zmq. (integer value)
|
|
||||||
#rpc_cast_timeout = 30
|
|
||||||
|
|
||||||
# Seconds to wait for a response from a call. (integer value)
|
|
||||||
#rpc_response_timeout = 60
|
|
||||||
|
|
||||||
# Use this port to connect to redis host. (port value)
|
|
||||||
# Minimum value: 1
|
|
||||||
# Maximum value: 65535
|
|
||||||
#port = 6379
|
|
||||||
|
|
||||||
# The default number of seconds that poll should wait. Poll raises
|
|
||||||
# timeout exception when timeout expired. (integer value)
|
|
||||||
#rpc_poll_timeout = 1
|
|
||||||
|
|
||||||
# A URL representing the messaging driver to use and its full
|
|
||||||
# configuration. If not set, we fall back to the rpc_backend option
|
|
||||||
# and driver specific configuration. (string value)
|
|
||||||
#transport_url = <None>
|
|
||||||
|
|
||||||
# Password for Redis server (optional). (string value)
|
|
||||||
#password =
|
|
||||||
|
|
||||||
# Configures zmq-messaging to use proxy with non PUB/SUB patterns.
|
|
||||||
# (boolean value)
|
|
||||||
#direct_over_proxy = true
|
|
||||||
|
|
||||||
# The Drivers(s) to handle sending notifications. Possible values are
|
|
||||||
# messaging, messagingv2, routing, log, test, noop (multi valued)
|
|
||||||
# Deprecated group/name - [DEFAULT]/notification_driver
|
|
||||||
#driver =
|
|
||||||
|
|
||||||
# Size of executor thread pool. (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
|
|
||||||
#executor_thread_pool_size = 64
|
|
||||||
|
|
||||||
# Size of RPC connection pool. (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
|
|
||||||
#rpc_conn_pool_size = 30
|
|
||||||
|
|
||||||
# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy.
|
|
||||||
# (boolean value)
|
|
||||||
#use_pub_sub = true
|
|
||||||
|
|
||||||
# A URL representing the messaging driver to use for notifications. If
|
|
||||||
# not set, we fall back to the same configuration used for RPC.
|
|
||||||
# (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/notification_transport_url
|
|
||||||
#transport_url = <None>
|
|
||||||
|
|
||||||
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
|
|
||||||
# interface, or IP. The "host" option should point or resolve to this
|
|
||||||
# address. (string value)
|
|
||||||
#rpc_zmq_bind_address = *
|
|
||||||
|
|
||||||
# MatchMaker driver. (string value)
|
|
||||||
#rpc_zmq_matchmaker = redis
|
|
||||||
|
|
||||||
# Minimal port number for random ports range. (port value)
|
|
||||||
# Minimum value: 1
|
|
||||||
# Maximum value: 65535
|
|
||||||
#rpc_zmq_min_port = 49152
|
|
||||||
|
|
||||||
# Type of concurrency used. Either "native" or "eventlet" (string
|
|
||||||
# value)
|
|
||||||
#rpc_zmq_concurrency = eventlet
|
|
||||||
|
|
||||||
# Number of ZeroMQ contexts, defaults to 1. (integer value)
|
|
||||||
#rpc_zmq_contexts = 1
|
|
||||||
|
|
||||||
# Maximal port number for random ports range. (integer value)
|
|
||||||
# Minimum value: 1
|
|
||||||
# Maximum value: 65536
|
|
||||||
#rpc_zmq_max_port = 65536
|
|
||||||
|
|
||||||
# The default exchange under which topics are scoped. May be
|
|
||||||
# overridden by an exchange name specified in the transport_url
|
|
||||||
# option. (string value)
|
|
||||||
#control_exchange = openstack
|
|
||||||
|
|
||||||
# Maximum number of ingress messages to locally buffer per topic.
|
|
||||||
# Default is unlimited. (integer value)
|
|
||||||
#rpc_zmq_topic_backlog = <None>
|
|
||||||
|
|
||||||
|
|
||||||
[api]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From watcher
|
|
||||||
#
|
|
||||||
|
|
||||||
# The port for the watcher API server (integer value)
|
|
||||||
#port = 9322
|
|
||||||
|
|
||||||
# The listen IP for the watcher API server (string value)
|
|
||||||
#host = 0.0.0.0
|
|
||||||
|
|
||||||
# The maximum number of items returned in a single response from a
|
|
||||||
# collection resource. (integer value)
|
|
||||||
#max_limit = 1000
|
|
||||||
|
|
||||||
|
|
||||||
[database]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.db
|
|
||||||
#
|
|
||||||
|
|
||||||
# The SQLAlchemy connection string to use to connect to the database.
|
|
||||||
# (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_connection
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_connection
|
|
||||||
# Deprecated group/name - [sql]/connection
|
|
||||||
#connection = <None>
|
|
||||||
|
|
||||||
# Add Python stack traces to SQL as comment strings. (boolean value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_connection_trace
|
|
||||||
#connection_trace = false
|
|
||||||
|
|
||||||
# Seconds between retries of a database transaction. (integer value)
|
|
||||||
#db_retry_interval = 1
|
|
||||||
|
|
||||||
# Timeout before idle SQL connections are reaped. (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_idle_timeout
|
|
||||||
# Deprecated group/name - [sql]/idle_timeout
|
|
||||||
#idle_timeout = 3600
|
|
||||||
|
|
||||||
# If set, use this value for pool_timeout with SQLAlchemy. (integer
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
|
|
||||||
#pool_timeout = <None>
|
|
||||||
|
|
||||||
# If True, SQLite uses synchronous mode. (boolean value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sqlite_synchronous
|
|
||||||
#sqlite_synchronous = true
|
|
||||||
|
|
||||||
# If db_inc_retry_interval is set, the maximum seconds between retries
|
|
||||||
# of a database operation. (integer value)
|
|
||||||
#db_max_retry_interval = 10
|
|
||||||
|
|
||||||
# Enable the experimental use of database reconnect on connection
|
|
||||||
# lost. (boolean value)
|
|
||||||
#use_db_reconnect = false
|
|
||||||
|
|
||||||
# Interval between retries of opening a SQL connection. (integer
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_retry_interval
|
|
||||||
# Deprecated group/name - [DATABASE]/reconnect_interval
|
|
||||||
#retry_interval = 10
|
|
||||||
|
|
||||||
# The file name to use with SQLite. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sqlite_db
|
|
||||||
#sqlite_db = oslo.sqlite
|
|
||||||
|
|
||||||
# Maximum retries in case of connection error or deadlock error before
|
|
||||||
# error is raised. Set to -1 to specify an infinite retry count.
|
|
||||||
# (integer value)
|
|
||||||
#db_max_retries = 20
|
|
||||||
|
|
||||||
# If True, increases the interval between retries of a database
|
|
||||||
# operation up to db_max_retry_interval. (boolean value)
|
|
||||||
#db_inc_retry_interval = true
|
|
||||||
|
|
||||||
# The SQLAlchemy connection string to use to connect to the slave
|
|
||||||
# database. (string value)
|
|
||||||
#slave_connection = <None>
|
|
||||||
|
|
||||||
# Maximum number of SQL connections to keep open in a pool. (integer
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_max_pool_size
|
|
||||||
#max_pool_size = <None>
|
|
||||||
|
|
||||||
# Maximum number of database connection retries during startup. Set to
|
|
||||||
# -1 to specify an infinite retry count. (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_max_retries
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_max_retries
|
|
||||||
#max_retries = 10
|
|
||||||
|
|
||||||
# Minimum number of SQL connections to keep open in a pool. (integer
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
|
|
||||||
# Deprecated group/name - [DATABASE]/sql_min_pool_size
|
|
||||||
#min_pool_size = 1
|
|
||||||
|
|
||||||
# Verbosity of SQL debugging information: 0=None, 100=Everything.
|
|
||||||
# (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_connection_debug
|
|
||||||
#connection_debug = 0
|
|
||||||
|
|
||||||
# If set, use this value for max_overflow with SQLAlchemy. (integer
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/sql_max_overflow
|
|
||||||
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
|
|
||||||
#max_overflow = <None>
|
|
||||||
|
|
||||||
# The SQL mode to be used for MySQL sessions. This option, including
|
|
||||||
# the default, overrides any server-set SQL mode. To use whatever SQL
|
|
||||||
# mode is set by the server configuration, set this to no value.
|
|
||||||
# Example: mysql_sql_mode= (string value)
|
|
||||||
#mysql_sql_mode = TRADITIONAL
|
|
||||||
|
|
||||||
# The back end to use for the database. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/db_backend
|
|
||||||
#backend = sqlalchemy
|
|
||||||
|
|
||||||
|
|
||||||
[keystone_authtoken]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From keystonemiddleware.auth_token
|
|
||||||
#
|
|
||||||
|
|
||||||
# A PEM encoded Certificate Authority to use when verifying HTTPs
|
|
||||||
# connections. Defaults to system CAs. (string value)
|
|
||||||
#cafile = <None>
|
|
||||||
|
|
||||||
# (Optional) Indicate whether to set the X-Service-Catalog header. If
|
|
||||||
# False, middleware will not ask for service catalog on token
|
|
||||||
# validation and will not set the X-Service-Catalog header. (boolean
|
|
||||||
# value)
|
|
||||||
#include_service_catalog = true
|
|
||||||
|
|
||||||
# (Optional) If defined, indicate whether token data should be
|
|
||||||
# authenticated or authenticated and encrypted. If MAC, token data is
|
|
||||||
# authenticated (with HMAC) in the cache. If ENCRYPT, token data is
|
|
||||||
# encrypted and authenticated in the cache. If the value is not one of
|
|
||||||
# these options or empty, auth_token will raise an exception on
|
|
||||||
# initialization. (string value)
|
|
||||||
# Allowed values: None, MAC, ENCRYPT
|
|
||||||
#memcache_security_strategy = None
|
|
||||||
|
|
||||||
# Verify HTTPS connections. (boolean value)
|
|
||||||
#insecure = false
|
|
||||||
|
|
||||||
# Used to control the use and type of token binding. Can be set to:
|
|
||||||
# "disabled" to not check token binding. "permissive" (default) to
|
|
||||||
# validate binding information if the bind type is of a form known to
|
|
||||||
# the server and ignore it if not. "strict" like "permissive" but if
|
|
||||||
# the bind type is unknown the token will be rejected. "required" any
|
|
||||||
# form of token binding is needed to be allowed. Finally the name of a
|
|
||||||
# binding method that must be present in tokens. (string value)
|
|
||||||
#enforce_token_bind = permissive
|
|
||||||
|
|
||||||
# The region in which the identity server can be found. (string value)
|
|
||||||
#region_name = <None>
|
|
||||||
|
|
||||||
# If true, the revocation list will be checked for cached tokens. This
|
|
||||||
# requires that PKI tokens are configured on the identity server.
|
|
||||||
# (boolean value)
|
|
||||||
#check_revocations_for_cached = false
|
|
||||||
|
|
||||||
# Request timeout value for communicating with Identity API server.
|
|
||||||
# (integer value)
|
|
||||||
#http_connect_timeout = <None>
|
|
||||||
|
|
||||||
# Directory used to cache files related to PKI tokens. (string value)
|
|
||||||
#signing_dir = <None>
|
|
||||||
|
|
||||||
# Hash algorithms to use for hashing PKI tokens. This may be a single
|
|
||||||
# algorithm or multiple. The algorithms are those supported by Python
|
|
||||||
# standard hashlib.new(). The hashes will be tried in the order given,
|
|
||||||
# so put the preferred one first for performance. The result of the
|
|
||||||
# first hash will be stored in the cache. This will typically be set
|
|
||||||
# to multiple values only while migrating from a less secure algorithm
|
|
||||||
# to a more secure one. Once all the old tokens are expired this
|
|
||||||
# option should be set to a single value for better performance. (list
|
|
||||||
# value)
|
|
||||||
#hash_algorithms = md5
|
|
||||||
|
|
||||||
# Optionally specify a list of memcached server(s) to use for caching.
|
|
||||||
# If left undefined, tokens will instead be cached in-process. (list
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/memcache_servers
|
|
||||||
#memcached_servers = <None>
|
|
||||||
|
|
||||||
# Required if identity server requires client certificate (string
|
|
||||||
# value)
|
|
||||||
#certfile = <None>
|
|
||||||
|
|
||||||
# Authentication type to load (unknown value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/auth_plugin
|
|
||||||
#auth_type = <None>
|
|
||||||
|
|
||||||
# Config Section from which to load plugin specific options (unknown
|
|
||||||
# value)
|
|
||||||
#auth_section = <None>
|
|
||||||
|
|
||||||
# In order to prevent excessive effort spent validating tokens, the
|
|
||||||
# middleware caches previously-seen tokens for a configurable duration
|
|
||||||
# (in seconds). Set to -1 to disable caching completely. (integer
|
|
||||||
# value)
|
|
||||||
#token_cache_time = 300
|
|
||||||
|
|
||||||
# API version of the admin Identity API endpoint. (string value)
|
|
||||||
#auth_version = <None>
|
|
||||||
|
|
||||||
# Determines the frequency at which the list of revoked tokens is
|
|
||||||
# retrieved from the Identity service (in seconds). A high number of
|
|
||||||
# revocation events combined with a low cache duration may
|
|
||||||
# significantly reduce performance. (integer value)
|
|
||||||
#revocation_cache_time = 10
|
|
||||||
|
|
||||||
# Complete public Identity API endpoint. (string value)
|
|
||||||
#auth_uri = <None>
|
|
||||||
|
|
||||||
# (Optional) Socket timeout in seconds for communicating with a
|
|
||||||
# memcached server. (integer value)
|
|
||||||
#memcache_pool_socket_timeout = 3
|
|
||||||
|
|
||||||
# (Optional, mandatory if memcache_security_strategy is defined) This
|
|
||||||
# string is used for key derivation. (string value)
|
|
||||||
#memcache_secret_key = <None>
|
|
||||||
|
|
||||||
# Do not handle authorization requests within the middleware, but
|
|
||||||
# delegate the authorization decision to downstream WSGI components.
|
|
||||||
# (boolean value)
|
|
||||||
#delay_auth_decision = false
|
|
||||||
|
|
||||||
# (Optional) Use the advanced (eventlet safe) memcached client pool.
|
|
||||||
# The advanced pool will only work under python 2.x. (boolean value)
|
|
||||||
#memcache_use_advanced_pool = false
|
|
||||||
|
|
||||||
# (Optional) Maximum total number of open connections to every
|
|
||||||
# memcached server. (integer value)
|
|
||||||
#memcache_pool_maxsize = 10
|
|
||||||
|
|
||||||
# How many times are we trying to reconnect when communicating with
|
|
||||||
# Identity API Server. (integer value)
|
|
||||||
#http_request_max_retries = 3
|
|
||||||
|
|
||||||
# (Optional) Number of seconds memcached server is considered dead
|
|
||||||
# before it is tried again. (integer value)
|
|
||||||
#memcache_pool_dead_retry = 300
|
|
||||||
|
|
||||||
# (Optional) Number of seconds a connection to memcached is held
|
|
||||||
# unused in the pool before it is closed. (integer value)
|
|
||||||
#memcache_pool_unused_timeout = 60
|
|
||||||
|
|
||||||
# (Optional) Number of seconds that an operation will wait to get a
|
|
||||||
# memcached client connection from the pool. (integer value)
|
|
||||||
#memcache_pool_conn_get_timeout = 10
|
|
||||||
|
|
||||||
# Env key for the swift cache. (string value)
|
|
||||||
#cache = <None>
|
|
||||||
|
|
||||||
# Required if identity server requires client certificate (string
|
|
||||||
# value)
|
|
||||||
#keyfile = <None>
|
|
||||||
|
|
||||||
|
|
||||||
[matchmaker_redis]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.messaging
|
|
||||||
#
|
|
||||||
|
|
||||||
# Password for Redis server (optional). (string value)
|
|
||||||
#password =
|
|
||||||
|
|
||||||
# Host to locate redis. (string value)
|
|
||||||
#host = 127.0.0.1
|
|
||||||
|
|
||||||
# Use this port to connect to redis host. (port value)
|
|
||||||
# Minimum value: 1
|
|
||||||
# Maximum value: 65535
|
|
||||||
#port = 6379
|
|
||||||
|
|
||||||
|
|
||||||
[oslo_messaging_amqp]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.messaging
|
|
||||||
#
|
|
||||||
|
|
||||||
# Timeout for inactive connections (in seconds) (integer value)
|
|
||||||
# Deprecated group/name - [amqp1]/idle_timeout
|
|
||||||
#idle_timeout = 0
|
|
||||||
|
|
||||||
# Password for message broker authentication (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/password
|
|
||||||
#password =
|
|
||||||
|
|
||||||
# Identifying certificate PEM file to present to clients (string
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [amqp1]/ssl_cert_file
|
|
||||||
#ssl_cert_file =
|
|
||||||
|
|
||||||
# Private key PEM file used to sign cert_file certificate (string
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [amqp1]/ssl_key_file
|
|
||||||
#ssl_key_file =
|
|
||||||
|
|
||||||
# address prefix when sending to any server in group (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/group_request_prefix
|
|
||||||
#group_request_prefix = unicast
|
|
||||||
|
|
||||||
# Path to directory that contains the SASL configuration (string
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [amqp1]/sasl_config_dir
|
|
||||||
#sasl_config_dir =
|
|
||||||
|
|
||||||
# Debug: dump AMQP frames to stdout (boolean value)
|
|
||||||
# Deprecated group/name - [amqp1]/trace
|
|
||||||
#trace = false
|
|
||||||
|
|
||||||
# Password for decrypting ssl_key_file (if encrypted) (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/ssl_key_password
|
|
||||||
#ssl_key_password = <None>
|
|
||||||
|
|
||||||
# address prefix used when sending to a specific server (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/server_request_prefix
|
|
||||||
#server_request_prefix = exclusive
|
|
||||||
|
|
||||||
# Name of configuration file (without .conf suffix) (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/sasl_config_name
|
|
||||||
#sasl_config_name =
|
|
||||||
|
|
||||||
# Name for the AMQP container (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/container_name
|
|
||||||
#container_name = <None>
|
|
||||||
|
|
||||||
# Accept clients using either SSL or plain TCP (boolean value)
|
|
||||||
# Deprecated group/name - [amqp1]/allow_insecure_clients
|
|
||||||
#allow_insecure_clients = false
|
|
||||||
|
|
||||||
# CA certificate PEM file to verify server certificate (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/ssl_ca_file
|
|
||||||
#ssl_ca_file =
|
|
||||||
|
|
||||||
# User name for message broker authentication (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/username
|
|
||||||
#username =
|
|
||||||
|
|
||||||
# address prefix used when broadcasting to all servers (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/broadcast_prefix
|
|
||||||
#broadcast_prefix = broadcast
|
|
||||||
|
|
||||||
# Space separated list of acceptable SASL mechanisms (string value)
|
|
||||||
# Deprecated group/name - [amqp1]/sasl_mechanisms
|
|
||||||
#sasl_mechanisms =
|
|
||||||
|
|
||||||
|
|
||||||
[oslo_messaging_rabbit]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From oslo.messaging
|
|
||||||
#
|
|
||||||
|
|
||||||
# The RabbitMQ password. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_password
|
|
||||||
#rabbit_password = guest
|
|
||||||
|
|
||||||
# SSL cert file (valid only if SSL enabled). (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
|
|
||||||
#kombu_ssl_certfile =
|
|
||||||
|
|
||||||
# The RabbitMQ login method. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_login_method
|
|
||||||
#rabbit_login_method = AMQPLAIN
|
|
||||||
|
|
||||||
# How long to backoff for between retries when connecting to RabbitMQ.
|
|
||||||
# (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
|
|
||||||
#rabbit_retry_backoff = 2
|
|
||||||
|
|
||||||
# SSL certification authority file (valid only if SSL enabled).
|
|
||||||
# (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
|
|
||||||
#kombu_ssl_ca_certs =
|
|
||||||
|
|
||||||
# The RabbitMQ virtual host. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
|
|
||||||
#rabbit_virtual_host = /
|
|
||||||
|
|
||||||
# How long to wait before reconnecting in response to an AMQP consumer
|
|
||||||
# cancel notification. (floating point value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
|
|
||||||
#kombu_reconnect_delay = 1.0
|
|
||||||
|
|
||||||
# How frequently to retry connecting with RabbitMQ. (integer value)
|
|
||||||
#rabbit_retry_interval = 1
|
|
||||||
|
|
||||||
# How long to wait a missing client beforce abandoning to send it its
|
|
||||||
# replies. This value should not be longer than rpc_response_timeout.
|
|
||||||
# (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
|
|
||||||
#kombu_missing_consumer_retry_timeout = 60
|
|
||||||
|
|
||||||
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
|
|
||||||
# (boolean value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/fake_rabbit
|
|
||||||
#fake_rabbit = false
|
|
||||||
|
|
||||||
# Determines how the next RabbitMQ node is chosen in case the one we
|
|
||||||
# are currently connected to becomes unavailable. Takes effect only if
|
|
||||||
# more than one RabbitMQ node is provided in config. (string value)
|
|
||||||
# Allowed values: round-robin, shuffle
|
|
||||||
#kombu_failover_strategy = round-robin
|
|
||||||
|
|
||||||
# How often times during the heartbeat_timeout_threshold we check the
|
|
||||||
# heartbeat. (integer value)
|
|
||||||
#heartbeat_rate = 2
|
|
||||||
|
|
||||||
# The RabbitMQ broker address where a single node is used. (string
|
|
||||||
# value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_host
|
|
||||||
#rabbit_host = localhost
|
|
||||||
|
|
||||||
# The RabbitMQ broker port where a single node is used. (port value)
|
|
||||||
# Minimum value: 1
|
|
||||||
# Maximum value: 65535
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_port
|
|
||||||
#rabbit_port = 5672
|
|
||||||
|
|
||||||
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this
|
|
||||||
# option, you must wipe the RabbitMQ database. (boolean value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
|
|
||||||
#rabbit_ha_queues = false
|
|
||||||
|
|
||||||
# Use durable queues in AMQP. (boolean value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/amqp_durable_queues
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
|
|
||||||
#amqp_durable_queues = false
|
|
||||||
|
|
||||||
# Maximum number of RabbitMQ connection retries. Default is 0
|
|
||||||
# (infinite retry count). (integer value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_max_retries
|
|
||||||
#rabbit_max_retries = 0
|
|
||||||
|
|
||||||
# RabbitMQ HA cluster host:port pairs. (list value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_hosts
|
|
||||||
#rabbit_hosts = $rabbit_host:$rabbit_port
|
|
||||||
|
|
||||||
# Auto-delete queues in AMQP. (boolean value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
|
|
||||||
#amqp_auto_delete = false
|
|
||||||
|
|
||||||
# Number of seconds after which the Rabbit broker is considered down
|
|
||||||
# if heartbeat's keep-alive fails (0 disable the heartbeat).
|
|
||||||
# EXPERIMENTAL (integer value)
|
|
||||||
#heartbeat_timeout_threshold = 60
|
|
||||||
|
|
||||||
# Connect over SSL for RabbitMQ. (boolean value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
|
|
||||||
#rabbit_use_ssl = false
|
|
||||||
|
|
||||||
# SSL version to use (valid only if SSL enabled). Valid values are
|
|
||||||
# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
|
|
||||||
# available on some distributions. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/kombu_ssl_version
|
|
||||||
#kombu_ssl_version =
|
|
||||||
|
|
||||||
# The RabbitMQ userid. (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/rabbit_userid
|
|
||||||
#rabbit_userid = guest
|
|
||||||
|
|
||||||
# SSL key file (valid only if SSL enabled). (string value)
|
|
||||||
# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
|
|
||||||
#kombu_ssl_keyfile =
|
|
||||||
|
|
||||||
|
|
||||||
[watcher_applier]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From watcher
|
|
||||||
#
|
|
||||||
|
|
||||||
# Number of workers for applier, default value is 1. (integer value)
|
|
||||||
# Minimum value: 1
|
|
||||||
#workers = 1
|
|
||||||
|
|
||||||
# Select the engine to use to execute the workflow (string value)
|
|
||||||
#workflow_engine = taskflow
|
|
||||||
|
|
||||||
# The topic name used forcontrol events, this topic used for rpc call
|
|
||||||
# (string value)
|
|
||||||
#topic_control = watcher.applier.control
|
|
||||||
|
|
||||||
# The identifier used by watcher module on the message broker (string
|
|
||||||
# value)
|
|
||||||
#publisher_id = watcher.applier.api
|
|
||||||
|
|
||||||
# The topic name used for status events, this topic is used so as to
|
|
||||||
# notifythe others components of the system (string value)
|
|
||||||
#topic_status = watcher.applier.status
|
|
||||||
|
|
||||||
|
|
||||||
[watcher_decision_engine]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From watcher
|
|
||||||
#
|
|
||||||
|
|
||||||
# The identifier used by watcher module on the message broker (string
|
|
||||||
# value)
|
|
||||||
#publisher_id = watcher.decision.api
|
|
||||||
|
|
||||||
# The topic name used forcontrol events, this topic used for rpc call
|
|
||||||
# (string value)
|
|
||||||
#topic_control = watcher.decision.control
|
|
||||||
|
|
||||||
# The maximum number of threads that can be used to execute strategies
|
|
||||||
# (integer value)
|
|
||||||
#max_workers = 2
|
|
||||||
|
|
||||||
# The topic name used for status events, this topic is used so as to
|
|
||||||
# notifythe others components of the system (string value)
|
|
||||||
#topic_status = watcher.decision.status
|
|
||||||
|
|
||||||
|
|
||||||
[watcher_goals]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From watcher
|
|
||||||
#
|
|
||||||
|
|
||||||
# Goals used for the optimization. Maps each goal to an associated
|
|
||||||
# strategy (for example: BASIC_CONSOLIDATION:basic,
|
|
||||||
# MY_GOAL:my_strategy_1) (dict value)
|
|
||||||
#goals = DUMMY:dummy
|
|
||||||
|
|
||||||
|
|
||||||
[watcher_planner]
|
|
||||||
|
|
||||||
#
|
|
||||||
# From watcher
|
|
||||||
#
|
|
||||||
|
|
||||||
# The selected planner used to schedule the actions (string value)
|
|
||||||
#planner = default
|
|
||||||
@@ -2,29 +2,33 @@
|
|||||||
# of appearance. Changing the order has an impact on the overall integration
|
# of appearance. Changing the order has an impact on the overall integration
|
||||||
# process, which may cause wedges in the gate later.
|
# process, which may cause wedges in the gate later.
|
||||||
|
|
||||||
enum34;python_version=='2.7' or python_version=='2.6'
|
enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
|
||||||
jsonpatch>=1.1
|
jsonpatch>=1.1 # BSD
|
||||||
keystonemiddleware>=2.0.0,!=2.4.0
|
keystoneauth1>=2.1.0 # Apache-2.0
|
||||||
oslo.config>=2.3.0 # Apache-2.0
|
keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0
|
||||||
oslo.db>=2.4.1 # Apache-2.0
|
oslo.config>=3.7.0 # Apache-2.0
|
||||||
oslo.i18n>=1.5.0 # Apache-2.0
|
oslo.db>=4.1.0 # Apache-2.0
|
||||||
oslo.log>=1.8.0 # Apache-2.0
|
oslo.i18n>=2.1.0 # Apache-2.0
|
||||||
oslo.messaging>=1.16.0,!=1.17.0,!=1.17.1,!=2.6.0,!=2.6.1 # Apache-2.0
|
oslo.log>=1.14.0 # Apache-2.0
|
||||||
oslo.policy>=0.5.0 # Apache-2.0
|
oslo.messaging>=4.0.0 # Apache-2.0
|
||||||
oslo.service>=0.7.0 # Apache-2.0
|
oslo.policy>=0.5.0 # Apache-2.0
|
||||||
oslo.utils>=2.0.0,!=2.6.0 # Apache-2.0
|
oslo.service>=1.0.0 # Apache-2.0
|
||||||
PasteDeploy>=1.5.0
|
oslo.utils>=3.5.0 # Apache-2.0
|
||||||
pbr>=1.6
|
PasteDeploy>=1.5.0 # MIT
|
||||||
pecan>=1.0.0
|
pbr>=1.6 # Apache-2.0
|
||||||
python-ceilometerclient>=1.5.0
|
pecan>=1.0.0 # BSD
|
||||||
python-cinderclient>=1.3.1
|
PrettyTable<0.8,>=0.7 # BSD
|
||||||
python-glanceclient>=0.18.0
|
voluptuous>=0.8.6 # BSD License
|
||||||
python-keystoneclient>=1.6.0,!=1.8.0
|
python-ceilometerclient>=2.2.1 # Apache-2.0
|
||||||
python-neutronclient>=2.6.0
|
python-cinderclient>=1.3.1 # Apache-2.0
|
||||||
python-novaclient>=2.28.1,!=2.33.0
|
python-glanceclient>=2.0.0 # Apache-2.0
|
||||||
python-openstackclient>=1.5.0
|
python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0
|
||||||
six>=1.9.0
|
python-neutronclient!=4.1.0,>=2.6.0 # Apache-2.0
|
||||||
SQLAlchemy>=0.9.9,<1.1.0
|
python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
|
||||||
stevedore>=1.5.0 # Apache-2.0
|
python-openstackclient>=2.1.0 # Apache-2.0
|
||||||
taskflow>=1.25.0 # Apache-2.0
|
six>=1.9.0 # MIT
|
||||||
WSME>=0.7
|
SQLAlchemy<1.1.0,>=1.0.10 # MIT
|
||||||
|
stevedore>=1.5.0 # Apache-2.0
|
||||||
|
taskflow>=1.26.0 # Apache-2.0
|
||||||
|
WebOb>=1.2.3 # MIT
|
||||||
|
WSME>=0.8 # MIT
|
||||||
|
|||||||
@@ -63,7 +63,8 @@ watcher_planners =
|
|||||||
default = watcher.decision_engine.planner.default:DefaultPlanner
|
default = watcher.decision_engine.planner.default:DefaultPlanner
|
||||||
|
|
||||||
[pbr]
|
[pbr]
|
||||||
autodoc_index_modules = True
|
warnerrors = true
|
||||||
|
autodoc_index_modules = true
|
||||||
autodoc_exclude_modules =
|
autodoc_exclude_modules =
|
||||||
watcher.db.sqlalchemy.alembic.env
|
watcher.db.sqlalchemy.alembic.env
|
||||||
watcher.db.sqlalchemy.alembic.versions.*
|
watcher.db.sqlalchemy.alembic.versions.*
|
||||||
|
|||||||
3
setup.py
Executable file → Normal file
3
setup.py
Executable file → Normal file
@@ -1,4 +1,3 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
@@ -26,5 +25,5 @@ except ImportError:
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
setuptools.setup(
|
setuptools.setup(
|
||||||
setup_requires=['pbr'],
|
setup_requires=['pbr>=1.8'],
|
||||||
pbr=True)
|
pbr=True)
|
||||||
|
|||||||
@@ -2,23 +2,20 @@
|
|||||||
# of appearance. Changing the order has an impact on the overall integration
|
# of appearance. Changing the order has an impact on the overall integration
|
||||||
# process, which may cause wedges in the gate later.
|
# process, which may cause wedges in the gate later.
|
||||||
|
|
||||||
coverage>=3.6
|
coverage>=3.6 # Apache-2.0
|
||||||
discover
|
discover # BSD
|
||||||
doc8 # Apache-2.0
|
doc8 # Apache-2.0
|
||||||
hacking>=0.10.2,<0.11
|
freezegun # Apache-2.0
|
||||||
mock>=1.2
|
hacking<0.11,>=0.10.2
|
||||||
oslotest>=1.10.0 # Apache-2.0
|
mock>=1.2 # BSD
|
||||||
os-testr>=0.1.0
|
oslotest>=1.10.0 # Apache-2.0
|
||||||
python-subunit>=0.0.18
|
os-testr>=0.4.1 # Apache-2.0
|
||||||
testrepository>=0.0.18
|
python-subunit>=0.0.18 # Apache-2.0/BSD
|
||||||
testscenarios>=0.4
|
testrepository>=0.0.18 # Apache-2.0/BSD
|
||||||
testtools>=1.4.0
|
testscenarios>=0.4 # Apache-2.0/BSD
|
||||||
|
testtools>=1.4.0 # MIT
|
||||||
|
|
||||||
# Doc requirements
|
# Doc requirements
|
||||||
oslosphinx>=2.5.0 # Apache-2.0
|
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
|
||||||
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
|
sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
|
||||||
sphinxcontrib-pecanwsme>=0.8
|
sphinxcontrib-pecanwsme>=0.8 # Apache-2.0
|
||||||
|
|
||||||
# For PyPI distribution
|
|
||||||
twine
|
|
||||||
|
|
||||||
|
|||||||
18
tox.ini
18
tox.ini
@@ -26,7 +26,7 @@ setenv = PYTHONHASHSEED=0
|
|||||||
commands = {posargs}
|
commands = {posargs}
|
||||||
|
|
||||||
[testenv:cover]
|
[testenv:cover]
|
||||||
commands = python setup.py testr --coverage --omit="watcher/tests/*" --testr-args='{posargs}'
|
commands = python setup.py testr --coverage --testr-args='{posargs}'
|
||||||
|
|
||||||
[testenv:docs]
|
[testenv:docs]
|
||||||
setenv = PYTHONHASHSEED=0
|
setenv = PYTHONHASHSEED=0
|
||||||
@@ -35,17 +35,12 @@ commands =
|
|||||||
python setup.py build_sphinx
|
python setup.py build_sphinx
|
||||||
|
|
||||||
[testenv:debug]
|
[testenv:debug]
|
||||||
commands = oslo_debug_helper {posargs}
|
commands = oslo_debug_helper -t watcher/tests {posargs}
|
||||||
|
|
||||||
[testenv:config]
|
[testenv:config]
|
||||||
sitepackages = False
|
sitepackages = False
|
||||||
commands =
|
commands =
|
||||||
oslo-config-generator --namespace watcher \
|
oslo-config-generator --config-file etc/watcher/watcher-config-generator.conf
|
||||||
--namespace keystonemiddleware.auth_token \
|
|
||||||
--namespace oslo.log \
|
|
||||||
--namespace oslo.db \
|
|
||||||
--namespace oslo.messaging \
|
|
||||||
--output-file etc/watcher/watcher.conf.sample
|
|
||||||
|
|
||||||
[flake8]
|
[flake8]
|
||||||
show-source=True
|
show-source=True
|
||||||
@@ -53,11 +48,6 @@ ignore=
|
|||||||
builtins= _
|
builtins= _
|
||||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/
|
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/
|
||||||
|
|
||||||
[testenv:pypi]
|
|
||||||
commands =
|
|
||||||
python setup.py sdist bdist_wheel
|
|
||||||
twine upload --config-file .pypirc {posargs} dist/*
|
|
||||||
|
|
||||||
[testenv:wheel]
|
[testenv:wheel]
|
||||||
commands = python setup.py bdist_wheel
|
commands = python setup.py bdist_wheel
|
||||||
|
|
||||||
@@ -67,4 +57,4 @@ import_exceptions = watcher._i18n
|
|||||||
[doc8]
|
[doc8]
|
||||||
extension=.rst
|
extension=.rst
|
||||||
# todo: stop ignoring doc/source/man when https://bugs.launchpad.net/doc8/+bug/1502391 is fixed
|
# todo: stop ignoring doc/source/man when https://bugs.launchpad.net/doc8/+bug/1502391 is fixed
|
||||||
ignore-path=doc/source/image_src,doc/source/man
|
ignore-path=doc/source/image_src,doc/source/man,doc/source/api
|
||||||
|
|||||||
@@ -129,13 +129,10 @@ class Action(base.APIBase):
|
|||||||
alarm = types.uuid
|
alarm = types.uuid
|
||||||
"""An alarm UUID related to this action"""
|
"""An alarm UUID related to this action"""
|
||||||
|
|
||||||
applies_to = wtypes.text
|
|
||||||
"""Applies to"""
|
|
||||||
|
|
||||||
action_type = wtypes.text
|
action_type = wtypes.text
|
||||||
"""Action type"""
|
"""Action type"""
|
||||||
|
|
||||||
input_parameters = wtypes.DictType(wtypes.text, wtypes.text)
|
input_parameters = types.jsontype
|
||||||
"""One or more key/value pairs """
|
"""One or more key/value pairs """
|
||||||
|
|
||||||
next_uuid = wsme.wsproperty(types.uuid, _get_next_uuid,
|
next_uuid = wsme.wsproperty(types.uuid, _get_next_uuid,
|
||||||
@@ -257,7 +254,7 @@ class ActionsController(rest.RestController):
|
|||||||
resource_url=None,
|
resource_url=None,
|
||||||
action_plan_uuid=None, audit_uuid=None):
|
action_plan_uuid=None, audit_uuid=None):
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
sort_dir = api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
marker_obj = None
|
marker_obj = None
|
||||||
if marker:
|
if marker:
|
||||||
@@ -288,10 +285,10 @@ class ActionsController(rest.RestController):
|
|||||||
sort_key=sort_key,
|
sort_key=sort_key,
|
||||||
sort_dir=sort_dir)
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(ActionCollection, types.uuid, types.uuid,
|
@wsme_pecan.wsexpose(ActionCollection, types.uuid, int,
|
||||||
int, wtypes.text, wtypes.text, types.uuid,
|
wtypes.text, wtypes.text, types.uuid,
|
||||||
types.uuid)
|
types.uuid)
|
||||||
def get_all(self, action_uuid=None, marker=None, limit=None,
|
def get_all(self, marker=None, limit=None,
|
||||||
sort_key='id', sort_dir='asc', action_plan_uuid=None,
|
sort_key='id', sort_dir='asc', action_plan_uuid=None,
|
||||||
audit_uuid=None):
|
audit_uuid=None):
|
||||||
"""Retrieve a list of actions.
|
"""Retrieve a list of actions.
|
||||||
@@ -312,16 +309,14 @@ class ActionsController(rest.RestController):
|
|||||||
marker, limit, sort_key, sort_dir,
|
marker, limit, sort_key, sort_dir,
|
||||||
action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid)
|
action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid)
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(ActionCollection, types.uuid,
|
@wsme_pecan.wsexpose(ActionCollection, types.uuid, int,
|
||||||
types.uuid, int, wtypes.text, wtypes.text,
|
wtypes.text, wtypes.text, types.uuid,
|
||||||
types.uuid, types.uuid)
|
types.uuid)
|
||||||
def detail(self, action_uuid=None, marker=None, limit=None,
|
def detail(self, marker=None, limit=None,
|
||||||
sort_key='id', sort_dir='asc', action_plan_uuid=None,
|
sort_key='id', sort_dir='asc', action_plan_uuid=None,
|
||||||
audit_uuid=None):
|
audit_uuid=None):
|
||||||
"""Retrieve a list of actions with detail.
|
"""Retrieve a list of actions with detail.
|
||||||
|
|
||||||
:param action_uuid: UUID of a action, to get only actions for that
|
|
||||||
action.
|
|
||||||
:param marker: pagination marker for large data sets.
|
:param marker: pagination marker for large data sets.
|
||||||
:param limit: maximum number of resources to return in a single result.
|
:param limit: maximum number of resources to return in a single result.
|
||||||
:param sort_key: column to sort results by. Default: id.
|
:param sort_key: column to sort results by. Default: id.
|
||||||
|
|||||||
@@ -181,7 +181,6 @@ class ActionPlan(base.APIBase):
|
|||||||
|
|
||||||
self.fields = []
|
self.fields = []
|
||||||
fields = list(objects.ActionPlan.fields)
|
fields = list(objects.ActionPlan.fields)
|
||||||
fields.append('audit_uuid')
|
|
||||||
for field in fields:
|
for field in fields:
|
||||||
# Skip fields we do not expose.
|
# Skip fields we do not expose.
|
||||||
if not hasattr(self, field):
|
if not hasattr(self, field):
|
||||||
@@ -189,14 +188,19 @@ class ActionPlan(base.APIBase):
|
|||||||
self.fields.append(field)
|
self.fields.append(field)
|
||||||
setattr(self, field, kwargs.get(field, wtypes.Unset))
|
setattr(self, field, kwargs.get(field, wtypes.Unset))
|
||||||
|
|
||||||
self.fields.append('audit_id')
|
self.fields.append('audit_uuid')
|
||||||
|
self.fields.append('first_action_uuid')
|
||||||
|
|
||||||
setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset))
|
setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset))
|
||||||
|
setattr(self, 'first_action_uuid',
|
||||||
|
kwargs.get('first_action_id', wtypes.Unset))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _convert_with_links(action_plan, url, expand=True):
|
def _convert_with_links(action_plan, url, expand=True):
|
||||||
if not expand:
|
if not expand:
|
||||||
action_plan.unset_fields_except(['uuid', 'state', 'updated_at',
|
action_plan.unset_fields_except(
|
||||||
'audit_uuid'])
|
['uuid', 'state', 'updated_at',
|
||||||
|
'audit_uuid', 'first_action_uuid'])
|
||||||
|
|
||||||
action_plan.links = [link.Link.make_link(
|
action_plan.links = [link.Link.make_link(
|
||||||
'self', url,
|
'self', url,
|
||||||
@@ -279,7 +283,7 @@ class ActionPlansController(rest.RestController):
|
|||||||
resource_url=None, audit_uuid=None):
|
resource_url=None, audit_uuid=None):
|
||||||
|
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
sort_dir = api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
marker_obj = None
|
marker_obj = None
|
||||||
if marker:
|
if marker:
|
||||||
@@ -402,12 +406,12 @@ class ActionPlansController(rest.RestController):
|
|||||||
# transitions that are allowed via PATCH
|
# transitions that are allowed via PATCH
|
||||||
allowed_patch_transitions = [
|
allowed_patch_transitions = [
|
||||||
(ap_objects.State.RECOMMENDED,
|
(ap_objects.State.RECOMMENDED,
|
||||||
ap_objects.State.TRIGGERED),
|
ap_objects.State.PENDING),
|
||||||
(ap_objects.State.RECOMMENDED,
|
(ap_objects.State.RECOMMENDED,
|
||||||
ap_objects.State.CANCELLED),
|
ap_objects.State.CANCELLED),
|
||||||
(ap_objects.State.ONGOING,
|
(ap_objects.State.ONGOING,
|
||||||
ap_objects.State.CANCELLED),
|
ap_objects.State.CANCELLED),
|
||||||
(ap_objects.State.TRIGGERED,
|
(ap_objects.State.PENDING,
|
||||||
ap_objects.State.CANCELLED),
|
ap_objects.State.CANCELLED),
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -423,7 +427,7 @@ class ActionPlansController(rest.RestController):
|
|||||||
initial_state=action_plan_to_update.state,
|
initial_state=action_plan_to_update.state,
|
||||||
new_state=action_plan.state))
|
new_state=action_plan.state))
|
||||||
|
|
||||||
if action_plan.state == ap_objects.State.TRIGGERED:
|
if action_plan.state == ap_objects.State.PENDING:
|
||||||
launch_action_plan = True
|
launch_action_plan = True
|
||||||
|
|
||||||
# Update only the fields that have changed
|
# Update only the fields that have changed
|
||||||
@@ -439,7 +443,7 @@ class ActionPlansController(rest.RestController):
|
|||||||
action_plan_to_update[field] = patch_val
|
action_plan_to_update[field] = patch_val
|
||||||
|
|
||||||
if (field == 'state'
|
if (field == 'state'
|
||||||
and patch_val == objects.action_plan.State.TRIGGERED):
|
and patch_val == objects.action_plan.State.PENDING):
|
||||||
launch_action_plan = True
|
launch_action_plan = True
|
||||||
|
|
||||||
action_plan_to_update.save()
|
action_plan_to_update.save()
|
||||||
|
|||||||
@@ -65,7 +65,7 @@ from watcher.api.controllers.v1 import types
|
|||||||
from watcher.api.controllers.v1 import utils as api_utils
|
from watcher.api.controllers.v1 import utils as api_utils
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import utils
|
from watcher.common import utils
|
||||||
from watcher.decision_engine.rpcapi import DecisionEngineAPI
|
from watcher.decision_engine import rpcapi
|
||||||
from watcher import objects
|
from watcher import objects
|
||||||
|
|
||||||
|
|
||||||
@@ -263,7 +263,7 @@ class AuditsController(rest.RestController):
|
|||||||
sort_key, sort_dir, expand=False,
|
sort_key, sort_dir, expand=False,
|
||||||
resource_url=None, audit_template=None):
|
resource_url=None, audit_template=None):
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
sort_dir = api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
marker_obj = None
|
marker_obj = None
|
||||||
if marker:
|
if marker:
|
||||||
@@ -369,7 +369,7 @@ class AuditsController(rest.RestController):
|
|||||||
|
|
||||||
# trigger decision-engine to run the audit
|
# trigger decision-engine to run the audit
|
||||||
|
|
||||||
dc_client = DecisionEngineAPI()
|
dc_client = rpcapi.DecisionEngineAPI()
|
||||||
dc_client.trigger_audit(context, new_audit.uuid)
|
dc_client.trigger_audit(context, new_audit.uuid)
|
||||||
|
|
||||||
return Audit.convert_with_links(new_audit)
|
return Audit.convert_with_links(new_audit)
|
||||||
|
|||||||
@@ -50,6 +50,7 @@ provided as a list of key-value pairs.
|
|||||||
|
|
||||||
import datetime
|
import datetime
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
import pecan
|
import pecan
|
||||||
from pecan import rest
|
from pecan import rest
|
||||||
import wsme
|
import wsme
|
||||||
@@ -67,11 +68,25 @@ from watcher import objects
|
|||||||
|
|
||||||
|
|
||||||
class AuditTemplatePatchType(types.JsonPatchType):
|
class AuditTemplatePatchType(types.JsonPatchType):
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def mandatory_attrs():
|
def mandatory_attrs():
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def validate(patch):
|
||||||
|
if patch.path == "/goal":
|
||||||
|
AuditTemplatePatchType._validate_goal(patch)
|
||||||
|
return types.JsonPatchType.validate(patch)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _validate_goal(patch):
|
||||||
|
serialized_patch = {'path': patch.path, 'op': patch.op}
|
||||||
|
if patch.value is not wsme.Unset:
|
||||||
|
serialized_patch['value'] = patch.value
|
||||||
|
new_goal = patch.value
|
||||||
|
if new_goal and new_goal not in cfg.CONF.watcher_goals.goals.keys():
|
||||||
|
raise exception.InvalidGoal(goal=new_goal)
|
||||||
|
|
||||||
|
|
||||||
class AuditTemplate(base.APIBase):
|
class AuditTemplate(base.APIBase):
|
||||||
"""API representation of a audit template.
|
"""API representation of a audit template.
|
||||||
@@ -149,13 +164,19 @@ class AuditTemplate(base.APIBase):
|
|||||||
name='My Audit Template',
|
name='My Audit Template',
|
||||||
description='Description of my audit template',
|
description='Description of my audit template',
|
||||||
host_aggregate=5,
|
host_aggregate=5,
|
||||||
goal='SERVERS_CONSOLIDATION',
|
goal='DUMMY',
|
||||||
extra={'automatic': True},
|
extra={'automatic': True},
|
||||||
created_at=datetime.datetime.utcnow(),
|
created_at=datetime.datetime.utcnow(),
|
||||||
deleted_at=None,
|
deleted_at=None,
|
||||||
updated_at=datetime.datetime.utcnow())
|
updated_at=datetime.datetime.utcnow())
|
||||||
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
|
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def validate(audit_template):
|
||||||
|
if audit_template.goal not in cfg.CONF.watcher_goals.goals.keys():
|
||||||
|
raise exception.InvalidGoal(audit_template.goal)
|
||||||
|
return audit_template
|
||||||
|
|
||||||
|
|
||||||
class AuditTemplateCollection(collection.Collection):
|
class AuditTemplateCollection(collection.Collection):
|
||||||
"""API representation of a collection of audit templates."""
|
"""API representation of a collection of audit templates."""
|
||||||
@@ -197,12 +218,13 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
'detail': ['GET'],
|
'detail': ['GET'],
|
||||||
}
|
}
|
||||||
|
|
||||||
def _get_audit_templates_collection(self, marker, limit,
|
def _get_audit_templates_collection(self, filters, marker, limit,
|
||||||
sort_key, sort_dir, expand=False,
|
sort_key, sort_dir, expand=False,
|
||||||
resource_url=None):
|
resource_url=None):
|
||||||
|
api_utils.validate_search_filters(
|
||||||
|
filters, objects.audit_template.AuditTemplate.fields.keys())
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
sort_dir = api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
marker_obj = None
|
marker_obj = None
|
||||||
if marker:
|
if marker:
|
||||||
@@ -212,6 +234,7 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
|
|
||||||
audit_templates = objects.AuditTemplate.list(
|
audit_templates = objects.AuditTemplate.list(
|
||||||
pecan.request.context,
|
pecan.request.context,
|
||||||
|
filters,
|
||||||
limit,
|
limit,
|
||||||
marker_obj, sort_key=sort_key,
|
marker_obj, sort_key=sort_key,
|
||||||
sort_dir=sort_dir)
|
sort_dir=sort_dir)
|
||||||
@@ -223,26 +246,30 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
sort_key=sort_key,
|
sort_key=sort_key,
|
||||||
sort_dir=sort_dir)
|
sort_dir=sort_dir)
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(AuditTemplateCollection, types.uuid, int,
|
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text,
|
||||||
wtypes.text, wtypes.text)
|
types.uuid, int, wtypes.text, wtypes.text)
|
||||||
def get_all(self, marker=None, limit=None,
|
def get_all(self, goal=None, marker=None, limit=None,
|
||||||
sort_key='id', sort_dir='asc'):
|
sort_key='id', sort_dir='asc'):
|
||||||
"""Retrieve a list of audit templates.
|
"""Retrieve a list of audit templates.
|
||||||
|
|
||||||
|
:param goal: goal name to filter by (case sensitive)
|
||||||
:param marker: pagination marker for large data sets.
|
:param marker: pagination marker for large data sets.
|
||||||
:param limit: maximum number of resources to return in a single result.
|
:param limit: maximum number of resources to return in a single result.
|
||||||
:param sort_key: column to sort results by. Default: id.
|
:param sort_key: column to sort results by. Default: id.
|
||||||
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
|
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
|
||||||
"""
|
"""
|
||||||
return self._get_audit_templates_collection(marker, limit, sort_key,
|
filters = api_utils.as_filters_dict(goal=goal)
|
||||||
sort_dir)
|
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(AuditTemplateCollection, types.uuid, int,
|
return self._get_audit_templates_collection(
|
||||||
|
filters, marker, limit, sort_key, sort_dir)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, types.uuid, int,
|
||||||
wtypes.text, wtypes.text)
|
wtypes.text, wtypes.text)
|
||||||
def detail(self, marker=None, limit=None,
|
def detail(self, goal=None, marker=None, limit=None,
|
||||||
sort_key='id', sort_dir='asc'):
|
sort_key='id', sort_dir='asc'):
|
||||||
"""Retrieve a list of audit templates with detail.
|
"""Retrieve a list of audit templates with detail.
|
||||||
|
|
||||||
|
:param goal: goal name to filter by (case sensitive)
|
||||||
:param marker: pagination marker for large data sets.
|
:param marker: pagination marker for large data sets.
|
||||||
:param limit: maximum number of resources to return in a single result.
|
:param limit: maximum number of resources to return in a single result.
|
||||||
:param sort_key: column to sort results by. Default: id.
|
:param sort_key: column to sort results by. Default: id.
|
||||||
@@ -253,9 +280,11 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
if parent != "audit_templates":
|
if parent != "audit_templates":
|
||||||
raise exception.HTTPNotFound
|
raise exception.HTTPNotFound
|
||||||
|
|
||||||
|
filters = api_utils.as_filters_dict(goal=goal)
|
||||||
|
|
||||||
expand = True
|
expand = True
|
||||||
resource_url = '/'.join(['audit_templates', 'detail'])
|
resource_url = '/'.join(['audit_templates', 'detail'])
|
||||||
return self._get_audit_templates_collection(marker, limit,
|
return self._get_audit_templates_collection(filters, marker, limit,
|
||||||
sort_key, sort_dir, expand,
|
sort_key, sort_dir, expand,
|
||||||
resource_url)
|
resource_url)
|
||||||
|
|
||||||
@@ -263,7 +292,7 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
def get_one(self, audit_template):
|
def get_one(self, audit_template):
|
||||||
"""Retrieve information about the given audit template.
|
"""Retrieve information about the given audit template.
|
||||||
|
|
||||||
:param audit template_uuid: UUID or name of an audit template.
|
:param audit audit_template: UUID or name of an audit template.
|
||||||
"""
|
"""
|
||||||
if self.from_audit_templates:
|
if self.from_audit_templates:
|
||||||
raise exception.OperationNotPermitted
|
raise exception.OperationNotPermitted
|
||||||
@@ -279,12 +308,14 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
|
|
||||||
return AuditTemplate.convert_with_links(rpc_audit_template)
|
return AuditTemplate.convert_with_links(rpc_audit_template)
|
||||||
|
|
||||||
|
@wsme.validate(types.uuid, AuditTemplate)
|
||||||
@wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplate, status_code=201)
|
@wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplate, status_code=201)
|
||||||
def post(self, audit_template):
|
def post(self, audit_template):
|
||||||
"""Create a new audit template.
|
"""Create a new audit template.
|
||||||
|
|
||||||
:param audit template: a audit template within the request body.
|
:param audit template: a audit template within the request body.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self.from_audit_templates:
|
if self.from_audit_templates:
|
||||||
raise exception.OperationNotPermitted
|
raise exception.OperationNotPermitted
|
||||||
|
|
||||||
|
|||||||
@@ -159,7 +159,7 @@ class GoalsController(rest.RestController):
|
|||||||
resource_url=None, goal_name=None):
|
resource_url=None, goal_name=None):
|
||||||
|
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
sort_dir = api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
goals = []
|
goals = []
|
||||||
|
|
||||||
|
|||||||
@@ -47,7 +47,15 @@ def validate_sort_dir(sort_dir):
|
|||||||
raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. "
|
raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. "
|
||||||
"Acceptable values are "
|
"Acceptable values are "
|
||||||
"'asc' or 'desc'") % sort_dir)
|
"'asc' or 'desc'") % sort_dir)
|
||||||
return sort_dir
|
|
||||||
|
|
||||||
|
def validate_search_filters(filters, allowed_fields):
|
||||||
|
# Very leightweight validation for now
|
||||||
|
# todo: improve this (e.g. https://www.parse.com/docs/rest/guide/#queries)
|
||||||
|
for filter_name in filters.keys():
|
||||||
|
if filter_name not in allowed_fields:
|
||||||
|
raise wsme.exc.ClientSideError(
|
||||||
|
_("Invalid filter: %s") % filter_name)
|
||||||
|
|
||||||
|
|
||||||
def apply_jsonpatch(doc, patch):
|
def apply_jsonpatch(doc, patch):
|
||||||
@@ -58,3 +66,12 @@ def apply_jsonpatch(doc, patch):
|
|||||||
' the resource is not allowed')
|
' the resource is not allowed')
|
||||||
raise wsme.exc.ClientSideError(msg % p['path'])
|
raise wsme.exc.ClientSideError(msg % p['path'])
|
||||||
return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch))
|
return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch))
|
||||||
|
|
||||||
|
|
||||||
|
def as_filters_dict(**filters):
|
||||||
|
filters_dict = {}
|
||||||
|
for filter_name, filter_value in filters.items():
|
||||||
|
if filter_value:
|
||||||
|
filters_dict[filter_name] = filter_value
|
||||||
|
|
||||||
|
return filters_dict
|
||||||
|
|||||||
@@ -43,8 +43,8 @@ class DefaultActionPlanHandler(base.BaseActionPlanHandler):
|
|||||||
ev.data = {}
|
ev.data = {}
|
||||||
payload = {'action_plan__uuid': uuid,
|
payload = {'action_plan__uuid': uuid,
|
||||||
'action_plan_state': state}
|
'action_plan_state': state}
|
||||||
self.applier_manager.topic_status.publish_event(ev.type.name,
|
self.applier_manager.status_topic_handler.publish_event(
|
||||||
payload)
|
ev.type.name, payload)
|
||||||
|
|
||||||
def execute(self):
|
def execute(self):
|
||||||
try:
|
try:
|
||||||
@@ -52,17 +52,16 @@ class DefaultActionPlanHandler(base.BaseActionPlanHandler):
|
|||||||
self.notify(self.action_plan_uuid,
|
self.notify(self.action_plan_uuid,
|
||||||
event_types.EventTypes.LAUNCH_ACTION_PLAN,
|
event_types.EventTypes.LAUNCH_ACTION_PLAN,
|
||||||
ap_objects.State.ONGOING)
|
ap_objects.State.ONGOING)
|
||||||
applier = default.DefaultApplier(self.applier_manager, self.ctx)
|
applier = default.DefaultApplier(self.ctx, self.applier_manager)
|
||||||
result = applier.execute(self.action_plan_uuid)
|
applier.execute(self.action_plan_uuid)
|
||||||
|
state = ap_objects.State.SUCCEEDED
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception(e)
|
LOG.exception(e)
|
||||||
result = False
|
state = ap_objects.State.FAILED
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
if result is True:
|
|
||||||
status = ap_objects.State.SUCCEEDED
|
|
||||||
else:
|
|
||||||
status = ap_objects.State.FAILED
|
|
||||||
# update state
|
# update state
|
||||||
self.notify(self.action_plan_uuid,
|
self.notify(self.action_plan_uuid,
|
||||||
event_types.EventTypes.LAUNCH_ACTION_PLAN,
|
event_types.EventTypes.LAUNCH_ACTION_PLAN,
|
||||||
status)
|
state)
|
||||||
|
|||||||
@@ -22,12 +22,27 @@ import abc
|
|||||||
|
|
||||||
import six
|
import six
|
||||||
|
|
||||||
|
from watcher.common import clients
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
class BaseAction(object):
|
class BaseAction(object):
|
||||||
def __init__(self):
|
# NOTE(jed) by convention we decided
|
||||||
|
# that the attribute "resource_id" is the unique id of
|
||||||
|
# the resource to which the Action applies to allow us to use it in the
|
||||||
|
# watcher dashboard and will be nested in input_parameters
|
||||||
|
RESOURCE_ID = 'resource_id'
|
||||||
|
|
||||||
|
def __init__(self, osc=None):
|
||||||
|
""":param osc: an OpenStackClients instance"""
|
||||||
self._input_parameters = {}
|
self._input_parameters = {}
|
||||||
self._applies_to = ""
|
self._osc = osc
|
||||||
|
|
||||||
|
@property
|
||||||
|
def osc(self):
|
||||||
|
if not self._osc:
|
||||||
|
self._osc = clients.OpenStackClients()
|
||||||
|
return self._osc
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def input_parameters(self):
|
def input_parameters(self):
|
||||||
@@ -38,25 +53,64 @@ class BaseAction(object):
|
|||||||
self._input_parameters = p
|
self._input_parameters = p
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def applies_to(self):
|
def resource_id(self):
|
||||||
return self._applies_to
|
return self.input_parameters[self.RESOURCE_ID]
|
||||||
|
|
||||||
@applies_to.setter
|
|
||||||
def applies_to(self, a):
|
|
||||||
self._applies_to = a
|
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def execute(self):
|
def execute(self):
|
||||||
|
"""Executes the main logic of the action
|
||||||
|
|
||||||
|
This method can be used to perform an action on a given set of input
|
||||||
|
parameters to accomplish some type of operation. This operation may
|
||||||
|
return a boolean value as a result of its execution. If False, this
|
||||||
|
will be considered as an error and will then trigger the reverting of
|
||||||
|
the actions.
|
||||||
|
|
||||||
|
:returns: A flag indicating whether or not the action succeeded
|
||||||
|
:rtype: bool
|
||||||
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def revert(self):
|
def revert(self):
|
||||||
|
"""Revert this action
|
||||||
|
|
||||||
|
This method should rollback the resource to its initial state in the
|
||||||
|
event of a faulty execution. This happens when the action raised an
|
||||||
|
exception during its :py:meth:`~.BaseAction.execute`.
|
||||||
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def precondition(self):
|
def precondition(self):
|
||||||
|
"""Hook: called before the execution of an action
|
||||||
|
|
||||||
|
This method can be used to perform some initializations or to make
|
||||||
|
some more advanced validation on its input parameters. So if you wish
|
||||||
|
to block its execution based on this factor, `raise` the related
|
||||||
|
exception.
|
||||||
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def postcondition(self):
|
def postcondition(self):
|
||||||
|
"""Hook: called after the execution of an action
|
||||||
|
|
||||||
|
This function is called regardless of whether an action succeded or
|
||||||
|
not. So you can use it to perform cleanup operations.
|
||||||
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@abc.abstractproperty
|
||||||
|
def schema(self):
|
||||||
|
"""Defines a Schema that the input parameters shall comply to
|
||||||
|
|
||||||
|
:returns: A schema declaring the input parameters this action should be
|
||||||
|
provided along with their respective constraints
|
||||||
|
:rtype: :py:class:`voluptuous.Schema` instance
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def validate_parameters(self):
|
||||||
|
self.schema(self.input_parameters)
|
||||||
|
return True
|
||||||
|
|||||||
@@ -16,54 +16,84 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
import six
|
||||||
|
import voluptuous
|
||||||
|
|
||||||
from watcher._i18n import _
|
from watcher._i18n import _
|
||||||
from watcher.applier.actions import base
|
from watcher.applier.actions import base
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import keystone as kclient
|
from watcher.common import nova_helper
|
||||||
from watcher.common import nova as nclient
|
|
||||||
from watcher.decision_engine.model import hypervisor_state as hstate
|
from watcher.decision_engine.model import hypervisor_state as hstate
|
||||||
|
|
||||||
|
|
||||||
class ChangeNovaServiceState(base.BaseAction):
|
class ChangeNovaServiceState(base.BaseAction):
|
||||||
|
"""Disables or enables the nova-compute service, deployed on a host
|
||||||
|
|
||||||
|
By using this action, you will be able to update the state of a
|
||||||
|
nova-compute service. A disabled nova-compute service can not be selected
|
||||||
|
by the nova scheduler for future deployment of server.
|
||||||
|
|
||||||
|
The action schema is::
|
||||||
|
|
||||||
|
schema = Schema({
|
||||||
|
'resource_id': str,
|
||||||
|
'state': str,
|
||||||
|
})
|
||||||
|
|
||||||
|
The `resource_id` references a nova-compute service name (list of available
|
||||||
|
nova-compute services is returned by this command: ``nova service-list
|
||||||
|
--binary nova-compute``).
|
||||||
|
The `state` value should either be `ONLINE` or `OFFLINE`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
STATE = 'state'
|
||||||
|
|
||||||
|
@property
|
||||||
|
def schema(self):
|
||||||
|
return voluptuous.Schema({
|
||||||
|
voluptuous.Required(self.RESOURCE_ID):
|
||||||
|
voluptuous.All(
|
||||||
|
voluptuous.Any(*six.string_types),
|
||||||
|
voluptuous.Length(min=1)),
|
||||||
|
voluptuous.Required(self.STATE):
|
||||||
|
voluptuous.Any(*[state.value
|
||||||
|
for state in list(hstate.HypervisorState)]),
|
||||||
|
})
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def host(self):
|
def host(self):
|
||||||
return self.applies_to
|
return self.resource_id
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def state(self):
|
def state(self):
|
||||||
return self.input_parameters.get('state')
|
return self.input_parameters.get(self.STATE)
|
||||||
|
|
||||||
def execute(self):
|
def execute(self):
|
||||||
target_state = None
|
target_state = None
|
||||||
if self.state == hstate.HypervisorState.OFFLINE.value:
|
if self.state == hstate.HypervisorState.DISABLED.value:
|
||||||
target_state = False
|
target_state = False
|
||||||
elif self.status == hstate.HypervisorState.ONLINE.value:
|
elif self.state == hstate.HypervisorState.ENABLED.value:
|
||||||
target_state = True
|
target_state = True
|
||||||
return self.nova_manage_service(target_state)
|
return self._nova_manage_service(target_state)
|
||||||
|
|
||||||
def revert(self):
|
def revert(self):
|
||||||
target_state = None
|
target_state = None
|
||||||
if self.state == hstate.HypervisorState.OFFLINE.value:
|
if self.state == hstate.HypervisorState.DISABLED.value:
|
||||||
target_state = True
|
target_state = True
|
||||||
elif self.state == hstate.HypervisorState.ONLINE.value:
|
elif self.state == hstate.HypervisorState.ENABLED.value:
|
||||||
target_state = False
|
target_state = False
|
||||||
return self.nova_manage_service(target_state)
|
return self._nova_manage_service(target_state)
|
||||||
|
|
||||||
def nova_manage_service(self, state):
|
def _nova_manage_service(self, state):
|
||||||
if state is None:
|
if state is None:
|
||||||
raise exception.IllegalArgumentException(
|
raise exception.IllegalArgumentException(
|
||||||
message=_("The target state is not defined"))
|
message=_("The target state is not defined"))
|
||||||
|
|
||||||
keystone = kclient.KeystoneClient()
|
nova = nova_helper.NovaHelper(osc=self.osc)
|
||||||
wrapper = nclient.NovaClient(keystone.get_credentials(),
|
|
||||||
session=keystone.get_session())
|
|
||||||
if state is True:
|
if state is True:
|
||||||
return wrapper.enable_service_nova_compute(self.host)
|
return nova.enable_service_nova_compute(self.host)
|
||||||
else:
|
else:
|
||||||
return wrapper.disable_service_nova_compute(self.host)
|
return nova.disable_service_nova_compute(self.host)
|
||||||
|
|
||||||
def precondition(self):
|
def precondition(self):
|
||||||
pass
|
pass
|
||||||
|
|||||||
@@ -28,9 +28,15 @@ class ActionFactory(object):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.action_loader = default.DefaultActionLoader()
|
self.action_loader = default.DefaultActionLoader()
|
||||||
|
|
||||||
def make_action(self, object_action):
|
def make_action(self, object_action, osc=None):
|
||||||
LOG.debug("Creating instance of %s", object_action.action_type)
|
LOG.debug("Creating instance of %s", object_action.action_type)
|
||||||
loaded_action = self.action_loader.load(name=object_action.action_type)
|
loaded_action = self.action_loader.load(name=object_action.action_type,
|
||||||
|
osc=osc)
|
||||||
loaded_action.input_parameters = object_action.input_parameters
|
loaded_action.input_parameters = object_action.input_parameters
|
||||||
loaded_action.applies_to = object_action.applies_to
|
LOG.debug("Checking the input parameters")
|
||||||
|
# NOTE(jed) if we change the schema of an action and we try to reload
|
||||||
|
# an older version of the Action, the validation can fail.
|
||||||
|
# We need to add the versioning of an Action or a migration tool.
|
||||||
|
# We can also create an new Action which extends the previous one.
|
||||||
|
loaded_action.validate_parameters()
|
||||||
return loaded_action
|
return loaded_action
|
||||||
|
|||||||
@@ -18,45 +18,128 @@
|
|||||||
#
|
#
|
||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
import six
|
||||||
|
import voluptuous
|
||||||
|
|
||||||
|
from watcher._i18n import _, _LC
|
||||||
from watcher.applier.actions import base
|
from watcher.applier.actions import base
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import keystone as kclient
|
from watcher.common import nova_helper
|
||||||
from watcher.common import nova as nclient
|
from watcher.common import utils
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Migrate(base.BaseAction):
|
class Migrate(base.BaseAction):
|
||||||
|
"""Live-Migrates a server to a destination nova-compute host
|
||||||
|
|
||||||
|
This action will allow you to migrate a server to another compute
|
||||||
|
destination host. As of now, only live migration can be performed using
|
||||||
|
this action.
|
||||||
|
.. If either host uses shared storage, you can use ``live``
|
||||||
|
.. as ``migration_type``. If both source and destination hosts provide
|
||||||
|
.. local disks, you can set the block_migration parameter to True (not
|
||||||
|
.. supported for yet).
|
||||||
|
|
||||||
|
The action schema is::
|
||||||
|
|
||||||
|
schema = Schema({
|
||||||
|
'resource_id': str, # should be a UUID
|
||||||
|
'migration_type': str, # choices -> "live" only
|
||||||
|
'dst_hypervisor': str,
|
||||||
|
'src_hypervisor': str,
|
||||||
|
})
|
||||||
|
|
||||||
|
The `resource_id` is the UUID of the server to migrate. Only live migration
|
||||||
|
is supported.
|
||||||
|
The `src_hypervisor` and `dst_hypervisor` parameters are respectively the
|
||||||
|
source and the destination compute hostname (list of available compute
|
||||||
|
hosts is returned by this command: ``nova service-list --binary
|
||||||
|
nova-compute``).
|
||||||
|
"""
|
||||||
|
|
||||||
|
# input parameters constants
|
||||||
|
MIGRATION_TYPE = 'migration_type'
|
||||||
|
LIVE_MIGRATION = 'live'
|
||||||
|
DST_HYPERVISOR = 'dst_hypervisor'
|
||||||
|
SRC_HYPERVISOR = 'src_hypervisor'
|
||||||
|
|
||||||
|
def check_resource_id(self, value):
|
||||||
|
if (value is not None and
|
||||||
|
len(value) > 0 and not
|
||||||
|
utils.is_uuid_like(value)):
|
||||||
|
raise voluptuous.Invalid(_("The parameter"
|
||||||
|
" resource_id is invalid."))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def schema(self):
|
||||||
|
return voluptuous.Schema({
|
||||||
|
voluptuous.Required(self.RESOURCE_ID): self.check_resource_id,
|
||||||
|
voluptuous.Required(self.MIGRATION_TYPE,
|
||||||
|
default=self.LIVE_MIGRATION):
|
||||||
|
voluptuous.Any(*[self.LIVE_MIGRATION]),
|
||||||
|
voluptuous.Required(self.DST_HYPERVISOR):
|
||||||
|
voluptuous.All(voluptuous.Any(*six.string_types),
|
||||||
|
voluptuous.Length(min=1)),
|
||||||
|
voluptuous.Required(self.SRC_HYPERVISOR):
|
||||||
|
voluptuous.All(voluptuous.Any(*six.string_types),
|
||||||
|
voluptuous.Length(min=1)),
|
||||||
|
})
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def instance_uuid(self):
|
def instance_uuid(self):
|
||||||
return self.applies_to
|
return self.resource_id
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def migration_type(self):
|
def migration_type(self):
|
||||||
return self.input_parameters.get('migration_type')
|
return self.input_parameters.get(self.MIGRATION_TYPE)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def dst_hypervisor(self):
|
def dst_hypervisor(self):
|
||||||
return self.input_parameters.get('dst_hypervisor')
|
return self.input_parameters.get(self.DST_HYPERVISOR)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def src_hypervisor(self):
|
def src_hypervisor(self):
|
||||||
return self.input_parameters.get('src_hypervisor')
|
return self.input_parameters.get(self.SRC_HYPERVISOR)
|
||||||
|
|
||||||
|
def _live_migrate_instance(self, nova, destination):
|
||||||
|
result = None
|
||||||
|
try:
|
||||||
|
result = nova.live_migrate_instance(instance_id=self.instance_uuid,
|
||||||
|
dest_hostname=destination)
|
||||||
|
except nova_helper.nvexceptions.ClientException as e:
|
||||||
|
if e.code == 400:
|
||||||
|
LOG.debug("Live migration of instance %s failed. "
|
||||||
|
"Trying to live migrate using block migration."
|
||||||
|
% self.instance_uuid)
|
||||||
|
result = nova.live_migrate_instance(
|
||||||
|
instance_id=self.instance_uuid,
|
||||||
|
dest_hostname=destination,
|
||||||
|
block_migration=True)
|
||||||
|
else:
|
||||||
|
LOG.debug("Nova client exception occured while live migrating "
|
||||||
|
"instance %s.Exception: %s" %
|
||||||
|
(self.instance_uuid, e))
|
||||||
|
except Exception:
|
||||||
|
LOG.critical(_LC("Unexpected error occured. Migration failed for"
|
||||||
|
"instance %s. Leaving instance on previous "
|
||||||
|
"host."), self.instance_uuid)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
def migrate(self, destination):
|
def migrate(self, destination):
|
||||||
keystone = kclient.KeystoneClient()
|
nova = nova_helper.NovaHelper(osc=self.osc)
|
||||||
wrapper = nclient.NovaClient(keystone.get_credentials(),
|
LOG.debug("Migrate instance %s to %s", self.instance_uuid,
|
||||||
session=keystone.get_session())
|
|
||||||
LOG.debug("Migrate instance %s to %s ", self.instance_uuid,
|
|
||||||
destination)
|
destination)
|
||||||
instance = wrapper.find_instance(self.instance_uuid)
|
instance = nova.find_instance(self.instance_uuid)
|
||||||
if instance:
|
if instance:
|
||||||
if self.migration_type == 'live':
|
if self.migration_type == 'live':
|
||||||
return wrapper.live_migrate_instance(
|
return self._live_migrate_instance(nova, destination)
|
||||||
instance_id=self.instance_uuid, dest_hostname=destination)
|
|
||||||
else:
|
else:
|
||||||
raise exception.InvalidParameterValue(err=self.migration_type)
|
raise exception.Invalid(
|
||||||
|
message=(_('Migration of type %(migration_type)s is not '
|
||||||
|
'supported.') %
|
||||||
|
{'migration_type': self.migration_type}))
|
||||||
else:
|
else:
|
||||||
raise exception.InstanceNotFound(name=self.instance_uuid)
|
raise exception.InstanceNotFound(name=self.instance_uuid)
|
||||||
|
|
||||||
|
|||||||
@@ -18,6 +18,8 @@
|
|||||||
#
|
#
|
||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
import six
|
||||||
|
import voluptuous
|
||||||
|
|
||||||
from watcher.applier.actions import base
|
from watcher.applier.actions import base
|
||||||
|
|
||||||
@@ -26,10 +28,29 @@ LOG = log.getLogger(__name__)
|
|||||||
|
|
||||||
|
|
||||||
class Nop(base.BaseAction):
|
class Nop(base.BaseAction):
|
||||||
|
"""logs a message
|
||||||
|
|
||||||
|
The action schema is::
|
||||||
|
|
||||||
|
schema = Schema({
|
||||||
|
'message': str,
|
||||||
|
})
|
||||||
|
|
||||||
|
The `message` is the actual message that will be logged.
|
||||||
|
"""
|
||||||
|
|
||||||
|
MESSAGE = 'message'
|
||||||
|
|
||||||
|
@property
|
||||||
|
def schema(self):
|
||||||
|
return voluptuous.Schema({
|
||||||
|
voluptuous.Required(self.MESSAGE): voluptuous.Any(
|
||||||
|
voluptuous.Any(*six.string_types), None)
|
||||||
|
})
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def message(self):
|
def message(self):
|
||||||
return self.input_parameters.get('message')
|
return self.input_parameters.get(self.MESSAGE)
|
||||||
|
|
||||||
def execute(self):
|
def execute(self):
|
||||||
LOG.debug("executing action NOP message:%s ", self.message)
|
LOG.debug("executing action NOP message:%s ", self.message)
|
||||||
|
|||||||
@@ -18,19 +18,39 @@
|
|||||||
#
|
#
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
import voluptuous
|
||||||
|
|
||||||
from watcher.applier.actions import base
|
from watcher.applier.actions import base
|
||||||
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Sleep(base.BaseAction):
|
class Sleep(base.BaseAction):
|
||||||
|
"""Makes the executor of the action plan wait for a given duration
|
||||||
|
|
||||||
|
The action schema is::
|
||||||
|
|
||||||
|
schema = Schema({
|
||||||
|
'duration': float,
|
||||||
|
})
|
||||||
|
|
||||||
|
The `duration` is expressed in seconds.
|
||||||
|
"""
|
||||||
|
|
||||||
|
DURATION = 'duration'
|
||||||
|
|
||||||
|
@property
|
||||||
|
def schema(self):
|
||||||
|
return voluptuous.Schema({
|
||||||
|
voluptuous.Required(self.DURATION, default=1):
|
||||||
|
voluptuous.All(float, voluptuous.Range(min=0))
|
||||||
|
})
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def duration(self):
|
def duration(self):
|
||||||
return int(self.input_parameters.get('duration'))
|
return int(self.input_parameters.get(self.DURATION))
|
||||||
|
|
||||||
def execute(self):
|
def execute(self):
|
||||||
LOG.debug("Starting action Sleep duration:%s ", self.duration)
|
LOG.debug("Starting action Sleep duration:%s ", self.duration)
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ CONF = cfg.CONF
|
|||||||
|
|
||||||
|
|
||||||
class DefaultApplier(base.BaseApplier):
|
class DefaultApplier(base.BaseApplier):
|
||||||
def __init__(self, applier_manager, context):
|
def __init__(self, context, applier_manager):
|
||||||
super(DefaultApplier, self).__init__()
|
super(DefaultApplier, self).__init__()
|
||||||
self._applier_manager = applier_manager
|
self._applier_manager = applier_manager
|
||||||
self._loader = default.DefaultWorkFlowEngineLoader()
|
self._loader = default.DefaultWorkFlowEngineLoader()
|
||||||
@@ -48,9 +48,10 @@ class DefaultApplier(base.BaseApplier):
|
|||||||
if self._engine is None:
|
if self._engine is None:
|
||||||
selected_workflow_engine = CONF.watcher_applier.workflow_engine
|
selected_workflow_engine = CONF.watcher_applier.workflow_engine
|
||||||
LOG.debug("Loading workflow engine %s ", selected_workflow_engine)
|
LOG.debug("Loading workflow engine %s ", selected_workflow_engine)
|
||||||
self._engine = self._loader.load(name=selected_workflow_engine)
|
self._engine = self._loader.load(
|
||||||
self._engine.context = self.context
|
name=selected_workflow_engine,
|
||||||
self._engine.applier_manager = self.applier_manager
|
context=self.context,
|
||||||
|
applier_manager=self.applier_manager)
|
||||||
return self._engine
|
return self._engine
|
||||||
|
|
||||||
def execute(self, action_plan_uuid):
|
def execute(self, action_plan_uuid):
|
||||||
|
|||||||
@@ -34,12 +34,12 @@ APPLIER_MANAGER_OPTS = [
|
|||||||
min=1,
|
min=1,
|
||||||
required=True,
|
required=True,
|
||||||
help='Number of workers for applier, default value is 1.'),
|
help='Number of workers for applier, default value is 1.'),
|
||||||
cfg.StrOpt('topic_control',
|
cfg.StrOpt('conductor_topic',
|
||||||
default='watcher.applier.control',
|
default='watcher.applier.control',
|
||||||
help='The topic name used for'
|
help='The topic name used for'
|
||||||
'control events, this topic '
|
'control events, this topic '
|
||||||
'used for rpc call '),
|
'used for rpc call '),
|
||||||
cfg.StrOpt('topic_status',
|
cfg.StrOpt('status_topic',
|
||||||
default='watcher.applier.status',
|
default='watcher.applier.status',
|
||||||
help='The topic name used for '
|
help='The topic name used for '
|
||||||
'status events, this topic '
|
'status events, this topic '
|
||||||
@@ -67,12 +67,13 @@ class ApplierManager(messaging_core.MessagingCore):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(ApplierManager, self).__init__(
|
super(ApplierManager, self).__init__(
|
||||||
CONF.watcher_applier.publisher_id,
|
CONF.watcher_applier.publisher_id,
|
||||||
CONF.watcher_applier.topic_control,
|
CONF.watcher_applier.conductor_topic,
|
||||||
CONF.watcher_applier.topic_status,
|
CONF.watcher_applier.status_topic,
|
||||||
api_version=self.API_VERSION,
|
api_version=self.API_VERSION,
|
||||||
)
|
)
|
||||||
self.topic_control.add_endpoint(trigger.TriggerActionPlan(self))
|
self.conductor_topic_handler.add_endpoint(
|
||||||
|
trigger.TriggerActionPlan(self))
|
||||||
|
|
||||||
def join(self):
|
def join(self):
|
||||||
self.topic_control.join()
|
self.conductor_topic_handler.join()
|
||||||
self.topic_status.join()
|
self.status_topic_handler.join()
|
||||||
|
|||||||
@@ -39,17 +39,17 @@ class ApplierAPI(messaging_core.MessagingCore):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(ApplierAPI, self).__init__(
|
super(ApplierAPI, self).__init__(
|
||||||
CONF.watcher_applier.publisher_id,
|
CONF.watcher_applier.publisher_id,
|
||||||
CONF.watcher_applier.topic_control,
|
CONF.watcher_applier.conductor_topic,
|
||||||
CONF.watcher_applier.topic_status,
|
CONF.watcher_applier.status_topic,
|
||||||
api_version=self.API_VERSION,
|
api_version=self.API_VERSION,
|
||||||
)
|
)
|
||||||
self.handler = notification.NotificationHandler(self.publisher_id)
|
self.handler = notification.NotificationHandler(self.publisher_id)
|
||||||
self.handler.register_observer(self)
|
self.handler.register_observer(self)
|
||||||
self.topic_status.add_endpoint(self.handler)
|
self.status_topic_handler.add_endpoint(self.handler)
|
||||||
transport = om.get_transport(CONF)
|
transport = om.get_transport(CONF)
|
||||||
|
|
||||||
target = om.Target(
|
target = om.Target(
|
||||||
topic=CONF.watcher_applier.topic_control,
|
topic=CONF.watcher_applier.conductor_topic,
|
||||||
version=self.API_VERSION,
|
version=self.API_VERSION,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -63,10 +63,3 @@ class ApplierAPI(messaging_core.MessagingCore):
|
|||||||
return self.client.call(
|
return self.client.call(
|
||||||
context.to_dict(), 'launch_action_plan',
|
context.to_dict(), 'launch_action_plan',
|
||||||
action_plan_uuid=action_plan_uuid)
|
action_plan_uuid=action_plan_uuid)
|
||||||
|
|
||||||
def event_receive(self, event):
|
|
||||||
try:
|
|
||||||
pass
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception(e)
|
|
||||||
raise
|
|
||||||
|
|||||||
@@ -22,33 +22,33 @@ import six
|
|||||||
|
|
||||||
from watcher.applier.actions import factory
|
from watcher.applier.actions import factory
|
||||||
from watcher.applier.messaging import event_types
|
from watcher.applier.messaging import event_types
|
||||||
|
from watcher.common import clients
|
||||||
from watcher.common.messaging.events import event
|
from watcher.common.messaging.events import event
|
||||||
from watcher import objects
|
from watcher import objects
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
class BaseWorkFlowEngine(object):
|
class BaseWorkFlowEngine(object):
|
||||||
def __init__(self):
|
def __init__(self, context=None, applier_manager=None):
|
||||||
self._applier_manager = None
|
self._context = context
|
||||||
self._context = None
|
self._applier_manager = applier_manager
|
||||||
self._action_factory = factory.ActionFactory()
|
self._action_factory = factory.ActionFactory()
|
||||||
|
self._osc = None
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def context(self):
|
def context(self):
|
||||||
return self._context
|
return self._context
|
||||||
|
|
||||||
@context.setter
|
@property
|
||||||
def context(self, c):
|
def osc(self):
|
||||||
self._context = c
|
if not self._osc:
|
||||||
|
self._osc = clients.OpenStackClients()
|
||||||
|
return self._osc
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def applier_manager(self):
|
def applier_manager(self):
|
||||||
return self._applier_manager
|
return self._applier_manager
|
||||||
|
|
||||||
@applier_manager.setter
|
|
||||||
def applier_manager(self, a):
|
|
||||||
self._applier_manager = a
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def action_factory(self):
|
def action_factory(self):
|
||||||
return self._action_factory
|
return self._action_factory
|
||||||
@@ -62,8 +62,8 @@ class BaseWorkFlowEngine(object):
|
|||||||
ev.data = {}
|
ev.data = {}
|
||||||
payload = {'action_uuid': action.uuid,
|
payload = {'action_uuid': action.uuid,
|
||||||
'action_state': state}
|
'action_state': state}
|
||||||
self.applier_manager.topic_status.publish_event(ev.type.name,
|
self.applier_manager.status_topic_handler.publish_event(
|
||||||
payload)
|
ev.type.name, payload)
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def execute(self, actions):
|
def execute(self, actions):
|
||||||
|
|||||||
@@ -22,12 +22,19 @@ from taskflow import task
|
|||||||
|
|
||||||
from watcher._i18n import _LE, _LW, _LC
|
from watcher._i18n import _LE, _LW, _LC
|
||||||
from watcher.applier.workflow_engine import base
|
from watcher.applier.workflow_engine import base
|
||||||
|
from watcher.common import exception
|
||||||
from watcher.objects import action as obj_action
|
from watcher.objects import action as obj_action
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class DefaultWorkFlowEngine(base.BaseWorkFlowEngine):
|
class DefaultWorkFlowEngine(base.BaseWorkFlowEngine):
|
||||||
|
"""Taskflow as a workflow engine for Watcher
|
||||||
|
|
||||||
|
Full documentation on taskflow at
|
||||||
|
http://docs.openstack.org/developer/taskflow/
|
||||||
|
"""
|
||||||
|
|
||||||
def decider(self, history):
|
def decider(self, history):
|
||||||
# FIXME(jed) not possible with the current Watcher Planner
|
# FIXME(jed) not possible with the current Watcher Planner
|
||||||
#
|
#
|
||||||
@@ -71,10 +78,9 @@ class DefaultWorkFlowEngine(base.BaseWorkFlowEngine):
|
|||||||
|
|
||||||
e = engines.load(flow)
|
e = engines.load(flow)
|
||||||
e.run()
|
e.run()
|
||||||
return True
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception(e)
|
raise exception.WorkflowExecutionException(error=e)
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class TaskFlowActionContainer(task.Task):
|
class TaskFlowActionContainer(task.Task):
|
||||||
@@ -89,7 +95,9 @@ class TaskFlowActionContainer(task.Task):
|
|||||||
@property
|
@property
|
||||||
def action(self):
|
def action(self):
|
||||||
if self.loaded_action is None:
|
if self.loaded_action is None:
|
||||||
action = self.engine.action_factory.make_action(self._db_action)
|
action = self.engine.action_factory.make_action(
|
||||||
|
self._db_action,
|
||||||
|
osc=self._engine.osc)
|
||||||
self.loaded_action = action
|
self.loaded_action = action
|
||||||
return self.loaded_action
|
return self.loaded_action
|
||||||
|
|
||||||
@@ -113,14 +121,9 @@ class TaskFlowActionContainer(task.Task):
|
|||||||
try:
|
try:
|
||||||
LOG.debug("Running action %s", self.name)
|
LOG.debug("Running action %s", self.name)
|
||||||
|
|
||||||
# todo(jed) remove return (true or false) raise an Exception
|
self.action.execute()
|
||||||
result = self.action.execute()
|
self.engine.notify(self._db_action,
|
||||||
if result is not True:
|
obj_action.State.SUCCEEDED)
|
||||||
self.engine.notify(self._db_action,
|
|
||||||
obj_action.State.FAILED)
|
|
||||||
else:
|
|
||||||
self.engine.notify(self._db_action,
|
|
||||||
obj_action.State.SUCCEEDED)
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception(e)
|
LOG.exception(e)
|
||||||
LOG.error(_LE('The WorkFlow Engine has failed '
|
LOG.error(_LE('The WorkFlow Engine has failed '
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ from oslo_config import cfg
|
|||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from watcher import _i18n
|
from watcher import _i18n
|
||||||
from watcher.applier.manager import ApplierManager
|
from watcher.applier import manager
|
||||||
from watcher.common import service
|
from watcher.common import service
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@@ -40,6 +40,6 @@ def main():
|
|||||||
LOG.debug("Configuration:")
|
LOG.debug("Configuration:")
|
||||||
cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
|
cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
|
||||||
|
|
||||||
server = ApplierManager()
|
server = manager.ApplierManager()
|
||||||
server.connect()
|
server.connect()
|
||||||
server.join()
|
server.join()
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ from oslo_config import cfg
|
|||||||
|
|
||||||
from watcher.common import service
|
from watcher.common import service
|
||||||
from watcher.db import migration
|
from watcher.db import migration
|
||||||
|
from watcher.db import purge
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
@@ -56,6 +56,12 @@ class DBCommand(object):
|
|||||||
def create_schema():
|
def create_schema():
|
||||||
migration.create_schema()
|
migration.create_schema()
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def purge():
|
||||||
|
purge.purge(CONF.command.age_in_days, CONF.command.max_number,
|
||||||
|
CONF.command.audit_template, CONF.command.exclude_orphans,
|
||||||
|
CONF.command.dry_run)
|
||||||
|
|
||||||
|
|
||||||
def add_command_parsers(subparsers):
|
def add_command_parsers(subparsers):
|
||||||
parser = subparsers.add_parser(
|
parser = subparsers.add_parser(
|
||||||
@@ -96,6 +102,33 @@ def add_command_parsers(subparsers):
|
|||||||
help="Create the database schema.")
|
help="Create the database schema.")
|
||||||
parser.set_defaults(func=DBCommand.create_schema)
|
parser.set_defaults(func=DBCommand.create_schema)
|
||||||
|
|
||||||
|
parser = subparsers.add_parser(
|
||||||
|
'purge',
|
||||||
|
help="Purge the database.")
|
||||||
|
parser.add_argument('-d', '--age-in-days',
|
||||||
|
help="Number of days since deletion (from today) "
|
||||||
|
"to exclude from the purge. If None, everything "
|
||||||
|
"will be purged.",
|
||||||
|
type=int, default=None, nargs='?')
|
||||||
|
parser.add_argument('-n', '--max-number',
|
||||||
|
help="Max number of objects expected to be deleted. "
|
||||||
|
"Prevents the deletion if exceeded. No limit if "
|
||||||
|
"set to None.",
|
||||||
|
type=int, default=None, nargs='?')
|
||||||
|
parser.add_argument('-t', '--audit-template',
|
||||||
|
help="UUID or name of the audit template to purge.",
|
||||||
|
type=str, default=None, nargs='?')
|
||||||
|
parser.add_argument('-e', '--exclude-orphans', action='store_true',
|
||||||
|
help="Flag to indicate whether or not you want to "
|
||||||
|
"exclude orphans from deletion (default: False).",
|
||||||
|
default=False)
|
||||||
|
parser.add_argument('--dry-run', action='store_true',
|
||||||
|
help="Flag to indicate whether or not you want to "
|
||||||
|
"perform a dry run (no deletion).",
|
||||||
|
default=False)
|
||||||
|
|
||||||
|
parser.set_defaults(func=DBCommand.purge)
|
||||||
|
|
||||||
|
|
||||||
command_opt = cfg.SubCommandOpt('command',
|
command_opt = cfg.SubCommandOpt('command',
|
||||||
title='Command',
|
title='Command',
|
||||||
@@ -114,6 +147,7 @@ def main():
|
|||||||
valid_commands = set([
|
valid_commands = set([
|
||||||
'upgrade', 'downgrade', 'revision',
|
'upgrade', 'downgrade', 'revision',
|
||||||
'version', 'stamp', 'create_schema',
|
'version', 'stamp', 'create_schema',
|
||||||
|
'purge',
|
||||||
])
|
])
|
||||||
if not set(sys.argv).intersection(valid_commands):
|
if not set(sys.argv).intersection(valid_commands):
|
||||||
sys.argv.append('upgrade')
|
sys.argv.append('upgrade')
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ from oslo_log import log as logging
|
|||||||
|
|
||||||
from watcher import _i18n
|
from watcher import _i18n
|
||||||
from watcher.common import service
|
from watcher.common import service
|
||||||
from watcher.decision_engine.manager import DecisionEngineManager
|
from watcher.decision_engine import manager
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@@ -41,6 +41,6 @@ def main():
|
|||||||
LOG.debug("Configuration:")
|
LOG.debug("Configuration:")
|
||||||
cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
|
cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
|
||||||
|
|
||||||
server = DecisionEngineManager()
|
server = manager.DecisionEngineManager()
|
||||||
server.connect()
|
server.connect()
|
||||||
server.join()
|
server.join()
|
||||||
|
|||||||
@@ -17,30 +17,16 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
from ceilometerclient import client
|
|
||||||
from ceilometerclient.exc import HTTPUnauthorized
|
from ceilometerclient.exc import HTTPUnauthorized
|
||||||
|
|
||||||
from watcher.common import keystone
|
from watcher.common import clients
|
||||||
|
|
||||||
|
|
||||||
class CeilometerClient(object):
|
class CeilometerHelper(object):
|
||||||
def __init__(self, api_version='2'):
|
def __init__(self, osc=None):
|
||||||
self._cmclient = None
|
""":param osc: an OpenStackClients instance"""
|
||||||
self._api_version = api_version
|
self.osc = osc if osc else clients.OpenStackClients()
|
||||||
|
self.ceilometer = self.osc.ceilometer()
|
||||||
@property
|
|
||||||
def cmclient(self):
|
|
||||||
"""Initialization of Ceilometer client."""
|
|
||||||
if not self._cmclient:
|
|
||||||
ksclient = keystone.KeystoneClient()
|
|
||||||
creds = ksclient.get_credentials()
|
|
||||||
endpoint = ksclient.get_endpoint(
|
|
||||||
service_type='metering',
|
|
||||||
endpoint_type='publicURL')
|
|
||||||
self._cmclient = client.get_client(self._api_version,
|
|
||||||
ceilometer_url=endpoint,
|
|
||||||
**creds)
|
|
||||||
return self._cmclient
|
|
||||||
|
|
||||||
def build_query(self, user_id=None, tenant_id=None, resource_id=None,
|
def build_query(self, user_id=None, tenant_id=None, resource_id=None,
|
||||||
user_ids=None, tenant_ids=None, resource_ids=None):
|
user_ids=None, tenant_ids=None, resource_ids=None):
|
||||||
@@ -83,20 +69,21 @@ class CeilometerClient(object):
|
|||||||
try:
|
try:
|
||||||
return f(*args, **kargs)
|
return f(*args, **kargs)
|
||||||
except HTTPUnauthorized:
|
except HTTPUnauthorized:
|
||||||
self.reset_client()
|
self.osc.reset_clients()
|
||||||
|
self.ceilometer = self.osc.ceilometer()
|
||||||
return f(*args, **kargs)
|
return f(*args, **kargs)
|
||||||
except Exception:
|
except Exception:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def query_sample(self, meter_name, query, limit=1):
|
def query_sample(self, meter_name, query, limit=1):
|
||||||
return self.query_retry(f=self.cmclient.samples.list,
|
return self.query_retry(f=self.ceilometer.samples.list,
|
||||||
meter_name=meter_name,
|
meter_name=meter_name,
|
||||||
limit=limit,
|
limit=limit,
|
||||||
q=query)
|
q=query)
|
||||||
|
|
||||||
def statistic_list(self, meter_name, query=None, period=None):
|
def statistic_list(self, meter_name, query=None, period=None):
|
||||||
"""List of statistics."""
|
"""List of statistics."""
|
||||||
statistics = self.cmclient.statistics.list(
|
statistics = self.ceilometer.statistics.list(
|
||||||
meter_name=meter_name,
|
meter_name=meter_name,
|
||||||
q=query,
|
q=query,
|
||||||
period=period)
|
period=period)
|
||||||
@@ -104,7 +91,8 @@ class CeilometerClient(object):
|
|||||||
|
|
||||||
def meter_list(self, query=None):
|
def meter_list(self, query=None):
|
||||||
"""List the user's meters."""
|
"""List the user's meters."""
|
||||||
meters = self.query_retry(f=self.cmclient.meters.list, query=query)
|
meters = self.query_retry(f=self.ceilometer.meters.list,
|
||||||
|
query=query)
|
||||||
return meters
|
return meters
|
||||||
|
|
||||||
def statistic_aggregation(self,
|
def statistic_aggregation(self,
|
||||||
@@ -125,7 +113,7 @@ class CeilometerClient(object):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
query = self.build_query(resource_id=resource_id)
|
query = self.build_query(resource_id=resource_id)
|
||||||
statistic = self.query_retry(f=self.cmclient.statistics.list,
|
statistic = self.query_retry(f=self.ceilometer.statistics.list,
|
||||||
meter_name=meter_name,
|
meter_name=meter_name,
|
||||||
q=query,
|
q=query,
|
||||||
period=period,
|
period=period,
|
||||||
@@ -140,7 +128,8 @@ class CeilometerClient(object):
|
|||||||
|
|
||||||
def get_last_sample_values(self, resource_id, meter_name, limit=1):
|
def get_last_sample_values(self, resource_id, meter_name, limit=1):
|
||||||
samples = self.query_sample(meter_name=meter_name,
|
samples = self.query_sample(meter_name=meter_name,
|
||||||
query=self.build_query(resource_id))
|
query=self.build_query(resource_id),
|
||||||
|
limit=limit)
|
||||||
values = []
|
values = []
|
||||||
for index, sample in enumerate(samples):
|
for index, sample in enumerate(samples):
|
||||||
values.append(
|
values.append(
|
||||||
@@ -156,6 +145,3 @@ class CeilometerClient(object):
|
|||||||
return samples[-1]._info['counter_volume']
|
return samples[-1]._info['counter_volume']
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def reset_client(self):
|
|
||||||
self._cmclient = None
|
|
||||||
158
watcher/common/clients.py
Normal file
158
watcher/common/clients.py
Normal file
@@ -0,0 +1,158 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from ceilometerclient import client as ceclient
|
||||||
|
from cinderclient import client as ciclient
|
||||||
|
from glanceclient import client as glclient
|
||||||
|
from keystoneauth1 import loading as ka_loading
|
||||||
|
from keystoneclient import client as keyclient
|
||||||
|
from neutronclient.neutron import client as netclient
|
||||||
|
from novaclient import client as nvclient
|
||||||
|
from oslo_config import cfg
|
||||||
|
|
||||||
|
from watcher._i18n import _
|
||||||
|
from watcher.common import exception
|
||||||
|
|
||||||
|
|
||||||
|
NOVA_CLIENT_OPTS = [
|
||||||
|
cfg.StrOpt('api_version',
|
||||||
|
default='2',
|
||||||
|
help=_('Version of Nova API to use in novaclient.'))]
|
||||||
|
|
||||||
|
GLANCE_CLIENT_OPTS = [
|
||||||
|
cfg.StrOpt('api_version',
|
||||||
|
default='2',
|
||||||
|
help=_('Version of Glance API to use in glanceclient.'))]
|
||||||
|
|
||||||
|
CINDER_CLIENT_OPTS = [
|
||||||
|
cfg.StrOpt('api_version',
|
||||||
|
default='2',
|
||||||
|
help=_('Version of Cinder API to use in cinderclient.'))]
|
||||||
|
|
||||||
|
CEILOMETER_CLIENT_OPTS = [
|
||||||
|
cfg.StrOpt('api_version',
|
||||||
|
default='2',
|
||||||
|
help=_('Version of Ceilometer API to use in '
|
||||||
|
'ceilometerclient.'))]
|
||||||
|
|
||||||
|
NEUTRON_CLIENT_OPTS = [
|
||||||
|
cfg.StrOpt('api_version',
|
||||||
|
default='2.0',
|
||||||
|
help=_('Version of Neutron API to use in neutronclient.'))]
|
||||||
|
|
||||||
|
cfg.CONF.register_opts(NOVA_CLIENT_OPTS, group='nova_client')
|
||||||
|
cfg.CONF.register_opts(GLANCE_CLIENT_OPTS, group='glance_client')
|
||||||
|
cfg.CONF.register_opts(CINDER_CLIENT_OPTS, group='cinder_client')
|
||||||
|
cfg.CONF.register_opts(CEILOMETER_CLIENT_OPTS, group='ceilometer_client')
|
||||||
|
cfg.CONF.register_opts(NEUTRON_CLIENT_OPTS, group='neutron_client')
|
||||||
|
|
||||||
|
_CLIENTS_AUTH_GROUP = 'watcher_clients_auth'
|
||||||
|
|
||||||
|
ka_loading.register_auth_conf_options(cfg.CONF, _CLIENTS_AUTH_GROUP)
|
||||||
|
ka_loading.register_session_conf_options(cfg.CONF, _CLIENTS_AUTH_GROUP)
|
||||||
|
|
||||||
|
|
||||||
|
class OpenStackClients(object):
|
||||||
|
"""Convenience class to create and cache client instances."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.reset_clients()
|
||||||
|
|
||||||
|
def reset_clients(self):
|
||||||
|
self._session = None
|
||||||
|
self._keystone = None
|
||||||
|
self._nova = None
|
||||||
|
self._glance = None
|
||||||
|
self._cinder = None
|
||||||
|
self._ceilometer = None
|
||||||
|
self._neutron = None
|
||||||
|
|
||||||
|
def _get_keystone_session(self):
|
||||||
|
auth = ka_loading.load_auth_from_conf_options(cfg.CONF,
|
||||||
|
_CLIENTS_AUTH_GROUP)
|
||||||
|
sess = ka_loading.load_session_from_conf_options(cfg.CONF,
|
||||||
|
_CLIENTS_AUTH_GROUP,
|
||||||
|
auth=auth)
|
||||||
|
return sess
|
||||||
|
|
||||||
|
@property
|
||||||
|
def auth_url(self):
|
||||||
|
return self.keystone().auth_url
|
||||||
|
|
||||||
|
@property
|
||||||
|
def session(self):
|
||||||
|
if not self._session:
|
||||||
|
self._session = self._get_keystone_session()
|
||||||
|
return self._session
|
||||||
|
|
||||||
|
def _get_client_option(self, client, option):
|
||||||
|
return getattr(getattr(cfg.CONF, '%s_client' % client), option)
|
||||||
|
|
||||||
|
@exception.wrap_keystone_exception
|
||||||
|
def keystone(self):
|
||||||
|
if not self._keystone:
|
||||||
|
self._keystone = keyclient.Client(session=self.session)
|
||||||
|
|
||||||
|
return self._keystone
|
||||||
|
|
||||||
|
@exception.wrap_keystone_exception
|
||||||
|
def nova(self):
|
||||||
|
if self._nova:
|
||||||
|
return self._nova
|
||||||
|
|
||||||
|
novaclient_version = self._get_client_option('nova', 'api_version')
|
||||||
|
self._nova = nvclient.Client(novaclient_version,
|
||||||
|
session=self.session)
|
||||||
|
return self._nova
|
||||||
|
|
||||||
|
@exception.wrap_keystone_exception
|
||||||
|
def glance(self):
|
||||||
|
if self._glance:
|
||||||
|
return self._glance
|
||||||
|
|
||||||
|
glanceclient_version = self._get_client_option('glance', 'api_version')
|
||||||
|
self._glance = glclient.Client(glanceclient_version,
|
||||||
|
session=self.session)
|
||||||
|
return self._glance
|
||||||
|
|
||||||
|
@exception.wrap_keystone_exception
|
||||||
|
def cinder(self):
|
||||||
|
if self._cinder:
|
||||||
|
return self._cinder
|
||||||
|
|
||||||
|
cinderclient_version = self._get_client_option('cinder', 'api_version')
|
||||||
|
self._cinder = ciclient.Client(cinderclient_version,
|
||||||
|
session=self.session)
|
||||||
|
return self._cinder
|
||||||
|
|
||||||
|
@exception.wrap_keystone_exception
|
||||||
|
def ceilometer(self):
|
||||||
|
if self._ceilometer:
|
||||||
|
return self._ceilometer
|
||||||
|
|
||||||
|
ceilometerclient_version = self._get_client_option('ceilometer',
|
||||||
|
'api_version')
|
||||||
|
self._ceilometer = ceclient.get_client(ceilometerclient_version,
|
||||||
|
session=self.session)
|
||||||
|
return self._ceilometer
|
||||||
|
|
||||||
|
@exception.wrap_keystone_exception
|
||||||
|
def neutron(self):
|
||||||
|
if self._neutron:
|
||||||
|
return self._neutron
|
||||||
|
|
||||||
|
neutronclient_version = self._get_client_option('neutron',
|
||||||
|
'api_version')
|
||||||
|
self._neutron = netclient.Client(neutronclient_version,
|
||||||
|
session=self.session)
|
||||||
|
self._neutron.format = 'json'
|
||||||
|
return self._neutron
|
||||||
@@ -22,6 +22,8 @@ from watcher import version
|
|||||||
|
|
||||||
|
|
||||||
def parse_args(argv, default_config_files=None):
|
def parse_args(argv, default_config_files=None):
|
||||||
|
default_config_files = (default_config_files or
|
||||||
|
cfg.find_config_files(project='watcher'))
|
||||||
rpc.set_defaults(control_exchange='watcher')
|
rpc.set_defaults(control_exchange='watcher')
|
||||||
cfg.CONF(argv[1:],
|
cfg.CONF(argv[1:],
|
||||||
project='python-watcher',
|
project='python-watcher',
|
||||||
|
|||||||
@@ -22,6 +22,10 @@ SHOULD include dedicated exception logging.
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from keystoneclient import exceptions as keystone_exceptions
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
import six
|
import six
|
||||||
@@ -40,6 +44,23 @@ CONF = cfg.CONF
|
|||||||
CONF.register_opts(exc_log_opts)
|
CONF.register_opts(exc_log_opts)
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_keystone_exception(func):
|
||||||
|
"""Wrap keystone exceptions and throw Watcher specific exceptions."""
|
||||||
|
@functools.wraps(func)
|
||||||
|
def wrapped(*args, **kw):
|
||||||
|
try:
|
||||||
|
return func(*args, **kw)
|
||||||
|
except keystone_exceptions.AuthorizationFailure:
|
||||||
|
raise AuthorizationFailure(
|
||||||
|
client=func.__name__, reason=sys.exc_info()[1])
|
||||||
|
except keystone_exceptions.ClientException:
|
||||||
|
raise AuthorizationFailure(
|
||||||
|
client=func.__name__,
|
||||||
|
reason=(_('Unexpected keystone client error occurred: %s')
|
||||||
|
% sys.exc_info()[1]))
|
||||||
|
return wrapped
|
||||||
|
|
||||||
|
|
||||||
class WatcherException(Exception):
|
class WatcherException(Exception):
|
||||||
"""Base Watcher Exception
|
"""Base Watcher Exception
|
||||||
|
|
||||||
@@ -133,12 +154,6 @@ class InvalidGoal(Invalid):
|
|||||||
msg_fmt = _("Goal %(goal)s is not defined in Watcher configuration file")
|
msg_fmt = _("Goal %(goal)s is not defined in Watcher configuration file")
|
||||||
|
|
||||||
|
|
||||||
# Cannot be templated as the error syntax varies.
|
|
||||||
# msg needs to be constructed when raised.
|
|
||||||
class InvalidParameterValue(Invalid):
|
|
||||||
msg_fmt = _("%(err)s")
|
|
||||||
|
|
||||||
|
|
||||||
class InvalidUUID(Invalid):
|
class InvalidUUID(Invalid):
|
||||||
msg_fmt = _("Expected a uuid but received %(uuid)s")
|
msg_fmt = _("Expected a uuid but received %(uuid)s")
|
||||||
|
|
||||||
@@ -179,7 +194,7 @@ class AuditReferenced(Invalid):
|
|||||||
|
|
||||||
|
|
||||||
class ActionPlanNotFound(ResourceNotFound):
|
class ActionPlanNotFound(ResourceNotFound):
|
||||||
msg_fmt = _("ActionPlan %(action plan)s could not be found")
|
msg_fmt = _("ActionPlan %(action_plan)s could not be found")
|
||||||
|
|
||||||
|
|
||||||
class ActionPlanAlreadyExists(Conflict):
|
class ActionPlanAlreadyExists(Conflict):
|
||||||
@@ -219,6 +234,9 @@ class PatchError(Invalid):
|
|||||||
|
|
||||||
# decision engine
|
# decision engine
|
||||||
|
|
||||||
|
class WorkflowExecutionException(WatcherException):
|
||||||
|
msg_fmt = _('Workflow execution error: %(error)s')
|
||||||
|
|
||||||
|
|
||||||
class IllegalArgumentException(WatcherException):
|
class IllegalArgumentException(WatcherException):
|
||||||
msg_fmt = _('Illegal argument')
|
msg_fmt = _('Illegal argument')
|
||||||
@@ -232,6 +250,10 @@ class NoDataFound(WatcherException):
|
|||||||
msg_fmt = _('No rows were returned')
|
msg_fmt = _('No rows were returned')
|
||||||
|
|
||||||
|
|
||||||
|
class AuthorizationFailure(WatcherException):
|
||||||
|
msg_fmt = _('%(client)s connection failed. Reason: %(reason)s')
|
||||||
|
|
||||||
|
|
||||||
class KeystoneFailure(WatcherException):
|
class KeystoneFailure(WatcherException):
|
||||||
msg_fmt = _("'Keystone API endpoint is missing''")
|
msg_fmt = _("'Keystone API endpoint is missing''")
|
||||||
|
|
||||||
@@ -260,3 +282,15 @@ class HypervisorNotFound(WatcherException):
|
|||||||
|
|
||||||
class LoadingError(WatcherException):
|
class LoadingError(WatcherException):
|
||||||
msg_fmt = _("Error loading plugin '%(name)s'")
|
msg_fmt = _("Error loading plugin '%(name)s'")
|
||||||
|
|
||||||
|
|
||||||
|
class ReservedWord(WatcherException):
|
||||||
|
msg_fmt = _("The identifier '%(name)s' is a reserved word")
|
||||||
|
|
||||||
|
|
||||||
|
class NotSoftDeletedStateError(WatcherException):
|
||||||
|
msg_fmt = _("The %(name)s resource %(id)s is not soft deleted")
|
||||||
|
|
||||||
|
|
||||||
|
class NegativeLimitError(WatcherException):
|
||||||
|
msg_fmt = _("Limit should be positive")
|
||||||
|
|||||||
@@ -1,131 +0,0 @@
|
|||||||
# -*- encoding: utf-8 -*-
|
|
||||||
# Copyright (c) 2015 b<>com
|
|
||||||
#
|
|
||||||
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
#
|
|
||||||
|
|
||||||
from keystoneclient.auth.identity import generic
|
|
||||||
from keystoneclient import session as keystone_session
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
|
||||||
from six.moves.urllib.parse import urljoin
|
|
||||||
from six.moves.urllib.parse import urlparse
|
|
||||||
|
|
||||||
from watcher._i18n import _
|
|
||||||
from watcher.common import exception
|
|
||||||
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
CONF.import_opt('admin_user', 'keystonemiddleware.auth_token',
|
|
||||||
group='keystone_authtoken')
|
|
||||||
CONF.import_opt('admin_tenant_name', 'keystonemiddleware.auth_token',
|
|
||||||
group='keystone_authtoken')
|
|
||||||
CONF.import_opt('admin_password', 'keystonemiddleware.auth_token',
|
|
||||||
group='keystone_authtoken')
|
|
||||||
CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token',
|
|
||||||
group='keystone_authtoken')
|
|
||||||
CONF.import_opt('auth_version', 'keystonemiddleware.auth_token',
|
|
||||||
group='keystone_authtoken')
|
|
||||||
CONF.import_opt('insecure', 'keystonemiddleware.auth_token',
|
|
||||||
group='keystone_authtoken')
|
|
||||||
|
|
||||||
|
|
||||||
class KeystoneClient(object):
|
|
||||||
def __init__(self):
|
|
||||||
self._ks_client = None
|
|
||||||
self._session = None
|
|
||||||
self._auth = None
|
|
||||||
self._token = None
|
|
||||||
|
|
||||||
def get_endpoint(self, **kwargs):
|
|
||||||
kc = self.get_ksclient()
|
|
||||||
if not kc.has_service_catalog():
|
|
||||||
raise exception.KeystoneFailure(
|
|
||||||
_('No Keystone service catalog loaded')
|
|
||||||
)
|
|
||||||
attr = None
|
|
||||||
filter_value = None
|
|
||||||
if kwargs.get('region_name'):
|
|
||||||
attr = 'region'
|
|
||||||
filter_value = kwargs.get('region_name')
|
|
||||||
return kc.service_catalog.url_for(
|
|
||||||
service_type=kwargs.get('service_type') or 'metering',
|
|
||||||
attr=attr,
|
|
||||||
filter_value=filter_value,
|
|
||||||
endpoint_type=kwargs.get('endpoint_type') or 'publicURL')
|
|
||||||
|
|
||||||
def _is_apiv3(self, auth_url, auth_version):
|
|
||||||
return auth_version == 'v3.0' or '/v3' in urlparse(auth_url).path
|
|
||||||
|
|
||||||
def get_keystone_url(self, auth_url, auth_version):
|
|
||||||
"""Gives an http/https url to contact keystone."""
|
|
||||||
|
|
||||||
api_v3 = self._is_apiv3(auth_url, auth_version)
|
|
||||||
api_version = 'v3' if api_v3 else 'v2.0'
|
|
||||||
# NOTE(lucasagomes): Get rid of the trailing '/' otherwise urljoin()
|
|
||||||
# fails to override the version in the URL
|
|
||||||
return urljoin(auth_url.rstrip('/'), api_version)
|
|
||||||
|
|
||||||
def get_ksclient(self, creds=None):
|
|
||||||
"""Get an endpoint and auth token from Keystone."""
|
|
||||||
auth_version = CONF.keystone_authtoken.auth_version
|
|
||||||
auth_url = CONF.keystone_authtoken.auth_uri
|
|
||||||
api_v3 = self._is_apiv3(auth_url, auth_version)
|
|
||||||
if creds is None:
|
|
||||||
ks_args = self._get_credentials(api_v3)
|
|
||||||
else:
|
|
||||||
ks_args = creds
|
|
||||||
|
|
||||||
if api_v3:
|
|
||||||
from keystoneclient.v3 import client
|
|
||||||
else:
|
|
||||||
from keystoneclient.v2_0 import client
|
|
||||||
# generic
|
|
||||||
# ksclient = client.Client(version=api_version,
|
|
||||||
# session=session,**ks_args)
|
|
||||||
|
|
||||||
return client.Client(**ks_args)
|
|
||||||
|
|
||||||
def _get_credentials(self, api_v3):
|
|
||||||
if api_v3:
|
|
||||||
creds = \
|
|
||||||
{'auth_url': CONF.keystone_authtoken.auth_uri,
|
|
||||||
'username': CONF.keystone_authtoken.admin_user,
|
|
||||||
'password': CONF.keystone_authtoken.admin_password,
|
|
||||||
'project_name': CONF.keystone_authtoken.admin_tenant_name,
|
|
||||||
'user_domain_name': "default",
|
|
||||||
'project_domain_name': "default"}
|
|
||||||
else:
|
|
||||||
creds = \
|
|
||||||
{'auth_url': CONF.keystone_authtoken.auth_uri,
|
|
||||||
'username': CONF.keystone_authtoken.admin_user,
|
|
||||||
'password': CONF.keystone_authtoken.admin_password,
|
|
||||||
'tenant_name': CONF.keystone_authtoken.admin_tenant_name}
|
|
||||||
LOG.debug(creds)
|
|
||||||
return creds
|
|
||||||
|
|
||||||
def get_credentials(self):
|
|
||||||
api_v3 = self._is_apiv3(CONF.keystone_authtoken.auth_uri,
|
|
||||||
CONF.keystone_authtoken.auth_version)
|
|
||||||
return self._get_credentials(api_v3)
|
|
||||||
|
|
||||||
def get_session(self):
|
|
||||||
creds = self.get_credentials()
|
|
||||||
self._auth = generic.Password(**creds)
|
|
||||||
session = keystone_session.Session(auth=self._auth)
|
|
||||||
return session
|
|
||||||
@@ -31,7 +31,7 @@ class DefaultLoader(BaseLoader):
|
|||||||
super(DefaultLoader, self).__init__()
|
super(DefaultLoader, self).__init__()
|
||||||
self.namespace = namespace
|
self.namespace = namespace
|
||||||
|
|
||||||
def load(self, name):
|
def load(self, name, **kwargs):
|
||||||
try:
|
try:
|
||||||
LOG.debug("Loading in namespace %s => %s ", self.namespace, name)
|
LOG.debug("Loading in namespace %s => %s ", self.namespace, name)
|
||||||
driver_manager = DriverManager(namespace=self.namespace,
|
driver_manager = DriverManager(namespace=self.namespace,
|
||||||
@@ -41,7 +41,7 @@ class DefaultLoader(BaseLoader):
|
|||||||
LOG.exception(exc)
|
LOG.exception(exc)
|
||||||
raise exception.LoadingError(name=name)
|
raise exception.LoadingError(name=name)
|
||||||
|
|
||||||
return loaded()
|
return loaded(**kwargs)
|
||||||
|
|
||||||
def list_available(self):
|
def list_available(self):
|
||||||
extension_manager = ExtensionManager(namespace=self.namespace)
|
extension_manager = ExtensionManager(namespace=self.namespace)
|
||||||
|
|||||||
@@ -16,7 +16,7 @@
|
|||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.decision_engine.messaging.events import Events
|
from watcher.decision_engine.messaging import events as messaging_events
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
@@ -43,8 +43,8 @@ class EventDispatcher(object):
|
|||||||
"""
|
"""
|
||||||
Dispatch an instance of Event class
|
Dispatch an instance of Event class
|
||||||
"""
|
"""
|
||||||
if Events.ALL in self._events.keys():
|
if messaging_events.Events.ALL in self._events.keys():
|
||||||
listeners = self._events[Events.ALL]
|
listeners = self._events[messaging_events.Events.ALL]
|
||||||
for listener in listeners:
|
for listener in listeners:
|
||||||
listener(event)
|
listener(event)
|
||||||
|
|
||||||
|
|||||||
@@ -16,58 +16,100 @@
|
|||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
from watcher.common.messaging.events.event_dispatcher import \
|
import oslo_messaging as om
|
||||||
EventDispatcher
|
|
||||||
from watcher.common.messaging.messaging_handler import \
|
|
||||||
MessagingHandler
|
|
||||||
from watcher.common.rpc import RequestContextSerializer
|
|
||||||
|
|
||||||
from watcher.objects.base import WatcherObjectSerializer
|
from watcher.common.messaging.events import event_dispatcher as dispatcher
|
||||||
|
from watcher.common.messaging import messaging_handler
|
||||||
|
from watcher.common import rpc
|
||||||
|
|
||||||
|
from watcher.objects import base
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
|
||||||
class MessagingCore(EventDispatcher):
|
class MessagingCore(dispatcher.EventDispatcher):
|
||||||
|
|
||||||
API_VERSION = '1.0'
|
API_VERSION = '1.0'
|
||||||
|
|
||||||
def __init__(self, publisher_id, topic_control, topic_status,
|
def __init__(self, publisher_id, conductor_topic, status_topic,
|
||||||
api_version=API_VERSION):
|
api_version=API_VERSION):
|
||||||
super(MessagingCore, self).__init__()
|
super(MessagingCore, self).__init__()
|
||||||
self.serializer = RequestContextSerializer(WatcherObjectSerializer())
|
self.serializer = rpc.RequestContextSerializer(
|
||||||
|
base.WatcherObjectSerializer())
|
||||||
self.publisher_id = publisher_id
|
self.publisher_id = publisher_id
|
||||||
self.api_version = api_version
|
self.api_version = api_version
|
||||||
self.topic_control = self.build_topic(topic_control)
|
|
||||||
self.topic_status = self.build_topic(topic_status)
|
|
||||||
|
|
||||||
def build_topic(self, topic_name):
|
self.conductor_topic = conductor_topic
|
||||||
return MessagingHandler(self.publisher_id, topic_name, self,
|
self.status_topic = status_topic
|
||||||
self.api_version, self.serializer)
|
self.conductor_topic_handler = self.build_topic_handler(
|
||||||
|
conductor_topic)
|
||||||
|
self.status_topic_handler = self.build_topic_handler(status_topic)
|
||||||
|
|
||||||
|
self._conductor_client = None
|
||||||
|
self._status_client = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def conductor_client(self):
|
||||||
|
if self._conductor_client is None:
|
||||||
|
transport = om.get_transport(CONF)
|
||||||
|
target = om.Target(
|
||||||
|
topic=self.conductor_topic,
|
||||||
|
version=self.API_VERSION,
|
||||||
|
)
|
||||||
|
self._conductor_client = om.RPCClient(
|
||||||
|
transport, target, serializer=self.serializer)
|
||||||
|
return self._conductor_client
|
||||||
|
|
||||||
|
@conductor_client.setter
|
||||||
|
def conductor_client(self, c):
|
||||||
|
self.conductor_client = c
|
||||||
|
|
||||||
|
@property
|
||||||
|
def status_client(self):
|
||||||
|
if self._status_client is None:
|
||||||
|
transport = om.get_transport(CONF)
|
||||||
|
target = om.Target(
|
||||||
|
topic=self.status_topic,
|
||||||
|
version=self.API_VERSION,
|
||||||
|
)
|
||||||
|
self._status_client = om.RPCClient(
|
||||||
|
transport, target, serializer=self.serializer)
|
||||||
|
return self._status_client
|
||||||
|
|
||||||
|
@status_client.setter
|
||||||
|
def status_client(self, c):
|
||||||
|
self.status_client = c
|
||||||
|
|
||||||
|
def build_topic_handler(self, topic_name):
|
||||||
|
return messaging_handler.MessagingHandler(
|
||||||
|
self.publisher_id, topic_name, self,
|
||||||
|
self.api_version, self.serializer)
|
||||||
|
|
||||||
def connect(self):
|
def connect(self):
|
||||||
LOG.debug("Connecting to '%s' (%s)",
|
LOG.debug("Connecting to '%s' (%s)",
|
||||||
CONF.transport_url, CONF.rpc_backend)
|
CONF.transport_url, CONF.rpc_backend)
|
||||||
self.topic_control.start()
|
self.conductor_topic_handler.start()
|
||||||
self.topic_status.start()
|
self.status_topic_handler.start()
|
||||||
|
|
||||||
def disconnect(self):
|
def disconnect(self):
|
||||||
LOG.debug("Disconnecting from '%s' (%s)",
|
LOG.debug("Disconnecting from '%s' (%s)",
|
||||||
CONF.transport_url, CONF.rpc_backend)
|
CONF.transport_url, CONF.rpc_backend)
|
||||||
self.topic_control.stop()
|
self.conductor_topic_handler.stop()
|
||||||
self.topic_status.stop()
|
self.status_topic_handler.stop()
|
||||||
|
|
||||||
def publish_control(self, event, payload):
|
def publish_control(self, event, payload):
|
||||||
return self.topic_control.publish_event(event, payload)
|
return self.conductor_topic_handler.publish_event(event, payload)
|
||||||
|
|
||||||
def publish_status(self, event, payload, request_id=None):
|
def publish_status(self, event, payload, request_id=None):
|
||||||
return self.topic_status.publish_event(event, payload, request_id)
|
return self.status_topic_handler.publish_event(
|
||||||
|
event, payload, request_id)
|
||||||
|
|
||||||
def get_version(self):
|
def get_version(self):
|
||||||
return self.api_version
|
return self.api_version
|
||||||
|
|
||||||
def check_api_version(self, context):
|
def check_api_version(self, context):
|
||||||
api_manager_version = self.client.call(
|
api_manager_version = self.conductor_client.call(
|
||||||
context.to_dict(), 'check_api_version',
|
context.to_dict(), 'check_api_version',
|
||||||
api_version=self.api_version)
|
api_version=self.api_version)
|
||||||
return api_manager_version
|
return api_manager_version
|
||||||
|
|||||||
@@ -38,11 +38,11 @@ CONF = cfg.CONF
|
|||||||
|
|
||||||
class MessagingHandler(threading.Thread):
|
class MessagingHandler(threading.Thread):
|
||||||
|
|
||||||
def __init__(self, publisher_id, topic_watcher, endpoint, version,
|
def __init__(self, publisher_id, topic_name, endpoint, version,
|
||||||
serializer=None):
|
serializer=None):
|
||||||
super(MessagingHandler, self).__init__()
|
super(MessagingHandler, self).__init__()
|
||||||
self.publisher_id = publisher_id
|
self.publisher_id = publisher_id
|
||||||
self.topic_watcher = topic_watcher
|
self.topic_name = topic_name
|
||||||
self.__endpoints = []
|
self.__endpoints = []
|
||||||
self.__serializer = serializer
|
self.__serializer = serializer
|
||||||
self.__version = version
|
self.__version = version
|
||||||
@@ -72,7 +72,7 @@ class MessagingHandler(threading.Thread):
|
|||||||
return om.Notifier(
|
return om.Notifier(
|
||||||
self.__transport,
|
self.__transport,
|
||||||
publisher_id=self.publisher_id,
|
publisher_id=self.publisher_id,
|
||||||
topic=self.topic_watcher,
|
topic=self.topic_name,
|
||||||
serializer=serializer
|
serializer=serializer
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -87,7 +87,7 @@ class MessagingHandler(threading.Thread):
|
|||||||
self.__notifier = self.build_notifier()
|
self.__notifier = self.build_notifier()
|
||||||
if len(self.__endpoints):
|
if len(self.__endpoints):
|
||||||
target = om.Target(
|
target = om.Target(
|
||||||
topic=self.topic_watcher,
|
topic=self.topic_name,
|
||||||
# For compatibility, we can override it with 'host' opt
|
# For compatibility, we can override it with 'host' opt
|
||||||
server=CONF.host or socket.getfqdn(),
|
server=CONF.host or socket.getfqdn(),
|
||||||
version=self.__version,
|
version=self.__version,
|
||||||
@@ -101,7 +101,7 @@ class MessagingHandler(threading.Thread):
|
|||||||
LOG.error(_LE("Messaging configuration error"))
|
LOG.error(_LE("Messaging configuration error"))
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
LOG.debug("configure MessagingHandler for %s" % self.topic_watcher)
|
LOG.debug("configure MessagingHandler for %s" % self.topic_name)
|
||||||
self._configure()
|
self._configure()
|
||||||
if len(self.__endpoints) > 0:
|
if len(self.__endpoints) > 0:
|
||||||
LOG.debug("Starting up server")
|
LOG.debug("Starting up server")
|
||||||
|
|||||||
@@ -18,17 +18,16 @@ import eventlet
|
|||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
import oslo_messaging as messaging
|
import oslo_messaging as messaging
|
||||||
|
|
||||||
from watcher.common.messaging.utils.observable import \
|
from watcher.common.messaging.utils import observable
|
||||||
Observable
|
|
||||||
|
|
||||||
|
|
||||||
eventlet.monkey_patch()
|
eventlet.monkey_patch()
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class NotificationHandler(Observable):
|
class NotificationHandler(observable.Observable):
|
||||||
def __init__(self, publisher_id):
|
def __init__(self, publisher_id):
|
||||||
Observable.__init__(self)
|
super(NotificationHandler, self).__init__()
|
||||||
self.publisher_id = publisher_id
|
self.publisher_id = publisher_id
|
||||||
|
|
||||||
def info(self, ctx, publisher_id, event_type, payload, metadata):
|
def info(self, ctx, publisher_id, event_type, payload, metadata):
|
||||||
|
|||||||
@@ -14,19 +14,14 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
from oslo_log import log
|
from watcher.common.messaging.utils import synchronization
|
||||||
|
|
||||||
from watcher.common.messaging.utils.synchronization import \
|
|
||||||
Synchronization
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class Observable(Synchronization):
|
class Observable(synchronization.Synchronization):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
|
super(Observable, self).__init__()
|
||||||
self.__observers = []
|
self.__observers = []
|
||||||
self.changed = 0
|
self.changed = 0
|
||||||
Synchronization.__init__(self)
|
|
||||||
|
|
||||||
def set_changed(self):
|
def set_changed(self):
|
||||||
self.changed = 1
|
self.changed = 1
|
||||||
|
|||||||
@@ -23,29 +23,22 @@ import time
|
|||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
import cinderclient.exceptions as ciexceptions
|
import cinderclient.exceptions as ciexceptions
|
||||||
import cinderclient.v2.client as ciclient
|
|
||||||
import glanceclient.v2.client as glclient
|
|
||||||
import neutronclient.neutron.client as netclient
|
|
||||||
import novaclient.client as nvclient
|
|
||||||
import novaclient.exceptions as nvexceptions
|
import novaclient.exceptions as nvexceptions
|
||||||
|
|
||||||
from watcher.common import keystone
|
from watcher.common import clients
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class NovaClient(object):
|
class NovaHelper(object):
|
||||||
NOVA_CLIENT_API_VERSION = "2"
|
|
||||||
|
|
||||||
def __init__(self, creds, session):
|
def __init__(self, osc=None):
|
||||||
self.user = creds['username']
|
""":param osc: an OpenStackClients instance"""
|
||||||
self.session = session
|
self.osc = osc if osc else clients.OpenStackClients()
|
||||||
self.neutron = None
|
self.neutron = self.osc.neutron()
|
||||||
self.cinder = None
|
self.cinder = self.osc.cinder()
|
||||||
self.nova = nvclient.Client(self.NOVA_CLIENT_API_VERSION,
|
self.nova = self.osc.nova()
|
||||||
session=session)
|
self.glance = self.osc.glance()
|
||||||
self.keystone = keystone.KeystoneClient().get_ksclient(creds)
|
|
||||||
self.glance = None
|
|
||||||
|
|
||||||
def get_hypervisors_list(self):
|
def get_hypervisors_list(self):
|
||||||
return self.nova.hypervisors.list()
|
return self.nova.hypervisors.list()
|
||||||
@@ -180,9 +173,6 @@ class NovaClient(object):
|
|||||||
volume_id = attached_volume['id']
|
volume_id = attached_volume['id']
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if self.cinder is None:
|
|
||||||
self.cinder = ciclient.Client('2',
|
|
||||||
session=self.session)
|
|
||||||
volume = self.cinder.volumes.get(volume_id)
|
volume = self.cinder.volumes.get(volume_id)
|
||||||
|
|
||||||
attachments_list = getattr(volume, "attachments")
|
attachments_list = getattr(volume, "attachments")
|
||||||
@@ -275,58 +265,6 @@ class NovaClient(object):
|
|||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def built_in_non_live_migrate_instance(self, instance_id, hypervisor_id):
|
|
||||||
"""This method does a live migration of a given instance
|
|
||||||
|
|
||||||
This method uses the Nova built-in non-live migrate()
|
|
||||||
action to migrate a given instance.
|
|
||||||
It returns True if the migration was successful, False otherwise.
|
|
||||||
|
|
||||||
:param instance_id: the unique id of the instance to migrate.
|
|
||||||
"""
|
|
||||||
|
|
||||||
LOG.debug(
|
|
||||||
"Trying a Nova built-in non-live "
|
|
||||||
"migrate of instance %s ..." % instance_id)
|
|
||||||
|
|
||||||
# Looking for the instance to migrate
|
|
||||||
instance = self.find_instance(instance_id)
|
|
||||||
|
|
||||||
if not instance:
|
|
||||||
LOG.debug("Instance not found: %s" % instance_id)
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
|
||||||
LOG.debug(
|
|
||||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
|
||||||
|
|
||||||
instance.migrate()
|
|
||||||
|
|
||||||
# Poll at 5 second intervals, until the status is as expected
|
|
||||||
if self.wait_for_instance_status(instance,
|
|
||||||
('VERIFY_RESIZE', 'ERROR'),
|
|
||||||
5, 10):
|
|
||||||
|
|
||||||
instance = self.nova.servers.get(instance.id)
|
|
||||||
|
|
||||||
if instance.status == 'VERIFY_RESIZE':
|
|
||||||
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
|
||||||
LOG.debug(
|
|
||||||
"Instance %s has been successfully "
|
|
||||||
"migrated to host '%s'." % (
|
|
||||||
instance_id, host_name))
|
|
||||||
|
|
||||||
# We need to confirm that the resize() operation
|
|
||||||
# has succeeded in order to
|
|
||||||
# get back instance state to 'ACTIVE'
|
|
||||||
instance.confirm_resize()
|
|
||||||
|
|
||||||
return True
|
|
||||||
elif instance.status == 'ERROR':
|
|
||||||
LOG.debug("Instance %s migration failed" % instance_id)
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
def live_migrate_instance(self, instance_id, dest_hostname,
|
def live_migrate_instance(self, instance_id, dest_hostname,
|
||||||
block_migration=False, retry=120):
|
block_migration=False, retry=120):
|
||||||
"""This method does a live migration of a given instance
|
"""This method does a live migration of a given instance
|
||||||
@@ -446,13 +384,6 @@ class NovaClient(object):
|
|||||||
:param metadata: a dictionary containing the list of
|
:param metadata: a dictionary containing the list of
|
||||||
key-value pairs to associate to the image as metadata.
|
key-value pairs to associate to the image as metadata.
|
||||||
"""
|
"""
|
||||||
if self.glance is None:
|
|
||||||
glance_endpoint = self.keystone. \
|
|
||||||
service_catalog.url_for(service_type='image',
|
|
||||||
endpoint_type='publicURL')
|
|
||||||
self.glance = glclient.Client(glance_endpoint,
|
|
||||||
token=self.keystone.auth_token)
|
|
||||||
|
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Trying to create an image from instance %s ..." % instance_id)
|
"Trying to create an image from instance %s ..." % instance_id)
|
||||||
|
|
||||||
@@ -676,10 +607,6 @@ class NovaClient(object):
|
|||||||
|
|
||||||
def get_network_id_from_name(self, net_name="private"):
|
def get_network_id_from_name(self, net_name="private"):
|
||||||
"""This method returns the unique id of the provided network name"""
|
"""This method returns the unique id of the provided network name"""
|
||||||
if self.neutron is None:
|
|
||||||
self.neutron = netclient.Client('2.0', session=self.session)
|
|
||||||
self.neutron.format = 'json'
|
|
||||||
|
|
||||||
networks = self.neutron.list_networks(name=net_name)
|
networks = self.neutron.list_networks(name=net_name)
|
||||||
|
|
||||||
# LOG.debug(networks)
|
# LOG.debug(networks)
|
||||||
@@ -102,7 +102,7 @@ class RPCService(service.Service):
|
|||||||
'%(host)s.'),
|
'%(host)s.'),
|
||||||
{'service': self.topic, 'host': self.host})
|
{'service': self.topic, 'host': self.host})
|
||||||
|
|
||||||
def _handle_signal(self, signo, frame):
|
def _handle_signal(self):
|
||||||
LOG.info(_LI('Got signal SIGUSR1. Not deregistering on next shutdown '
|
LOG.info(_LI('Got signal SIGUSR1. Not deregistering on next shutdown '
|
||||||
'of service %(service)s on host %(host)s.'),
|
'of service %(service)s on host %(host)s.'),
|
||||||
{'service': self.topic, 'host': self.host})
|
{'service': self.topic, 'host': self.host})
|
||||||
|
|||||||
@@ -71,7 +71,7 @@ class BaseConnection(object):
|
|||||||
'name': 'example',
|
'name': 'example',
|
||||||
'description': 'free text description'
|
'description': 'free text description'
|
||||||
'host_aggregate': 'nova aggregate name or id'
|
'host_aggregate': 'nova aggregate name or id'
|
||||||
'goal': 'SERVER_CONSOLiDATION'
|
'goal': 'DUMMY'
|
||||||
'extra': {'automatic': True}
|
'extra': {'automatic': True}
|
||||||
}
|
}
|
||||||
:returns: An audit template.
|
:returns: An audit template.
|
||||||
@@ -98,7 +98,7 @@ class BaseConnection(object):
|
|||||||
:raises: AuditTemplateNotFound
|
:raises: AuditTemplateNotFound
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def get_audit_template_by__name(self, context, audit_template_name):
|
def get_audit_template_by_name(self, context, audit_template_name):
|
||||||
"""Return an audit template.
|
"""Return an audit template.
|
||||||
|
|
||||||
:param context: The security context
|
:param context: The security context
|
||||||
@@ -122,7 +122,7 @@ class BaseConnection(object):
|
|||||||
:param audit_template_id: The id or uuid of an audit template.
|
:param audit_template_id: The id or uuid of an audit template.
|
||||||
:returns: An audit template.
|
:returns: An audit template.
|
||||||
:raises: AuditTemplateNotFound
|
:raises: AuditTemplateNotFound
|
||||||
:raises: InvalidParameterValue
|
:raises: Invalid
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
@@ -209,7 +209,7 @@ class BaseConnection(object):
|
|||||||
:param audit_id: The id or uuid of an audit.
|
:param audit_id: The id or uuid of an audit.
|
||||||
:returns: An audit.
|
:returns: An audit.
|
||||||
:raises: AuditNotFound
|
:raises: AuditNotFound
|
||||||
:raises: InvalidParameterValue
|
:raises: Invalid
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def soft_delete_audit(self, audit_id):
|
def soft_delete_audit(self, audit_id):
|
||||||
@@ -299,6 +299,7 @@ class BaseConnection(object):
|
|||||||
:returns: A action.
|
:returns: A action.
|
||||||
:raises: ActionNotFound
|
:raises: ActionNotFound
|
||||||
:raises: ActionReferenced
|
:raises: ActionReferenced
|
||||||
|
:raises: Invalid
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
@@ -371,4 +372,5 @@ class BaseConnection(object):
|
|||||||
:returns: An action plan.
|
:returns: An action plan.
|
||||||
:raises: ActionPlanNotFound
|
:raises: ActionPlanNotFound
|
||||||
:raises: ActionPlanReferenced
|
:raises: ActionPlanReferenced
|
||||||
|
:raises: Invalid
|
||||||
"""
|
"""
|
||||||
|
|||||||
410
watcher/db/purge.py
Normal file
410
watcher/db/purge.py
Normal file
@@ -0,0 +1,410 @@
|
|||||||
|
# -*- encoding: utf-8 -*-
|
||||||
|
# Copyright (c) 2016 b<>com
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import datetime
|
||||||
|
import itertools
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
from oslo_utils import strutils
|
||||||
|
import prettytable as ptable
|
||||||
|
from six.moves import input
|
||||||
|
|
||||||
|
from watcher._i18n import _, _LI
|
||||||
|
from watcher.common import context
|
||||||
|
from watcher.common import exception
|
||||||
|
from watcher.common import utils
|
||||||
|
from watcher import objects
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class WatcherObjectsMap(object):
|
||||||
|
"""Wrapper to deal with watcher objects per type
|
||||||
|
|
||||||
|
This wrapper object contains a list of watcher objects per type.
|
||||||
|
Its main use is to simplify the merge of watcher objects by avoiding
|
||||||
|
duplicates, but also for representing the relationships between these
|
||||||
|
objects.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# This is for generating the .pot translations
|
||||||
|
keymap = collections.OrderedDict([
|
||||||
|
("audit_templates", _("Audit Templates")),
|
||||||
|
("audits", _("Audits")),
|
||||||
|
("action_plans", _("Action Plans")),
|
||||||
|
("actions", _("Actions")),
|
||||||
|
])
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
for attr_name in self.__class__.keys():
|
||||||
|
setattr(self, attr_name, [])
|
||||||
|
|
||||||
|
def values(self):
|
||||||
|
return (getattr(self, key) for key in self.__class__.keys())
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def keys(cls):
|
||||||
|
return cls.keymap.keys()
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
return itertools.chain(*self.values())
|
||||||
|
|
||||||
|
def __add__(self, other):
|
||||||
|
new_map = self.__class__()
|
||||||
|
|
||||||
|
# Merge the 2 items dicts into a new object (and avoid dupes)
|
||||||
|
for attr_name, initials, others in zip(self.keys(), self.values(),
|
||||||
|
other.values()):
|
||||||
|
# Creates a copy
|
||||||
|
merged = initials[:]
|
||||||
|
initials_ids = [item.id for item in initials]
|
||||||
|
non_dupes = [item for item in others
|
||||||
|
if item.id not in initials_ids]
|
||||||
|
merged += non_dupes
|
||||||
|
|
||||||
|
setattr(new_map, attr_name, merged)
|
||||||
|
|
||||||
|
return new_map
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
out = ""
|
||||||
|
for key, vals in zip(self.keys(), self.values()):
|
||||||
|
ids = [val.id for val in vals]
|
||||||
|
out += "%(key)s: %(val)s" % (dict(key=key, val=ids))
|
||||||
|
out += "\n"
|
||||||
|
return out
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
return sum(len(getattr(self, key)) for key in self.keys())
|
||||||
|
|
||||||
|
def get_count_table(self):
|
||||||
|
headers = list(self.keymap.values())
|
||||||
|
headers.append(_("Total")) # We also add a total count
|
||||||
|
counters = [len(cat_vals) for cat_vals in self.values()] + [len(self)]
|
||||||
|
table = ptable.PrettyTable(field_names=headers)
|
||||||
|
table.add_row(counters)
|
||||||
|
return table.get_string()
|
||||||
|
|
||||||
|
|
||||||
|
class PurgeCommand(object):
    """Purges the DB by removing soft deleted entries

    The workflow for this purge is the following:

    # Find soft deleted objects which are expired
    # Find orphan objects
    # Find their related objects whether they are expired or not
    # Merge them together
    # If it does not exceed the limit, destroy them all
    """

    # Shared admin context with show_deleted=True so queries can see
    # soft-deleted rows (normal contexts filter them out).
    ctx = context.make_context(show_deleted=True)

    def __init__(self, age_in_days=None, max_number=None,
                 uuid=None, exclude_orphans=False, dry_run=None):
        """Initialize the purge command.

        :param age_in_days: only purge entries soft-deleted more than this
            many days ago; None purges regardless of age
        :param max_number: upper bound on the number of objects to delete;
            None means no limit
        :param uuid: restrict the purge to the audit template with this UUID
        :param exclude_orphans: when True, do not purge orphaned objects
        :param dry_run: when truthy, only report what would be purged
        """
        self.age_in_days = age_in_days
        self.max_number = max_number
        self.uuid = uuid
        self.exclude_orphans = exclude_orphans
        self.dry_run = dry_run

        # Set by delete_up_to_max_prompt(); records the operator's answer.
        self._delete_up_to_max = None
        # Accumulates every object selected for deletion, by category.
        self._objects_map = WatcherObjectsMap()

    def get_expiry_date(self):
        """Compute the expiry cutoff date from ``age_in_days``.

        :returns: ``today - age_in_days`` as a datetime, or None when no
            age limit was requested (everything is then considered expired)
        """
        if not self.age_in_days:
            return None
        today = datetime.datetime.today()
        expiry_date = today - datetime.timedelta(days=self.age_in_days)
        return expiry_date

    @classmethod
    def get_audit_template_uuid(cls, uuid_or_name):
        """Resolve an audit template UUID or name to its UUID.

        :param uuid_or_name: audit template UUID or name; None is a no-op
        :returns: the audit template UUID, or None if input was None
        :raises: AuditTemplateNotFound if the lookup fails
        :raises: NotSoftDeletedStateError if the template is not
            soft-deleted (only soft-deleted templates may be purged)
        """
        if uuid_or_name is None:
            return

        query_func = None
        # Names that don't look like UUIDs are resolved by name instead.
        if not utils.is_uuid_like(uuid_or_name):
            query_func = objects.audit_template.AuditTemplate.get_by_name
        else:
            query_func = objects.audit_template.AuditTemplate.get_by_uuid

        try:
            audit_template = query_func(cls.ctx, uuid_or_name)
        except Exception as exc:
            # Any lookup failure is surfaced as "not found" to the caller.
            LOG.exception(exc)
            raise exception.AuditTemplateNotFound(audit_template=uuid_or_name)

        if not audit_template.deleted_at:
            raise exception.NotSoftDeletedStateError(
                name=_('Audit Template'), id=uuid_or_name)

        return audit_template.uuid

    def _find_audit_templates(self, filters=None):
        """List audit templates matching the given DB filters."""
        return objects.audit_template.AuditTemplate.list(
            self.ctx, filters=filters)

    def _find_audits(self, filters=None):
        """List audits matching the given DB filters."""
        return objects.audit.Audit.list(self.ctx, filters=filters)

    def _find_action_plans(self, filters=None):
        """List action plans matching the given DB filters."""
        return objects.action_plan.ActionPlan.list(self.ctx, filters=filters)

    def _find_actions(self, filters=None):
        """List actions matching the given DB filters."""
        return objects.action.Action.list(self.ctx, filters=filters)

    def _find_orphans(self):
        """Find non-deleted objects whose parent object no longer exists.

        Orphanhood cascades: an action plan whose audit is an orphan is
        itself an orphan, and likewise for actions of orphan action plans.

        :returns: a WatcherObjectsMap of the orphaned objects
        """
        orphans = WatcherObjectsMap()

        # Only live (non soft-deleted) objects can be orphans here.
        filters = dict(deleted=False)
        audit_templates = objects.audit_template.AuditTemplate.list(
            self.ctx, filters=filters)
        audits = objects.audit.Audit.list(self.ctx, filters=filters)
        action_plans = objects.action_plan.ActionPlan.list(
            self.ctx, filters=filters)
        actions = objects.action.Action.list(self.ctx, filters=filters)

        # An audit is an orphan when its parent template is gone.
        audit_template_ids = set(at.id for at in audit_templates)
        orphans.audits = [
            audit for audit in audits
            if audit.audit_template_id not in audit_template_ids]

        # Objects with orphan parents are themselves orphans
        audit_ids = [audit.id for audit in (a for a in audits
                                            if a not in orphans.audits)]
        orphans.action_plans = [
            ap for ap in action_plans
            if ap.audit_id not in audit_ids]

        # Objects with orphan parents are themselves orphans
        action_plan_ids = [ap.id for ap in (a for a in action_plans
                                            if a not in orphans.action_plans)]
        orphans.actions = [
            action for action in actions
            if action.action_plan_id not in action_plan_ids]

        LOG.debug("Orphans found:\n%s", orphans)
        LOG.info(_LI("Orphans found:\n%s"), orphans.get_count_table())

        return orphans

    def _find_soft_deleted_objects(self):
        """Find all soft-deleted objects eligible for the purge.

        Applies the optional UUID restriction and expiry-date cutoff,
        then pulls in related soft-deleted children as well.

        :returns: a WatcherObjectsMap of soft-deleted objects
        """
        to_be_deleted = WatcherObjectsMap()

        expiry_date = self.get_expiry_date()
        filters = dict(deleted=True)
        if self.uuid:
            filters["uuid"] = self.uuid
        if expiry_date:
            # Only objects deleted strictly before the cutoff qualify.
            filters.update(dict(deleted_at__lt=expiry_date))

        to_be_deleted.audit_templates.extend(
            self._find_audit_templates(filters))
        to_be_deleted.audits.extend(self._find_audits(filters))
        to_be_deleted.action_plans.extend(self._find_action_plans(filters))
        to_be_deleted.actions.extend(self._find_actions(filters))

        # Children of the matched objects must go too, expired or not,
        # as long as they are themselves soft-deleted.
        soft_deleted_objs = self._find_related_objects(
            to_be_deleted, base_filters=dict(deleted=True))

        LOG.debug("Soft deleted objects:\n%s", soft_deleted_objs)

        return soft_deleted_objs

    def _find_related_objects(self, objects_map, base_filters=None):
        """Expand a map with the children of every object it contains.

        Walks template -> audit -> action plan -> action, merging each
        level's children into ``objects_map`` (which is mutated in place
        via ``+=`` and also returned).

        :param objects_map: WatcherObjectsMap to expand
        :param base_filters: extra DB filters applied to every child query
        :returns: the expanded ``objects_map``
        """
        base_filters = base_filters or {}

        for audit_template in objects_map.audit_templates:
            filters = {}
            filters.update(base_filters)
            filters.update(dict(audit_template_id=audit_template.id))
            related_objs = WatcherObjectsMap()
            related_objs.audits = self._find_audits(filters)
            objects_map += related_objs

        for audit in objects_map.audits:
            filters = {}
            filters.update(base_filters)
            filters.update(dict(audit_id=audit.id))
            related_objs = WatcherObjectsMap()
            related_objs.action_plans = self._find_action_plans(filters)
            objects_map += related_objs

        for action_plan in objects_map.action_plans:
            filters = {}
            filters.update(base_filters)
            filters.update(dict(action_plan_id=action_plan.id))
            related_objs = WatcherObjectsMap()
            related_objs.actions = self._find_actions(filters)
            objects_map += related_objs

        return objects_map

    def confirmation_prompt(self):
        """Show the deletion summary and ask the operator to confirm.

        :returns: True when the operator answered yes
        """
        print(self._objects_map.get_count_table())
        raw_val = input(
            _("There are %(count)d objects set for deletion. "
              "Continue? [y/N]") % dict(count=len(self._objects_map)))

        return strutils.bool_from_string(raw_val)

    def delete_up_to_max_prompt(self, objects_map):
        """Warn that the limit is exceeded and ask whether to cap the purge.

        :param objects_map: the full set of candidate objects
        :returns: True when the operator accepts deleting only up to
            ``max_number`` objects
        """
        print(objects_map.get_count_table())
        print(_("The number of objects (%(num)s) to delete from the database "
                "exceeds the maximum number of objects (%(max_number)s) "
                "specified.") % dict(max_number=self.max_number,
                                     num=len(objects_map)))
        raw_val = input(
            _("Do you want to delete objects up to the specified maximum "
              "number? [y/N]"))

        self._delete_up_to_max = strutils.bool_from_string(raw_val)

        return self._delete_up_to_max

    def _aggregate_objects(self):
        """Objects aggregated on a 'per audit template' basis"""
        # todo: aggregate orphans as well
        aggregate = []
        for audit_template in self._objects_map.audit_templates:
            # One map per template, holding its whole descendant tree,
            # so the limit can be applied tree-by-tree (never splitting
            # a template from its children).
            related_objs = WatcherObjectsMap()
            related_objs.audit_templates = [audit_template]
            related_objs.audits = [
                audit for audit in self._objects_map.audits
                if audit.audit_template_id == audit_template.id
            ]
            audit_ids = [audit.id for audit in related_objs.audits]
            related_objs.action_plans = [
                action_plan for action_plan in self._objects_map.action_plans
                if action_plan.audit_id in audit_ids
            ]
            action_plan_ids = [
                action_plan.id for action_plan in related_objs.action_plans
            ]
            related_objs.actions = [
                action for action in self._objects_map.actions
                if action.action_plan_id in action_plan_ids
            ]
            aggregate.append(related_objs)

        return aggregate

    def _get_objects_up_to_limit(self):
        """Select whole per-template aggregates while staying under the cap.

        Aggregates are taken in order until adding the next one would
        exceed ``max_number``; the remainder is left untouched.

        :returns: a WatcherObjectsMap with at most ``max_number`` objects
        """
        aggregated_objects = self._aggregate_objects()
        to_be_deleted_subset = WatcherObjectsMap()

        for aggregate in aggregated_objects:
            if len(aggregate) + len(to_be_deleted_subset) <= self.max_number:
                to_be_deleted_subset += aggregate
            else:
                break

        LOG.debug(to_be_deleted_subset)
        return to_be_deleted_subset

    def find_objects_to_delete(self):
        """Finds all the objects to be purged

        :returns: A mapping with all the Watcher objects to purged
        :rtype: :py:class:`~.WatcherObjectsMap` instance
        """
        to_be_deleted = self._find_soft_deleted_objects()

        if not self.exclude_orphans:
            to_be_deleted += self._find_orphans()

        LOG.debug("Objects to be deleted:\n%s", to_be_deleted)

        return to_be_deleted

    def do_delete(self):
        """Destroy every selected object, children before parents."""
        LOG.info(_LI("Deleting..."))
        # Reversed to avoid errors with foreign keys
        for entry in reversed(list(self._objects_map)):
            entry.destroy()

    def execute(self):
        """Run the full purge workflow (see class docstring).

        Honors ``max_number`` (prompting to cap or abort when exceeded),
        ``dry_run`` (report only), and an interactive confirmation before
        any destructive work.
        """
        LOG.info(_LI("Starting purge command"))
        self._objects_map = self.find_objects_to_delete()

        if (self.max_number is not None and
                len(self._objects_map) > self.max_number):
            if self.delete_up_to_max_prompt(self._objects_map):
                self._objects_map = self._get_objects_up_to_limit()
            else:
                # Operator declined the capped purge: abort entirely.
                return

        _orphans_note = (_(" (orphans excluded)") if self.exclude_orphans
                         else _(" (may include orphans)"))
        if not self.dry_run and self.confirmation_prompt():
            self.do_delete()
            print(_("Purge results summary%s:") % _orphans_note)
            LOG.info(_LI("Purge results summary%s:"), _orphans_note)
        else:
            # Dry run or confirmation refused: report without deleting.
            LOG.debug(self._objects_map)
            print(_("Here below is a table containing the objects "
                    "that can be purged%s:") % _orphans_note)

        LOG.info("\n%s", self._objects_map.get_count_table())
        print(self._objects_map.get_count_table())
        LOG.info(_LI("Purge process completed"))
def purge(age_in_days, max_number, audit_template, exclude_orphans, dry_run):
    """Remove soft-deleted objects from the database.

    Entry point that validates the options, resolves the audit template,
    then delegates the actual work to :class:`PurgeCommand`. Any failure
    is logged, printed, and terminates the process with exit code 1.

    :param age_in_days: Number of days since deletion (from today)
        to exclude from the purge. If None, everything will be purged.
    :type age_in_days: int
    :param max_number: Max number of objects expected to be deleted.
        Prevents the deletion if exceeded. No limit if set to None.
    :type max_number: int
    :param audit_template: UUID or name of the audit template to purge.
    :type audit_template: str
    :param exclude_orphans: Flag to indicate whether or not you want to
        exclude orphans from deletion (default: False).
    :type exclude_orphans: bool
    :param dry_run: Flag to indicate whether or not you want to perform
        a dry run (no deletion).
    :type dry_run: bool
    """
    try:
        # A negative cap makes no sense; reject it up front.
        if max_number and max_number < 0:
            raise exception.NegativeLimitError

        LOG.info("[options] age_in_days = %s", age_in_days)
        LOG.info("[options] max_number = %s", max_number)
        LOG.info("[options] audit_template = %s", audit_template)
        LOG.info("[options] exclude_orphans = %s", exclude_orphans)
        LOG.info("[options] dry_run = %s", dry_run)

        audit_template_uuid = PurgeCommand.get_audit_template_uuid(
            audit_template)

        purge_command = PurgeCommand(age_in_days, max_number,
                                     audit_template_uuid,
                                     exclude_orphans, dry_run)
        purge_command.execute()

    except Exception as e:
        # CLI boundary: log the traceback, surface the message, fail hard.
        LOG.exception(e)
        print(e)
        sys.exit(1)
||||||
@@ -29,7 +29,10 @@ from watcher.common import exception
|
|||||||
from watcher.common import utils
|
from watcher.common import utils
|
||||||
from watcher.db import api
|
from watcher.db import api
|
||||||
from watcher.db.sqlalchemy import models
|
from watcher.db.sqlalchemy import models
|
||||||
|
from watcher.objects import action as action_objects
|
||||||
|
from watcher.objects import action_plan as ap_objects
|
||||||
from watcher.objects import audit as audit_objects
|
from watcher.objects import audit as audit_objects
|
||||||
|
from watcher.objects import utils as objutils
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
@@ -105,12 +108,88 @@ class Connection(api.BaseConnection):
|
|||||||
"""SqlAlchemy connection."""
|
"""SqlAlchemy connection."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
pass
|
super(Connection, self).__init__()
|
||||||
|
|
||||||
|
def __add_soft_delete_mixin_filters(self, query, filters, model):
|
||||||
|
if 'deleted' in filters:
|
||||||
|
if bool(filters['deleted']):
|
||||||
|
query = query.filter(model.deleted != 0)
|
||||||
|
else:
|
||||||
|
query = query.filter(model.deleted == 0)
|
||||||
|
if 'deleted_at__eq' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.deleted_at == objutils.datetime_or_str_or_none(
|
||||||
|
filters['deleted_at__eq']))
|
||||||
|
if 'deleted_at__gt' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.deleted_at > objutils.datetime_or_str_or_none(
|
||||||
|
filters['deleted_at__gt']))
|
||||||
|
if 'deleted_at__gte' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.deleted_at >= objutils.datetime_or_str_or_none(
|
||||||
|
filters['deleted_at__gte']))
|
||||||
|
if 'deleted_at__lt' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.deleted_at < objutils.datetime_or_str_or_none(
|
||||||
|
filters['deleted_at__lt']))
|
||||||
|
if 'deleted_at__lte' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.deleted_at <= objutils.datetime_or_str_or_none(
|
||||||
|
filters['deleted_at__lte']))
|
||||||
|
|
||||||
|
return query
|
||||||
|
|
||||||
|
def __add_timestamp_mixin_filters(self, query, filters, model):
|
||||||
|
if 'created_at__eq' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.created_at == objutils.datetime_or_str_or_none(
|
||||||
|
filters['created_at__eq']))
|
||||||
|
if 'created_at__gt' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.created_at > objutils.datetime_or_str_or_none(
|
||||||
|
filters['created_at__gt']))
|
||||||
|
if 'created_at__gte' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.created_at >= objutils.datetime_or_str_or_none(
|
||||||
|
filters['created_at__gte']))
|
||||||
|
if 'created_at__lt' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.created_at < objutils.datetime_or_str_or_none(
|
||||||
|
filters['created_at__lt']))
|
||||||
|
if 'created_at__lte' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.created_at <= objutils.datetime_or_str_or_none(
|
||||||
|
filters['created_at__lte']))
|
||||||
|
|
||||||
|
if 'updated_at__eq' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.updated_at == objutils.datetime_or_str_or_none(
|
||||||
|
filters['updated_at__eq']))
|
||||||
|
if 'updated_at__gt' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.updated_at > objutils.datetime_or_str_or_none(
|
||||||
|
filters['updated_at__gt']))
|
||||||
|
if 'updated_at__gte' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.updated_at >= objutils.datetime_or_str_or_none(
|
||||||
|
filters['updated_at__gte']))
|
||||||
|
if 'updated_at__lt' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.updated_at < objutils.datetime_or_str_or_none(
|
||||||
|
filters['updated_at__lt']))
|
||||||
|
if 'updated_at__lte' in filters:
|
||||||
|
query = query.filter(
|
||||||
|
model.updated_at <= objutils.datetime_or_str_or_none(
|
||||||
|
filters['updated_at__lte']))
|
||||||
|
|
||||||
|
return query
|
||||||
|
|
||||||
def _add_audit_templates_filters(self, query, filters):
|
def _add_audit_templates_filters(self, query, filters):
|
||||||
if filters is None:
|
if filters is None:
|
||||||
filters = []
|
filters = []
|
||||||
|
|
||||||
|
if 'uuid' in filters:
|
||||||
|
query = query.filter_by(uuid=filters['uuid'])
|
||||||
if 'name' in filters:
|
if 'name' in filters:
|
||||||
query = query.filter_by(name=filters['name'])
|
query = query.filter_by(name=filters['name'])
|
||||||
if 'host_aggregate' in filters:
|
if 'host_aggregate' in filters:
|
||||||
@@ -118,12 +197,19 @@ class Connection(api.BaseConnection):
|
|||||||
if 'goal' in filters:
|
if 'goal' in filters:
|
||||||
query = query.filter_by(goal=filters['goal'])
|
query = query.filter_by(goal=filters['goal'])
|
||||||
|
|
||||||
|
query = self.__add_soft_delete_mixin_filters(
|
||||||
|
query, filters, models.AuditTemplate)
|
||||||
|
query = self.__add_timestamp_mixin_filters(
|
||||||
|
query, filters, models.AuditTemplate)
|
||||||
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
def _add_audits_filters(self, query, filters):
|
def _add_audits_filters(self, query, filters):
|
||||||
if filters is None:
|
if filters is None:
|
||||||
filters = []
|
filters = []
|
||||||
|
|
||||||
|
if 'uuid' in filters:
|
||||||
|
query = query.filter_by(uuid=filters['uuid'])
|
||||||
if 'type' in filters:
|
if 'type' in filters:
|
||||||
query = query.filter_by(type=filters['type'])
|
query = query.filter_by(type=filters['type'])
|
||||||
if 'state' in filters:
|
if 'state' in filters:
|
||||||
@@ -144,12 +230,20 @@ class Connection(api.BaseConnection):
|
|||||||
query = query.filter(
|
query = query.filter(
|
||||||
models.AuditTemplate.name ==
|
models.AuditTemplate.name ==
|
||||||
filters['audit_template_name'])
|
filters['audit_template_name'])
|
||||||
|
|
||||||
|
query = self.__add_soft_delete_mixin_filters(
|
||||||
|
query, filters, models.Audit)
|
||||||
|
query = self.__add_timestamp_mixin_filters(
|
||||||
|
query, filters, models.Audit)
|
||||||
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
def _add_action_plans_filters(self, query, filters):
|
def _add_action_plans_filters(self, query, filters):
|
||||||
if filters is None:
|
if filters is None:
|
||||||
filters = []
|
filters = []
|
||||||
|
|
||||||
|
if 'uuid' in filters:
|
||||||
|
query = query.filter_by(uuid=filters['uuid'])
|
||||||
if 'state' in filters:
|
if 'state' in filters:
|
||||||
query = query.filter_by(state=filters['state'])
|
query = query.filter_by(state=filters['state'])
|
||||||
if 'audit_id' in filters:
|
if 'audit_id' in filters:
|
||||||
@@ -158,12 +252,20 @@ class Connection(api.BaseConnection):
|
|||||||
query = query.join(models.Audit,
|
query = query.join(models.Audit,
|
||||||
models.ActionPlan.audit_id == models.Audit.id)
|
models.ActionPlan.audit_id == models.Audit.id)
|
||||||
query = query.filter(models.Audit.uuid == filters['audit_uuid'])
|
query = query.filter(models.Audit.uuid == filters['audit_uuid'])
|
||||||
|
|
||||||
|
query = self.__add_soft_delete_mixin_filters(
|
||||||
|
query, filters, models.ActionPlan)
|
||||||
|
query = self.__add_timestamp_mixin_filters(
|
||||||
|
query, filters, models.ActionPlan)
|
||||||
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
def _add_actions_filters(self, query, filters):
|
def _add_actions_filters(self, query, filters):
|
||||||
if filters is None:
|
if filters is None:
|
||||||
filters = []
|
filters = []
|
||||||
|
|
||||||
|
if 'uuid' in filters:
|
||||||
|
query = query.filter_by(uuid=filters['uuid'])
|
||||||
if 'action_plan_id' in filters:
|
if 'action_plan_id' in filters:
|
||||||
query = query.filter_by(action_plan_id=filters['action_plan_id'])
|
query = query.filter_by(action_plan_id=filters['action_plan_id'])
|
||||||
if 'action_plan_uuid' in filters:
|
if 'action_plan_uuid' in filters:
|
||||||
@@ -184,6 +286,11 @@ class Connection(api.BaseConnection):
|
|||||||
if 'alarm' in filters:
|
if 'alarm' in filters:
|
||||||
query = query.filter_by(alarm=filters['alarm'])
|
query = query.filter_by(alarm=filters['alarm'])
|
||||||
|
|
||||||
|
query = self.__add_soft_delete_mixin_filters(
|
||||||
|
query, filters, models.Action)
|
||||||
|
query = self.__add_timestamp_mixin_filters(
|
||||||
|
query, filters, models.Action)
|
||||||
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
def get_audit_template_list(self, context, filters=None, limit=None,
|
def get_audit_template_list(self, context, filters=None, limit=None,
|
||||||
@@ -193,7 +300,6 @@ class Connection(api.BaseConnection):
|
|||||||
query = self._add_audit_templates_filters(query, filters)
|
query = self._add_audit_templates_filters(query, filters)
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
query = query.filter_by(deleted_at=None)
|
query = query.filter_by(deleted_at=None)
|
||||||
|
|
||||||
return _paginate_query(models.AuditTemplate, limit, marker,
|
return _paginate_query(models.AuditTemplate, limit, marker,
|
||||||
sort_key, sort_dir, query)
|
sort_key, sort_dir, query)
|
||||||
|
|
||||||
@@ -274,8 +380,9 @@ class Connection(api.BaseConnection):
|
|||||||
|
|
||||||
def update_audit_template(self, audit_template_id, values):
|
def update_audit_template(self, audit_template_id, values):
|
||||||
if 'uuid' in values:
|
if 'uuid' in values:
|
||||||
msg = _("Cannot overwrite UUID for an existing AuditTemplate.")
|
raise exception.Invalid(
|
||||||
raise exception.InvalidParameterValue(err=msg)
|
message=_("Cannot overwrite UUID for an existing "
|
||||||
|
"Audit Template."))
|
||||||
|
|
||||||
return self._do_update_audit_template(audit_template_id, values)
|
return self._do_update_audit_template(audit_template_id, values)
|
||||||
|
|
||||||
@@ -311,7 +418,8 @@ class Connection(api.BaseConnection):
|
|||||||
query = model_query(models.Audit)
|
query = model_query(models.Audit)
|
||||||
query = self._add_audits_filters(query, filters)
|
query = self._add_audits_filters(query, filters)
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
query = query.filter(~(models.Audit.state == 'DELETED'))
|
query = query.filter(
|
||||||
|
~(models.Audit.state == audit_objects.State.DELETED))
|
||||||
|
|
||||||
return _paginate_query(models.Audit, limit, marker,
|
return _paginate_query(models.Audit, limit, marker,
|
||||||
sort_key, sort_dir, query)
|
sort_key, sort_dir, query)
|
||||||
@@ -339,7 +447,7 @@ class Connection(api.BaseConnection):
|
|||||||
try:
|
try:
|
||||||
audit = query.one()
|
audit = query.one()
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
if audit.state == 'DELETED':
|
if audit.state == audit_objects.State.DELETED:
|
||||||
raise exception.AuditNotFound(audit=audit_id)
|
raise exception.AuditNotFound(audit=audit_id)
|
||||||
return audit
|
return audit
|
||||||
except exc.NoResultFound:
|
except exc.NoResultFound:
|
||||||
@@ -352,7 +460,7 @@ class Connection(api.BaseConnection):
|
|||||||
try:
|
try:
|
||||||
audit = query.one()
|
audit = query.one()
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
if audit.state == 'DELETED':
|
if audit.state == audit_objects.State.DELETED:
|
||||||
raise exception.AuditNotFound(audit=audit_uuid)
|
raise exception.AuditNotFound(audit=audit_uuid)
|
||||||
return audit
|
return audit
|
||||||
except exc.NoResultFound:
|
except exc.NoResultFound:
|
||||||
@@ -383,8 +491,9 @@ class Connection(api.BaseConnection):
|
|||||||
|
|
||||||
def update_audit(self, audit_id, values):
|
def update_audit(self, audit_id, values):
|
||||||
if 'uuid' in values:
|
if 'uuid' in values:
|
||||||
msg = _("Cannot overwrite UUID for an existing Audit.")
|
raise exception.Invalid(
|
||||||
raise exception.InvalidParameterValue(err=msg)
|
message=_("Cannot overwrite UUID for an existing "
|
||||||
|
"Audit."))
|
||||||
|
|
||||||
return self._do_update_audit(audit_id, values)
|
return self._do_update_audit(audit_id, values)
|
||||||
|
|
||||||
@@ -419,7 +528,8 @@ class Connection(api.BaseConnection):
|
|||||||
query = model_query(models.Action)
|
query = model_query(models.Action)
|
||||||
query = self._add_actions_filters(query, filters)
|
query = self._add_actions_filters(query, filters)
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
query = query.filter(~(models.Action.state == 'DELETED'))
|
query = query.filter(
|
||||||
|
~(models.Action.state == action_objects.State.DELETED))
|
||||||
return _paginate_query(models.Action, limit, marker,
|
return _paginate_query(models.Action, limit, marker,
|
||||||
sort_key, sort_dir, query)
|
sort_key, sort_dir, query)
|
||||||
|
|
||||||
@@ -442,7 +552,7 @@ class Connection(api.BaseConnection):
|
|||||||
try:
|
try:
|
||||||
action = query.one()
|
action = query.one()
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
if action.state == 'DELETED':
|
if action.state == action_objects.State.DELETED:
|
||||||
raise exception.ActionNotFound(
|
raise exception.ActionNotFound(
|
||||||
action=action_id)
|
action=action_id)
|
||||||
return action
|
return action
|
||||||
@@ -455,7 +565,7 @@ class Connection(api.BaseConnection):
|
|||||||
try:
|
try:
|
||||||
action = query.one()
|
action = query.one()
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
if action.state == 'DELETED':
|
if action.state == action_objects.State.DELETED:
|
||||||
raise exception.ActionNotFound(
|
raise exception.ActionNotFound(
|
||||||
action=action_uuid)
|
action=action_uuid)
|
||||||
return action
|
return action
|
||||||
@@ -474,8 +584,9 @@ class Connection(api.BaseConnection):
|
|||||||
def update_action(self, action_id, values):
|
def update_action(self, action_id, values):
|
||||||
# NOTE(dtantsur): this can lead to very strange errors
|
# NOTE(dtantsur): this can lead to very strange errors
|
||||||
if 'uuid' in values:
|
if 'uuid' in values:
|
||||||
msg = _("Cannot overwrite UUID for an existing Action.")
|
raise exception.Invalid(
|
||||||
raise exception.InvalidParameterValue(err=msg)
|
message=_("Cannot overwrite UUID for an existing "
|
||||||
|
"Action."))
|
||||||
|
|
||||||
return self._do_update_action(action_id, values)
|
return self._do_update_action(action_id, values)
|
||||||
|
|
||||||
@@ -511,7 +622,8 @@ class Connection(api.BaseConnection):
|
|||||||
query = model_query(models.ActionPlan)
|
query = model_query(models.ActionPlan)
|
||||||
query = self._add_action_plans_filters(query, filters)
|
query = self._add_action_plans_filters(query, filters)
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
query = query.filter(~(models.ActionPlan.state == 'DELETED'))
|
query = query.filter(
|
||||||
|
~(models.ActionPlan.state == ap_objects.State.DELETED))
|
||||||
|
|
||||||
return _paginate_query(models.ActionPlan, limit, marker,
|
return _paginate_query(models.ActionPlan, limit, marker,
|
||||||
sort_key, sort_dir, query)
|
sort_key, sort_dir, query)
|
||||||
@@ -536,7 +648,7 @@ class Connection(api.BaseConnection):
|
|||||||
try:
|
try:
|
||||||
action_plan = query.one()
|
action_plan = query.one()
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
if action_plan.state == 'DELETED':
|
if action_plan.state == ap_objects.State.DELETED:
|
||||||
raise exception.ActionPlanNotFound(
|
raise exception.ActionPlanNotFound(
|
||||||
action_plan=action_plan_id)
|
action_plan=action_plan_id)
|
||||||
return action_plan
|
return action_plan
|
||||||
@@ -550,7 +662,7 @@ class Connection(api.BaseConnection):
|
|||||||
try:
|
try:
|
||||||
action_plan = query.one()
|
action_plan = query.one()
|
||||||
if not context.show_deleted:
|
if not context.show_deleted:
|
||||||
if action_plan.state == 'DELETED':
|
if action_plan.state == ap_objects.State.DELETED:
|
||||||
raise exception.ActionPlanNotFound(
|
raise exception.ActionPlanNotFound(
|
||||||
action_plan=action_plan__uuid)
|
action_plan=action_plan__uuid)
|
||||||
return action_plan
|
return action_plan
|
||||||
@@ -583,8 +695,9 @@ class Connection(api.BaseConnection):
|
|||||||
|
|
||||||
def update_action_plan(self, action_plan_id, values):
|
def update_action_plan(self, action_plan_id, values):
|
||||||
if 'uuid' in values:
|
if 'uuid' in values:
|
||||||
msg = _("Cannot overwrite UUID for an existing Audit.")
|
raise exception.Invalid(
|
||||||
raise exception.InvalidParameterValue(err=msg)
|
message=_("Cannot overwrite UUID for an existing "
|
||||||
|
"Action Plan."))
|
||||||
|
|
||||||
return self._do_update_action_plan(action_plan_id, values)
|
return self._do_update_action_plan(action_plan_id, values)
|
||||||
|
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ def _alembic_config():
|
|||||||
return config
|
return config
|
||||||
|
|
||||||
|
|
||||||
def version(config=None, engine=None):
|
def version(engine=None):
|
||||||
"""Current database version.
|
"""Current database version.
|
||||||
|
|
||||||
:returns: Database version
|
:returns: Database version
|
||||||
|
|||||||
@@ -160,7 +160,6 @@ class Action(Base):
|
|||||||
nullable=False)
|
nullable=False)
|
||||||
# only for the first version
|
# only for the first version
|
||||||
action_type = Column(String(255), nullable=False)
|
action_type = Column(String(255), nullable=False)
|
||||||
applies_to = Column(String(255), nullable=True)
|
|
||||||
input_parameters = Column(JSONEncodedDict, nullable=True)
|
input_parameters = Column(JSONEncodedDict, nullable=True)
|
||||||
state = Column(String(20), nullable=True)
|
state = Column(String(20), nullable=True)
|
||||||
# todo(jed) remove parameter alarm
|
# todo(jed) remove parameter alarm
|
||||||
|
|||||||
@@ -54,8 +54,8 @@ class DefaultAuditHandler(base.BaseAuditHandler):
|
|||||||
event.data = {}
|
event.data = {}
|
||||||
payload = {'audit_uuid': audit_uuid,
|
payload = {'audit_uuid': audit_uuid,
|
||||||
'audit_status': status}
|
'audit_status': status}
|
||||||
self.messaging.topic_status.publish_event(event.type.name,
|
self.messaging.status_topic_handler.publish_event(
|
||||||
payload)
|
event.type.name, payload)
|
||||||
|
|
||||||
def update_audit_state(self, request_context, audit_uuid, state):
|
def update_audit_state(self, request_context, audit_uuid, state):
|
||||||
LOG.debug("Update audit state: %s", state)
|
LOG.debug("Update audit state: %s", state)
|
||||||
|
|||||||
@@ -40,20 +40,20 @@ See :doc:`../architecture` for more details on this component.
|
|||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.common.messaging.messaging_core import MessagingCore
|
from watcher.common.messaging import messaging_core
|
||||||
from watcher.decision_engine.messaging.audit_endpoint import AuditEndpoint
|
from watcher.decision_engine.messaging import audit_endpoint
|
||||||
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
WATCHER_DECISION_ENGINE_OPTS = [
|
WATCHER_DECISION_ENGINE_OPTS = [
|
||||||
cfg.StrOpt('topic_control',
|
cfg.StrOpt('conductor_topic',
|
||||||
default='watcher.decision.control',
|
default='watcher.decision.control',
|
||||||
help='The topic name used for'
|
help='The topic name used for'
|
||||||
'control events, this topic '
|
'control events, this topic '
|
||||||
'used for rpc call '),
|
'used for rpc call '),
|
||||||
cfg.StrOpt('topic_status',
|
cfg.StrOpt('status_topic',
|
||||||
default='watcher.decision.status',
|
default='watcher.decision.status',
|
||||||
help='The topic name used for '
|
help='The topic name used for '
|
||||||
'status events, this topic '
|
'status events, this topic '
|
||||||
@@ -78,18 +78,18 @@ CONF.register_group(decision_engine_opt_group)
|
|||||||
CONF.register_opts(WATCHER_DECISION_ENGINE_OPTS, decision_engine_opt_group)
|
CONF.register_opts(WATCHER_DECISION_ENGINE_OPTS, decision_engine_opt_group)
|
||||||
|
|
||||||
|
|
||||||
class DecisionEngineManager(MessagingCore):
|
class DecisionEngineManager(messaging_core.MessagingCore):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(DecisionEngineManager, self).__init__(
|
super(DecisionEngineManager, self).__init__(
|
||||||
CONF.watcher_decision_engine.publisher_id,
|
CONF.watcher_decision_engine.publisher_id,
|
||||||
CONF.watcher_decision_engine.topic_control,
|
CONF.watcher_decision_engine.conductor_topic,
|
||||||
CONF.watcher_decision_engine.topic_status,
|
CONF.watcher_decision_engine.status_topic,
|
||||||
api_version=self.API_VERSION)
|
api_version=self.API_VERSION)
|
||||||
endpoint = AuditEndpoint(self,
|
endpoint = audit_endpoint.AuditEndpoint(
|
||||||
max_workers=CONF.watcher_decision_engine.
|
self,
|
||||||
max_workers)
|
max_workers=CONF.watcher_decision_engine.max_workers)
|
||||||
self.topic_control.add_endpoint(endpoint)
|
self.conductor_topic_handler.add_endpoint(endpoint)
|
||||||
|
|
||||||
def join(self):
|
def join(self):
|
||||||
self.topic_control.join()
|
self.conductor_topic_handler.join()
|
||||||
self.topic_status.join()
|
self.status_topic_handler.join()
|
||||||
|
|||||||
@@ -16,11 +16,11 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
from concurrent.futures import ThreadPoolExecutor
|
from concurrent import futures
|
||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.decision_engine.audit.default import DefaultAuditHandler
|
from watcher.decision_engine.audit import default
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
@@ -28,7 +28,7 @@ LOG = log.getLogger(__name__)
|
|||||||
class AuditEndpoint(object):
|
class AuditEndpoint(object):
|
||||||
def __init__(self, messaging, max_workers):
|
def __init__(self, messaging, max_workers):
|
||||||
self._messaging = messaging
|
self._messaging = messaging
|
||||||
self._executor = ThreadPoolExecutor(max_workers=max_workers)
|
self._executor = futures.ThreadPoolExecutor(max_workers=max_workers)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def executor(self):
|
def executor(self):
|
||||||
@@ -39,7 +39,7 @@ class AuditEndpoint(object):
|
|||||||
return self._messaging
|
return self._messaging
|
||||||
|
|
||||||
def do_trigger_audit(self, context, audit_uuid):
|
def do_trigger_audit(self, context, audit_uuid):
|
||||||
audit = DefaultAuditHandler(self.messaging)
|
audit = default.DefaultAuditHandler(self.messaging)
|
||||||
audit.execute(audit_uuid, context)
|
audit.execute(audit_uuid, context)
|
||||||
|
|
||||||
def trigger_audit(self, context, audit_uuid):
|
def trigger_audit(self, context, audit_uuid):
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ class ResourceType(Enum):
|
|||||||
cpu_cores = 'num_cores'
|
cpu_cores = 'num_cores'
|
||||||
memory = 'memory'
|
memory = 'memory'
|
||||||
disk = 'disk'
|
disk = 'disk'
|
||||||
|
disk_capacity = 'disk_capacity'
|
||||||
|
|
||||||
|
|
||||||
class Resource(object):
|
class Resource(object):
|
||||||
|
|||||||
@@ -50,11 +50,13 @@ class BasePlanner(object):
|
|||||||
def schedule(self, context, audit_uuid, solution):
|
def schedule(self, context, audit_uuid, solution):
|
||||||
"""The planner receives a solution to schedule
|
"""The planner receives a solution to schedule
|
||||||
|
|
||||||
:param solution: the solution given by the strategy to
|
:param solution: A solution provided by a strategy for scheduling
|
||||||
|
:type solution: :py:class:`~.BaseSolution` subclass instance
|
||||||
:param audit_uuid: the audit uuid
|
:param audit_uuid: the audit uuid
|
||||||
:return: ActionPlan ordered sequence of change requests
|
:type audit_uuid: str
|
||||||
such that all security, dependency, and performance
|
:return: Action plan with an ordered sequence of actions such that all
|
||||||
requirements are met.
|
security, dependency, and performance requirements are met.
|
||||||
|
:rtype: :py:class:`watcher.objects.action_plan.ActionPlan` instance
|
||||||
"""
|
"""
|
||||||
# example: directed acyclic graph
|
# example: directed acyclic graph
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|||||||
@@ -28,6 +28,13 @@ LOG = log.getLogger(__name__)
|
|||||||
|
|
||||||
|
|
||||||
class DefaultPlanner(base.BasePlanner):
|
class DefaultPlanner(base.BasePlanner):
|
||||||
|
"""Default planner implementation
|
||||||
|
|
||||||
|
This implementation comes with basic rules with a fixed set of action types
|
||||||
|
that are weighted. An action having a lower weight will be scheduled before
|
||||||
|
the other ones.
|
||||||
|
"""
|
||||||
|
|
||||||
priorities = {
|
priorities = {
|
||||||
'nop': 0,
|
'nop': 0,
|
||||||
'sleep': 1,
|
'sleep': 1,
|
||||||
@@ -38,14 +45,12 @@ class DefaultPlanner(base.BasePlanner):
|
|||||||
def create_action(self,
|
def create_action(self,
|
||||||
action_plan_id,
|
action_plan_id,
|
||||||
action_type,
|
action_type,
|
||||||
applies_to,
|
|
||||||
input_parameters=None):
|
input_parameters=None):
|
||||||
uuid = utils.generate_uuid()
|
uuid = utils.generate_uuid()
|
||||||
action = {
|
action = {
|
||||||
'uuid': uuid,
|
'uuid': uuid,
|
||||||
'action_plan_id': int(action_plan_id),
|
'action_plan_id': int(action_plan_id),
|
||||||
'action_type': action_type,
|
'action_type': action_type,
|
||||||
'applies_to': applies_to,
|
|
||||||
'input_parameters': input_parameters,
|
'input_parameters': input_parameters,
|
||||||
'state': objects.action.State.PENDING,
|
'state': objects.action.State.PENDING,
|
||||||
'alarm': None,
|
'alarm': None,
|
||||||
@@ -63,8 +68,6 @@ class DefaultPlanner(base.BasePlanner):
|
|||||||
json_action = self.create_action(action_plan_id=action_plan.id,
|
json_action = self.create_action(action_plan_id=action_plan.id,
|
||||||
action_type=action.get(
|
action_type=action.get(
|
||||||
'action_type'),
|
'action_type'),
|
||||||
applies_to=action.get(
|
|
||||||
'applies_to'),
|
|
||||||
input_parameters=action.get(
|
input_parameters=action.get(
|
||||||
'input_parameters'))
|
'input_parameters'))
|
||||||
to_schedule.append((self.priorities[action.get('action_type')],
|
to_schedule.append((self.priorities[action.get('action_type')],
|
||||||
|
|||||||
@@ -19,10 +19,10 @@
|
|||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
import oslo_messaging as om
|
|
||||||
|
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common.messaging.messaging_core import MessagingCore
|
from watcher.common.messaging import messaging_core
|
||||||
|
from watcher.common.messaging import notification_handler
|
||||||
from watcher.common import utils
|
from watcher.common import utils
|
||||||
from watcher.decision_engine.manager import decision_engine_opt_group
|
from watcher.decision_engine.manager import decision_engine_opt_group
|
||||||
from watcher.decision_engine.manager import WATCHER_DECISION_ENGINE_OPTS
|
from watcher.decision_engine.manager import WATCHER_DECISION_ENGINE_OPTS
|
||||||
@@ -35,27 +35,22 @@ CONF.register_group(decision_engine_opt_group)
|
|||||||
CONF.register_opts(WATCHER_DECISION_ENGINE_OPTS, decision_engine_opt_group)
|
CONF.register_opts(WATCHER_DECISION_ENGINE_OPTS, decision_engine_opt_group)
|
||||||
|
|
||||||
|
|
||||||
class DecisionEngineAPI(MessagingCore):
|
class DecisionEngineAPI(messaging_core.MessagingCore):
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(DecisionEngineAPI, self).__init__(
|
super(DecisionEngineAPI, self).__init__(
|
||||||
CONF.watcher_decision_engine.publisher_id,
|
CONF.watcher_decision_engine.publisher_id,
|
||||||
CONF.watcher_decision_engine.topic_control,
|
CONF.watcher_decision_engine.conductor_topic,
|
||||||
CONF.watcher_decision_engine.topic_status,
|
CONF.watcher_decision_engine.status_topic,
|
||||||
api_version=self.API_VERSION,
|
api_version=self.API_VERSION,
|
||||||
)
|
)
|
||||||
|
self.handler = notification_handler.NotificationHandler(
|
||||||
transport = om.get_transport(CONF)
|
self.publisher_id)
|
||||||
target = om.Target(
|
self.status_topic_handler.add_endpoint(self.handler)
|
||||||
topic=CONF.watcher_decision_engine.topic_control,
|
|
||||||
version=self.API_VERSION,
|
|
||||||
)
|
|
||||||
self.client = om.RPCClient(transport, target,
|
|
||||||
serializer=self.serializer)
|
|
||||||
|
|
||||||
def trigger_audit(self, context, audit_uuid=None):
|
def trigger_audit(self, context, audit_uuid=None):
|
||||||
if not utils.is_uuid_like(audit_uuid):
|
if not utils.is_uuid_like(audit_uuid):
|
||||||
raise exception.InvalidUuidOrName(name=audit_uuid)
|
raise exception.InvalidUuidOrName(name=audit_uuid)
|
||||||
|
|
||||||
return self.client.call(
|
return self.conductor_client.call(
|
||||||
context.to_dict(), 'trigger_audit', audit_uuid=audit_uuid)
|
context.to_dict(), 'trigger_audit', audit_uuid=audit_uuid)
|
||||||
|
|||||||
@@ -87,13 +87,13 @@ class BaseSolution(object):
|
|||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def add_action(self,
|
def add_action(self,
|
||||||
action_type,
|
action_type,
|
||||||
applies_to,
|
resource_id,
|
||||||
input_parameters=None):
|
input_parameters=None):
|
||||||
"""Add a new Action in the Action Plan
|
"""Add a new Action in the Action Plan
|
||||||
|
|
||||||
:param action_type: the unique id of an action type defined in
|
:param action_type: the unique id of an action type defined in
|
||||||
entry point 'watcher_actions'
|
entry point 'watcher_actions'
|
||||||
:param applies_to: the unique id of the resource to which the
|
:param resource_id: the unique id of the resource to which the
|
||||||
`Action` applies.
|
`Action` applies.
|
||||||
:param input_parameters: An array of input parameters provided as
|
:param input_parameters: An array of input parameters provided as
|
||||||
key-value pairs of strings. Each key-pair contains names and
|
key-value pairs of strings. Each key-pair contains names and
|
||||||
|
|||||||
@@ -18,12 +18,14 @@
|
|||||||
#
|
#
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.decision_engine.solution.base import BaseSolution
|
from watcher.applier.actions import base as baction
|
||||||
|
from watcher.common import exception
|
||||||
|
from watcher.decision_engine.solution import base
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class DefaultSolution(BaseSolution):
|
class DefaultSolution(base.BaseSolution):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
"""Stores a set of actions generated by a strategy
|
"""Stores a set of actions generated by a strategy
|
||||||
|
|
||||||
@@ -34,12 +36,17 @@ class DefaultSolution(BaseSolution):
|
|||||||
self._actions = []
|
self._actions = []
|
||||||
|
|
||||||
def add_action(self, action_type,
|
def add_action(self, action_type,
|
||||||
applies_to,
|
input_parameters=None,
|
||||||
input_parameters=None):
|
resource_id=None):
|
||||||
# todo(jed) add https://pypi.python.org/pypi/schema
|
|
||||||
|
if input_parameters is not None:
|
||||||
|
if baction.BaseAction.RESOURCE_ID in input_parameters.keys():
|
||||||
|
raise exception.ReservedWord(name=baction.BaseAction.
|
||||||
|
RESOURCE_ID)
|
||||||
|
if resource_id is not None:
|
||||||
|
input_parameters[baction.BaseAction.RESOURCE_ID] = resource_id
|
||||||
action = {
|
action = {
|
||||||
'action_type': action_type,
|
'action_type': action_type,
|
||||||
'applies_to': applies_to,
|
|
||||||
'input_parameters': input_parameters
|
'input_parameters': input_parameters
|
||||||
}
|
}
|
||||||
self._actions.append(action)
|
self._actions.append(action)
|
||||||
|
|||||||
@@ -17,10 +17,10 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
from enum import Enum
|
import enum
|
||||||
|
|
||||||
|
|
||||||
class StrategyLevel(Enum):
|
class StrategyLevel(enum.Enum):
|
||||||
conservative = "conservative"
|
conservative = "conservative"
|
||||||
balanced = "balanced"
|
balanced = "balanced"
|
||||||
growth = "growth"
|
growth = "growth"
|
||||||
|
|||||||
@@ -15,22 +15,22 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.decision_engine.strategy.context.base import BaseStrategyContext
|
from watcher.common import clients
|
||||||
from watcher.decision_engine.strategy.selection.default import \
|
from watcher.decision_engine.strategy.context import base
|
||||||
DefaultStrategySelector
|
from watcher.decision_engine.strategy.selection import default
|
||||||
from watcher.metrics_engine.cluster_model_collector.manager import \
|
from watcher.metrics_engine.cluster_model_collector import manager
|
||||||
CollectorManager
|
|
||||||
from watcher import objects
|
from watcher import objects
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class DefaultStrategyContext(BaseStrategyContext):
|
class DefaultStrategyContext(base.BaseStrategyContext):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(DefaultStrategyContext, self).__init__()
|
super(DefaultStrategyContext, self).__init__()
|
||||||
LOG.debug("Initializing Strategy Context")
|
LOG.debug("Initializing Strategy Context")
|
||||||
self._strategy_selector = DefaultStrategySelector()
|
self._strategy_selector = default.DefaultStrategySelector()
|
||||||
self._collector_manager = CollectorManager()
|
self._collector_manager = manager.CollectorManager()
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def collector(self):
|
def collector(self):
|
||||||
@@ -47,15 +47,17 @@ class DefaultStrategyContext(BaseStrategyContext):
|
|||||||
audit_template = objects.\
|
audit_template = objects.\
|
||||||
AuditTemplate.get_by_id(request_context, audit.audit_template_id)
|
AuditTemplate.get_by_id(request_context, audit.audit_template_id)
|
||||||
|
|
||||||
|
osc = clients.OpenStackClients()
|
||||||
|
|
||||||
# todo(jed) retrieve in audit_template parameters (threshold,...)
|
# todo(jed) retrieve in audit_template parameters (threshold,...)
|
||||||
# todo(jed) create ActionPlan
|
# todo(jed) create ActionPlan
|
||||||
collector_manager = self.collector.get_cluster_model_collector()
|
collector_manager = self.collector.get_cluster_model_collector(osc=osc)
|
||||||
|
|
||||||
# todo(jed) remove call to get_latest_cluster_data_model
|
# todo(jed) remove call to get_latest_cluster_data_model
|
||||||
cluster_data_model = collector_manager.get_latest_cluster_data_model()
|
cluster_data_model = collector_manager.get_latest_cluster_data_model()
|
||||||
|
|
||||||
selected_strategy = self.strategy_selector. \
|
selected_strategy = self.strategy_selector.define_from_goal(
|
||||||
define_from_goal(audit_template.goal)
|
audit_template.goal, osc=osc)
|
||||||
|
|
||||||
# todo(jed) add parameters and remove cluster_data_model
|
# todo(jed) add parameters and remove cluster_data_model
|
||||||
return selected_strategy.execute(cluster_data_model)
|
return selected_strategy.execute(cluster_data_model)
|
||||||
|
|||||||
@@ -21,12 +21,12 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.common.loader.default import DefaultLoader
|
from watcher.common.loader import default
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class DefaultStrategyLoader(DefaultLoader):
|
class DefaultStrategyLoader(default.DefaultLoader):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(DefaultStrategyLoader, self).__init__(
|
super(DefaultStrategyLoader, self).__init__(
|
||||||
namespace='watcher_strategies')
|
namespace='watcher_strategies')
|
||||||
|
|||||||
@@ -48,11 +48,12 @@ class DefaultStrategySelector(base.BaseSelector):
|
|||||||
super(DefaultStrategySelector, self).__init__()
|
super(DefaultStrategySelector, self).__init__()
|
||||||
self.strategy_loader = default.DefaultStrategyLoader()
|
self.strategy_loader = default.DefaultStrategyLoader()
|
||||||
|
|
||||||
def define_from_goal(self, goal_name):
|
def define_from_goal(self, goal_name, osc=None):
|
||||||
|
""":param osc: an OpenStackClients instance"""
|
||||||
strategy_to_load = None
|
strategy_to_load = None
|
||||||
try:
|
try:
|
||||||
strategy_to_load = CONF.watcher_goals.goals[goal_name]
|
strategy_to_load = CONF.watcher_goals.goals[goal_name]
|
||||||
return self.strategy_loader.load(strategy_to_load)
|
return self.strategy_loader.load(strategy_to_load, osc=osc)
|
||||||
except KeyError as exc:
|
except KeyError as exc:
|
||||||
LOG.exception(exc)
|
LOG.exception(exc)
|
||||||
raise exception.WatcherException(
|
raise exception.WatcherException(
|
||||||
|
|||||||
@@ -33,12 +33,13 @@ provided as well.
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
import abc
|
import abc
|
||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
import six
|
import six
|
||||||
|
|
||||||
|
from watcher.common import clients
|
||||||
from watcher.decision_engine.solution.default import DefaultSolution
|
from watcher.decision_engine.solution import default
|
||||||
from watcher.decision_engine.strategy.common.level import StrategyLevel
|
from watcher.decision_engine.strategy.common import level
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
@@ -51,25 +52,33 @@ class BaseStrategy(object):
|
|||||||
Solution for a given Goal.
|
Solution for a given Goal.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, name=None, description=None):
|
def __init__(self, name=None, description=None, osc=None):
|
||||||
|
""":param osc: an OpenStackClients instance"""
|
||||||
self._name = name
|
self._name = name
|
||||||
self.description = description
|
self.description = description
|
||||||
# default strategy level
|
# default strategy level
|
||||||
self._strategy_level = StrategyLevel.conservative
|
self._strategy_level = level.StrategyLevel.conservative
|
||||||
self._cluster_state_collector = None
|
self._cluster_state_collector = None
|
||||||
# the solution given by the strategy
|
# the solution given by the strategy
|
||||||
self._solution = DefaultSolution()
|
self._solution = default.DefaultSolution()
|
||||||
|
self._osc = osc
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def execute(self, model):
|
def execute(self, original_model):
|
||||||
"""Execute a strategy
|
"""Execute a strategy
|
||||||
|
|
||||||
:param model: The name of the strategy to execute (loaded dynamically)
|
:param original_model: The model the strategy is executed on
|
||||||
:type model: str
|
:type model: str
|
||||||
:return: A computed solution (via a placement algorithm)
|
:return: A computed solution (via a placement algorithm)
|
||||||
:rtype: :class:`watcher.decision_engine.solution.base.BaseSolution`
|
:rtype: :class:`watcher.decision_engine.solution.base.BaseSolution`
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def osc(self):
|
||||||
|
if not self._osc:
|
||||||
|
self._osc = clients.OpenStackClients()
|
||||||
|
return self._osc
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def solution(self):
|
def solution(self):
|
||||||
return self._solution
|
return self._solution
|
||||||
|
|||||||
@@ -16,22 +16,55 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
"""
|
||||||
|
*Good server consolidation strategy*
|
||||||
|
|
||||||
|
Consolidation of VMs is essential to achieve energy optimization in cloud
|
||||||
|
environments such as OpenStack. As VMs are spinned up and/or moved over time,
|
||||||
|
it becomes necessary to migrate VMs among servers to lower the costs. However,
|
||||||
|
migration of VMs introduces runtime overheads and consumes extra energy, thus
|
||||||
|
a good server consolidation strategy should carefully plan for migration in
|
||||||
|
order to both minimize energy consumption and comply to the various SLAs.
|
||||||
|
"""
|
||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher._i18n import _LE, _LI, _LW
|
from watcher._i18n import _LE, _LI, _LW
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.decision_engine.model.hypervisor_state import HypervisorState
|
from watcher.decision_engine.model import hypervisor_state as hyper_state
|
||||||
from watcher.decision_engine.model.resource import ResourceType
|
from watcher.decision_engine.model import resource
|
||||||
from watcher.decision_engine.model.vm_state import VMState
|
from watcher.decision_engine.model import vm_state
|
||||||
from watcher.decision_engine.strategy.strategies.base import BaseStrategy
|
from watcher.decision_engine.strategy.strategies import base
|
||||||
from watcher.metrics_engine.cluster_history.ceilometer import \
|
from watcher.metrics_engine.cluster_history import ceilometer as \
|
||||||
CeilometerClusterHistory
|
ceilometer_cluster_history
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class BasicConsolidation(BaseStrategy):
|
class BasicConsolidation(base.BaseStrategy):
|
||||||
|
"""Basic offline consolidation using live migration
|
||||||
|
|
||||||
|
*Description*
|
||||||
|
|
||||||
|
This is server consolidation algorithm which not only minimizes the overall
|
||||||
|
number of used servers, but also minimizes the number of migrations.
|
||||||
|
|
||||||
|
*Requirements*
|
||||||
|
|
||||||
|
* You must have at least 2 physical compute nodes to run this strategy.
|
||||||
|
|
||||||
|
*Limitations*
|
||||||
|
|
||||||
|
- It has been developed only for tests.
|
||||||
|
- It assumes that the virtual machine and the compute node are on the same
|
||||||
|
private network.
|
||||||
|
- It assume that live migrations are possible
|
||||||
|
|
||||||
|
*Spec URL*
|
||||||
|
|
||||||
|
<None>
|
||||||
|
"""
|
||||||
|
|
||||||
DEFAULT_NAME = "basic"
|
DEFAULT_NAME = "basic"
|
||||||
DEFAULT_DESCRIPTION = "Basic offline consolidation"
|
DEFAULT_DESCRIPTION = "Basic offline consolidation"
|
||||||
|
|
||||||
@@ -41,35 +74,17 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
MIGRATION = "migrate"
|
MIGRATION = "migrate"
|
||||||
CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"
|
CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"
|
||||||
|
|
||||||
def __init__(self, name=DEFAULT_NAME, description=DEFAULT_DESCRIPTION):
|
def __init__(self, name=DEFAULT_NAME, description=DEFAULT_DESCRIPTION,
|
||||||
|
osc=None):
|
||||||
"""Basic offline Consolidation using live migration
|
"""Basic offline Consolidation using live migration
|
||||||
|
|
||||||
The basic consolidation algorithm has several limitations.
|
:param name: The name of the strategy (Default: "basic")
|
||||||
It has been developed only for tests.
|
:param description: The description of the strategy
|
||||||
eg: The BasicConsolidation assumes that the virtual mahine and
|
(Default: "Basic offline consolidation")
|
||||||
the compute node are on the same private network.
|
:param osc: An :py:class:`~watcher.common.clients.OpenStackClients`
|
||||||
|
instance
|
||||||
Good Strategy :
|
|
||||||
The workloads of the VMs are changing over the time
|
|
||||||
and often tend to migrate from one physical machine to another.
|
|
||||||
Hence, the traditional and offline heuristics such as bin packing
|
|
||||||
are not applicable for the placement VM in cloud computing.
|
|
||||||
So, the decision Engine optimizer provides placement strategy considering
|
|
||||||
not only the performance effects but also the workload characteristics of
|
|
||||||
VMs and others metrics like the power consumption and
|
|
||||||
the tenants constraints (SLAs).
|
|
||||||
|
|
||||||
The watcher optimizer uses an online VM placement technique
|
|
||||||
based on machine learning and meta-heuristics that must handle :
|
|
||||||
- multi-objectives
|
|
||||||
- Contradictory objectives
|
|
||||||
- Adapt to changes dynamically
|
|
||||||
- Fast convergence
|
|
||||||
|
|
||||||
:param name: the name of the strategy
|
|
||||||
:param description: a description of the strategy
|
|
||||||
"""
|
"""
|
||||||
super(BasicConsolidation, self).__init__(name, description)
|
super(BasicConsolidation, self).__init__(name, description, osc)
|
||||||
|
|
||||||
# set default value for the number of released nodes
|
# set default value for the number of released nodes
|
||||||
self.number_of_released_nodes = 0
|
self.number_of_released_nodes = 0
|
||||||
@@ -102,12 +117,13 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
@property
|
@property
|
||||||
def ceilometer(self):
|
def ceilometer(self):
|
||||||
if self._ceilometer is None:
|
if self._ceilometer is None:
|
||||||
self._ceilometer = CeilometerClusterHistory()
|
self._ceilometer = (ceilometer_cluster_history.
|
||||||
|
CeilometerClusterHistory(osc=self.osc))
|
||||||
return self._ceilometer
|
return self._ceilometer
|
||||||
|
|
||||||
@ceilometer.setter
|
@ceilometer.setter
|
||||||
def ceilometer(self, c):
|
def ceilometer(self, ceilometer):
|
||||||
self._ceilometer = c
|
self._ceilometer = ceilometer
|
||||||
|
|
||||||
def compute_attempts(self, size_cluster):
|
def compute_attempts(self, size_cluster):
|
||||||
"""Upper bound of the number of migration
|
"""Upper bound of the number of migration
|
||||||
@@ -116,13 +132,13 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
"""
|
"""
|
||||||
self.migration_attempts = size_cluster * self.bound_migration
|
self.migration_attempts = size_cluster * self.bound_migration
|
||||||
|
|
||||||
def check_migration(self, model,
|
def check_migration(self, cluster_data_model,
|
||||||
src_hypervisor,
|
src_hypervisor,
|
||||||
dest_hypervisor,
|
dest_hypervisor,
|
||||||
vm_to_mig):
|
vm_to_mig):
|
||||||
"""check if the migration is possible
|
"""check if the migration is possible
|
||||||
|
|
||||||
:param model: the current state of the cluster
|
:param cluster_data_model: the current state of the cluster
|
||||||
:param src_hypervisor: the current node of the virtual machine
|
:param src_hypervisor: the current node of the virtual machine
|
||||||
:param dest_hypervisor: the destination of the virtual machine
|
:param dest_hypervisor: the destination of the virtual machine
|
||||||
:param vm_to_mig: the virtual machine
|
:param vm_to_mig: the virtual machine
|
||||||
@@ -139,28 +155,32 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
total_cores = 0
|
total_cores = 0
|
||||||
total_disk = 0
|
total_disk = 0
|
||||||
total_mem = 0
|
total_mem = 0
|
||||||
cap_cores = model.get_resource_from_id(ResourceType.cpu_cores)
|
cpu_capacity = cluster_data_model.get_resource_from_id(
|
||||||
cap_disk = model.get_resource_from_id(ResourceType.disk)
|
resource.ResourceType.cpu_cores)
|
||||||
cap_mem = model.get_resource_from_id(ResourceType.memory)
|
disk_capacity = cluster_data_model.get_resource_from_id(
|
||||||
|
resource.ResourceType.disk)
|
||||||
|
memory_capacity = cluster_data_model.get_resource_from_id(
|
||||||
|
resource.ResourceType.memory)
|
||||||
|
|
||||||
for vm_id in model.get_mapping().get_node_vms(dest_hypervisor):
|
for vm_id in cluster_data_model. \
|
||||||
vm = model.get_vm_from_id(vm_id)
|
get_mapping().get_node_vms(dest_hypervisor):
|
||||||
total_cores += cap_cores.get_capacity(vm)
|
vm = cluster_data_model.get_vm_from_id(vm_id)
|
||||||
total_disk += cap_disk.get_capacity(vm)
|
total_cores += cpu_capacity.get_capacity(vm)
|
||||||
total_mem += cap_mem.get_capacity(vm)
|
total_disk += disk_capacity.get_capacity(vm)
|
||||||
|
total_mem += memory_capacity.get_capacity(vm)
|
||||||
|
|
||||||
# capacity requested by hypervisor
|
# capacity requested by hypervisor
|
||||||
total_cores += cap_cores.get_capacity(vm_to_mig)
|
total_cores += cpu_capacity.get_capacity(vm_to_mig)
|
||||||
total_disk += cap_disk.get_capacity(vm_to_mig)
|
total_disk += disk_capacity.get_capacity(vm_to_mig)
|
||||||
total_mem += cap_mem.get_capacity(vm_to_mig)
|
total_mem += memory_capacity.get_capacity(vm_to_mig)
|
||||||
|
|
||||||
return self.check_threshold(model,
|
return self.check_threshold(cluster_data_model,
|
||||||
dest_hypervisor,
|
dest_hypervisor,
|
||||||
total_cores,
|
total_cores,
|
||||||
total_disk,
|
total_disk,
|
||||||
total_mem)
|
total_mem)
|
||||||
|
|
||||||
def check_threshold(self, model,
|
def check_threshold(self, cluster_data_model,
|
||||||
dest_hypervisor,
|
dest_hypervisor,
|
||||||
total_cores,
|
total_cores,
|
||||||
total_disk,
|
total_disk,
|
||||||
@@ -170,23 +190,23 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
check the threshold value defined by the ratio of
|
check the threshold value defined by the ratio of
|
||||||
aggregated CPU capacity of VMs on one node to CPU capacity
|
aggregated CPU capacity of VMs on one node to CPU capacity
|
||||||
of this node must not exceed the threshold value.
|
of this node must not exceed the threshold value.
|
||||||
:param dest_hypervisor:
|
:param cluster_data_model: the current state of the cluster
|
||||||
|
:param dest_hypervisor: the destination of the virtual machine
|
||||||
:param total_cores
|
:param total_cores
|
||||||
:param total_disk
|
:param total_disk
|
||||||
:param total_mem
|
:param total_mem
|
||||||
:return: True if the threshold is not exceed
|
:return: True if the threshold is not exceed
|
||||||
"""
|
"""
|
||||||
cap_cores = model.get_resource_from_id(ResourceType.cpu_cores)
|
cpu_capacity = cluster_data_model.get_resource_from_id(
|
||||||
cap_disk = model.get_resource_from_id(ResourceType.disk)
|
resource.ResourceType.cpu_cores).get_capacity(dest_hypervisor)
|
||||||
cap_mem = model.get_resource_from_id(ResourceType.memory)
|
disk_capacity = cluster_data_model.get_resource_from_id(
|
||||||
# available
|
resource.ResourceType.disk).get_capacity(dest_hypervisor)
|
||||||
cores_available = cap_cores.get_capacity(dest_hypervisor)
|
memory_capacity = cluster_data_model.get_resource_from_id(
|
||||||
disk_available = cap_disk.get_capacity(dest_hypervisor)
|
resource.ResourceType.memory).get_capacity(dest_hypervisor)
|
||||||
mem_available = cap_mem.get_capacity(dest_hypervisor)
|
|
||||||
|
|
||||||
if cores_available >= total_cores * self.threshold_cores \
|
if (cpu_capacity >= total_cores * self.threshold_cores and
|
||||||
and disk_available >= total_disk * self.threshold_disk \
|
disk_capacity >= total_disk * self.threshold_disk and
|
||||||
and mem_available >= total_mem * self.threshold_mem:
|
memory_capacity >= total_mem * self.threshold_mem):
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
@@ -212,25 +232,26 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
def get_number_of_migrations(self):
|
def get_number_of_migrations(self):
|
||||||
return self.number_of_migrations
|
return self.number_of_migrations
|
||||||
|
|
||||||
def calculate_weight(self, model, element, total_cores_used,
|
def calculate_weight(self, cluster_data_model, element,
|
||||||
total_disk_used, total_memory_used):
|
total_cores_used, total_disk_used,
|
||||||
|
total_memory_used):
|
||||||
"""Calculate weight of every resource
|
"""Calculate weight of every resource
|
||||||
|
|
||||||
:param model:
|
:param cluster_data_model:
|
||||||
:param element:
|
:param element:
|
||||||
:param total_cores_used:
|
:param total_cores_used:
|
||||||
:param total_disk_used:
|
:param total_disk_used:
|
||||||
:param total_memory_used:
|
:param total_memory_used:
|
||||||
:return:
|
:return:
|
||||||
"""
|
"""
|
||||||
cpu_capacity = model.get_resource_from_id(
|
cpu_capacity = cluster_data_model.get_resource_from_id(
|
||||||
ResourceType.cpu_cores).get_capacity(element)
|
resource.ResourceType.cpu_cores).get_capacity(element)
|
||||||
|
|
||||||
disk_capacity = model.get_resource_from_id(
|
disk_capacity = cluster_data_model.get_resource_from_id(
|
||||||
ResourceType.disk).get_capacity(element)
|
resource.ResourceType.disk).get_capacity(element)
|
||||||
|
|
||||||
memory_capacity = model.get_resource_from_id(
|
memory_capacity = cluster_data_model.get_resource_from_id(
|
||||||
ResourceType.memory).get_capacity(element)
|
resource.ResourceType.memory).get_capacity(element)
|
||||||
|
|
||||||
score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
|
score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
|
||||||
float(cpu_capacity))
|
float(cpu_capacity))
|
||||||
@@ -256,25 +277,25 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
:return:
|
:return:
|
||||||
"""
|
"""
|
||||||
resource_id = "%s_%s" % (hypervisor.uuid, hypervisor.hostname)
|
resource_id = "%s_%s" % (hypervisor.uuid, hypervisor.hostname)
|
||||||
cpu_avg_vm = self.ceilometer. \
|
host_avg_cpu_util = self.ceilometer. \
|
||||||
statistic_aggregation(resource_id=resource_id,
|
statistic_aggregation(resource_id=resource_id,
|
||||||
meter_name=self.HOST_CPU_USAGE_METRIC_NAME,
|
meter_name=self.HOST_CPU_USAGE_METRIC_NAME,
|
||||||
period="7200",
|
period="7200",
|
||||||
aggregate='avg'
|
aggregate='avg'
|
||||||
)
|
)
|
||||||
if cpu_avg_vm is None:
|
if host_avg_cpu_util is None:
|
||||||
LOG.error(
|
LOG.error(
|
||||||
_LE("No values returned by %(resource_id)s "
|
_LE("No values returned by %(resource_id)s "
|
||||||
"for %(metric_name)s"),
|
"for %(metric_name)s"),
|
||||||
resource_id=resource_id,
|
resource_id=resource_id,
|
||||||
metric_name=self.HOST_CPU_USAGE_METRIC_NAME,
|
metric_name=self.HOST_CPU_USAGE_METRIC_NAME,
|
||||||
)
|
)
|
||||||
cpu_avg_vm = 100
|
host_avg_cpu_util = 100
|
||||||
|
|
||||||
cpu_capacity = model.get_resource_from_id(
|
cpu_capacity = model.get_resource_from_id(
|
||||||
ResourceType.cpu_cores).get_capacity(hypervisor)
|
resource.ResourceType.cpu_cores).get_capacity(hypervisor)
|
||||||
|
|
||||||
total_cores_used = cpu_capacity * (cpu_avg_vm / 100)
|
total_cores_used = cpu_capacity * (host_avg_cpu_util / 100)
|
||||||
|
|
||||||
return self.calculate_weight(model, hypervisor, total_cores_used,
|
return self.calculate_weight(model, hypervisor, total_cores_used,
|
||||||
0,
|
0,
|
||||||
@@ -292,14 +313,14 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
else:
|
else:
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
def calculate_score_vm(self, vm, model):
|
def calculate_score_vm(self, vm, cluster_data_model):
|
||||||
"""Calculate Score of virtual machine
|
"""Calculate Score of virtual machine
|
||||||
|
|
||||||
:param vm: the virtual machine
|
:param vm: the virtual machine
|
||||||
:param model: the model
|
:param cluster_data_model: the cluster model
|
||||||
:return: score
|
:return: score
|
||||||
"""
|
"""
|
||||||
if model is None:
|
if cluster_data_model is None:
|
||||||
raise exception.ClusterStateNotDefined()
|
raise exception.ClusterStateNotDefined()
|
||||||
|
|
||||||
vm_cpu_utilization = self.ceilometer. \
|
vm_cpu_utilization = self.ceilometer. \
|
||||||
@@ -318,23 +339,22 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
)
|
)
|
||||||
vm_cpu_utilization = 100
|
vm_cpu_utilization = 100
|
||||||
|
|
||||||
cpu_capacity = model.get_resource_from_id(
|
cpu_capacity = cluster_data_model.get_resource_from_id(
|
||||||
ResourceType.cpu_cores).get_capacity(vm)
|
resource.ResourceType.cpu_cores).get_capacity(vm)
|
||||||
|
|
||||||
total_cores_used = cpu_capacity * (vm_cpu_utilization / 100.0)
|
total_cores_used = cpu_capacity * (vm_cpu_utilization / 100.0)
|
||||||
|
|
||||||
return self.calculate_weight(model, vm, total_cores_used,
|
return self.calculate_weight(cluster_data_model, vm,
|
||||||
0,
|
total_cores_used, 0, 0)
|
||||||
0)
|
|
||||||
|
|
||||||
def add_change_service_state(self, applies_to, state):
|
def add_change_service_state(self, resource_id, state):
|
||||||
parameters = {'state': state}
|
parameters = {'state': state}
|
||||||
self.solution.add_action(action_type=self.CHANGE_NOVA_SERVICE_STATE,
|
self.solution.add_action(action_type=self.CHANGE_NOVA_SERVICE_STATE,
|
||||||
applies_to=applies_to,
|
resource_id=resource_id,
|
||||||
input_parameters=parameters)
|
input_parameters=parameters)
|
||||||
|
|
||||||
def add_migration(self,
|
def add_migration(self,
|
||||||
applies_to,
|
resource_id,
|
||||||
migration_type,
|
migration_type,
|
||||||
src_hypervisor,
|
src_hypervisor,
|
||||||
dst_hypervisor):
|
dst_hypervisor):
|
||||||
@@ -342,17 +362,19 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
'src_hypervisor': src_hypervisor,
|
'src_hypervisor': src_hypervisor,
|
||||||
'dst_hypervisor': dst_hypervisor}
|
'dst_hypervisor': dst_hypervisor}
|
||||||
self.solution.add_action(action_type=self.MIGRATION,
|
self.solution.add_action(action_type=self.MIGRATION,
|
||||||
applies_to=applies_to,
|
resource_id=resource_id,
|
||||||
input_parameters=parameters)
|
input_parameters=parameters)
|
||||||
|
|
||||||
def score_of_nodes(self, current_model, score):
|
def score_of_nodes(self, cluster_data_model, score):
|
||||||
"""Calculate score of nodes based on load by VMs"""
|
"""Calculate score of nodes based on load by VMs"""
|
||||||
for hypervisor_id in current_model.get_all_hypervisors():
|
for hypervisor_id in cluster_data_model.get_all_hypervisors():
|
||||||
hypervisor = current_model.get_hypervisor_from_id(hypervisor_id)
|
hypervisor = cluster_data_model. \
|
||||||
count = current_model.get_mapping(). \
|
get_hypervisor_from_id(hypervisor_id)
|
||||||
|
count = cluster_data_model.get_mapping(). \
|
||||||
get_node_vms_from_id(hypervisor_id)
|
get_node_vms_from_id(hypervisor_id)
|
||||||
if len(count) > 0:
|
if len(count) > 0:
|
||||||
result = self.calculate_score_node(hypervisor, current_model)
|
result = self.calculate_score_node(hypervisor,
|
||||||
|
cluster_data_model)
|
||||||
else:
|
else:
|
||||||
''' the hypervisor has not VMs '''
|
''' the hypervisor has not VMs '''
|
||||||
result = 0
|
result = 0
|
||||||
@@ -360,16 +382,16 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
score.append((hypervisor_id, result))
|
score.append((hypervisor_id, result))
|
||||||
return score
|
return score
|
||||||
|
|
||||||
def node_and_vm_score(self, s, score, current_model):
|
def node_and_vm_score(self, sorted_score, score, current_model):
|
||||||
"""Get List of VMs from Node"""
|
"""Get List of VMs from Node"""
|
||||||
node_to_release = s[len(score) - 1][0]
|
node_to_release = sorted_score[len(score) - 1][0]
|
||||||
vms_to_mig = current_model.get_mapping().get_node_vms_from_id(
|
vms_to_mig = current_model.get_mapping().get_node_vms_from_id(
|
||||||
node_to_release)
|
node_to_release)
|
||||||
|
|
||||||
vm_score = []
|
vm_score = []
|
||||||
for vm_id in vms_to_mig:
|
for vm_id in vms_to_mig:
|
||||||
vm = current_model.get_vm_from_id(vm_id)
|
vm = current_model.get_vm_from_id(vm_id)
|
||||||
if vm.state == VMState.ACTIVE.value:
|
if vm.state == vm_state.VMState.ACTIVE.value:
|
||||||
vm_score.append(
|
vm_score.append(
|
||||||
(vm_id, self.calculate_score_vm(vm, current_model)))
|
(vm_id, self.calculate_score_vm(vm, current_model)))
|
||||||
|
|
||||||
@@ -388,51 +410,53 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
mig_src_hypervisor)) == 0:
|
mig_src_hypervisor)) == 0:
|
||||||
self.add_change_service_state(mig_src_hypervisor.
|
self.add_change_service_state(mig_src_hypervisor.
|
||||||
uuid,
|
uuid,
|
||||||
HypervisorState.
|
hyper_state.HypervisorState.
|
||||||
OFFLINE.value)
|
DISABLED.value)
|
||||||
self.number_of_released_nodes += 1
|
self.number_of_released_nodes += 1
|
||||||
|
|
||||||
def calculate_m(self, v, current_model, node_to_release, s):
|
def calculate_num_migrations(self, sorted_vms, current_model,
|
||||||
m = 0
|
node_to_release, sorted_score):
|
||||||
for vm in v:
|
number_migrations = 0
|
||||||
for j in range(0, len(s)):
|
for vm in sorted_vms:
|
||||||
|
for j in range(0, len(sorted_score)):
|
||||||
mig_vm = current_model.get_vm_from_id(vm[0])
|
mig_vm = current_model.get_vm_from_id(vm[0])
|
||||||
mig_src_hypervisor = current_model.get_hypervisor_from_id(
|
mig_src_hypervisor = current_model.get_hypervisor_from_id(
|
||||||
node_to_release)
|
node_to_release)
|
||||||
mig_dst_hypervisor = current_model.get_hypervisor_from_id(
|
mig_dst_hypervisor = current_model.get_hypervisor_from_id(
|
||||||
s[j][0])
|
sorted_score[j][0])
|
||||||
|
|
||||||
result = self.check_migration(current_model,
|
result = self.check_migration(current_model,
|
||||||
mig_src_hypervisor,
|
mig_src_hypervisor,
|
||||||
mig_dst_hypervisor, mig_vm)
|
mig_dst_hypervisor, mig_vm)
|
||||||
if result is True:
|
if result:
|
||||||
self.create_migration_vm(
|
self.create_migration_vm(
|
||||||
current_model, mig_vm,
|
current_model, mig_vm,
|
||||||
mig_src_hypervisor, mig_dst_hypervisor)
|
mig_src_hypervisor, mig_dst_hypervisor)
|
||||||
m += 1
|
number_migrations += 1
|
||||||
break
|
break
|
||||||
return m
|
return number_migrations
|
||||||
|
|
||||||
def unsuccessful_migration_actualization(self, m, unsuccessful_migration):
|
def unsuccessful_migration_actualization(self, number_migrations,
|
||||||
if m > 0:
|
unsuccessful_migration):
|
||||||
self.number_of_migrations += m
|
if number_migrations > 0:
|
||||||
|
self.number_of_migrations += number_migrations
|
||||||
return 0
|
return 0
|
||||||
else:
|
else:
|
||||||
return unsuccessful_migration + 1
|
return unsuccessful_migration + 1
|
||||||
|
|
||||||
def execute(self, orign_model):
|
def execute(self, original_model):
|
||||||
LOG.info(_LI("Initializing Sercon Consolidation"))
|
LOG.info(_LI("Initializing Sercon Consolidation"))
|
||||||
|
|
||||||
if orign_model is None:
|
if original_model is None:
|
||||||
raise exception.ClusterStateNotDefined()
|
raise exception.ClusterStateNotDefined()
|
||||||
|
|
||||||
# todo(jed) clone model
|
# todo(jed) clone model
|
||||||
current_model = orign_model
|
current_model = original_model
|
||||||
|
|
||||||
self.efficacy = 100
|
self.efficacy = 100
|
||||||
unsuccessful_migration = 0
|
unsuccessful_migration = 0
|
||||||
|
|
||||||
first = True
|
first_migration = True
|
||||||
size_cluster = len(current_model.get_all_hypervisors())
|
size_cluster = len(current_model.get_all_hypervisors())
|
||||||
if size_cluster == 0:
|
if size_cluster == 0:
|
||||||
raise exception.ClusterEmpty()
|
raise exception.ClusterEmpty()
|
||||||
@@ -444,24 +468,24 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
count = current_model.get_mapping(). \
|
count = current_model.get_mapping(). \
|
||||||
get_node_vms_from_id(hypervisor_id)
|
get_node_vms_from_id(hypervisor_id)
|
||||||
if len(count) == 0:
|
if len(count) == 0:
|
||||||
if hypervisor.state == HypervisorState.ONLINE:
|
if hypervisor.state == hyper_state.HypervisorState.ENABLED:
|
||||||
self.add_change_service_state(hypervisor_id,
|
self.add_change_service_state(hypervisor_id,
|
||||||
HypervisorState.
|
hyper_state.HypervisorState.
|
||||||
OFFLINE.value)
|
DISABLED.value)
|
||||||
|
|
||||||
while self.get_allowed_migration_attempts() >= unsuccessful_migration:
|
while self.get_allowed_migration_attempts() >= unsuccessful_migration:
|
||||||
if first is not True:
|
if not first_migration:
|
||||||
self.efficacy = self.calculate_migration_efficacy()
|
self.efficacy = self.calculate_migration_efficacy()
|
||||||
if self.efficacy < float(self.target_efficacy):
|
if self.efficacy < float(self.target_efficacy):
|
||||||
break
|
break
|
||||||
first = False
|
first_migration = False
|
||||||
score = []
|
score = []
|
||||||
|
|
||||||
score = self.score_of_nodes(current_model, score)
|
score = self.score_of_nodes(current_model, score)
|
||||||
|
|
||||||
''' sort compute nodes by Score decreasing '''''
|
''' sort compute nodes by Score decreasing '''''
|
||||||
s = sorted(score, reverse=True, key=lambda x: (x[1]))
|
sorted_score = sorted(score, reverse=True, key=lambda x: (x[1]))
|
||||||
LOG.debug("Hypervisor(s) BFD {0}".format(s))
|
LOG.debug("Hypervisor(s) BFD {0}".format(sorted_score))
|
||||||
|
|
||||||
''' get Node to be released '''
|
''' get Node to be released '''
|
||||||
if len(score) == 0:
|
if len(score) == 0:
|
||||||
@@ -471,17 +495,18 @@ class BasicConsolidation(BaseStrategy):
|
|||||||
break
|
break
|
||||||
|
|
||||||
node_to_release, vm_score = self.node_and_vm_score(
|
node_to_release, vm_score = self.node_and_vm_score(
|
||||||
s, score, current_model)
|
sorted_score, score, current_model)
|
||||||
|
|
||||||
''' sort VMs by Score '''
|
''' sort VMs by Score '''
|
||||||
v = sorted(vm_score, reverse=True, key=lambda x: (x[1]))
|
sorted_vms = sorted(vm_score, reverse=True, key=lambda x: (x[1]))
|
||||||
# BFD: Best Fit Decrease
|
# BFD: Best Fit Decrease
|
||||||
LOG.debug("VM(s) BFD {0}".format(v))
|
LOG.debug("VM(s) BFD {0}".format(sorted_vms))
|
||||||
|
|
||||||
m = self.calculate_m(v, current_model, node_to_release, s)
|
migrations = self.calculate_num_migrations(
|
||||||
|
sorted_vms, current_model, node_to_release, sorted_score)
|
||||||
|
|
||||||
unsuccessful_migration = self.unsuccessful_migration_actualization(
|
unsuccessful_migration = self.unsuccessful_migration_actualization(
|
||||||
m, unsuccessful_migration)
|
migrations, unsuccessful_migration)
|
||||||
infos = {
|
infos = {
|
||||||
"number_of_migrations": self.number_of_migrations,
|
"number_of_migrations": self.number_of_migrations,
|
||||||
"number_of_nodes_released": self.number_of_released_nodes,
|
"number_of_nodes_released": self.number_of_released_nodes,
|
||||||
|
|||||||
@@ -18,33 +18,52 @@
|
|||||||
#
|
#
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.decision_engine.strategy.strategies.base import BaseStrategy
|
from watcher.decision_engine.strategy.strategies import base
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class DummyStrategy(BaseStrategy):
|
class DummyStrategy(base.BaseStrategy):
|
||||||
|
"""Dummy strategy used for integration testing via Tempest
|
||||||
|
|
||||||
|
*Description*
|
||||||
|
|
||||||
|
This strategy does not provide any useful optimization. Indeed, its only
|
||||||
|
purpose is to be used by Tempest tests.
|
||||||
|
|
||||||
|
*Requirements*
|
||||||
|
|
||||||
|
<None>
|
||||||
|
|
||||||
|
*Limitations*
|
||||||
|
|
||||||
|
Do not use in production.
|
||||||
|
|
||||||
|
*Spec URL*
|
||||||
|
|
||||||
|
<None>
|
||||||
|
"""
|
||||||
|
|
||||||
DEFAULT_NAME = "dummy"
|
DEFAULT_NAME = "dummy"
|
||||||
DEFAULT_DESCRIPTION = "Dummy Strategy"
|
DEFAULT_DESCRIPTION = "Dummy Strategy"
|
||||||
|
|
||||||
NOP = "nop"
|
NOP = "nop"
|
||||||
SLEEP = "sleep"
|
SLEEP = "sleep"
|
||||||
|
|
||||||
def __init__(self, name=DEFAULT_NAME, description=DEFAULT_DESCRIPTION):
|
def __init__(self, name=DEFAULT_NAME, description=DEFAULT_DESCRIPTION,
|
||||||
super(DummyStrategy, self).__init__(name, description)
|
osc=None):
|
||||||
|
super(DummyStrategy, self).__init__(name, description, osc)
|
||||||
|
|
||||||
def execute(self, model):
|
def execute(self, original_model):
|
||||||
|
LOG.debug("Executing Dummy strategy")
|
||||||
parameters = {'message': 'hello World'}
|
parameters = {'message': 'hello World'}
|
||||||
self.solution.add_action(action_type=self.NOP,
|
self.solution.add_action(action_type=self.NOP,
|
||||||
applies_to="",
|
|
||||||
input_parameters=parameters)
|
input_parameters=parameters)
|
||||||
|
|
||||||
parameters = {'message': 'Welcome'}
|
parameters = {'message': 'Welcome'}
|
||||||
self.solution.add_action(action_type=self.NOP,
|
self.solution.add_action(action_type=self.NOP,
|
||||||
applies_to="",
|
|
||||||
input_parameters=parameters)
|
input_parameters=parameters)
|
||||||
|
|
||||||
self.solution.add_action(action_type=self.SLEEP,
|
self.solution.add_action(action_type=self.SLEEP,
|
||||||
applies_to="",
|
input_parameters={'duration': 5.0})
|
||||||
input_parameters={'duration': '5'})
|
|
||||||
return self.solution
|
return self.solution
|
||||||
|
|||||||
@@ -16,20 +16,60 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
|
"""
|
||||||
|
*Good Thermal Strategy*:
|
||||||
|
|
||||||
|
Towards to software defined infrastructure, the power and thermal
|
||||||
|
intelligences is being adopted to optimize workload, which can help
|
||||||
|
improve efficiency, reduce power, as well as to improve datacenter PUE
|
||||||
|
and lower down operation cost in data center.
|
||||||
|
Outlet (Exhaust Air) Temperature is one of the important thermal
|
||||||
|
telemetries to measure thermal/workload status of server.
|
||||||
|
"""
|
||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher._i18n import _LE
|
from watcher._i18n import _LE
|
||||||
from watcher.common import exception as wexc
|
from watcher.common import exception as wexc
|
||||||
from watcher.decision_engine.model.resource import ResourceType
|
from watcher.decision_engine.model import resource
|
||||||
from watcher.decision_engine.model.vm_state import VMState
|
from watcher.decision_engine.model import vm_state
|
||||||
from watcher.decision_engine.strategy.strategies.base import BaseStrategy
|
from watcher.decision_engine.strategy.strategies import base
|
||||||
from watcher.metrics_engine.cluster_history.ceilometer import \
|
from watcher.metrics_engine.cluster_history import ceilometer as ceil
|
||||||
CeilometerClusterHistory
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class OutletTempControl(BaseStrategy):
|
class OutletTempControl(base.BaseStrategy):
|
||||||
|
"""[PoC] Outlet temperature control using live migration
|
||||||
|
|
||||||
|
*Description*
|
||||||
|
|
||||||
|
It is a migration strategy based on the outlet temperature of compute
|
||||||
|
hosts. It generates solutions to move a workload whenever a server's
|
||||||
|
outlet temperature is higher than the specified threshold.
|
||||||
|
|
||||||
|
*Requirements*
|
||||||
|
|
||||||
|
* Hardware: All computer hosts should support IPMI and PTAS technology
|
||||||
|
* Software: Ceilometer component ceilometer-agent-ipmi running
|
||||||
|
in each compute host, and Ceilometer API can report such telemetry
|
||||||
|
``hardware.ipmi.node.outlet_temperature`` successfully.
|
||||||
|
* You must have at least 2 physical compute hosts to run this strategy.
|
||||||
|
|
||||||
|
*Limitations*
|
||||||
|
|
||||||
|
- This is a proof of concept that is not meant to be used in production
|
||||||
|
- We cannot forecast how many servers should be migrated. This is the
|
||||||
|
reason why we only plan a single virtual machine migration at a time.
|
||||||
|
So it's better to use this algorithm with `CONTINUOUS` audits.
|
||||||
|
- It assume that live migrations are possible
|
||||||
|
|
||||||
|
*Spec URL*
|
||||||
|
|
||||||
|
https://github.com/openstack/watcher-specs/blob/master/specs/mitaka/approved/outlet-temperature-based-strategy.rst
|
||||||
|
""" # noqa
|
||||||
|
|
||||||
DEFAULT_NAME = "outlet_temp_control"
|
DEFAULT_NAME = "outlet_temp_control"
|
||||||
DEFAULT_DESCRIPTION = "outlet temperature based migration strategy"
|
DEFAULT_DESCRIPTION = "outlet temperature based migration strategy"
|
||||||
@@ -40,35 +80,15 @@ class OutletTempControl(BaseStrategy):
|
|||||||
|
|
||||||
MIGRATION = "migrate"
|
MIGRATION = "migrate"
|
||||||
|
|
||||||
def __init__(self, name=DEFAULT_NAME, description=DEFAULT_DESCRIPTION):
|
def __init__(self, name=DEFAULT_NAME, description=DEFAULT_DESCRIPTION,
|
||||||
"""[PoC]Outlet temperature control using live migration
|
osc=None):
|
||||||
|
"""Outlet temperature control using live migration
|
||||||
It is a migration strategy based on the Outlet Temperature of physical
|
|
||||||
servers. It generates solutions to move a workload whenever a server’s
|
|
||||||
outlet temperature is higher than the specified threshold. As of now,
|
|
||||||
we cannot forecast how many instances should be migrated. This is the
|
|
||||||
reason why we simply plan a single virtual machine migration.
|
|
||||||
So it's better to use this algorithm with CONTINUOUS audits.
|
|
||||||
|
|
||||||
Requirements:
|
|
||||||
* Hardware: computer node should support IPMI and PTAS technology
|
|
||||||
* Software: Ceilometer component ceilometer-agent-ipmi running
|
|
||||||
in each compute node, and Ceilometer API can report such telemetry
|
|
||||||
"hardware.ipmi.node.outlet_temperature" successfully.
|
|
||||||
* You must have at least 2 physical compute nodes to run this strategy.
|
|
||||||
|
|
||||||
Good Strategy:
|
|
||||||
Towards to software defined infrastructure, the power and thermal
|
|
||||||
intelligences is being adopted to optimize workload, which can help
|
|
||||||
improve efficiency, reduce power, as well as to improve datacenter PUE
|
|
||||||
and lower down operation cost in data center.
|
|
||||||
Outlet(Exhaust Air) Temperature is one of the important thermal
|
|
||||||
telemetries to measure thermal/workload status of server.
|
|
||||||
|
|
||||||
:param name: the name of the strategy
|
:param name: the name of the strategy
|
||||||
:param description: a description of the strategy
|
:param description: a description of the strategy
|
||||||
|
:param osc: an OpenStackClients object
|
||||||
"""
|
"""
|
||||||
super(OutletTempControl, self).__init__(name, description)
|
super(OutletTempControl, self).__init__(name, description, osc)
|
||||||
# the migration plan will be triggered when the outlet temperature
|
# the migration plan will be triggered when the outlet temperature
|
||||||
# reaches threshold
|
# reaches threshold
|
||||||
# TODO(zhenzanz): Threshold should be configurable for each audit
|
# TODO(zhenzanz): Threshold should be configurable for each audit
|
||||||
@@ -79,32 +99,33 @@ class OutletTempControl(BaseStrategy):
|
|||||||
@property
|
@property
|
||||||
def ceilometer(self):
|
def ceilometer(self):
|
||||||
if self._ceilometer is None:
|
if self._ceilometer is None:
|
||||||
self._ceilometer = CeilometerClusterHistory()
|
self._ceilometer = ceil.CeilometerClusterHistory(osc=self.osc)
|
||||||
return self._ceilometer
|
return self._ceilometer
|
||||||
|
|
||||||
@ceilometer.setter
|
@ceilometer.setter
|
||||||
def ceilometer(self, c):
|
def ceilometer(self, c):
|
||||||
self._ceilometer = c
|
self._ceilometer = c
|
||||||
|
|
||||||
def calc_used_res(self, model, hypervisor, cap_cores, cap_mem, cap_disk):
|
def calc_used_res(self, cluster_data_model, hypervisor, cpu_capacity,
|
||||||
|
memory_capacity, disk_capacity):
|
||||||
'''calculate the used vcpus, memory and disk based on VM flavors'''
|
'''calculate the used vcpus, memory and disk based on VM flavors'''
|
||||||
vms = model.get_mapping().get_node_vms(hypervisor)
|
vms = cluster_data_model.get_mapping().get_node_vms(hypervisor)
|
||||||
vcpus_used = 0
|
vcpus_used = 0
|
||||||
memory_mb_used = 0
|
memory_mb_used = 0
|
||||||
disk_gb_used = 0
|
disk_gb_used = 0
|
||||||
if len(vms) > 0:
|
if len(vms) > 0:
|
||||||
for vm_id in vms:
|
for vm_id in vms:
|
||||||
vm = model.get_vm_from_id(vm_id)
|
vm = cluster_data_model.get_vm_from_id(vm_id)
|
||||||
vcpus_used += cap_cores.get_capacity(vm)
|
vcpus_used += cpu_capacity.get_capacity(vm)
|
||||||
memory_mb_used += cap_mem.get_capacity(vm)
|
memory_mb_used += memory_capacity.get_capacity(vm)
|
||||||
disk_gb_used += cap_disk.get_capacity(vm)
|
disk_gb_used += disk_capacity.get_capacity(vm)
|
||||||
|
|
||||||
return vcpus_used, memory_mb_used, disk_gb_used
|
return vcpus_used, memory_mb_used, disk_gb_used
|
||||||
|
|
||||||
def group_hosts_by_outlet_temp(self, model):
|
def group_hosts_by_outlet_temp(self, cluster_data_model):
|
||||||
'''Group hosts based on outlet temp meters'''
|
"""Group hosts based on outlet temp meters"""
|
||||||
|
|
||||||
hypervisors = model.get_all_hypervisors()
|
hypervisors = cluster_data_model.get_all_hypervisors()
|
||||||
size_cluster = len(hypervisors)
|
size_cluster = len(hypervisors)
|
||||||
if size_cluster == 0:
|
if size_cluster == 0:
|
||||||
raise wexc.ClusterEmpty()
|
raise wexc.ClusterEmpty()
|
||||||
@@ -112,7 +133,8 @@ class OutletTempControl(BaseStrategy):
|
|||||||
hosts_need_release = []
|
hosts_need_release = []
|
||||||
hosts_target = []
|
hosts_target = []
|
||||||
for hypervisor_id in hypervisors:
|
for hypervisor_id in hypervisors:
|
||||||
hypervisor = model.get_hypervisor_from_id(hypervisor_id)
|
hypervisor = cluster_data_model.get_hypervisor_from_id(
|
||||||
|
hypervisor_id)
|
||||||
resource_id = hypervisor.uuid
|
resource_id = hypervisor.uuid
|
||||||
|
|
||||||
outlet_temp = self.ceilometer.statistic_aggregation(
|
outlet_temp = self.ceilometer.statistic_aggregation(
|
||||||
@@ -134,18 +156,19 @@ class OutletTempControl(BaseStrategy):
|
|||||||
hosts_target.append(hvmap)
|
hosts_target.append(hvmap)
|
||||||
return hosts_need_release, hosts_target
|
return hosts_need_release, hosts_target
|
||||||
|
|
||||||
def choose_vm_to_migrate(self, model, hosts):
|
def choose_vm_to_migrate(self, cluster_data_model, hosts):
|
||||||
'''pick up an active vm instance to migrate from provided hosts'''
|
"""pick up an active vm instance to migrate from provided hosts"""
|
||||||
|
|
||||||
for hvmap in hosts:
|
for hvmap in hosts:
|
||||||
mig_src_hypervisor = hvmap['hv']
|
mig_src_hypervisor = hvmap['hv']
|
||||||
vms_of_src = model.get_mapping().get_node_vms(mig_src_hypervisor)
|
vms_of_src = cluster_data_model.get_mapping().get_node_vms(
|
||||||
|
mig_src_hypervisor)
|
||||||
if len(vms_of_src) > 0:
|
if len(vms_of_src) > 0:
|
||||||
for vm_id in vms_of_src:
|
for vm_id in vms_of_src:
|
||||||
try:
|
try:
|
||||||
# select the first active VM to migrate
|
# select the first active VM to migrate
|
||||||
vm = model.get_vm_from_id(vm_id)
|
vm = cluster_data_model.get_vm_from_id(vm_id)
|
||||||
if vm.state != VMState.ACTIVE.value:
|
if vm.state != vm_state.VMState.ACTIVE.value:
|
||||||
LOG.info(_LE("VM not active, skipped: %s"),
|
LOG.info(_LE("VM not active, skipped: %s"),
|
||||||
vm.uuid)
|
vm.uuid)
|
||||||
continue
|
continue
|
||||||
@@ -156,44 +179,45 @@ class OutletTempControl(BaseStrategy):
|
|||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def filter_dest_servers(self, model, hosts, vm_to_migrate):
|
def filter_dest_servers(self, cluster_data_model, hosts, vm_to_migrate):
|
||||||
'''Only return hosts with sufficient available resources'''
|
"""Only return hosts with sufficient available resources"""
|
||||||
|
|
||||||
cap_cores = model.get_resource_from_id(ResourceType.cpu_cores)
|
cpu_capacity = cluster_data_model.get_resource_from_id(
|
||||||
cap_disk = model.get_resource_from_id(ResourceType.disk)
|
resource.ResourceType.cpu_cores)
|
||||||
cap_mem = model.get_resource_from_id(ResourceType.memory)
|
disk_capacity = cluster_data_model.get_resource_from_id(
|
||||||
|
resource.ResourceType.disk)
|
||||||
|
memory_capacity = cluster_data_model.get_resource_from_id(
|
||||||
|
resource.ResourceType.memory)
|
||||||
|
|
||||||
required_cores = cap_cores.get_capacity(vm_to_migrate)
|
required_cores = cpu_capacity.get_capacity(vm_to_migrate)
|
||||||
required_disk = cap_disk.get_capacity(vm_to_migrate)
|
required_disk = disk_capacity.get_capacity(vm_to_migrate)
|
||||||
required_mem = cap_mem.get_capacity(vm_to_migrate)
|
required_memory = memory_capacity.get_capacity(vm_to_migrate)
|
||||||
|
|
||||||
# filter hypervisors without enough resource
|
# filter hypervisors without enough resource
|
||||||
dest_servers = []
|
dest_servers = []
|
||||||
for hvmap in hosts:
|
for hvmap in hosts:
|
||||||
host = hvmap['hv']
|
host = hvmap['hv']
|
||||||
# available
|
# available
|
||||||
cores_used, mem_used, disk_used = self.calc_used_res(model,
|
cores_used, mem_used, disk_used = self.calc_used_res(
|
||||||
host,
|
cluster_data_model, host, cpu_capacity, memory_capacity,
|
||||||
cap_cores,
|
disk_capacity)
|
||||||
cap_mem,
|
cores_available = cpu_capacity.get_capacity(host) - cores_used
|
||||||
cap_disk)
|
disk_available = disk_capacity.get_capacity(host) - mem_used
|
||||||
cores_available = cap_cores.get_capacity(host) - cores_used
|
mem_available = memory_capacity.get_capacity(host) - disk_used
|
||||||
disk_available = cap_disk.get_capacity(host) - mem_used
|
|
||||||
mem_available = cap_mem.get_capacity(host) - disk_used
|
|
||||||
if cores_available >= required_cores \
|
if cores_available >= required_cores \
|
||||||
and disk_available >= required_disk \
|
and disk_available >= required_disk \
|
||||||
and mem_available >= required_mem:
|
and mem_available >= required_memory:
|
||||||
dest_servers.append(hvmap)
|
dest_servers.append(hvmap)
|
||||||
|
|
||||||
return dest_servers
|
return dest_servers
|
||||||
|
|
||||||
def execute(self, orign_model):
|
def execute(self, original_model):
|
||||||
LOG.debug("Initializing Outlet temperature strategy")
|
LOG.debug("Initializing Outlet temperature strategy")
|
||||||
|
|
||||||
if orign_model is None:
|
if original_model is None:
|
||||||
raise wexc.ClusterStateNotDefined()
|
raise wexc.ClusterStateNotDefined()
|
||||||
|
|
||||||
current_model = orign_model
|
current_model = original_model
|
||||||
hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp(
|
hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp(
|
||||||
current_model)
|
current_model)
|
||||||
|
|
||||||
@@ -237,10 +261,10 @@ class OutletTempControl(BaseStrategy):
|
|||||||
mig_src_hypervisor,
|
mig_src_hypervisor,
|
||||||
mig_dst_hypervisor):
|
mig_dst_hypervisor):
|
||||||
parameters = {'migration_type': 'live',
|
parameters = {'migration_type': 'live',
|
||||||
'src_hypervisor': mig_src_hypervisor,
|
'src_hypervisor': mig_src_hypervisor.uuid,
|
||||||
'dst_hypervisor': mig_dst_hypervisor}
|
'dst_hypervisor': mig_dst_hypervisor.uuid}
|
||||||
self.solution.add_action(action_type=self.MIGRATION,
|
self.solution.add_action(action_type=self.MIGRATION,
|
||||||
applies_to=vm_src,
|
resource_id=vm_src.uuid,
|
||||||
input_parameters=parameters)
|
input_parameters=parameters)
|
||||||
|
|
||||||
self.solution.model = current_model
|
self.solution.model = current_model
|
||||||
|
|||||||
129
watcher/doc.py
129
watcher/doc.py
@@ -17,21 +17,56 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import importlib
|
import importlib
|
||||||
|
import inspect
|
||||||
|
|
||||||
from docutils import nodes
|
from docutils import nodes
|
||||||
from docutils.parsers import rst
|
from docutils.parsers import rst
|
||||||
from docutils import statemachine as sm
|
from docutils import statemachine
|
||||||
|
from stevedore import extension
|
||||||
|
|
||||||
from watcher.version import version_info
|
from watcher.version import version_info
|
||||||
|
|
||||||
import textwrap
|
|
||||||
|
class BaseWatcherDirective(rst.Directive):
|
||||||
|
|
||||||
|
def __init__(self, name, arguments, options, content, lineno,
|
||||||
|
content_offset, block_text, state, state_machine):
|
||||||
|
super(BaseWatcherDirective, self).__init__(
|
||||||
|
name, arguments, options, content, lineno,
|
||||||
|
content_offset, block_text, state, state_machine)
|
||||||
|
self.result = statemachine.ViewList()
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
raise NotImplementedError('Must override run() is subclass.')
|
||||||
|
|
||||||
|
def add_line(self, line, *lineno):
|
||||||
|
"""Append one line of generated reST to the output."""
|
||||||
|
self.result.append(line, rst.directives.unchanged, *lineno)
|
||||||
|
|
||||||
|
def add_textblock(self, textblock):
|
||||||
|
for line in textblock.splitlines():
|
||||||
|
self.add_line(line)
|
||||||
|
|
||||||
|
def add_object_docstring(self, obj):
|
||||||
|
obj_raw_docstring = obj.__doc__ or ""
|
||||||
|
|
||||||
|
# Maybe it's within the __init__
|
||||||
|
if not obj_raw_docstring and hasattr(obj, "__init__"):
|
||||||
|
if obj.__init__.__doc__:
|
||||||
|
obj_raw_docstring = obj.__init__.__doc__
|
||||||
|
|
||||||
|
if not obj_raw_docstring:
|
||||||
|
# Raise a warning to make the tests fail wit doc8
|
||||||
|
raise self.error("No docstring available for this plugin!")
|
||||||
|
|
||||||
|
obj_docstring = inspect.cleandoc(obj_raw_docstring)
|
||||||
|
self.add_textblock(obj_docstring)
|
||||||
|
|
||||||
|
|
||||||
class WatcherTerm(rst.Directive):
|
class WatcherTerm(BaseWatcherDirective):
|
||||||
"""Directive to import an RST formatted docstring into the Watcher glossary
|
"""Directive to import an RST formatted docstring into the Watcher glossary
|
||||||
|
|
||||||
How to use it
|
**How to use it**
|
||||||
-------------
|
|
||||||
|
|
||||||
# inside your .py file
|
# inside your .py file
|
||||||
class DocumentedObject(object):
|
class DocumentedObject(object):
|
||||||
@@ -47,17 +82,7 @@ class WatcherTerm(rst.Directive):
|
|||||||
# You need to put an import path as an argument for this directive to work
|
# You need to put an import path as an argument for this directive to work
|
||||||
required_arguments = 1
|
required_arguments = 1
|
||||||
|
|
||||||
def add_textblock(self, textblock, *lineno):
|
|
||||||
for line in textblock.splitlines():
|
|
||||||
self.add_line(line)
|
|
||||||
|
|
||||||
def add_line(self, line, *lineno):
|
|
||||||
"""Append one line of generated reST to the output."""
|
|
||||||
self.result.append(line, rst.directives.unchanged, *lineno)
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
self.result = sm.ViewList()
|
|
||||||
|
|
||||||
cls_path = self.arguments[0]
|
cls_path = self.arguments[0]
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -65,20 +90,82 @@ class WatcherTerm(rst.Directive):
|
|||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
raise self.error(exc)
|
raise self.error(exc)
|
||||||
|
|
||||||
self.add_class_docstring(cls)
|
self.add_object_docstring(cls)
|
||||||
|
|
||||||
node = nodes.paragraph()
|
node = nodes.paragraph()
|
||||||
node.document = self.state.document
|
node.document = self.state.document
|
||||||
self.state.nested_parse(self.result, 0, node)
|
self.state.nested_parse(self.result, 0, node)
|
||||||
return node.children
|
return node.children
|
||||||
|
|
||||||
def add_class_docstring(self, cls):
|
|
||||||
# Added 4 spaces to align the first line with the rest of the text
|
class DriversDoc(BaseWatcherDirective):
|
||||||
# to be able to dedent it correctly
|
"""Directive to import an RST formatted docstring into the Watcher doc
|
||||||
cls_docstring = textwrap.dedent("%s%s" % (" " * 4, cls.__doc__))
|
|
||||||
self.add_textblock(cls_docstring)
|
This directive imports the RST formatted docstring of every driver declared
|
||||||
|
within an entry point namespace provided as argument
|
||||||
|
|
||||||
|
**How to use it**
|
||||||
|
|
||||||
|
# inside your .py file
|
||||||
|
class DocumentedClassReferencedInEntrypoint(object):
|
||||||
|
'''My *.rst* docstring'''
|
||||||
|
|
||||||
|
def foo(self):
|
||||||
|
'''Foo docstring'''
|
||||||
|
|
||||||
|
# Inside your .rst file
|
||||||
|
.. drivers-doc:: entrypoint_namespace
|
||||||
|
:append_methods_doc: foo
|
||||||
|
|
||||||
|
This directive will then import the docstring and then interprete it.
|
||||||
|
|
||||||
|
Note that no section/sub-section can be imported via this directive as it
|
||||||
|
is a Sphinx restriction.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# You need to put an import path as an argument for this directive to work
|
||||||
|
required_arguments = 1
|
||||||
|
optional_arguments = 0
|
||||||
|
final_argument_whitespace = True
|
||||||
|
has_content = False
|
||||||
|
|
||||||
|
option_spec = dict(
|
||||||
|
# CSV formatted list of method names whose return values will be zipped
|
||||||
|
# together in the given order
|
||||||
|
append_methods_doc=lambda opts: [
|
||||||
|
opt.strip() for opt in opts.split(",") if opt.strip()],
|
||||||
|
# By default, we always start by adding the driver object docstring
|
||||||
|
exclude_driver_docstring=rst.directives.flag,
|
||||||
|
)
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
ext_manager = extension.ExtensionManager(namespace=self.arguments[0])
|
||||||
|
extensions = ext_manager.extensions
|
||||||
|
# Aggregates drivers based on their module name (i.e import path)
|
||||||
|
classes = [(ext.name, ext.plugin) for ext in extensions]
|
||||||
|
|
||||||
|
for name, cls in classes:
|
||||||
|
self.add_line(".. rubric:: %s" % name)
|
||||||
|
self.add_line("")
|
||||||
|
|
||||||
|
if "exclude_driver_docstring" not in self.options:
|
||||||
|
self.add_object_docstring(cls)
|
||||||
|
self.add_line("")
|
||||||
|
|
||||||
|
for method_name in self.options.get("append_methods_doc", []):
|
||||||
|
if hasattr(cls, method_name):
|
||||||
|
method = getattr(cls, method_name)
|
||||||
|
method_result = inspect.cleandoc(method)
|
||||||
|
self.add_textblock(method_result())
|
||||||
|
self.add_line("")
|
||||||
|
|
||||||
|
node = nodes.paragraph()
|
||||||
|
node.document = self.state.document
|
||||||
|
self.state.nested_parse(self.result, 0, node)
|
||||||
|
return node.children
|
||||||
|
|
||||||
|
|
||||||
def setup(app):
|
def setup(app):
|
||||||
|
app.add_directive('drivers-doc', DriversDoc)
|
||||||
app.add_directive('watcher-term', WatcherTerm)
|
app.add_directive('watcher-term', WatcherTerm)
|
||||||
return {'version': version_info.version_string()}
|
return {'version': version_info.version_string()}
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ msgid ""
|
|||||||
msgstr ""
|
msgstr ""
|
||||||
"Project-Id-Version: python-watcher 0.21.1.dev32\n"
|
"Project-Id-Version: python-watcher 0.21.1.dev32\n"
|
||||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||||
"POT-Creation-Date: 2016-01-19 17:54+0100\n"
|
"POT-Creation-Date: 2016-02-09 09:07+0100\n"
|
||||||
"PO-Revision-Date: 2015-12-11 15:42+0100\n"
|
"PO-Revision-Date: 2015-12-11 15:42+0100\n"
|
||||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||||
"Language: fr\n"
|
"Language: fr\n"
|
||||||
@@ -24,25 +24,29 @@ msgstr ""
|
|||||||
msgid "Invalid state: %(state)s"
|
msgid "Invalid state: %(state)s"
|
||||||
msgstr "État invalide : %(state)s"
|
msgstr "État invalide : %(state)s"
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/action_plan.py:418
|
#: watcher/api/controllers/v1/action_plan.py:422
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)"
|
msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)"
|
||||||
msgstr "Transition d'état non autorisée : (%(initial_state)s -> %(new_state)s)"
|
msgstr "Transition d'état non autorisée : (%(initial_state)s -> %(new_state)s)"
|
||||||
|
|
||||||
|
#: watcher/api/controllers/v1/audit.py:359
|
||||||
|
msgid "The audit template UUID or name specified is invalid"
|
||||||
|
msgstr "Le nom ou UUID de l'audit template est invalide"
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/types.py:148
|
#: watcher/api/controllers/v1/types.py:148
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "%s is not JSON serializable"
|
msgid "%s is not JSON serializable"
|
||||||
msgstr ""
|
msgstr "%s n'est pas sérialisable en JSON"
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/types.py:184
|
#: watcher/api/controllers/v1/types.py:184
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Wrong type. Expected '%(type)s', got '%(value)s'"
|
msgid "Wrong type. Expected '%(type)s', got '%(value)s'"
|
||||||
msgstr ""
|
msgstr "Type incorrect. '%(type)s' attendu, '%(value)s' obtenu"
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/types.py:223
|
#: watcher/api/controllers/v1/types.py:223
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "'%s' is an internal attribute and can not be updated"
|
msgid "'%s' is an internal attribute and can not be updated"
|
||||||
msgstr ""
|
msgstr "'%s' wat un attribut interne et ne peut pas être modifié"
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/types.py:227
|
#: watcher/api/controllers/v1/types.py:227
|
||||||
#, python-format
|
#, python-format
|
||||||
@@ -60,7 +64,7 @@ msgstr "Limit doit être positif"
|
|||||||
#: watcher/api/controllers/v1/utils.py:47
|
#: watcher/api/controllers/v1/utils.py:47
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'"
|
msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'"
|
||||||
msgstr ""
|
msgstr "Ordre de tri invalide : %s. Les valeurs acceptées sont 'asc' or 'desc'"
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/utils.py:57
|
#: watcher/api/controllers/v1/utils.py:57
|
||||||
#, python-format
|
#, python-format
|
||||||
@@ -69,33 +73,42 @@ msgstr ""
|
|||||||
|
|
||||||
#: watcher/api/middleware/auth_token.py:45
|
#: watcher/api/middleware/auth_token.py:45
|
||||||
msgid "Cannot compile public API routes"
|
msgid "Cannot compile public API routes"
|
||||||
msgstr ""
|
msgstr "Ne peut pas compiler les chemins d'API publique"
|
||||||
|
|
||||||
#: watcher/api/middleware/parsable_error.py:52
|
#: watcher/api/middleware/parsable_error.py:52
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "ErrorDocumentMiddleware received an invalid status %s"
|
msgid "ErrorDocumentMiddleware received an invalid status %s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/api/middleware/parsable_error.py:80
|
#: watcher/api/middleware/parsable_error.py:79
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Error parsing HTTP response: %s"
|
msgid "Error parsing HTTP response: %s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/applier/actions/change_nova_service_state.py:58
|
#: watcher/applier/actions/change_nova_service_state.py:69
|
||||||
msgid "The target state is not defined"
|
msgid "The target state is not defined"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/applier/workflow_engine/default.py:69
|
#: watcher/applier/actions/migration.py:43
|
||||||
|
msgid "The parameter resource_id is invalid."
|
||||||
|
msgstr "Le paramètre resource_id est invalide"
|
||||||
|
|
||||||
|
#: watcher/applier/actions/migration.py:86
|
||||||
|
#, python-format
|
||||||
|
msgid "Migration of type %(migration_type)s is not supported."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/applier/workflow_engine/default.py:128
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The WorkFlow Engine has failed to execute the action %s"
|
msgid "The WorkFlow Engine has failed to execute the action %s"
|
||||||
msgstr "Le moteur de workflow a echoué lors de l'éxécution de l'action %s"
|
msgstr "Le moteur de workflow a echoué lors de l'éxécution de l'action %s"
|
||||||
|
|
||||||
#: watcher/applier/workflow_engine/default.py:77
|
#: watcher/applier/workflow_engine/default.py:146
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Revert action %s"
|
msgid "Revert action %s"
|
||||||
msgstr "Annulation de l'action %s"
|
msgstr "Annulation de l'action %s"
|
||||||
|
|
||||||
#: watcher/applier/workflow_engine/default.py:83
|
#: watcher/applier/workflow_engine/default.py:152
|
||||||
msgid "Oops! We need disaster recover plan"
|
msgid "Oops! We need disaster recover plan"
|
||||||
msgstr "Oops! Nous avons besoin d'un plan de reprise d'activité"
|
msgstr "Oops! Nous avons besoin d'un plan de reprise d'activité"
|
||||||
|
|
||||||
@@ -115,191 +128,210 @@ msgstr "Sert sur 0.0.0.0:%(port)s, accessible à http://127.0.0.1:%(port)s"
|
|||||||
msgid "serving on http://%(host)s:%(port)s"
|
msgid "serving on http://%(host)s:%(port)s"
|
||||||
msgstr "Sert sur http://%(host)s:%(port)s"
|
msgstr "Sert sur http://%(host)s:%(port)s"
|
||||||
|
|
||||||
#: watcher/common/exception.py:56
|
#: watcher/common/clients.py:29
|
||||||
|
msgid "Version of Nova API to use in novaclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/clients.py:34
|
||||||
|
msgid "Version of Glance API to use in glanceclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/clients.py:39
|
||||||
|
msgid "Version of Cinder API to use in cinderclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/clients.py:44
|
||||||
|
msgid "Version of Ceilometer API to use in ceilometerclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/clients.py:50
|
||||||
|
msgid "Version of Neutron API to use in neutronclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:59
|
||||||
|
#, python-format
|
||||||
|
msgid "Unexpected keystone client error occurred: %s"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:72
|
||||||
msgid "An unknown exception occurred"
|
msgid "An unknown exception occurred"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:77
|
#: watcher/common/exception.py:92
|
||||||
msgid "Exception in string format operation"
|
msgid "Exception in string format operation"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:107
|
#: watcher/common/exception.py:122
|
||||||
msgid "Not authorized"
|
msgid "Not authorized"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:112
|
#: watcher/common/exception.py:127
|
||||||
msgid "Operation not permitted"
|
msgid "Operation not permitted"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:116
|
#: watcher/common/exception.py:131
|
||||||
msgid "Unacceptable parameters"
|
msgid "Unacceptable parameters"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:121
|
#: watcher/common/exception.py:136
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The %(name)s %(id)s could not be found"
|
msgid "The %(name)s %(id)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:125
|
#: watcher/common/exception.py:140
|
||||||
#, fuzzy
|
#, fuzzy
|
||||||
msgid "Conflict"
|
msgid "Conflict"
|
||||||
msgstr "Conflit."
|
msgstr "Conflit"
|
||||||
|
|
||||||
#: watcher/common/exception.py:130
|
#: watcher/common/exception.py:145
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The %(name)s resource %(id)s could not be found"
|
msgid "The %(name)s resource %(id)s could not be found"
|
||||||
msgstr ""
|
msgstr "La ressource %(name)s / %(id)s est introuvable"
|
||||||
|
|
||||||
#: watcher/common/exception.py:135
|
#: watcher/common/exception.py:150
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Expected an uuid or int but received %(identity)s"
|
msgid "Expected an uuid or int but received %(identity)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:139
|
#: watcher/common/exception.py:154
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Goal %(goal)s is not defined in Watcher configuration file"
|
msgid "Goal %(goal)s is not defined in Watcher configuration file"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:145
|
#: watcher/common/exception.py:158
|
||||||
#, python-format
|
|
||||||
msgid "%(err)s"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:149
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Expected a uuid but received %(uuid)s"
|
msgid "Expected a uuid but received %(uuid)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:153
|
#: watcher/common/exception.py:162
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Expected a logical name but received %(name)s"
|
msgid "Expected a logical name but received %(name)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:157
|
#: watcher/common/exception.py:166
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Expected a logical name or uuid but received %(name)s"
|
msgid "Expected a logical name or uuid but received %(name)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:161
|
#: watcher/common/exception.py:170
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "AuditTemplate %(audit_template)s could not be found"
|
msgid "AuditTemplate %(audit_template)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:165
|
#: watcher/common/exception.py:174
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "An audit_template with UUID %(uuid)s or name %(name)s already exists"
|
msgid "An audit_template with UUID %(uuid)s or name %(name)s already exists"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:170
|
#: watcher/common/exception.py:179
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "AuditTemplate %(audit_template)s is referenced by one or multiple audit"
|
msgid "AuditTemplate %(audit_template)s is referenced by one or multiple audit"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:175
|
#: watcher/common/exception.py:184
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Audit %(audit)s could not be found"
|
msgid "Audit %(audit)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:179
|
#: watcher/common/exception.py:188
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "An audit with UUID %(uuid)s already exists"
|
msgid "An audit with UUID %(uuid)s already exists"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:183
|
#: watcher/common/exception.py:192
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Audit %(audit)s is referenced by one or multiple action plans"
|
msgid "Audit %(audit)s is referenced by one or multiple action plans"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:188
|
#: watcher/common/exception.py:197
|
||||||
msgid "ActionPlan %(action plan)s could not be found"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:192
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "An action plan with UUID %(uuid)s already exists"
|
msgid "ActionPlan %(action_plan)s could not be found"
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:196
|
|
||||||
#, python-format
|
|
||||||
msgid "Action Plan %(action_plan)s is referenced by one or multiple actions"
|
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:201
|
#: watcher/common/exception.py:201
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Action %(action)s could not be found"
|
msgid "An action plan with UUID %(uuid)s already exists"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:205
|
#: watcher/common/exception.py:205
|
||||||
#, python-format
|
#, python-format
|
||||||
|
msgid "Action Plan %(action_plan)s is referenced by one or multiple actions"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:210
|
||||||
|
#, python-format
|
||||||
|
msgid "Action %(action)s could not be found"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:214
|
||||||
|
#, python-format
|
||||||
msgid "An action with UUID %(uuid)s already exists"
|
msgid "An action with UUID %(uuid)s already exists"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:209
|
#: watcher/common/exception.py:218
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Action plan %(action_plan)s is referenced by one or multiple goals"
|
msgid "Action plan %(action_plan)s is referenced by one or multiple goals"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:214
|
#: watcher/common/exception.py:223
|
||||||
msgid "Filtering actions on both audit and action-plan is prohibited"
|
msgid "Filtering actions on both audit and action-plan is prohibited"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:223
|
#: watcher/common/exception.py:232
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s"
|
msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:233
|
#: watcher/common/exception.py:239
|
||||||
msgid "Description must be an instance of str"
|
msgid "Illegal argument"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:243
|
#: watcher/common/exception.py:243
|
||||||
msgid "An exception occurred without a description"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:251
|
|
||||||
msgid "Description cannot be empty"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:260
|
|
||||||
msgid "No such metric"
|
msgid "No such metric"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:269
|
#: watcher/common/exception.py:247
|
||||||
msgid "No rows were returned"
|
msgid "No rows were returned"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:277
|
#: watcher/common/exception.py:251
|
||||||
|
#, python-format
|
||||||
|
msgid "%(client)s connection failed. Reason: %(reason)s"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:255
|
||||||
msgid "'Keystone API endpoint is missing''"
|
msgid "'Keystone API endpoint is missing''"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:281
|
#: watcher/common/exception.py:259
|
||||||
msgid "The list of hypervisor(s) in the cluster is empty"
|
msgid "The list of hypervisor(s) in the cluster is empty"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:285
|
#: watcher/common/exception.py:263
|
||||||
msgid "The metrics resource collector is not defined"
|
msgid "The metrics resource collector is not defined"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:289
|
#: watcher/common/exception.py:267
|
||||||
msgid "the cluster state is not defined"
|
msgid "the cluster state is not defined"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:295
|
#: watcher/common/exception.py:273
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The instance '%(name)s' is not found"
|
msgid "The instance '%(name)s' is not found"
|
||||||
msgstr "L'instance '%(name)s' n'a pas été trouvée"
|
msgstr "L'instance '%(name)s' n'a pas été trouvée"
|
||||||
|
|
||||||
#: watcher/common/exception.py:299
|
#: watcher/common/exception.py:277
|
||||||
msgid "The hypervisor is not found"
|
msgid "The hypervisor is not found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:303
|
#: watcher/common/exception.py:281
|
||||||
#, fuzzy, python-format
|
#, fuzzy, python-format
|
||||||
msgid "Error loading plugin '%(name)s'"
|
msgid "Error loading plugin '%(name)s'"
|
||||||
msgstr "Erreur lors du chargement du module '%(name)s'"
|
msgstr "Erreur lors du chargement du module '%(name)s'"
|
||||||
|
|
||||||
#: watcher/common/keystone.py:59
|
#: watcher/common/exception.py:285
|
||||||
msgid "No Keystone service catalog loaded"
|
#, fuzzy, python-format
|
||||||
|
msgid "The identifier '%(name)s' is a reserved word"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/service.py:83
|
#: watcher/common/service.py:83
|
||||||
@@ -332,7 +364,7 @@ msgstr ""
|
|||||||
#: watcher/common/utils.py:53
|
#: watcher/common/utils.py:53
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid ""
|
msgid ""
|
||||||
"Failed to remove trailing character. Returning original object. Supplied "
|
"Failed to remove trailing character. Returning original object.Supplied "
|
||||||
"object is not a string: %s,"
|
"object is not a string: %s,"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
@@ -350,18 +382,22 @@ msgid ""
|
|||||||
"template uuid instead"
|
"template uuid instead"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/api.py:277
|
#: watcher/db/sqlalchemy/api.py:278
|
||||||
msgid "Cannot overwrite UUID for an existing AuditTemplate."
|
msgid "Cannot overwrite UUID for an existing Audit Template."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/api.py:386 watcher/db/sqlalchemy/api.py:586
|
#: watcher/db/sqlalchemy/api.py:388
|
||||||
msgid "Cannot overwrite UUID for an existing Audit."
|
msgid "Cannot overwrite UUID for an existing Audit."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/api.py:477
|
#: watcher/db/sqlalchemy/api.py:480
|
||||||
msgid "Cannot overwrite UUID for an existing Action."
|
msgid "Cannot overwrite UUID for an existing Action."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/sqlalchemy/api.py:590
|
||||||
|
msgid "Cannot overwrite UUID for an existing Action Plan."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/migration.py:73
|
#: watcher/db/sqlalchemy/migration.py:73
|
||||||
msgid ""
|
msgid ""
|
||||||
"Watcher database schema is already under version control; use upgrade() "
|
"Watcher database schema is already under version control; use upgrade() "
|
||||||
@@ -373,44 +409,44 @@ msgstr ""
|
|||||||
msgid "'obj' argument type is not valid"
|
msgid "'obj' argument type is not valid"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/planner/default.py:75
|
#: watcher/decision_engine/planner/default.py:72
|
||||||
msgid "The action plan is empty"
|
msgid "The action plan is empty"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/selection/default.py:59
|
#: watcher/decision_engine/strategy/selection/default.py:60
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Incorrect mapping: could not find associated strategy for '%s'"
|
msgid "Incorrect mapping: could not find associated strategy for '%s'"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:267
|
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:269
|
||||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:314
|
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:316
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "No values returned by %(resource_id)s for %(metric_name)s"
|
msgid "No values returned by %(resource_id)s for %(metric_name)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:349
|
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:426
|
||||||
msgid "Initializing Sercon Consolidation"
|
msgid "Initializing Sercon Consolidation"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:406
|
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:470
|
||||||
msgid "The workloads of the compute nodes of the cluster is zero"
|
msgid "The workloads of the compute nodes of the cluster is zero"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:125
|
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:127
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "%s: no outlet temp data"
|
msgid "%s: no outlet temp data"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:149
|
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:151
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "VM not active, skipped: %s"
|
msgid "VM not active, skipped: %s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:206
|
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:208
|
||||||
msgid "No hosts under outlet temp threshold found"
|
msgid "No hosts under outlet temp threshold found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:229
|
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:231
|
||||||
msgid "No proper target host could be found"
|
msgid "No proper target host could be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
@@ -573,3 +609,32 @@ msgstr ""
|
|||||||
#~ msgid "The WorkFlow Engine has failedto execute the action %s"
|
#~ msgid "The WorkFlow Engine has failedto execute the action %s"
|
||||||
#~ msgstr ""
|
#~ msgstr ""
|
||||||
|
|
||||||
|
#~ msgid "ActionPlan %(action plan)s could not be found"
|
||||||
|
#~ msgstr ""
|
||||||
|
|
||||||
|
#~ msgid "Description must be an instance of str"
|
||||||
|
#~ msgstr ""
|
||||||
|
|
||||||
|
#~ msgid "An exception occurred without a description"
|
||||||
|
#~ msgstr ""
|
||||||
|
|
||||||
|
#~ msgid "Description cannot be empty"
|
||||||
|
#~ msgstr ""
|
||||||
|
|
||||||
|
#~ msgid "The hypervisor state is invalid."
|
||||||
|
#~ msgstr "L'état de l'hyperviseur est invalide"
|
||||||
|
|
||||||
|
#~ msgid "%(err)s"
|
||||||
|
#~ msgstr "%(err)s"
|
||||||
|
|
||||||
|
#~ msgid "No Keystone service catalog loaded"
|
||||||
|
#~ msgstr ""
|
||||||
|
|
||||||
|
#~ msgid "Cannot overwrite UUID for an existing AuditTemplate."
|
||||||
|
#~ msgstr ""
|
||||||
|
|
||||||
|
#~ msgid ""
|
||||||
|
#~ "This identifier is reserved word and "
|
||||||
|
#~ "cannot be used as variables '%(name)s'"
|
||||||
|
#~ msgstr ""
|
||||||
|
|
||||||
|
|||||||
@@ -7,23 +7,23 @@
|
|||||||
#, fuzzy
|
#, fuzzy
|
||||||
msgid ""
|
msgid ""
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Project-Id-Version: python-watcher 0.22.1.dev49\n"
|
"Project-Id-Version: python-watcher 0.24.1.dev12\n"
|
||||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||||
"POT-Creation-Date: 2016-01-22 10:43+0100\n"
|
"POT-Creation-Date: 2016-03-16 18:18-0500\n"
|
||||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||||
"MIME-Version: 1.0\n"
|
"MIME-Version: 1.0\n"
|
||||||
"Content-Type: text/plain; charset=utf-8\n"
|
"Content-Type: text/plain; charset=utf-8\n"
|
||||||
"Content-Transfer-Encoding: 8bit\n"
|
"Content-Transfer-Encoding: 8bit\n"
|
||||||
"Generated-By: Babel 2.1.1\n"
|
"Generated-By: Babel 2.2.0\n"
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/action_plan.py:102
|
#: watcher/api/controllers/v1/action_plan.py:102
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Invalid state: %(state)s"
|
msgid "Invalid state: %(state)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/action_plan.py:416
|
#: watcher/api/controllers/v1/action_plan.py:422
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)"
|
msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
@@ -65,7 +65,12 @@ msgstr ""
|
|||||||
msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'"
|
msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/api/controllers/v1/utils.py:57
|
#: watcher/api/controllers/v1/utils.py:58
|
||||||
|
#, python-format
|
||||||
|
msgid "Invalid filter: %s"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/api/controllers/v1/utils.py:65
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Adding a new attribute (%s) to the root of the resource is not allowed"
|
msgid "Adding a new attribute (%s) to the root of the resource is not allowed"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
@@ -84,21 +89,37 @@ msgstr ""
|
|||||||
msgid "Error parsing HTTP response: %s"
|
msgid "Error parsing HTTP response: %s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/applier/actions/change_nova_service_state.py:58
|
#: watcher/applier/actions/change_nova_service_state.py:90
|
||||||
msgid "The target state is not defined"
|
msgid "The target state is not defined"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/applier/workflow_engine/default.py:126
|
#: watcher/applier/actions/migration.py:71
|
||||||
|
msgid "The parameter resource_id is invalid."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/applier/actions/migration.py:124
|
||||||
|
#, python-format
|
||||||
|
msgid ""
|
||||||
|
"Unexpected error occured. Migration failed forinstance %s. Leaving "
|
||||||
|
"instance on previous host."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/applier/actions/migration.py:140
|
||||||
|
#, python-format
|
||||||
|
msgid "Migration of type %(migration_type)s is not supported."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/applier/workflow_engine/default.py:129
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The WorkFlow Engine has failed to execute the action %s"
|
msgid "The WorkFlow Engine has failed to execute the action %s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/applier/workflow_engine/default.py:144
|
#: watcher/applier/workflow_engine/default.py:147
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Revert action %s"
|
msgid "Revert action %s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/applier/workflow_engine/default.py:150
|
#: watcher/applier/workflow_engine/default.py:153
|
||||||
msgid "Oops! We need disaster recover plan"
|
msgid "Oops! We need disaster recover plan"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
@@ -118,182 +139,223 @@ msgstr ""
|
|||||||
msgid "serving on http://%(host)s:%(port)s"
|
msgid "serving on http://%(host)s:%(port)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:51
|
#: watcher/common/clients.py:29
|
||||||
|
msgid "Version of Nova API to use in novaclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/clients.py:34
|
||||||
|
msgid "Version of Glance API to use in glanceclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/clients.py:39
|
||||||
|
msgid "Version of Cinder API to use in cinderclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/clients.py:44
|
||||||
|
msgid "Version of Ceilometer API to use in ceilometerclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/clients.py:50
|
||||||
|
msgid "Version of Neutron API to use in neutronclient."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:59
|
||||||
|
#, python-format
|
||||||
|
msgid "Unexpected keystone client error occurred: %s"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:72
|
||||||
msgid "An unknown exception occurred"
|
msgid "An unknown exception occurred"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:71
|
#: watcher/common/exception.py:92
|
||||||
msgid "Exception in string format operation"
|
msgid "Exception in string format operation"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:101
|
#: watcher/common/exception.py:122
|
||||||
msgid "Not authorized"
|
msgid "Not authorized"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:106
|
#: watcher/common/exception.py:127
|
||||||
msgid "Operation not permitted"
|
msgid "Operation not permitted"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:110
|
#: watcher/common/exception.py:131
|
||||||
msgid "Unacceptable parameters"
|
msgid "Unacceptable parameters"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:115
|
#: watcher/common/exception.py:136
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The %(name)s %(id)s could not be found"
|
msgid "The %(name)s %(id)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:119
|
#: watcher/common/exception.py:140
|
||||||
msgid "Conflict"
|
msgid "Conflict"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:124
|
#: watcher/common/exception.py:145
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The %(name)s resource %(id)s could not be found"
|
msgid "The %(name)s resource %(id)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:129
|
#: watcher/common/exception.py:150
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Expected an uuid or int but received %(identity)s"
|
msgid "Expected an uuid or int but received %(identity)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:133
|
#: watcher/common/exception.py:154
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Goal %(goal)s is not defined in Watcher configuration file"
|
msgid "Goal %(goal)s is not defined in Watcher configuration file"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:139
|
#: watcher/common/exception.py:158
|
||||||
#, python-format
|
|
||||||
msgid "%(err)s"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:143
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Expected a uuid but received %(uuid)s"
|
msgid "Expected a uuid but received %(uuid)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:147
|
#: watcher/common/exception.py:162
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Expected a logical name but received %(name)s"
|
msgid "Expected a logical name but received %(name)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:151
|
#: watcher/common/exception.py:166
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Expected a logical name or uuid but received %(name)s"
|
msgid "Expected a logical name or uuid but received %(name)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:155
|
#: watcher/common/exception.py:170
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "AuditTemplate %(audit_template)s could not be found"
|
msgid "AuditTemplate %(audit_template)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:159
|
#: watcher/common/exception.py:174
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "An audit_template with UUID %(uuid)s or name %(name)s already exists"
|
msgid "An audit_template with UUID %(uuid)s or name %(name)s already exists"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:164
|
#: watcher/common/exception.py:179
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "AuditTemplate %(audit_template)s is referenced by one or multiple audit"
|
msgid "AuditTemplate %(audit_template)s is referenced by one or multiple audit"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:169
|
#: watcher/common/exception.py:184
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Audit %(audit)s could not be found"
|
msgid "Audit %(audit)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:173
|
#: watcher/common/exception.py:188
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "An audit with UUID %(uuid)s already exists"
|
msgid "An audit with UUID %(uuid)s already exists"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:177
|
#: watcher/common/exception.py:192
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Audit %(audit)s is referenced by one or multiple action plans"
|
msgid "Audit %(audit)s is referenced by one or multiple action plans"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:182
|
#: watcher/common/exception.py:197
|
||||||
msgid "ActionPlan %(action plan)s could not be found"
|
#, python-format
|
||||||
|
msgid "ActionPlan %(action_plan)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:186
|
#: watcher/common/exception.py:201
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "An action plan with UUID %(uuid)s already exists"
|
msgid "An action plan with UUID %(uuid)s already exists"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:190
|
#: watcher/common/exception.py:205
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Action Plan %(action_plan)s is referenced by one or multiple actions"
|
msgid "Action Plan %(action_plan)s is referenced by one or multiple actions"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:195
|
#: watcher/common/exception.py:210
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Action %(action)s could not be found"
|
msgid "Action %(action)s could not be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:199
|
#: watcher/common/exception.py:214
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "An action with UUID %(uuid)s already exists"
|
msgid "An action with UUID %(uuid)s already exists"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:203
|
#: watcher/common/exception.py:218
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Action plan %(action_plan)s is referenced by one or multiple goals"
|
msgid "Action plan %(action_plan)s is referenced by one or multiple goals"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:208
|
#: watcher/common/exception.py:223
|
||||||
msgid "Filtering actions on both audit and action-plan is prohibited"
|
msgid "Filtering actions on both audit and action-plan is prohibited"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:217
|
#: watcher/common/exception.py:232
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s"
|
msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:224
|
#: watcher/common/exception.py:238
|
||||||
|
#, python-format
|
||||||
|
msgid "Workflow execution error: %(error)s"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:242
|
||||||
msgid "Illegal argument"
|
msgid "Illegal argument"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:228
|
#: watcher/common/exception.py:246
|
||||||
msgid "No such metric"
|
msgid "No such metric"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:232
|
#: watcher/common/exception.py:250
|
||||||
msgid "No rows were returned"
|
msgid "No rows were returned"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:236
|
|
||||||
msgid "'Keystone API endpoint is missing''"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:240
|
|
||||||
msgid "The list of hypervisor(s) in the cluster is empty"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:244
|
|
||||||
msgid "The metrics resource collector is not defined"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:248
|
|
||||||
msgid "the cluster state is not defined"
|
|
||||||
msgstr ""
|
|
||||||
|
|
||||||
#: watcher/common/exception.py:254
|
#: watcher/common/exception.py:254
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "The instance '%(name)s' is not found"
|
msgid "%(client)s connection failed. Reason: %(reason)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:258
|
#: watcher/common/exception.py:258
|
||||||
msgid "The hypervisor is not found"
|
msgid "'Keystone API endpoint is missing''"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/exception.py:262
|
#: watcher/common/exception.py:262
|
||||||
|
msgid "The list of hypervisor(s) in the cluster is empty"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:266
|
||||||
|
msgid "The metrics resource collector is not defined"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:270
|
||||||
|
msgid "the cluster state is not defined"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:276
|
||||||
|
#, python-format
|
||||||
|
msgid "The instance '%(name)s' is not found"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:280
|
||||||
|
msgid "The hypervisor is not found"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:284
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Error loading plugin '%(name)s'"
|
msgid "Error loading plugin '%(name)s'"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/keystone.py:59
|
#: watcher/common/exception.py:288
|
||||||
msgid "No Keystone service catalog loaded"
|
#, python-format
|
||||||
|
msgid "The identifier '%(name)s' is a reserved word"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:292
|
||||||
|
#, python-format
|
||||||
|
msgid "The %(name)s resource %(id)s is not soft deleted"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/common/exception.py:296
|
||||||
|
msgid "Limit should be positive"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/common/service.py:83
|
#: watcher/common/service.py:83
|
||||||
@@ -338,24 +400,105 @@ msgstr ""
|
|||||||
msgid "Messaging configuration error"
|
msgid "Messaging configuration error"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/api.py:256
|
#: watcher/db/purge.py:50
|
||||||
|
msgid "Audit Templates"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:51
|
||||||
|
msgid "Audits"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:52
|
||||||
|
msgid "Action Plans"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:53
|
||||||
|
msgid "Actions"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:100
|
||||||
|
msgid "Total"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:158
|
||||||
|
msgid "Audit Template"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:206
|
||||||
|
#, python-format
|
||||||
|
msgid ""
|
||||||
|
"Orphans found:\n"
|
||||||
|
"%s"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:265
|
||||||
|
#, python-format
|
||||||
|
msgid "There are %(count)d objects set for deletion. Continue? [y/N]"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:272
|
||||||
|
#, python-format
|
||||||
|
msgid ""
|
||||||
|
"The number of objects (%(num)s) to delete from the database exceeds the "
|
||||||
|
"maximum number of objects (%(max_number)s) specified."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:277
|
||||||
|
msgid "Do you want to delete objects up to the specified maximum number? [y/N]"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:340
|
||||||
|
msgid "Deleting..."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:346
|
||||||
|
msgid "Starting purge command"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:356
|
||||||
|
msgid " (orphans excluded)"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:357
|
||||||
|
msgid " (may include orphans)"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:360 watcher/db/purge.py:361
|
||||||
|
#, python-format
|
||||||
|
msgid "Purge results summary%s:"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:364
|
||||||
|
#, python-format
|
||||||
|
msgid "Here below is a table containing the objects that can be purged%s:"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/purge.py:369
|
||||||
|
msgid "Purge process completed"
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/sqlalchemy/api.py:362
|
||||||
msgid ""
|
msgid ""
|
||||||
"Multiple audit templates exist with the same name. Please use the audit "
|
"Multiple audit templates exist with the same name. Please use the audit "
|
||||||
"template uuid instead"
|
"template uuid instead"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/api.py:277
|
#: watcher/db/sqlalchemy/api.py:384
|
||||||
msgid "Cannot overwrite UUID for an existing AuditTemplate."
|
msgid "Cannot overwrite UUID for an existing Audit Template."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/api.py:386 watcher/db/sqlalchemy/api.py:586
|
#: watcher/db/sqlalchemy/api.py:495
|
||||||
msgid "Cannot overwrite UUID for an existing Audit."
|
msgid "Cannot overwrite UUID for an existing Audit."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/api.py:477
|
#: watcher/db/sqlalchemy/api.py:588
|
||||||
msgid "Cannot overwrite UUID for an existing Action."
|
msgid "Cannot overwrite UUID for an existing Action."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
|
#: watcher/db/sqlalchemy/api.py:699
|
||||||
|
msgid "Cannot overwrite UUID for an existing Action Plan."
|
||||||
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/db/sqlalchemy/migration.py:73
|
#: watcher/db/sqlalchemy/migration.py:73
|
||||||
msgid ""
|
msgid ""
|
||||||
"Watcher database schema is already under version control; use upgrade() "
|
"Watcher database schema is already under version control; use upgrade() "
|
||||||
@@ -367,44 +510,44 @@ msgstr ""
|
|||||||
msgid "'obj' argument type is not valid"
|
msgid "'obj' argument type is not valid"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/planner/default.py:76
|
#: watcher/decision_engine/planner/default.py:79
|
||||||
msgid "The action plan is empty"
|
msgid "The action plan is empty"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/selection/default.py:59
|
#: watcher/decision_engine/strategy/selection/default.py:60
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Incorrect mapping: could not find associated strategy for '%s'"
|
msgid "Incorrect mapping: could not find associated strategy for '%s'"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:267
|
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:288
|
||||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:314
|
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:335
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "No values returned by %(resource_id)s for %(metric_name)s"
|
msgid "No values returned by %(resource_id)s for %(metric_name)s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:424
|
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:448
|
||||||
msgid "Initializing Sercon Consolidation"
|
msgid "Initializing Sercon Consolidation"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:468
|
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:492
|
||||||
msgid "The workloads of the compute nodes of the cluster is zero"
|
msgid "The workloads of the compute nodes of the cluster is zero"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:125
|
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:147
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "%s: no outlet temp data"
|
msgid "%s: no outlet temp data"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:149
|
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:172
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "VM not active, skipped: %s"
|
msgid "VM not active, skipped: %s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:206
|
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:230
|
||||||
msgid "No hosts under outlet temp threshold found"
|
msgid "No hosts under outlet temp threshold found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:229
|
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:253
|
||||||
msgid "No proper target host could be found"
|
msgid "No proper target host could be found"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
|
|||||||
@@ -20,17 +20,19 @@
|
|||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
from watcher.common.ceilometer import CeilometerClient
|
from watcher.common import ceilometer_helper
|
||||||
|
|
||||||
from watcher.metrics_engine.cluster_history.api import BaseClusterHistory
|
from watcher.metrics_engine.cluster_history import base
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class CeilometerClusterHistory(BaseClusterHistory):
|
class CeilometerClusterHistory(base.BaseClusterHistory):
|
||||||
def __init__(self):
|
def __init__(self, osc=None):
|
||||||
self.ceilometer = CeilometerClient()
|
""":param osc: an OpenStackClients instance"""
|
||||||
|
super(CeilometerClusterHistory, self).__init__()
|
||||||
|
self.ceilometer = ceilometer_helper.CeilometerHelper(osc=osc)
|
||||||
|
|
||||||
def statistic_list(self, meter_name, query=None, period=None):
|
def statistic_list(self, meter_name, query=None, period=None):
|
||||||
return self.ceilometer.statistic_list(meter_name, query, period)
|
return self.ceilometer.statistic_list(meter_name, query, period)
|
||||||
|
|||||||
@@ -20,18 +20,16 @@
|
|||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.common.keystone import KeystoneClient
|
from watcher.common import nova_helper
|
||||||
from watcher.common.nova import NovaClient
|
from watcher.metrics_engine.cluster_model_collector import nova as cnova
|
||||||
from watcher.metrics_engine.cluster_model_collector.nova import \
|
|
||||||
NovaClusterModelCollector
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
|
||||||
class CollectorManager(object):
|
class CollectorManager(object):
|
||||||
def get_cluster_model_collector(self):
|
def get_cluster_model_collector(self, osc=None):
|
||||||
keystone = KeystoneClient()
|
""":param osc: an OpenStackClients instance"""
|
||||||
wrapper = NovaClient(keystone.get_credentials(),
|
nova = nova_helper.NovaHelper(osc=osc)
|
||||||
session=keystone.get_session())
|
return cnova.NovaClusterModelCollector(nova)
|
||||||
return NovaClusterModelCollector(wrapper=wrapper)
|
|
||||||
|
|||||||
@@ -17,47 +17,47 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.decision_engine.model.hypervisor import Hypervisor
|
from watcher.decision_engine.model import hypervisor as obj_hypervisor
|
||||||
from watcher.decision_engine.model.model_root import ModelRoot
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.model.resource import Resource
|
from watcher.decision_engine.model import resource
|
||||||
from watcher.decision_engine.model.resource import ResourceType
|
from watcher.decision_engine.model import vm as obj_vm
|
||||||
from watcher.decision_engine.model.vm import VM
|
from watcher.metrics_engine.cluster_model_collector import base
|
||||||
from watcher.metrics_engine.cluster_model_collector.api import \
|
|
||||||
BaseClusterModelCollector
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class NovaClusterModelCollector(BaseClusterModelCollector):
|
class NovaClusterModelCollector(base.BaseClusterModelCollector):
|
||||||
def __init__(self, wrapper):
|
def __init__(self, wrapper):
|
||||||
|
super(NovaClusterModelCollector, self).__init__()
|
||||||
self.wrapper = wrapper
|
self.wrapper = wrapper
|
||||||
|
|
||||||
def get_latest_cluster_data_model(self):
|
def get_latest_cluster_data_model(self):
|
||||||
|
LOG.debug("Getting latest cluster data model")
|
||||||
|
|
||||||
cluster = ModelRoot()
|
cluster = model_root.ModelRoot()
|
||||||
mem = Resource(ResourceType.memory)
|
mem = resource.Resource(resource.ResourceType.memory)
|
||||||
num_cores = Resource(ResourceType.cpu_cores)
|
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
|
||||||
disk = Resource(ResourceType.disk)
|
disk = resource.Resource(resource.ResourceType.disk)
|
||||||
|
disk_capacity = resource.Resource(resource.ResourceType.disk_capacity)
|
||||||
cluster.create_resource(mem)
|
cluster.create_resource(mem)
|
||||||
cluster.create_resource(num_cores)
|
cluster.create_resource(num_cores)
|
||||||
cluster.create_resource(disk)
|
cluster.create_resource(disk)
|
||||||
|
cluster.create_resource(disk_capacity)
|
||||||
|
|
||||||
flavor_cache = {}
|
flavor_cache = {}
|
||||||
hypervisors = self.wrapper.get_hypervisors_list()
|
hypervisors = self.wrapper.get_hypervisors_list()
|
||||||
for h in hypervisors:
|
for h in hypervisors:
|
||||||
service = self.wrapper.nova.services.find(id=h.service['id'])
|
service = self.wrapper.nova.services.find(id=h.service['id'])
|
||||||
# create hypervisor in cluster_model_collector
|
# create hypervisor in cluster_model_collector
|
||||||
hypervisor = Hypervisor()
|
hypervisor = obj_hypervisor.Hypervisor()
|
||||||
hypervisor.uuid = service.host
|
hypervisor.uuid = service.host
|
||||||
hypervisor.hostname = h.hypervisor_hostname
|
hypervisor.hostname = h.hypervisor_hostname
|
||||||
# set capacity
|
# set capacity
|
||||||
mem.set_capacity(hypervisor, h.memory_mb)
|
mem.set_capacity(hypervisor, h.memory_mb)
|
||||||
disk.set_capacity(hypervisor, h.free_disk_gb)
|
disk.set_capacity(hypervisor, h.free_disk_gb)
|
||||||
|
disk_capacity.set_capacity(hypervisor, h.local_gb)
|
||||||
num_cores.set_capacity(hypervisor, h.vcpus)
|
num_cores.set_capacity(hypervisor, h.vcpus)
|
||||||
hypervisor.state = h.state
|
hypervisor.state = h.state
|
||||||
hypervisor.status = h.status
|
hypervisor.status = h.status
|
||||||
@@ -65,7 +65,7 @@ class NovaClusterModelCollector(BaseClusterModelCollector):
|
|||||||
vms = self.wrapper.get_vms_by_hypervisor(str(service.host))
|
vms = self.wrapper.get_vms_by_hypervisor(str(service.host))
|
||||||
for v in vms:
|
for v in vms:
|
||||||
# create VM in cluster_model_collector
|
# create VM in cluster_model_collector
|
||||||
vm = VM()
|
vm = obj_vm.VM()
|
||||||
vm.uuid = v.id
|
vm.uuid = v.id
|
||||||
# nova/nova/compute/vm_states.py
|
# nova/nova/compute/vm_states.py
|
||||||
vm.state = getattr(v, 'OS-EXT-STS:vm_state')
|
vm.state = getattr(v, 'OS-EXT-STS:vm_state')
|
||||||
|
|||||||
@@ -42,7 +42,6 @@ class Action(base.WatcherObject):
|
|||||||
'uuid': obj_utils.str_or_none,
|
'uuid': obj_utils.str_or_none,
|
||||||
'action_plan_id': obj_utils.int_or_none,
|
'action_plan_id': obj_utils.int_or_none,
|
||||||
'action_type': obj_utils.str_or_none,
|
'action_type': obj_utils.str_or_none,
|
||||||
'applies_to': obj_utils.str_or_none,
|
|
||||||
'input_parameters': obj_utils.dict_or_none,
|
'input_parameters': obj_utils.dict_or_none,
|
||||||
'state': obj_utils.str_or_none,
|
'state': obj_utils.str_or_none,
|
||||||
# todo(jed) remove parameter alarm
|
# todo(jed) remove parameter alarm
|
||||||
|
|||||||
@@ -71,13 +71,14 @@ state may be one of the following:
|
|||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import utils
|
from watcher.common import utils
|
||||||
from watcher.db import api as dbapi
|
from watcher.db import api as dbapi
|
||||||
|
from watcher.objects import action as action_objects
|
||||||
from watcher.objects import base
|
from watcher.objects import base
|
||||||
from watcher.objects import utils as obj_utils
|
from watcher.objects import utils as obj_utils
|
||||||
|
|
||||||
|
|
||||||
class State(object):
|
class State(object):
|
||||||
RECOMMENDED = 'RECOMMENDED'
|
RECOMMENDED = 'RECOMMENDED'
|
||||||
TRIGGERED = 'TRIGGERED'
|
PENDING = 'PENDING'
|
||||||
ONGOING = 'ONGOING'
|
ONGOING = 'ONGOING'
|
||||||
FAILED = 'FAILED'
|
FAILED = 'FAILED'
|
||||||
SUCCEEDED = 'SUCCEEDED'
|
SUCCEEDED = 'SUCCEEDED'
|
||||||
@@ -251,6 +252,14 @@ class ActionPlan(base.WatcherObject):
|
|||||||
A context should be set when instantiating the
|
A context should be set when instantiating the
|
||||||
object, e.g.: Audit(context)
|
object, e.g.: Audit(context)
|
||||||
"""
|
"""
|
||||||
|
related_actions = action_objects.Action.list(
|
||||||
|
context=self._context,
|
||||||
|
filters={"action_plan_uuid": self.uuid})
|
||||||
|
|
||||||
|
# Cascade soft_delete of related actions
|
||||||
|
for related_action in related_actions:
|
||||||
|
related_action.soft_delete()
|
||||||
|
|
||||||
self.dbapi.soft_delete_action_plan(self.uuid)
|
self.dbapi.soft_delete_action_plan(self.uuid)
|
||||||
self.state = "DELETED"
|
self.state = State.DELETED
|
||||||
self.save()
|
self.save()
|
||||||
|
|||||||
@@ -47,7 +47,6 @@ contain a list of extra parameters related to the
|
|||||||
provided as a list of key-value pairs.
|
provided as a list of key-value pairs.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import utils
|
from watcher.common import utils
|
||||||
from watcher.db import api as dbapi
|
from watcher.db import api as dbapi
|
||||||
@@ -164,7 +163,7 @@ class AuditTemplate(base.WatcherObject):
|
|||||||
return audit_template
|
return audit_template
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def list(cls, context, limit=None, marker=None,
|
def list(cls, context, filters=None, limit=None, marker=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
"""Return a list of :class:`AuditTemplate` objects.
|
"""Return a list of :class:`AuditTemplate` objects.
|
||||||
|
|
||||||
@@ -174,6 +173,7 @@ class AuditTemplate(base.WatcherObject):
|
|||||||
argument, even though we don't use it.
|
argument, even though we don't use it.
|
||||||
A context should be set when instantiating the
|
A context should be set when instantiating the
|
||||||
object, e.g.: AuditTemplate(context)
|
object, e.g.: AuditTemplate(context)
|
||||||
|
:param filters: dict mapping the filter key to a value.
|
||||||
:param limit: maximum number of resources to return in a single result.
|
:param limit: maximum number of resources to return in a single result.
|
||||||
:param marker: pagination marker for large data sets.
|
:param marker: pagination marker for large data sets.
|
||||||
:param sort_key: column to sort results by.
|
:param sort_key: column to sort results by.
|
||||||
@@ -183,6 +183,7 @@ class AuditTemplate(base.WatcherObject):
|
|||||||
|
|
||||||
db_audit_templates = cls.dbapi.get_audit_template_list(
|
db_audit_templates = cls.dbapi.get_audit_template_list(
|
||||||
context,
|
context,
|
||||||
|
filters=filters,
|
||||||
limit=limit,
|
limit=limit,
|
||||||
marker=marker,
|
marker=marker,
|
||||||
sort_key=sort_key,
|
sort_key=sort_key,
|
||||||
@@ -202,9 +203,6 @@ class AuditTemplate(base.WatcherObject):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
values = self.obj_get_changes()
|
values = self.obj_get_changes()
|
||||||
goal = values['goal']
|
|
||||||
if goal not in cfg.CONF.watcher_goals.goals.keys():
|
|
||||||
raise exception.InvalidGoal(goal=goal)
|
|
||||||
db_audit_template = self.dbapi.create_audit_template(values)
|
db_audit_template = self.dbapi.create_audit_template(values)
|
||||||
self._from_db_object(self, db_audit_template)
|
self._from_db_object(self, db_audit_template)
|
||||||
|
|
||||||
|
|||||||
@@ -117,7 +117,7 @@ def dt_serializer(name):
|
|||||||
return serializer
|
return serializer
|
||||||
|
|
||||||
|
|
||||||
def dt_deserializer(instance, val):
|
def dt_deserializer(val):
|
||||||
"""A deserializer method for datetime attributes."""
|
"""A deserializer method for datetime attributes."""
|
||||||
if val is None:
|
if val is None:
|
||||||
return None
|
return None
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user