Compare commits
247 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3591d9fa0a | ||
|
|
44fc7d5799 | ||
|
|
a330576eae | ||
|
|
70d05214c7 | ||
|
|
ca9644f4d8 | ||
|
|
44061cf333 | ||
|
|
18bf1f4e8d | ||
|
|
f2df0da0b2 | ||
|
|
3c83077724 | ||
|
|
d8872a743b | ||
|
|
7556d19638 | ||
|
|
58276ec79e | ||
|
|
36ad9e12da | ||
|
|
cdb1975530 | ||
|
|
6efffd6d89 | ||
|
|
95ec79626b | ||
|
|
00aa77651b | ||
|
|
7d62175b23 | ||
|
|
5107cfa30f | ||
|
|
ff57eb73f9 | ||
|
|
4c035a7cbd | ||
|
|
b5d9eb6acb | ||
|
|
904b72cf5e | ||
|
|
d23e7f0f8c | ||
|
|
55cbb15fbc | ||
|
|
3a5b42302c | ||
|
|
4fdb22cba2 | ||
|
|
431f17d999 | ||
|
|
b586612d25 | ||
|
|
ad1593bb36 | ||
|
|
bbd0ae5b16 | ||
|
|
5a30f814bf | ||
|
|
7f6a300ea0 | ||
|
|
93a8ba804f | ||
|
|
415bab4bc9 | ||
|
|
fc388d8292 | ||
|
|
5b70c28047 | ||
|
|
b290ad7368 | ||
|
|
8c8e58e7d9 | ||
|
|
391bb92bd2 | ||
|
|
171654c0ea | ||
|
|
0157fa7dad | ||
|
|
3912075c19 | ||
|
|
d42a89f70f | ||
|
|
6bb25d2c36 | ||
|
|
4179c3527c | ||
|
|
3b1356346a | ||
|
|
67be974861 | ||
|
|
8c916930c8 | ||
|
|
b537979e45 | ||
|
|
aa74817686 | ||
|
|
831e58df10 | ||
|
|
3dd03b2d45 | ||
|
|
2548f0bbba | ||
|
|
39d7ce9ee8 | ||
|
|
1f8c073cb3 | ||
|
|
0353a0ac77 | ||
|
|
921584ac4b | ||
|
|
65a09ce32d | ||
|
|
92dad3be2d | ||
|
|
d86fee294f | ||
|
|
95a01c4e12 | ||
|
|
b9456e242e | ||
|
|
4e49ad64c0 | ||
|
|
184b1b1ce6 | ||
|
|
f49d0555e7 | ||
|
|
9d8a0feab4 | ||
|
|
52a5c99fc5 | ||
|
|
cfaab0cbdc | ||
|
|
6bb0432ee7 | ||
|
|
99837d6339 | ||
|
|
3075723da9 | ||
|
|
b0bdeea7cf | ||
|
|
5eaad33709 | ||
|
|
24b6432490 | ||
|
|
ca61594511 | ||
|
|
bd57077bfe | ||
|
|
56bcba2dc0 | ||
|
|
73928412b3 | ||
|
|
29f41b7dff | ||
|
|
02f86ffe02 | ||
|
|
20c6bf1b5a | ||
|
|
083f070d17 | ||
|
|
4022b59d79 | ||
|
|
3d1cb11ea6 | ||
|
|
d0b1dacec1 | ||
|
|
45a06445f3 | ||
|
|
2f173bba56 | ||
|
|
cb497d2642 | ||
|
|
e1fd686272 | ||
|
|
8f7127a874 | ||
|
|
3a529a0f7b | ||
|
|
5c81f1bd7f | ||
|
|
e0c019002a | ||
|
|
cc24ef6e08 | ||
|
|
7e27abc5db | ||
|
|
4844baa816 | ||
|
|
e771ae9e95 | ||
|
|
a2488045ea | ||
|
|
cce5ebd3f0 | ||
|
|
a7ab77078e | ||
|
|
9af32bce5b | ||
|
|
4cf35e7e62 | ||
|
|
6f27e50cf0 | ||
|
|
bd8c5c684c | ||
|
|
1834db853b | ||
|
|
59ef0d24d1 | ||
|
|
c53817c33d | ||
|
|
b33b7a0474 | ||
|
|
033bc072c0 | ||
|
|
f32ed6bc79 | ||
|
|
707590143b | ||
|
|
b2663de513 | ||
|
|
dd210292ae | ||
|
|
abb9155eb4 | ||
|
|
f607ae8ec0 | ||
|
|
b3ded34244 | ||
|
|
bdfb074aa4 | ||
|
|
b3be5f16fc | ||
|
|
dad60fb878 | ||
|
|
fb66a9f2c3 | ||
|
|
dc9ef6f49c | ||
|
|
8e8a43ed48 | ||
|
|
5ac65b7bfc | ||
|
|
7b9b726577 | ||
|
|
c81cd675a5 | ||
|
|
ab926bf6c5 | ||
|
|
08c688ed11 | ||
|
|
e399d96661 | ||
|
|
ba54b30d4a | ||
|
|
44d9183d36 | ||
|
|
f6f3c00206 | ||
|
|
cc87b823fa | ||
|
|
ba2395f7e7 | ||
|
|
b546ce8777 | ||
|
|
0900eaa9df | ||
|
|
9fb5b2a4e7 | ||
|
|
d80edea218 | ||
|
|
26d6074689 | ||
|
|
40a653215f | ||
|
|
1492f5d8dc | ||
|
|
76263f149a | ||
|
|
028006d15d | ||
|
|
d27ba8cc2a | ||
|
|
33750ce7a9 | ||
|
|
cb8d1a98d6 | ||
|
|
f32252d510 | ||
|
|
4849f8dde9 | ||
|
|
0cafdcdee9 | ||
|
|
3a70225164 | ||
|
|
892c766ac4 | ||
|
|
63a3fd84ae | ||
|
|
287ace1dcc | ||
|
|
4b302e415e | ||
|
|
f24744c910 | ||
|
|
d9a85eda2c | ||
|
|
82c8633e42 | ||
|
|
d3f23795f5 | ||
|
|
e7f4456a80 | ||
|
|
a36a309e2e | ||
|
|
8e3affd9ac | ||
|
|
71e979cae0 | ||
|
|
6edfd34a53 | ||
|
|
0c8c32e69e | ||
|
|
9138b7bacb | ||
|
|
072822d920 | ||
|
|
f67ce8cca5 | ||
|
|
9e6f768263 | ||
|
|
ba9c89186b | ||
|
|
16e7d9c13b | ||
|
|
c3536406bd | ||
|
|
0c66fe2e65 | ||
|
|
74933bf0ba | ||
|
|
1dae83da57 | ||
|
|
5ec8932182 | ||
|
|
701b258dc7 | ||
|
|
f7fcdf14d0 | ||
|
|
47ba6c0808 | ||
|
|
5b5fbbedb4 | ||
|
|
a1c575bfc5 | ||
|
|
27e887556d | ||
|
|
891f6bc241 | ||
|
|
5dd6817d47 | ||
|
|
7cdcb4743e | ||
|
|
6d03c4c543 | ||
|
|
bcc129cf94 | ||
|
|
40cff311c6 | ||
|
|
1a48a7fc57 | ||
|
|
652aa54586 | ||
|
|
42a3886ded | ||
|
|
3430493de1 | ||
|
|
f5bcf9d355 | ||
|
|
d809523bef | ||
|
|
bfe3c28986 | ||
|
|
3c8caa3d0a | ||
|
|
766d064dd0 | ||
|
|
ce196b68c4 | ||
|
|
42130c42a1 | ||
|
|
1a8639d256 | ||
|
|
1702fe1a83 | ||
|
|
354ebd35cc | ||
|
|
7297603f65 | ||
|
|
9626cb1356 | ||
|
|
9e027940d7 | ||
|
|
3754938d96 | ||
|
|
8a7f930a64 | ||
|
|
f7e506155b | ||
|
|
54da2a75fb | ||
|
|
5cbb9aca7e | ||
|
|
bd79882b16 | ||
|
|
960c50ba45 | ||
|
|
9411f85cd2 | ||
|
|
b4370f0461 | ||
|
|
97799521f9 | ||
|
|
96fa7f33ac | ||
|
|
1c2d0aa1f2 | ||
|
|
070aed7076 | ||
|
|
2b402d3cbf | ||
|
|
cca3e75ac1 | ||
|
|
6f27275f44 | ||
|
|
95548af426 | ||
|
|
cdc847d352 | ||
|
|
b69244f8ef | ||
|
|
cbd6d88025 | ||
|
|
028d7c939c | ||
|
|
a8fa969379 | ||
|
|
80ee4b29f5 | ||
|
|
e562c9173c | ||
|
|
ec0c359037 | ||
|
|
3b6bef180b | ||
|
|
640e4e1fea | ||
|
|
eeb817cd6e | ||
|
|
c6afa7c320 | ||
|
|
9ccd17e40b | ||
|
|
2a7e0d652c | ||
|
|
a94e35b60e | ||
|
|
72e3d5c7f9 | ||
|
|
be56441e55 | ||
|
|
aa2b213a45 | ||
|
|
668513d771 | ||
|
|
0242d33adb | ||
|
|
c38dc9828b | ||
|
|
4ce1a9096b | ||
|
|
02163d64aa | ||
|
|
d91f0bff22 | ||
|
|
e401cb7c9d | ||
|
|
fa31341bbb |
146
.zuul.yaml
146
.zuul.yaml
@@ -1,39 +1,139 @@
|
|||||||
- project:
|
- project:
|
||||||
name: openstack/watcher
|
|
||||||
check:
|
check:
|
||||||
jobs:
|
jobs:
|
||||||
- watcher-tempest-multinode
|
- watcher-tempest-functional
|
||||||
|
- watcher-tempest-dummy_optim
|
||||||
|
- watcher-tempest-actuator
|
||||||
|
- watcher-tempest-basic_optim
|
||||||
|
- watcher-tempest-workload_balancing
|
||||||
|
- watcherclient-tempest-functional
|
||||||
- legacy-rally-dsvm-watcher-rally
|
- legacy-rally-dsvm-watcher-rally
|
||||||
|
- openstack-tox-lower-constraints
|
||||||
|
gate:
|
||||||
|
jobs:
|
||||||
|
- watcher-tempest-functional
|
||||||
|
- watcher-tempest-dummy_optim
|
||||||
|
- watcher-tempest-actuator
|
||||||
|
- watcher-tempest-basic_optim
|
||||||
|
- watcher-tempest-workload_balancing
|
||||||
|
- watcherclient-tempest-functional
|
||||||
|
- legacy-rally-dsvm-watcher-rally
|
||||||
|
- openstack-tox-lower-constraints
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-base-multinode
|
name: watcher-tempest-dummy_optim
|
||||||
parent: legacy-dsvm-base-multinode
|
parent: watcher-tempest-multinode
|
||||||
run: playbooks/legacy/watcher-tempest-base-multinode/run.yaml
|
vars:
|
||||||
post-run: playbooks/legacy/watcher-tempest-base-multinode/post.yaml
|
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_dummy_optim'
|
||||||
timeout: 4200
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-actuator
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_actuator'
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-basic_optim
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_basic_optim'
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-workload_balancing
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: 'watcher_tempest_plugin.tests.scenario.test_execute_workload_balancing'
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-multinode
|
||||||
|
parent: watcher-tempest-functional
|
||||||
|
voting: false
|
||||||
|
nodeset: openstack-two-node
|
||||||
|
pre-run: playbooks/pre.yaml
|
||||||
|
run: playbooks/orchestrate-tempest.yaml
|
||||||
|
roles:
|
||||||
|
- zuul: openstack/tempest
|
||||||
|
group-vars:
|
||||||
|
subnode:
|
||||||
|
devstack_local_conf:
|
||||||
|
post-config:
|
||||||
|
$NOVA_CONF:
|
||||||
|
libvirt:
|
||||||
|
live_migration_uri: 'qemu+ssh://root@%s/system'
|
||||||
|
devstack_services:
|
||||||
|
watcher-api: false
|
||||||
|
watcher-decision-engine: false
|
||||||
|
watcher-applier: false
|
||||||
|
# We need to add TLS support for watcher plugin
|
||||||
|
tls-proxy: false
|
||||||
|
ceilometer: false
|
||||||
|
ceilometer-acompute: false
|
||||||
|
ceilometer-acentral: false
|
||||||
|
ceilometer-anotification: false
|
||||||
|
watcher: false
|
||||||
|
gnocchi-api: false
|
||||||
|
gnocchi-metricd: false
|
||||||
|
rabbit: false
|
||||||
|
mysql: false
|
||||||
|
vars:
|
||||||
|
devstack_local_conf:
|
||||||
|
post-config:
|
||||||
|
$NOVA_CONF:
|
||||||
|
libvirt:
|
||||||
|
live_migration_uri: 'qemu+ssh://root@%s/system'
|
||||||
|
test-config:
|
||||||
|
$TEMPEST_CONFIG:
|
||||||
|
compute:
|
||||||
|
min_compute_nodes: 2
|
||||||
|
compute-feature-enabled:
|
||||||
|
live_migration: true
|
||||||
|
block_migration_for_live_migration: true
|
||||||
|
devstack_plugins:
|
||||||
|
ceilometer: https://git.openstack.org/openstack/ceilometer
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-functional
|
||||||
|
parent: devstack-tempest
|
||||||
|
timeout: 7200
|
||||||
required-projects:
|
required-projects:
|
||||||
|
- openstack/ceilometer
|
||||||
- openstack-infra/devstack-gate
|
- openstack-infra/devstack-gate
|
||||||
- openstack/python-openstackclient
|
- openstack/python-openstackclient
|
||||||
- openstack/python-watcherclient
|
- openstack/python-watcherclient
|
||||||
- openstack/watcher
|
- openstack/watcher
|
||||||
- openstack/watcher-tempest-plugin
|
- openstack/watcher-tempest-plugin
|
||||||
nodeset: legacy-ubuntu-xenial-2-node
|
- openstack/tempest
|
||||||
|
vars:
|
||||||
|
devstack_plugins:
|
||||||
|
watcher: https://git.openstack.org/openstack/watcher
|
||||||
|
devstack_services:
|
||||||
|
tls-proxy: false
|
||||||
|
watcher-api: true
|
||||||
|
watcher-decision-engine: true
|
||||||
|
watcher-applier: true
|
||||||
|
tempest: true
|
||||||
|
s-account: false
|
||||||
|
s-container: false
|
||||||
|
s-object: false
|
||||||
|
s-proxy: false
|
||||||
|
devstack_localrc:
|
||||||
|
TEMPEST_PLUGINS: '/opt/stack/watcher-tempest-plugin'
|
||||||
|
tempest_test_regex: 'watcher_tempest_plugin.tests.api'
|
||||||
|
tox_envlist: all
|
||||||
|
tox_environment:
|
||||||
|
# Do we really need to set this? It's cargo culted
|
||||||
|
PYTHONUNBUFFERED: 'true'
|
||||||
|
zuul_copy_output:
|
||||||
|
/etc/hosts: logs
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-multinode
|
# This job is used in python-watcherclient repo
|
||||||
parent: watcher-tempest-base-multinode
|
|
||||||
voting: false
|
|
||||||
|
|
||||||
- job:
|
|
||||||
# This job is used by python-watcherclient repo
|
|
||||||
name: watcherclient-tempest-functional
|
name: watcherclient-tempest-functional
|
||||||
parent: legacy-dsvm-base
|
parent: watcher-tempest-functional
|
||||||
run: playbooks/legacy/watcherclient-tempest-functional/run.yaml
|
voting: false
|
||||||
post-run: playbooks/legacy/watcherclient-tempest-functional/post.yaml
|
|
||||||
timeout: 4200
|
timeout: 4200
|
||||||
required-projects:
|
vars:
|
||||||
- openstack-dev/devstack
|
tempest_concurrency: 1
|
||||||
- openstack-infra/devstack-gate
|
devstack_localrc:
|
||||||
- openstack/python-openstackclient
|
TEMPEST_PLUGINS: '/opt/stack/python-watcherclient'
|
||||||
- openstack/python-watcherclient
|
tempest_test_regex: 'watcherclient.tests.functional'
|
||||||
- openstack/watcher
|
|
||||||
|
|||||||
@@ -8,4 +8,4 @@
|
|||||||
watcher Style Commandments
|
watcher Style Commandments
|
||||||
==========================
|
==========================
|
||||||
|
|
||||||
Read the OpenStack Style Commandments https://docs.openstack.org/developer/hacking/
|
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
|
||||||
|
|||||||
@@ -2,8 +2,8 @@
|
|||||||
Team and repository tags
|
Team and repository tags
|
||||||
========================
|
========================
|
||||||
|
|
||||||
.. image:: https://governance.openstack.org/badges/watcher.svg
|
.. image:: https://governance.openstack.org/tc/badges/watcher.svg
|
||||||
:target: https://governance.openstack.org/reference/tags/index.html
|
:target: https://governance.openstack.org/tc/reference/tags/index.html
|
||||||
|
|
||||||
.. Change things from this point on
|
.. Change things from this point on
|
||||||
|
|
||||||
@@ -22,10 +22,11 @@ service for multi-tenant OpenStack-based clouds.
|
|||||||
Watcher provides a robust framework to realize a wide range of cloud
|
Watcher provides a robust framework to realize a wide range of cloud
|
||||||
optimization goals, including the reduction of data center
|
optimization goals, including the reduction of data center
|
||||||
operating costs, increased system performance via intelligent virtual machine
|
operating costs, increased system performance via intelligent virtual machine
|
||||||
migration, increased energy efficiency-and more!
|
migration, increased energy efficiency and more!
|
||||||
|
|
||||||
* Free software: Apache license
|
* Free software: Apache license
|
||||||
* Wiki: https://wiki.openstack.org/wiki/Watcher
|
* Wiki: https://wiki.openstack.org/wiki/Watcher
|
||||||
* Source: https://github.com/openstack/watcher
|
* Source: https://github.com/openstack/watcher
|
||||||
* Bugs: https://bugs.launchpad.net/watcher
|
* Bugs: https://bugs.launchpad.net/watcher
|
||||||
* Documentation: https://docs.openstack.org/watcher/latest/
|
* Documentation: https://docs.openstack.org/watcher/latest/
|
||||||
|
* Release notes: https://docs.openstack.org/releasenotes/watcher/
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ WATCHER_AUTH_CACHE_DIR=${WATCHER_AUTH_CACHE_DIR:-/var/cache/watcher}
|
|||||||
|
|
||||||
WATCHER_CONF_DIR=/etc/watcher
|
WATCHER_CONF_DIR=/etc/watcher
|
||||||
WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf
|
WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf
|
||||||
WATCHER_POLICY_JSON=$WATCHER_CONF_DIR/policy.json
|
WATCHER_POLICY_YAML=$WATCHER_CONF_DIR/policy.yaml.sample
|
||||||
|
|
||||||
WATCHER_DEVSTACK_DIR=$WATCHER_DIR/devstack
|
WATCHER_DEVSTACK_DIR=$WATCHER_DIR/devstack
|
||||||
WATCHER_DEVSTACK_FILES_DIR=$WATCHER_DEVSTACK_DIR/files
|
WATCHER_DEVSTACK_FILES_DIR=$WATCHER_DEVSTACK_DIR/files
|
||||||
@@ -106,7 +106,25 @@ function configure_watcher {
|
|||||||
# Put config files in ``/etc/watcher`` for everyone to find
|
# Put config files in ``/etc/watcher`` for everyone to find
|
||||||
sudo install -d -o $STACK_USER $WATCHER_CONF_DIR
|
sudo install -d -o $STACK_USER $WATCHER_CONF_DIR
|
||||||
|
|
||||||
install_default_policy watcher
|
local project=watcher
|
||||||
|
local project_uc
|
||||||
|
project_uc=$(echo watcher|tr a-z A-Z)
|
||||||
|
local conf_dir="${project_uc}_CONF_DIR"
|
||||||
|
# eval conf dir to get the variable
|
||||||
|
conf_dir="${!conf_dir}"
|
||||||
|
local project_dir="${project_uc}_DIR"
|
||||||
|
# eval project dir to get the variable
|
||||||
|
project_dir="${!project_dir}"
|
||||||
|
local sample_conf_dir="${project_dir}/etc/${project}"
|
||||||
|
local sample_policy_dir="${project_dir}/etc/${project}/policy.d"
|
||||||
|
local sample_policy_generator="${project_dir}/etc/${project}/oslo-policy-generator/watcher-policy-generator.conf"
|
||||||
|
|
||||||
|
# first generate policy.yaml
|
||||||
|
oslopolicy-sample-generator --config-file $sample_policy_generator
|
||||||
|
# then optionally copy over policy.d
|
||||||
|
if [[ -d $sample_policy_dir ]]; then
|
||||||
|
cp -r $sample_policy_dir $conf_dir/policy.d
|
||||||
|
fi
|
||||||
|
|
||||||
# Rebuild the config file from scratch
|
# Rebuild the config file from scratch
|
||||||
create_watcher_conf
|
create_watcher_conf
|
||||||
@@ -159,15 +177,19 @@ function create_watcher_conf {
|
|||||||
iniset $WATCHER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
|
iniset $WATCHER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
|
||||||
iniset $WATCHER_CONF DEFAULT control_exchange watcher
|
iniset $WATCHER_CONF DEFAULT control_exchange watcher
|
||||||
|
|
||||||
|
iniset_rpc_backend watcher $WATCHER_CONF
|
||||||
|
|
||||||
iniset $WATCHER_CONF database connection $(database_connection_url watcher)
|
iniset $WATCHER_CONF database connection $(database_connection_url watcher)
|
||||||
iniset $WATCHER_CONF api host "$WATCHER_SERVICE_HOST"
|
iniset $WATCHER_CONF api host "$WATCHER_SERVICE_HOST"
|
||||||
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT"
|
|
||||||
|
|
||||||
iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_JSON
|
if is_service_enabled tls-proxy; then
|
||||||
|
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT_INT"
|
||||||
|
# iniset $WATCHER_CONF api enable_ssl_api "True"
|
||||||
|
else
|
||||||
|
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT"
|
||||||
|
fi
|
||||||
|
|
||||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_userid $RABBIT_USERID
|
iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_YAML
|
||||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
|
|
||||||
iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_host $RABBIT_HOST
|
|
||||||
|
|
||||||
iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2"
|
iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2"
|
||||||
|
|
||||||
@@ -279,8 +301,7 @@ function start_watcher_api {
|
|||||||
|
|
||||||
# Start proxies if enabled
|
# Start proxies if enabled
|
||||||
if is_service_enabled tls-proxy; then
|
if is_service_enabled tls-proxy; then
|
||||||
start_tls_proxy '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT &
|
start_tls_proxy watcher '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT
|
||||||
start_tls_proxy '*' $EC2_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT &
|
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,6 +3,9 @@
|
|||||||
# Make sure rabbit is enabled
|
# Make sure rabbit is enabled
|
||||||
enable_service rabbit
|
enable_service rabbit
|
||||||
|
|
||||||
|
# Make sure mysql is enabled
|
||||||
|
enable_service mysql
|
||||||
|
|
||||||
# Enable Watcher services
|
# Enable Watcher services
|
||||||
enable_service watcher-api
|
enable_service watcher-api
|
||||||
enable_service watcher-decision-engine
|
enable_service watcher-decision-engine
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ It is used via a single directive in the .rst file
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from sphinx.util.compat import Directive
|
from docutils.parsers.rst import Directive
|
||||||
from docutils import nodes
|
from docutils import nodes
|
||||||
|
|
||||||
from watcher.notifications import base as notification
|
from watcher.notifications import base as notification
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ The source install instructions specifically avoid using platform specific
|
|||||||
packages, instead using the source for the code and the Python Package Index
|
packages, instead using the source for the code and the Python Package Index
|
||||||
(PyPi_).
|
(PyPi_).
|
||||||
|
|
||||||
.. _PyPi: https://pypi.python.org/pypi
|
.. _PyPi: https://pypi.org/
|
||||||
|
|
||||||
It's expected that your system already has python2.7_, latest version of pip_,
|
It's expected that your system already has python2.7_, latest version of pip_,
|
||||||
and git_ available.
|
and git_ available.
|
||||||
|
|||||||
@@ -1,3 +1,7 @@
|
|||||||
|
==================================================
|
||||||
|
OpenStack Infrastructure Optimization Service APIs
|
||||||
|
==================================================
|
||||||
|
|
||||||
.. toctree::
|
.. toctree::
|
||||||
:maxdepth: 1
|
:maxdepth: 1
|
||||||
|
|
||||||
|
|||||||
@@ -42,6 +42,7 @@ extensions = [
|
|||||||
'ext.versioned_notifications',
|
'ext.versioned_notifications',
|
||||||
'oslo_config.sphinxconfiggen',
|
'oslo_config.sphinxconfiggen',
|
||||||
'openstackdocstheme',
|
'openstackdocstheme',
|
||||||
|
'sphinx.ext.napoleon',
|
||||||
]
|
]
|
||||||
|
|
||||||
wsme_protocols = ['restjson']
|
wsme_protocols = ['restjson']
|
||||||
|
|||||||
@@ -129,10 +129,14 @@ Configure the Identity service for the Watcher service
|
|||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
$ openstack endpoint create --region YOUR_REGION watcher \
|
$ openstack endpoint create --region YOUR_REGION
|
||||||
--publicurl http://WATCHER_API_PUBLIC_IP:9322 \
|
watcher public http://WATCHER_API_PUBLIC_IP:9322
|
||||||
--internalurl http://WATCHER_API_INTERNAL_IP:9322 \
|
|
||||||
--adminurl http://WATCHER_API_ADMIN_IP:9322
|
$ openstack endpoint create --region YOUR_REGION
|
||||||
|
watcher internal http://WATCHER_API_INTERNAL_IP:9322
|
||||||
|
|
||||||
|
$ openstack endpoint create --region YOUR_REGION
|
||||||
|
watcher admin http://WATCHER_API_ADMIN_IP:9322
|
||||||
|
|
||||||
.. _watcher-db_configuration:
|
.. _watcher-db_configuration:
|
||||||
|
|
||||||
@@ -200,8 +204,8 @@ configuration file, in order:
|
|||||||
|
|
||||||
|
|
||||||
Although some configuration options are mentioned here, it is recommended that
|
Although some configuration options are mentioned here, it is recommended that
|
||||||
you review all the `available options
|
you review all the :ref:`available options
|
||||||
<https://git.openstack.org/cgit/openstack/watcher/tree/etc/watcher/watcher.conf.sample>`_
|
<watcher_sample_configuration_files>`
|
||||||
so that the watcher service is configured for your needs.
|
so that the watcher service is configured for your needs.
|
||||||
|
|
||||||
#. The Watcher Service stores information in a database. This guide uses the
|
#. The Watcher Service stores information in a database. This guide uses the
|
||||||
@@ -217,7 +221,7 @@ so that the watcher service is configured for your needs.
|
|||||||
# The SQLAlchemy connection string used to connect to the
|
# The SQLAlchemy connection string used to connect to the
|
||||||
# database (string value)
|
# database (string value)
|
||||||
#connection=<None>
|
#connection=<None>
|
||||||
connection = mysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8
|
connection = mysql+pymysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8
|
||||||
|
|
||||||
#. Configure the Watcher Service to use the RabbitMQ message broker by
|
#. Configure the Watcher Service to use the RabbitMQ message broker by
|
||||||
setting one or more of these options. Replace RABBIT_HOST with the
|
setting one or more of these options. Replace RABBIT_HOST with the
|
||||||
@@ -235,21 +239,8 @@ so that the watcher service is configured for your needs.
|
|||||||
# option. (string value)
|
# option. (string value)
|
||||||
control_exchange = watcher
|
control_exchange = watcher
|
||||||
|
|
||||||
...
|
# ...
|
||||||
|
transport_url = rabbit://RABBITMQ_USER:RABBITMQ_PASSWORD@RABBIT_HOST
|
||||||
[oslo_messaging_rabbit]
|
|
||||||
|
|
||||||
# The username used by the message broker (string value)
|
|
||||||
rabbit_userid = RABBITMQ_USER
|
|
||||||
|
|
||||||
# The password of user used by the message broker (string value)
|
|
||||||
rabbit_password = RABBITMQ_PASSWORD
|
|
||||||
|
|
||||||
# The host where the message brokeris installed (string value)
|
|
||||||
rabbit_host = RABBIT_HOST
|
|
||||||
|
|
||||||
# The port used bythe message broker (string value)
|
|
||||||
#rabbit_port = 5672
|
|
||||||
|
|
||||||
|
|
||||||
#. Watcher API shall validate the token provided by every incoming request,
|
#. Watcher API shall validate the token provided by every incoming request,
|
||||||
@@ -273,7 +264,7 @@ so that the watcher service is configured for your needs.
|
|||||||
|
|
||||||
# Authentication URL (unknown value)
|
# Authentication URL (unknown value)
|
||||||
#auth_url = <None>
|
#auth_url = <None>
|
||||||
auth_url = http://IDENTITY_IP:35357
|
auth_url = http://IDENTITY_IP:5000
|
||||||
|
|
||||||
# Username (unknown value)
|
# Username (unknown value)
|
||||||
# Deprecated group/name - [DEFAULT]/username
|
# Deprecated group/name - [DEFAULT]/username
|
||||||
@@ -319,7 +310,7 @@ so that the watcher service is configured for your needs.
|
|||||||
|
|
||||||
# Authentication URL (unknown value)
|
# Authentication URL (unknown value)
|
||||||
#auth_url = <None>
|
#auth_url = <None>
|
||||||
auth_url = http://IDENTITY_IP:35357
|
auth_url = http://IDENTITY_IP:5000
|
||||||
|
|
||||||
# Username (unknown value)
|
# Username (unknown value)
|
||||||
# Deprecated group/name - [DEFAULT]/username
|
# Deprecated group/name - [DEFAULT]/username
|
||||||
@@ -349,7 +340,7 @@ so that the watcher service is configured for your needs.
|
|||||||
[nova_client]
|
[nova_client]
|
||||||
|
|
||||||
# Version of Nova API to use in novaclient. (string value)
|
# Version of Nova API to use in novaclient. (string value)
|
||||||
#api_version = 2.53
|
#api_version = 2.56
|
||||||
api_version = 2.1
|
api_version = 2.1
|
||||||
|
|
||||||
#. Create the Watcher Service database tables::
|
#. Create the Watcher Service database tables::
|
||||||
@@ -391,7 +382,7 @@ Ceilometer is designed to collect measurements from OpenStack services and from
|
|||||||
other external components. If you would like to add new meters to the currently
|
other external components. If you would like to add new meters to the currently
|
||||||
existing ones, you need to follow the documentation below:
|
existing ones, you need to follow the documentation below:
|
||||||
|
|
||||||
#. https://docs.openstack.org/ceilometer/latest/contributor/new_meters.html#meters
|
#. https://docs.openstack.org/ceilometer/latest/contributor/measurements.html#new-measurements
|
||||||
|
|
||||||
The Ceilometer collector uses a pluggable storage system, meaning that you can
|
The Ceilometer collector uses a pluggable storage system, meaning that you can
|
||||||
pick any database system you prefer.
|
pick any database system you prefer.
|
||||||
|
|||||||
@@ -1,5 +1,9 @@
|
|||||||
|
===================
|
||||||
|
Configuration Guide
|
||||||
|
===================
|
||||||
|
|
||||||
.. toctree::
|
.. toctree::
|
||||||
:maxdepth: 1
|
:maxdepth: 2
|
||||||
|
|
||||||
configuring
|
configuring
|
||||||
watcher
|
watcher
|
||||||
|
|||||||
@@ -39,7 +39,7 @@ notifications of important events.
|
|||||||
|
|
||||||
* https://launchpad.net
|
* https://launchpad.net
|
||||||
* https://launchpad.net/watcher
|
* https://launchpad.net/watcher
|
||||||
* https://launchpad.net/~openstack
|
* https://launchpad.net/openstack
|
||||||
|
|
||||||
|
|
||||||
Project Hosting Details
|
Project Hosting Details
|
||||||
@@ -49,7 +49,7 @@ Bug tracker
|
|||||||
https://launchpad.net/watcher
|
https://launchpad.net/watcher
|
||||||
|
|
||||||
Mailing list (prefix subjects with ``[watcher]`` for faster responses)
|
Mailing list (prefix subjects with ``[watcher]`` for faster responses)
|
||||||
https://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev
|
http://lists.openstack.org/pipermail/openstack-dev/
|
||||||
|
|
||||||
Wiki
|
Wiki
|
||||||
https://wiki.openstack.org/Watcher
|
https://wiki.openstack.org/Watcher
|
||||||
@@ -65,7 +65,7 @@ IRC Channel
|
|||||||
|
|
||||||
Weekly Meetings
|
Weekly Meetings
|
||||||
On Wednesdays at 14:00 UTC on even weeks in the ``#openstack-meeting-4``
|
On Wednesdays at 14:00 UTC on even weeks in the ``#openstack-meeting-4``
|
||||||
IRC channel, 13:00 UTC on odd weeks in the ``#openstack-meeting-alt``
|
IRC channel, 08:00 UTC on odd weeks in the ``#openstack-meeting-alt``
|
||||||
IRC channel (`meetings logs`_)
|
IRC channel (`meetings logs`_)
|
||||||
|
|
||||||
.. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/
|
.. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ different version of the above, please document your configuration here!
|
|||||||
|
|
||||||
.. _Python: https://www.python.org/
|
.. _Python: https://www.python.org/
|
||||||
.. _git: https://git-scm.com/
|
.. _git: https://git-scm.com/
|
||||||
.. _setuptools: https://pypi.python.org/pypi/setuptools
|
.. _setuptools: https://pypi.org/project/setuptools
|
||||||
.. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/en/latest/install.html
|
.. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/en/latest/install.html
|
||||||
|
|
||||||
Getting the latest code
|
Getting the latest code
|
||||||
@@ -69,8 +69,8 @@ itself.
|
|||||||
|
|
||||||
These dependencies can be installed from PyPi_ using the Python tool pip_.
|
These dependencies can be installed from PyPi_ using the Python tool pip_.
|
||||||
|
|
||||||
.. _PyPi: https://pypi.python.org/
|
.. _PyPi: https://pypi.org/
|
||||||
.. _pip: https://pypi.python.org/pypi/pip
|
.. _pip: https://pypi.org/project/pip
|
||||||
|
|
||||||
However, your system *may* need additional dependencies that `pip` (and by
|
However, your system *may* need additional dependencies that `pip` (and by
|
||||||
extension, PyPi) cannot satisfy. These dependencies should be installed
|
extension, PyPi) cannot satisfy. These dependencies should be installed
|
||||||
@@ -123,9 +123,10 @@ You can re-activate this virtualenv for your current shell using:
|
|||||||
|
|
||||||
$ workon watcher
|
$ workon watcher
|
||||||
|
|
||||||
For more information on virtual environments, see virtualenv_.
|
For more information on virtual environments, see virtualenv_ and
|
||||||
|
virtualenvwrapper_.
|
||||||
|
|
||||||
.. _virtualenv: https://www.virtualenv.org/
|
.. _virtualenv: https://pypi.org/project/virtualenv/
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -79,7 +79,7 @@ requirements.txt file::
|
|||||||
|
|
||||||
.. _cookiecutter: https://github.com/audreyr/cookiecutter
|
.. _cookiecutter: https://github.com/audreyr/cookiecutter
|
||||||
.. _OpenStack cookiecutter: https://github.com/openstack-dev/cookiecutter
|
.. _OpenStack cookiecutter: https://github.com/openstack-dev/cookiecutter
|
||||||
.. _python-watcher: https://pypi.python.org/pypi/python-watcher
|
.. _python-watcher: https://pypi.org/project/python-watcher
|
||||||
|
|
||||||
Implementing a plugin for Watcher
|
Implementing a plugin for Watcher
|
||||||
=================================
|
=================================
|
||||||
|
|||||||
@@ -208,7 +208,7 @@ Here below is how to register ``DummyClusterDataModelCollector`` using pbr_:
|
|||||||
watcher_cluster_data_model_collectors =
|
watcher_cluster_data_model_collectors =
|
||||||
dummy = thirdparty.dummy:DummyClusterDataModelCollector
|
dummy = thirdparty.dummy:DummyClusterDataModelCollector
|
||||||
|
|
||||||
.. _pbr: http://docs.openstack.org/pbr/latest
|
.. _pbr: https://docs.openstack.org/pbr/latest/
|
||||||
|
|
||||||
|
|
||||||
Add new notification endpoints
|
Add new notification endpoints
|
||||||
|
|||||||
@@ -263,7 +263,7 @@ requires new metrics not covered by Ceilometer, you can add them through a
|
|||||||
`Ceilometer plugin`_.
|
`Ceilometer plugin`_.
|
||||||
|
|
||||||
|
|
||||||
.. _`Helper`: https://github.com/openstack/watcher/blob/master/watcher/decision_engine/cluster/history/ceilometer.py
|
.. _`Helper`: https://github.com/openstack/watcher/blob/master/watcher/datasource/ceilometer.py
|
||||||
.. _`Ceilometer developer guide`: https://docs.openstack.org/ceilometer/latest/contributor/architecture.html#storing-accessing-the-data
|
.. _`Ceilometer developer guide`: https://docs.openstack.org/ceilometer/latest/contributor/architecture.html#storing-accessing-the-data
|
||||||
.. _`Ceilometer`: https://docs.openstack.org/ceilometer/latest
|
.. _`Ceilometer`: https://docs.openstack.org/ceilometer/latest
|
||||||
.. _`Monasca`: https://github.com/openstack/monasca-api/blob/master/docs/monasca-api-spec.md
|
.. _`Monasca`: https://github.com/openstack/monasca-api/blob/master/docs/monasca-api-spec.md
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ the following::
|
|||||||
(watcher) $ tox -e pep8
|
(watcher) $ tox -e pep8
|
||||||
|
|
||||||
.. _tox: https://tox.readthedocs.org/
|
.. _tox: https://tox.readthedocs.org/
|
||||||
.. _Gerrit: http://review.openstack.org/
|
.. _Gerrit: https://review.openstack.org/
|
||||||
|
|
||||||
You may pass options to the test programs using positional arguments. To run a
|
You may pass options to the test programs using positional arguments. To run a
|
||||||
specific unit test, you can pass extra options to `os-testr`_ after putting
|
specific unit test, you can pass extra options to `os-testr`_ after putting
|
||||||
|
|||||||
@@ -267,14 +267,14 @@ the same goal and same workload of the :ref:`Cluster <cluster_definition>`.
|
|||||||
Project
|
Project
|
||||||
=======
|
=======
|
||||||
|
|
||||||
:ref:`Projects <project_definition>` represent the base unit of “ownership”
|
:ref:`Projects <project_definition>` represent the base unit of "ownership"
|
||||||
in OpenStack, in that all :ref:`resources <managed_resource_definition>` in
|
in OpenStack, in that all :ref:`resources <managed_resource_definition>` in
|
||||||
OpenStack should be owned by a specific :ref:`project <project_definition>`.
|
OpenStack should be owned by a specific :ref:`project <project_definition>`.
|
||||||
In OpenStack Identity, a :ref:`project <project_definition>` must be owned by a
|
In OpenStack Identity, a :ref:`project <project_definition>` must be owned by a
|
||||||
specific domain.
|
specific domain.
|
||||||
|
|
||||||
Please, read `the official OpenStack definition of a Project
|
Please, read `the official OpenStack definition of a Project
|
||||||
<http://docs.openstack.org/glossary/content/glossary.html>`_.
|
<https://docs.openstack.org/doc-contrib-guide/common/glossary.html>`_.
|
||||||
|
|
||||||
.. _scoring_engine_definition:
|
.. _scoring_engine_definition:
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ metrics receiver, complex event processor and profiler, optimization processor
|
|||||||
and an action plan applier. This provides a robust framework to realize a wide
|
and an action plan applier. This provides a robust framework to realize a wide
|
||||||
range of cloud optimization goals, including the reduction of data center
|
range of cloud optimization goals, including the reduction of data center
|
||||||
operating costs, increased system performance via intelligent virtual machine
|
operating costs, increased system performance via intelligent virtual machine
|
||||||
migration, increased energy efficiency—and more!
|
migration, increased energy efficiency and more!
|
||||||
|
|
||||||
Watcher project consists of several source code repositories:
|
Watcher project consists of several source code repositories:
|
||||||
|
|
||||||
|
|||||||
@@ -26,8 +26,8 @@
|
|||||||
|
|
||||||
[keystone_authtoken]
|
[keystone_authtoken]
|
||||||
...
|
...
|
||||||
auth_uri = http://controller:5000
|
www_authenticate_uri = http://controller:5000
|
||||||
auth_url = http://controller:35357
|
auth_url = http://controller:5000
|
||||||
memcached_servers = controller:11211
|
memcached_servers = controller:11211
|
||||||
auth_type = password
|
auth_type = password
|
||||||
project_domain_name = default
|
project_domain_name = default
|
||||||
@@ -47,7 +47,7 @@
|
|||||||
[watcher_clients_auth]
|
[watcher_clients_auth]
|
||||||
...
|
...
|
||||||
auth_type = password
|
auth_type = password
|
||||||
auth_url = http://controller:35357
|
auth_url = http://controller:5000
|
||||||
username = watcher
|
username = watcher
|
||||||
password = WATCHER_PASS
|
password = WATCHER_PASS
|
||||||
project_domain_name = default
|
project_domain_name = default
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ Infrastructure Optimization service
|
|||||||
verify.rst
|
verify.rst
|
||||||
next-steps.rst
|
next-steps.rst
|
||||||
|
|
||||||
The Infrastructure Optimization service (watcher) provides
|
The Infrastructure Optimization service (Watcher) provides
|
||||||
flexible and scalable resource optimization service for
|
flexible and scalable resource optimization service for
|
||||||
multi-tenant OpenStack-based clouds.
|
multi-tenant OpenStack-based clouds.
|
||||||
|
|
||||||
@@ -21,19 +21,19 @@ applier. This provides a robust framework to realize a wide
|
|||||||
range of cloud optimization goals, including the reduction
|
range of cloud optimization goals, including the reduction
|
||||||
of data center operating costs, increased system performance
|
of data center operating costs, increased system performance
|
||||||
via intelligent virtual machine migration, increased energy
|
via intelligent virtual machine migration, increased energy
|
||||||
efficiency—and more!
|
efficiency and more!
|
||||||
|
|
||||||
Watcher also supports a pluggable architecture by which custom
|
Watcher also supports a pluggable architecture by which custom
|
||||||
optimization algorithms, data metrics and data profilers can be
|
optimization algorithms, data metrics and data profilers can be
|
||||||
developed and inserted into the Watcher framework.
|
developed and inserted into the Watcher framework.
|
||||||
|
|
||||||
Check the documentation for watcher optimization strategies at
|
Check the documentation for watcher optimization strategies at
|
||||||
https://docs.openstack.org/watcher/latest/strategies/index.html
|
`Strategies <https://docs.openstack.org/watcher/latest/strategies/index.html>`_.
|
||||||
|
|
||||||
Check watcher glossary at
|
Check watcher glossary at `Glossary
|
||||||
https://docs.openstack.org/watcher/latest/glossary.html
|
<https://docs.openstack.org/watcher/latest/glossary.html>`_.
|
||||||
|
|
||||||
|
|
||||||
This chapter assumes a working setup of OpenStack following the
|
This chapter assumes a working setup of OpenStack following the
|
||||||
`OpenStack Installation Tutorial
|
`OpenStack Installation Tutorial
|
||||||
<https://docs.openstack.org/pike/install/>`_.
|
<https://docs.openstack.org/queens/install/>`_.
|
||||||
|
|||||||
@@ -6,4 +6,4 @@ Next steps
|
|||||||
Your OpenStack environment now includes the watcher service.
|
Your OpenStack environment now includes the watcher service.
|
||||||
|
|
||||||
To add additional services, see
|
To add additional services, see
|
||||||
https://docs.openstack.org/pike/install/.
|
https://docs.openstack.org/queens/install/.
|
||||||
|
|||||||
@@ -7,9 +7,7 @@ Service for the Watcher API
|
|||||||
---------------------------
|
---------------------------
|
||||||
|
|
||||||
:Author: openstack@lists.launchpad.net
|
:Author: openstack@lists.launchpad.net
|
||||||
:Date:
|
|
||||||
:Copyright: OpenStack Foundation
|
:Copyright: OpenStack Foundation
|
||||||
:Version:
|
|
||||||
:Manual section: 1
|
:Manual section: 1
|
||||||
:Manual group: cloud computing
|
:Manual group: cloud computing
|
||||||
|
|
||||||
|
|||||||
@@ -7,9 +7,7 @@ Service for the Watcher Applier
|
|||||||
-------------------------------
|
-------------------------------
|
||||||
|
|
||||||
:Author: openstack@lists.launchpad.net
|
:Author: openstack@lists.launchpad.net
|
||||||
:Date:
|
|
||||||
:Copyright: OpenStack Foundation
|
:Copyright: OpenStack Foundation
|
||||||
:Version:
|
|
||||||
:Manual section: 1
|
:Manual section: 1
|
||||||
:Manual group: cloud computing
|
:Manual group: cloud computing
|
||||||
|
|
||||||
|
|||||||
@@ -7,9 +7,7 @@ Service for the Watcher Decision Engine
|
|||||||
---------------------------------------
|
---------------------------------------
|
||||||
|
|
||||||
:Author: openstack@lists.launchpad.net
|
:Author: openstack@lists.launchpad.net
|
||||||
:Date:
|
|
||||||
:Copyright: OpenStack Foundation
|
:Copyright: OpenStack Foundation
|
||||||
:Version:
|
|
||||||
:Manual section: 1
|
:Manual section: 1
|
||||||
:Manual group: cloud computing
|
:Manual group: cloud computing
|
||||||
|
|
||||||
|
|||||||
86
doc/source/strategies/actuation.rst
Normal file
86
doc/source/strategies/actuation.rst
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
=============
|
||||||
|
Actuator
|
||||||
|
=============
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
--------
|
||||||
|
|
||||||
|
**display name**: ``Actuator``
|
||||||
|
|
||||||
|
**goal**: ``unclassified``
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.actuation.Actuator
|
||||||
|
|
||||||
|
Requirements
|
||||||
|
------------
|
||||||
|
|
||||||
|
Metrics
|
||||||
|
*******
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
Cluster data model
|
||||||
|
******************
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
Actions
|
||||||
|
*******
|
||||||
|
|
||||||
|
Default Watcher's actions.
|
||||||
|
|
||||||
|
Planner
|
||||||
|
*******
|
||||||
|
|
||||||
|
Default Watcher's planner:
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner
|
||||||
|
|
||||||
|
Configuration
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Strategy parameters are:
|
||||||
|
|
||||||
|
==================== ====== ===================== =============================
|
||||||
|
parameter type default Value description
|
||||||
|
==================== ====== ===================== =============================
|
||||||
|
``actions`` array None Actions to be executed.
|
||||||
|
==================== ====== ===================== =============================
|
||||||
|
|
||||||
|
The elements of actions array are:
|
||||||
|
|
||||||
|
==================== ====== ===================== =============================
|
||||||
|
parameter type default Value description
|
||||||
|
==================== ====== ===================== =============================
|
||||||
|
``action_type`` string None Action name defined in
|
||||||
|
setup.cfg(mandatory)
|
||||||
|
``resource_id`` string None Resource_id of the action.
|
||||||
|
``input_parameters`` object None Input_parameters of the
|
||||||
|
action(mandatory).
|
||||||
|
==================== ====== ===================== =============================
|
||||||
|
|
||||||
|
Efficacy Indicator
|
||||||
|
------------------
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
Algorithm
|
||||||
|
---------
|
||||||
|
|
||||||
|
This strategy create an action plan with a predefined set of actions.
|
||||||
|
|
||||||
|
How to use it ?
|
||||||
|
---------------
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
$ openstack optimize audittemplate create \
|
||||||
|
at1 unclassified --strategy actuator
|
||||||
|
|
||||||
|
$ openstack optimize audit create -a at1 \
|
||||||
|
-p actions='[{"action_type": "migrate", "resource_id": "56a40802-6fde-4b59-957c-c84baec7eaed", "input_parameters": {"migration_type": "live", "source_node": "s01"}}]'
|
||||||
|
|
||||||
|
External Links
|
||||||
|
--------------
|
||||||
|
|
||||||
|
None
|
||||||
@@ -9,7 +9,7 @@ Synopsis
|
|||||||
|
|
||||||
**goal**: ``server_consolidation``
|
**goal**: ``server_consolidation``
|
||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation.BasicConsolidation
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
------------
|
------------
|
||||||
|
|||||||
92
doc/source/strategies/host_maintenance.rst
Normal file
92
doc/source/strategies/host_maintenance.rst
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
===========================
|
||||||
|
Host Maintenance Strategy
|
||||||
|
===========================
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
--------
|
||||||
|
|
||||||
|
**display name**: ``Host Maintenance Strategy``
|
||||||
|
|
||||||
|
**goal**: ``cluster_maintaining``
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.host_maintenance.HostMaintenance
|
||||||
|
|
||||||
|
Requirements
|
||||||
|
------------
|
||||||
|
|
||||||
|
None.
|
||||||
|
|
||||||
|
Metrics
|
||||||
|
*******
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
Cluster data model
|
||||||
|
******************
|
||||||
|
|
||||||
|
Default Watcher's Compute cluster data model:
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector
|
||||||
|
|
||||||
|
Actions
|
||||||
|
*******
|
||||||
|
|
||||||
|
Default Watcher's actions:
|
||||||
|
|
||||||
|
.. list-table::
|
||||||
|
:widths: 30 30
|
||||||
|
:header-rows: 1
|
||||||
|
|
||||||
|
* - action
|
||||||
|
- description
|
||||||
|
* - ``migration``
|
||||||
|
- .. watcher-term:: watcher.applier.actions.migration.Migrate
|
||||||
|
|
||||||
|
Planner
|
||||||
|
*******
|
||||||
|
|
||||||
|
Default Watcher's planner:
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner
|
||||||
|
|
||||||
|
Configuration
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Strategy parameters are:
|
||||||
|
|
||||||
|
==================== ====== ====================================
|
||||||
|
parameter type default Value description
|
||||||
|
==================== ====== ====================================
|
||||||
|
``maintenance_node`` String The name of the compute node which
|
||||||
|
need maintenance. Required.
|
||||||
|
``backup_node`` String The name of the compute node which
|
||||||
|
will backup the maintenance node.
|
||||||
|
Optional.
|
||||||
|
==================== ====== ====================================
|
||||||
|
|
||||||
|
Efficacy Indicator
|
||||||
|
------------------
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
Algorithm
|
||||||
|
---------
|
||||||
|
|
||||||
|
For more information on the Host Maintenance Strategy please refer
|
||||||
|
to: https://specs.openstack.org/openstack/watcher-specs/specs/queens/approved/cluster-maintenance-strategy.html
|
||||||
|
|
||||||
|
How to use it ?
|
||||||
|
---------------
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
$ openstack optimize audit create \
|
||||||
|
-g cluster_maintaining -s host_maintenance \
|
||||||
|
-p maintenance_node=compute01 \
|
||||||
|
-p backup_node=compute02 \
|
||||||
|
--auto-trigger
|
||||||
|
|
||||||
|
External Links
|
||||||
|
--------------
|
||||||
|
|
||||||
|
None.
|
||||||
@@ -9,11 +9,7 @@ Synopsis
|
|||||||
|
|
||||||
**goal**: ``thermal_optimization``
|
**goal**: ``thermal_optimization``
|
||||||
|
|
||||||
Outlet (Exhaust Air) temperature is a new thermal telemetry which can be
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.outlet_temp_control
|
||||||
used to measure the host's thermal/workload status. This strategy makes
|
|
||||||
decisions to migrate workloads to the hosts with good thermal condition
|
|
||||||
(lowest outlet temperature) when the outlet temperature of source hosts
|
|
||||||
reach a configurable threshold.
|
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
------------
|
------------
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Synopsis
|
|||||||
|
|
||||||
**goal**: ``saving_energy``
|
**goal**: ``saving_energy``
|
||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.saving_energy
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.saving_energy.SavingEnergy
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
------------
|
------------
|
||||||
@@ -67,13 +67,13 @@ parameter type default description
|
|||||||
Efficacy Indicator
|
Efficacy Indicator
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
Energy saving strategy efficacy indicator is unclassified.
|
None
|
||||||
https://github.com/openstack/watcher/blob/master/watcher/decision_engine/goal/goals.py#L215-L218
|
|
||||||
|
|
||||||
Algorithm
|
Algorithm
|
||||||
---------
|
---------
|
||||||
|
|
||||||
For more information on the Energy Saving Strategy please refer to:http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
For more information on the Energy Saving Strategy please refer to:
|
||||||
|
http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
||||||
|
|
||||||
How to use it ?
|
How to use it ?
|
||||||
---------------
|
---------------
|
||||||
@@ -91,10 +91,10 @@ step 2: Create audit to do optimization
|
|||||||
$ openstack optimize audittemplate create \
|
$ openstack optimize audittemplate create \
|
||||||
at1 saving_energy --strategy saving_energy
|
at1 saving_energy --strategy saving_energy
|
||||||
|
|
||||||
$ openstack optimize audit create -a at1
|
$ openstack optimize audit create -a at1 \
|
||||||
|
-p free_used_percent=20.0
|
||||||
|
|
||||||
External Links
|
External Links
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
*Spec URL*
|
None
|
||||||
http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html
|
|
||||||
|
|||||||
87
doc/source/strategies/storage_capacity_balance.rst
Normal file
87
doc/source/strategies/storage_capacity_balance.rst
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
========================
|
||||||
|
Storage capacity balance
|
||||||
|
========================
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
--------
|
||||||
|
|
||||||
|
**display name**: ``Storage Capacity Balance Strategy``
|
||||||
|
|
||||||
|
**goal**: ``workload_balancing``
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.storage_capacity_balance.StorageCapacityBalance
|
||||||
|
|
||||||
|
Requirements
|
||||||
|
------------
|
||||||
|
|
||||||
|
Metrics
|
||||||
|
*******
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
Cluster data model
|
||||||
|
******************
|
||||||
|
|
||||||
|
Storage cluster data model is required:
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.model.collector.cinder.CinderClusterDataModelCollector
|
||||||
|
|
||||||
|
Actions
|
||||||
|
*******
|
||||||
|
|
||||||
|
Default Watcher's actions:
|
||||||
|
|
||||||
|
.. list-table::
|
||||||
|
:widths: 25 35
|
||||||
|
:header-rows: 1
|
||||||
|
|
||||||
|
* - action
|
||||||
|
- description
|
||||||
|
* - ``volume_migrate``
|
||||||
|
- .. watcher-term:: watcher.applier.actions.volume_migration.VolumeMigrate
|
||||||
|
|
||||||
|
Planner
|
||||||
|
*******
|
||||||
|
|
||||||
|
Default Watcher's planner:
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner
|
||||||
|
|
||||||
|
Configuration
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Strategy parameter is:
|
||||||
|
|
||||||
|
==================== ====== ============= =====================================
|
||||||
|
parameter type default Value description
|
||||||
|
==================== ====== ============= =====================================
|
||||||
|
``volume_threshold`` Number 80.0 Volume threshold for capacity balance
|
||||||
|
==================== ====== ============= =====================================
|
||||||
|
|
||||||
|
|
||||||
|
Efficacy Indicator
|
||||||
|
------------------
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
Algorithm
|
||||||
|
---------
|
||||||
|
|
||||||
|
For more information on the zone migration strategy please refer to:
|
||||||
|
http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/storage-capacity-balance.html
|
||||||
|
|
||||||
|
How to use it ?
|
||||||
|
---------------
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
$ openstack optimize audittemplate create \
|
||||||
|
at1 workload_balancing --strategy storage_capacity_balance
|
||||||
|
|
||||||
|
$ openstack optimize audit create -a at1 \
|
||||||
|
-p volume_threshold=85.0
|
||||||
|
|
||||||
|
External Links
|
||||||
|
--------------
|
||||||
|
|
||||||
|
None
|
||||||
@@ -9,7 +9,7 @@ Synopsis
|
|||||||
|
|
||||||
**goal**: ``airflow_optimization``
|
**goal**: ``airflow_optimization``
|
||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow.UniformAirflow
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
------------
|
------------
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Synopsis
|
|||||||
|
|
||||||
**goal**: ``vm_consolidation``
|
**goal**: ``vm_consolidation``
|
||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation.VMWorkloadConsolidation
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
------------
|
------------
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Synopsis
|
|||||||
|
|
||||||
**goal**: ``workload_balancing``
|
**goal**: ``workload_balancing``
|
||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization.WorkloadStabilization
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
------------
|
------------
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Synopsis
|
|||||||
|
|
||||||
**goal**: ``workload_balancing``
|
**goal**: ``workload_balancing``
|
||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance.WorkloadBalance
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
------------
|
------------
|
||||||
|
|||||||
154
doc/source/strategies/zone_migration.rst
Normal file
154
doc/source/strategies/zone_migration.rst
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
==============
|
||||||
|
Zone migration
|
||||||
|
==============
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
--------
|
||||||
|
|
||||||
|
**display name**: ``Zone migration``
|
||||||
|
|
||||||
|
**goal**: ``hardware_maintenance``
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.zone_migration.ZoneMigration
|
||||||
|
|
||||||
|
Requirements
|
||||||
|
------------
|
||||||
|
|
||||||
|
Metrics
|
||||||
|
*******
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
Cluster data model
|
||||||
|
******************
|
||||||
|
|
||||||
|
Default Watcher's Compute cluster data model:
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector
|
||||||
|
|
||||||
|
Storage cluster data model is also required:
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.model.collector.cinder.CinderClusterDataModelCollector
|
||||||
|
|
||||||
|
Actions
|
||||||
|
*******
|
||||||
|
|
||||||
|
|
||||||
|
Default Watcher's actions:
|
||||||
|
|
||||||
|
.. list-table::
|
||||||
|
:widths: 30 30
|
||||||
|
:header-rows: 1
|
||||||
|
|
||||||
|
* - action
|
||||||
|
- description
|
||||||
|
* - ``migrate``
|
||||||
|
- .. watcher-term:: watcher.applier.actions.migration.Migrate
|
||||||
|
* - ``volume_migrate``
|
||||||
|
- .. watcher-term:: watcher.applier.actions.volume_migration.VolumeMigrate
|
||||||
|
|
||||||
|
Planner
|
||||||
|
*******
|
||||||
|
|
||||||
|
Default Watcher's planner:
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner
|
||||||
|
|
||||||
|
Configuration
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Strategy parameters are:
|
||||||
|
|
||||||
|
======================== ======== ============= ==============================
|
||||||
|
parameter type default Value description
|
||||||
|
======================== ======== ============= ==============================
|
||||||
|
``compute_nodes`` array None Compute nodes to migrate.
|
||||||
|
``storage_pools`` array None Storage pools to migrate.
|
||||||
|
``parallel_total`` integer 6 The number of actions to be
|
||||||
|
run in parallel in total.
|
||||||
|
``parallel_per_node`` integer 2 The number of actions to be
|
||||||
|
run in parallel per compute
|
||||||
|
node.
|
||||||
|
``parallel_per_pool`` integer 2 The number of actions to be
|
||||||
|
run in parallel per storage
|
||||||
|
pool.
|
||||||
|
``priority`` object None List prioritizes instances
|
||||||
|
and volumes.
|
||||||
|
``with_attached_volume`` boolean False False: Instances will migrate
|
||||||
|
after all volumes migrate.
|
||||||
|
True: An instance will migrate
|
||||||
|
after the attached volumes
|
||||||
|
migrate.
|
||||||
|
======================== ======== ============= ==============================
|
||||||
|
|
||||||
|
The elements of compute_nodes array are:
|
||||||
|
|
||||||
|
============= ======= =============== =============================
|
||||||
|
parameter type default Value description
|
||||||
|
============= ======= =============== =============================
|
||||||
|
``src_node`` string None Compute node from which
|
||||||
|
instances migrate(mandatory).
|
||||||
|
``dst_node`` string None Compute node to which
|
||||||
|
instances migrate.
|
||||||
|
============= ======= =============== =============================
|
||||||
|
|
||||||
|
The elements of storage_pools array are:
|
||||||
|
|
||||||
|
============= ======= =============== ==============================
|
||||||
|
parameter type default Value description
|
||||||
|
============= ======= =============== ==============================
|
||||||
|
``src_pool`` string None Storage pool from which
|
||||||
|
volumes migrate(mandatory).
|
||||||
|
``dst_pool`` string None Storage pool to which
|
||||||
|
volumes migrate.
|
||||||
|
``src_type`` string None Source volume type(mandatory).
|
||||||
|
``dst_type`` string None Destination volume type
|
||||||
|
(mandatory).
|
||||||
|
============= ======= =============== ==============================
|
||||||
|
|
||||||
|
The elements of priority object are:
|
||||||
|
|
||||||
|
================ ======= =============== ======================
|
||||||
|
parameter type default Value description
|
||||||
|
================ ======= =============== ======================
|
||||||
|
``project`` array None Project names.
|
||||||
|
``compute_node`` array None Compute node names.
|
||||||
|
``storage_pool`` array None Storage pool names.
|
||||||
|
``compute`` enum None Instance attributes.
|
||||||
|
|compute|
|
||||||
|
``storage`` enum None Volume attributes.
|
||||||
|
|storage|
|
||||||
|
================ ======= =============== ======================
|
||||||
|
|
||||||
|
.. |compute| replace:: ["vcpu_num", "mem_size", "disk_size", "created_at"]
|
||||||
|
.. |storage| replace:: ["size", "created_at"]
|
||||||
|
|
||||||
|
Efficacy Indicator
|
||||||
|
------------------
|
||||||
|
|
||||||
|
.. watcher-func::
|
||||||
|
:format: literal_block
|
||||||
|
|
||||||
|
watcher.decision_engine.goal.efficacy.specs.HardwareMaintenance.get_global_efficacy_indicator
|
||||||
|
|
||||||
|
Algorithm
|
||||||
|
---------
|
||||||
|
|
||||||
|
For more information on the zone migration strategy please refer
|
||||||
|
to: http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/zone-migration-strategy.html
|
||||||
|
|
||||||
|
How to use it ?
|
||||||
|
---------------
|
||||||
|
|
||||||
|
.. code-block:: shell
|
||||||
|
|
||||||
|
$ openstack optimize audittemplate create \
|
||||||
|
at1 hardware_maintenance --strategy zone_migration
|
||||||
|
|
||||||
|
$ openstack optimize audit create -a at1 \
|
||||||
|
-p compute_nodes='[{"src_node": "s01", "dst_node": "d01"}]'
|
||||||
|
|
||||||
|
External Links
|
||||||
|
--------------
|
||||||
|
|
||||||
|
None
|
||||||
@@ -39,6 +39,22 @@ named ``watcher``, or by using the `OpenStack CLI`_ ``openstack``.
|
|||||||
If you want to deploy Watcher in Horizon, please refer to the `Watcher Horizon
|
If you want to deploy Watcher in Horizon, please refer to the `Watcher Horizon
|
||||||
plugin installation guide`_.
|
plugin installation guide`_.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
Notice, that in this guide we'll use `OpenStack CLI`_ as major interface.
|
||||||
|
Nevertheless, you can use `Watcher CLI`_ in the same way. It can be
|
||||||
|
achieved by replacing
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
$ openstack optimize ...
|
||||||
|
|
||||||
|
with
|
||||||
|
|
||||||
|
.. code:: bash
|
||||||
|
|
||||||
|
$ watcher ...
|
||||||
|
|
||||||
.. _`installation guide`: https://docs.openstack.org/python-watcherclient/latest
|
.. _`installation guide`: https://docs.openstack.org/python-watcherclient/latest
|
||||||
.. _`Watcher Horizon plugin installation guide`: https://docs.openstack.org/watcher-dashboard/latest/install/installation.html
|
.. _`Watcher Horizon plugin installation guide`: https://docs.openstack.org/watcher-dashboard/latest/install/installation.html
|
||||||
.. _`OpenStack CLI`: https://docs.openstack.org/python-openstackclient/latest/cli/man/openstack.html
|
.. _`OpenStack CLI`: https://docs.openstack.org/python-openstackclient/latest/cli/man/openstack.html
|
||||||
@@ -51,10 +67,6 @@ watcher binary without options.
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher help
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack help optimize
|
$ openstack help optimize
|
||||||
|
|
||||||
How do I run an audit of my cluster ?
|
How do I run an audit of my cluster ?
|
||||||
@@ -64,10 +76,6 @@ First, you need to find the :ref:`goal <goal_definition>` you want to achieve:
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher goal list
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize goal list
|
$ openstack optimize goal list
|
||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
@@ -81,10 +89,6 @@ An :ref:`audit template <audit_template_definition>` defines an optimization
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher audittemplate create my_first_audit_template <your_goal>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize audittemplate create my_first_audit_template <your_goal>
|
$ openstack optimize audittemplate create my_first_audit_template <your_goal>
|
||||||
|
|
||||||
Although optional, you may want to actually set a specific strategy for your
|
Although optional, you may want to actually set a specific strategy for your
|
||||||
@@ -93,10 +97,6 @@ following command:
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher strategy list --goal <your_goal_uuid_or_name>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize strategy list --goal <your_goal_uuid_or_name>
|
$ openstack optimize strategy list --goal <your_goal_uuid_or_name>
|
||||||
|
|
||||||
You can use the following command to check strategy details including which
|
You can use the following command to check strategy details including which
|
||||||
@@ -104,21 +104,12 @@ parameters of which format it supports:
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher strategy show <your_strategy>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize strategy show <your_strategy>
|
$ openstack optimize strategy show <your_strategy>
|
||||||
|
|
||||||
The command to create your audit template would then be:
|
The command to create your audit template would then be:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher audittemplate create my_first_audit_template <your_goal> \
|
|
||||||
--strategy <your_strategy>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize audittemplate create my_first_audit_template <your_goal> \
|
$ openstack optimize audittemplate create my_first_audit_template <your_goal> \
|
||||||
--strategy <your_strategy>
|
--strategy <your_strategy>
|
||||||
|
|
||||||
@@ -133,10 +124,6 @@ audit) that you want to use.
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher audittemplate list
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize audittemplate list
|
$ openstack optimize audittemplate list
|
||||||
|
|
||||||
- Start an audit based on this :ref:`audit template
|
- Start an audit based on this :ref:`audit template
|
||||||
@@ -144,10 +131,6 @@ or::
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher audit create -a <your_audit_template>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize audit create -a <your_audit_template>
|
$ openstack optimize audit create -a <your_audit_template>
|
||||||
|
|
||||||
If your_audit_template was created by --strategy <your_strategy>, and it
|
If your_audit_template was created by --strategy <your_strategy>, and it
|
||||||
@@ -156,11 +139,6 @@ format), your can append `-p` to input required parameters:
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher audit create -a <your_audit_template> \
|
|
||||||
-p <your_strategy_para1>=5.5 -p <your_strategy_para2>=hi
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize audit create -a <your_audit_template> \
|
$ openstack optimize audit create -a <your_audit_template> \
|
||||||
-p <your_strategy_para1>=5.5 -p <your_strategy_para2>=hi
|
-p <your_strategy_para1>=5.5 -p <your_strategy_para2>=hi
|
||||||
|
|
||||||
@@ -173,19 +151,13 @@ Input parameter could cause audit creation failure, when:
|
|||||||
Watcher service will compute an :ref:`Action Plan <action_plan_definition>`
|
Watcher service will compute an :ref:`Action Plan <action_plan_definition>`
|
||||||
composed of a list of potential optimization :ref:`actions <action_definition>`
|
composed of a list of potential optimization :ref:`actions <action_definition>`
|
||||||
(instance migration, disabling of a compute node, ...) according to the
|
(instance migration, disabling of a compute node, ...) according to the
|
||||||
:ref:`goal <goal_definition>` to achieve. You can see all of the goals
|
:ref:`goal <goal_definition>` to achieve.
|
||||||
available in section ``[watcher_strategies]`` of the Watcher service
|
|
||||||
configuration file.
|
|
||||||
|
|
||||||
- Wait until the Watcher audit has produced a new :ref:`action plan
|
- Wait until the Watcher audit has produced a new :ref:`action plan
|
||||||
<action_plan_definition>`, and get it:
|
<action_plan_definition>`, and get it:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher actionplan list --audit <the_audit_uuid>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize actionplan list --audit <the_audit_uuid>
|
$ openstack optimize actionplan list --audit <the_audit_uuid>
|
||||||
|
|
||||||
- Have a look on the list of optimization :ref:`actions <action_definition>`
|
- Have a look on the list of optimization :ref:`actions <action_definition>`
|
||||||
@@ -193,10 +165,6 @@ or::
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher action list --action-plan <the_action_plan_uuid>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize action list --action-plan <the_action_plan_uuid>
|
$ openstack optimize action list --action-plan <the_action_plan_uuid>
|
||||||
|
|
||||||
Once you have learned how to create an :ref:`Action Plan
|
Once you have learned how to create an :ref:`Action Plan
|
||||||
@@ -207,10 +175,6 @@ cluster:
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher actionplan start <the_action_plan_uuid>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize actionplan start <the_action_plan_uuid>
|
$ openstack optimize actionplan start <the_action_plan_uuid>
|
||||||
|
|
||||||
You can follow the states of the :ref:`actions <action_definition>` by
|
You can follow the states of the :ref:`actions <action_definition>` by
|
||||||
@@ -218,19 +182,11 @@ periodically calling:
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher action list
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize action list
|
$ openstack optimize action list
|
||||||
|
|
||||||
You can also obtain more detailed information about a specific action:
|
You can also obtain more detailed information about a specific action:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
$ watcher action show <the_action_uuid>
|
|
||||||
|
|
||||||
or::
|
|
||||||
|
|
||||||
$ openstack optimize action show <the_action_uuid>
|
$ openstack optimize action show <the_action_uuid>
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,3 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
output_file = /etc/watcher/policy.yaml.sample
|
||||||
|
namespace = watcher
|
||||||
@@ -1,45 +0,0 @@
|
|||||||
{
|
|
||||||
"admin_api": "role:admin or role:administrator",
|
|
||||||
"show_password": "!",
|
|
||||||
"default": "rule:admin_api",
|
|
||||||
|
|
||||||
"action:detail": "rule:default",
|
|
||||||
"action:get": "rule:default",
|
|
||||||
"action:get_all": "rule:default",
|
|
||||||
|
|
||||||
"action_plan:delete": "rule:default",
|
|
||||||
"action_plan:detail": "rule:default",
|
|
||||||
"action_plan:get": "rule:default",
|
|
||||||
"action_plan:get_all": "rule:default",
|
|
||||||
"action_plan:update": "rule:default",
|
|
||||||
|
|
||||||
"audit:create": "rule:default",
|
|
||||||
"audit:delete": "rule:default",
|
|
||||||
"audit:detail": "rule:default",
|
|
||||||
"audit:get": "rule:default",
|
|
||||||
"audit:get_all": "rule:default",
|
|
||||||
"audit:update": "rule:default",
|
|
||||||
|
|
||||||
"audit_template:create": "rule:default",
|
|
||||||
"audit_template:delete": "rule:default",
|
|
||||||
"audit_template:detail": "rule:default",
|
|
||||||
"audit_template:get": "rule:default",
|
|
||||||
"audit_template:get_all": "rule:default",
|
|
||||||
"audit_template:update": "rule:default",
|
|
||||||
|
|
||||||
"goal:detail": "rule:default",
|
|
||||||
"goal:get": "rule:default",
|
|
||||||
"goal:get_all": "rule:default",
|
|
||||||
|
|
||||||
"scoring_engine:detail": "rule:default",
|
|
||||||
"scoring_engine:get": "rule:default",
|
|
||||||
"scoring_engine:get_all": "rule:default",
|
|
||||||
|
|
||||||
"strategy:detail": "rule:default",
|
|
||||||
"strategy:get": "rule:default",
|
|
||||||
"strategy:get_all": "rule:default",
|
|
||||||
|
|
||||||
"service:detail": "rule:default",
|
|
||||||
"service:get": "rule:default",
|
|
||||||
"service:get_all": "rule:default"
|
|
||||||
}
|
|
||||||
165
lower-constraints.txt
Normal file
165
lower-constraints.txt
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
alabaster==0.7.10
|
||||||
|
alembic==0.9.8
|
||||||
|
amqp==2.2.2
|
||||||
|
appdirs==1.4.3
|
||||||
|
APScheduler==3.5.1
|
||||||
|
asn1crypto==0.24.0
|
||||||
|
automaton==1.14.0
|
||||||
|
Babel==2.5.3
|
||||||
|
bandit==1.4.0
|
||||||
|
beautifulsoup4==4.6.0
|
||||||
|
cachetools==2.0.1
|
||||||
|
certifi==2018.1.18
|
||||||
|
cffi==1.11.5
|
||||||
|
chardet==3.0.4
|
||||||
|
cliff==2.11.0
|
||||||
|
cmd2==0.8.1
|
||||||
|
contextlib2==0.5.5
|
||||||
|
coverage==4.5.1
|
||||||
|
croniter==0.3.20
|
||||||
|
cryptography==2.1.4
|
||||||
|
debtcollector==1.19.0
|
||||||
|
decorator==4.2.1
|
||||||
|
deprecation==2.0
|
||||||
|
doc8==0.8.0
|
||||||
|
docutils==0.14
|
||||||
|
dogpile.cache==0.6.5
|
||||||
|
dulwich==0.19.0
|
||||||
|
enum34==1.1.6
|
||||||
|
enum-compat==0.0.2
|
||||||
|
eventlet==0.20.0
|
||||||
|
extras==1.0.0
|
||||||
|
fasteners==0.14.1
|
||||||
|
fixtures==3.0.0
|
||||||
|
flake8==2.5.5
|
||||||
|
freezegun==0.3.10
|
||||||
|
future==0.16.0
|
||||||
|
futurist==1.6.0
|
||||||
|
gitdb2==2.0.3
|
||||||
|
GitPython==2.1.8
|
||||||
|
gnocchiclient==7.0.1
|
||||||
|
greenlet==0.4.13
|
||||||
|
hacking==0.12.0
|
||||||
|
idna==2.6
|
||||||
|
imagesize==1.0.0
|
||||||
|
iso8601==0.1.12
|
||||||
|
Jinja2==2.10
|
||||||
|
jmespath==0.9.3
|
||||||
|
jsonpatch==1.21
|
||||||
|
jsonpointer==2.0
|
||||||
|
jsonschema==2.6.0
|
||||||
|
keystoneauth1==3.4.0
|
||||||
|
keystonemiddleware==4.21.0
|
||||||
|
kombu==4.1.0
|
||||||
|
linecache2==1.0.0
|
||||||
|
logutils==0.3.5
|
||||||
|
lxml==4.1.1
|
||||||
|
Mako==1.0.7
|
||||||
|
MarkupSafe==1.0
|
||||||
|
mccabe==0.2.1
|
||||||
|
mock==2.0.0
|
||||||
|
monotonic==1.4
|
||||||
|
mox3==0.25.0
|
||||||
|
msgpack==0.5.6
|
||||||
|
munch==2.2.0
|
||||||
|
netaddr==0.7.19
|
||||||
|
netifaces==0.10.6
|
||||||
|
networkx==1.11
|
||||||
|
openstackdocstheme==1.20.0
|
||||||
|
openstacksdk==0.12.0
|
||||||
|
os-api-ref===1.4.0
|
||||||
|
os-client-config==1.29.0
|
||||||
|
os-service-types==1.2.0
|
||||||
|
os-testr==1.0.0
|
||||||
|
osc-lib==1.10.0
|
||||||
|
oslo.cache==1.29.0
|
||||||
|
oslo.concurrency==3.26.0
|
||||||
|
oslo.config==5.2.0
|
||||||
|
oslo.context==2.20.0
|
||||||
|
oslo.db==4.35.0
|
||||||
|
oslo.i18n==3.20.0
|
||||||
|
oslo.log==3.37.0
|
||||||
|
oslo.messaging==5.36.0
|
||||||
|
oslo.middleware==3.35.0
|
||||||
|
oslo.policy==1.34.0
|
||||||
|
oslo.reports==1.27.0
|
||||||
|
oslo.serialization==2.25.0
|
||||||
|
oslo.service==1.30.0
|
||||||
|
oslo.utils==3.36.0
|
||||||
|
oslo.versionedobjects==1.32.0
|
||||||
|
oslotest==3.3.0
|
||||||
|
packaging==17.1
|
||||||
|
Paste==2.0.3
|
||||||
|
PasteDeploy==1.5.2
|
||||||
|
pbr==3.1.1
|
||||||
|
pecan==1.2.1
|
||||||
|
pep8==1.5.7
|
||||||
|
pika==0.10.0
|
||||||
|
pika-pool==0.1.3
|
||||||
|
prettytable==0.7.2
|
||||||
|
psutil==5.4.3
|
||||||
|
pycadf==2.7.0
|
||||||
|
pycparser==2.18
|
||||||
|
pyflakes==0.8.1
|
||||||
|
Pygments==2.2.0
|
||||||
|
pyinotify==0.9.6
|
||||||
|
pyOpenSSL==17.5.0
|
||||||
|
pyparsing==2.2.0
|
||||||
|
pyperclip==1.6.0
|
||||||
|
python-ceilometerclient==2.9.0
|
||||||
|
python-cinderclient==3.5.0
|
||||||
|
python-dateutil==2.7.0
|
||||||
|
python-editor==1.0.3
|
||||||
|
python-glanceclient==2.9.1
|
||||||
|
python-ironicclient==2.3.0
|
||||||
|
python-keystoneclient==3.15.0
|
||||||
|
python-mimeparse==1.6.0
|
||||||
|
python-monascaclient==1.10.0
|
||||||
|
python-neutronclient==6.7.0
|
||||||
|
python-novaclient==10.1.0
|
||||||
|
python-openstackclient==3.14.0
|
||||||
|
python-subunit==1.2.0
|
||||||
|
pytz==2018.3
|
||||||
|
PyYAML==3.12
|
||||||
|
reno==2.7.0
|
||||||
|
repoze.lru==0.7
|
||||||
|
requests==2.18.4
|
||||||
|
requestsexceptions==1.4.0
|
||||||
|
restructuredtext-lint==1.1.3
|
||||||
|
rfc3986==1.1.0
|
||||||
|
Routes==2.4.1
|
||||||
|
simplegeneric==0.8.1
|
||||||
|
simplejson==3.13.2
|
||||||
|
six==1.11.0
|
||||||
|
smmap2==2.0.3
|
||||||
|
snowballstemmer==1.2.1
|
||||||
|
Sphinx==1.6.5
|
||||||
|
sphinxcontrib-httpdomain==1.6.1
|
||||||
|
sphinxcontrib-pecanwsme==0.8.0
|
||||||
|
sphinxcontrib-websupport==1.0.1
|
||||||
|
SQLAlchemy==1.2.5
|
||||||
|
sqlalchemy-migrate==0.11.0
|
||||||
|
sqlparse==0.2.4
|
||||||
|
statsd==3.2.2
|
||||||
|
stestr==2.0.0
|
||||||
|
stevedore==1.28.0
|
||||||
|
taskflow==3.1.0
|
||||||
|
Tempita==0.5.2
|
||||||
|
tenacity==4.9.0
|
||||||
|
testrepository==0.0.20
|
||||||
|
testresources==2.0.1
|
||||||
|
testscenarios==0.5.0
|
||||||
|
testtools==2.3.0
|
||||||
|
traceback2==1.4.0
|
||||||
|
tzlocal==1.5.1
|
||||||
|
ujson==1.35
|
||||||
|
unittest2==1.1.0
|
||||||
|
urllib3==1.22
|
||||||
|
vine==1.1.4
|
||||||
|
voluptuous==0.11.1
|
||||||
|
waitress==1.1.0
|
||||||
|
warlock==1.3.0
|
||||||
|
WebOb==1.7.4
|
||||||
|
WebTest==2.0.29
|
||||||
|
wrapt==1.10.11
|
||||||
|
WSME==0.9.2
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
- hosts: primary
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
|
||||||
synchronize:
|
|
||||||
src: '{{ ansible_user_dir }}/workspace/'
|
|
||||||
dest: '{{ zuul.executor.log_root }}'
|
|
||||||
mode: pull
|
|
||||||
copy_links: true
|
|
||||||
verify_host: true
|
|
||||||
rsync_opts:
|
|
||||||
- --include=/logs/**
|
|
||||||
- --include=*/
|
|
||||||
- --exclude=*
|
|
||||||
- --prune-empty-dirs
|
|
||||||
@@ -1,67 +0,0 @@
|
|||||||
- hosts: primary
|
|
||||||
name: Legacy Watcher tempest base multinode
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: Ensure legacy workspace directory
|
|
||||||
file:
|
|
||||||
path: '{{ ansible_user_dir }}/workspace'
|
|
||||||
state: directory
|
|
||||||
|
|
||||||
- shell:
|
|
||||||
cmd: |
|
|
||||||
set -e
|
|
||||||
set -x
|
|
||||||
cat > clonemap.yaml << EOF
|
|
||||||
clonemap:
|
|
||||||
- name: openstack-infra/devstack-gate
|
|
||||||
dest: devstack-gate
|
|
||||||
EOF
|
|
||||||
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
|
|
||||||
git://git.openstack.org \
|
|
||||||
openstack-infra/devstack-gate
|
|
||||||
executable: /bin/bash
|
|
||||||
chdir: '{{ ansible_user_dir }}/workspace'
|
|
||||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
|
||||||
|
|
||||||
- shell:
|
|
||||||
cmd: |
|
|
||||||
set -e
|
|
||||||
set -x
|
|
||||||
cat << 'EOF' >>"/tmp/dg-local.conf"
|
|
||||||
[[local|localrc]]
|
|
||||||
TEMPEST_PLUGINS='/opt/stack/new/watcher-tempest-plugin'
|
|
||||||
enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
|
|
||||||
# Enable watcher devstack plugin.
|
|
||||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
|
||||||
|
|
||||||
EOF
|
|
||||||
executable: /bin/bash
|
|
||||||
chdir: '{{ ansible_user_dir }}/workspace'
|
|
||||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
|
||||||
|
|
||||||
- shell:
|
|
||||||
cmd: |
|
|
||||||
set -e
|
|
||||||
set -x
|
|
||||||
|
|
||||||
export DEVSTACK_SUBNODE_CONFIG=" "
|
|
||||||
export PYTHONUNBUFFERED=true
|
|
||||||
export DEVSTACK_GATE_TEMPEST=1
|
|
||||||
export DEVSTACK_GATE_NEUTRON=1
|
|
||||||
export DEVSTACK_GATE_TOPOLOGY="multinode"
|
|
||||||
export PROJECTS="openstack/watcher $PROJECTS"
|
|
||||||
export PROJECTS="openstack/python-watcherclient $PROJECTS"
|
|
||||||
export PROJECTS="openstack/watcher-tempest-plugin $PROJECTS"
|
|
||||||
|
|
||||||
export DEVSTACK_GATE_TEMPEST_REGEX="watcher_tempest_plugin"
|
|
||||||
|
|
||||||
export BRANCH_OVERRIDE=default
|
|
||||||
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
|
|
||||||
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
|
|
||||||
fi
|
|
||||||
|
|
||||||
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
|
|
||||||
./safe-devstack-vm-gate-wrap.sh
|
|
||||||
executable: /bin/bash
|
|
||||||
chdir: '{{ ansible_user_dir }}/workspace'
|
|
||||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
|
||||||
@@ -1,80 +0,0 @@
|
|||||||
- hosts: primary
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
|
||||||
synchronize:
|
|
||||||
src: '{{ ansible_user_dir }}/workspace/'
|
|
||||||
dest: '{{ zuul.executor.log_root }}'
|
|
||||||
mode: pull
|
|
||||||
copy_links: true
|
|
||||||
verify_host: true
|
|
||||||
rsync_opts:
|
|
||||||
- --include=**/*nose_results.html
|
|
||||||
- --include=*/
|
|
||||||
- --exclude=*
|
|
||||||
- --prune-empty-dirs
|
|
||||||
|
|
||||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
|
||||||
synchronize:
|
|
||||||
src: '{{ ansible_user_dir }}/workspace/'
|
|
||||||
dest: '{{ zuul.executor.log_root }}'
|
|
||||||
mode: pull
|
|
||||||
copy_links: true
|
|
||||||
verify_host: true
|
|
||||||
rsync_opts:
|
|
||||||
- --include=**/*testr_results.html.gz
|
|
||||||
- --include=*/
|
|
||||||
- --exclude=*
|
|
||||||
- --prune-empty-dirs
|
|
||||||
|
|
||||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
|
||||||
synchronize:
|
|
||||||
src: '{{ ansible_user_dir }}/workspace/'
|
|
||||||
dest: '{{ zuul.executor.log_root }}'
|
|
||||||
mode: pull
|
|
||||||
copy_links: true
|
|
||||||
verify_host: true
|
|
||||||
rsync_opts:
|
|
||||||
- --include=/.testrepository/tmp*
|
|
||||||
- --include=*/
|
|
||||||
- --exclude=*
|
|
||||||
- --prune-empty-dirs
|
|
||||||
|
|
||||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
|
||||||
synchronize:
|
|
||||||
src: '{{ ansible_user_dir }}/workspace/'
|
|
||||||
dest: '{{ zuul.executor.log_root }}'
|
|
||||||
mode: pull
|
|
||||||
copy_links: true
|
|
||||||
verify_host: true
|
|
||||||
rsync_opts:
|
|
||||||
- --include=**/*testrepository.subunit.gz
|
|
||||||
- --include=*/
|
|
||||||
- --exclude=*
|
|
||||||
- --prune-empty-dirs
|
|
||||||
|
|
||||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
|
||||||
synchronize:
|
|
||||||
src: '{{ ansible_user_dir }}/workspace/'
|
|
||||||
dest: '{{ zuul.executor.log_root }}/tox'
|
|
||||||
mode: pull
|
|
||||||
copy_links: true
|
|
||||||
verify_host: true
|
|
||||||
rsync_opts:
|
|
||||||
- --include=/.tox/*/log/*
|
|
||||||
- --include=*/
|
|
||||||
- --exclude=*
|
|
||||||
- --prune-empty-dirs
|
|
||||||
|
|
||||||
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
|
|
||||||
synchronize:
|
|
||||||
src: '{{ ansible_user_dir }}/workspace/'
|
|
||||||
dest: '{{ zuul.executor.log_root }}'
|
|
||||||
mode: pull
|
|
||||||
copy_links: true
|
|
||||||
verify_host: true
|
|
||||||
rsync_opts:
|
|
||||||
- --include=/logs/**
|
|
||||||
- --include=*/
|
|
||||||
- --exclude=*
|
|
||||||
- --prune-empty-dirs
|
|
||||||
@@ -1,64 +0,0 @@
|
|||||||
- hosts: all
|
|
||||||
name: Legacy watcherclient-dsvm-functional
|
|
||||||
tasks:
|
|
||||||
|
|
||||||
- name: Ensure legacy workspace directory
|
|
||||||
file:
|
|
||||||
path: '{{ ansible_user_dir }}/workspace'
|
|
||||||
state: directory
|
|
||||||
|
|
||||||
- shell:
|
|
||||||
cmd: |
|
|
||||||
set -e
|
|
||||||
set -x
|
|
||||||
cat > clonemap.yaml << EOF
|
|
||||||
clonemap:
|
|
||||||
- name: openstack-infra/devstack-gate
|
|
||||||
dest: devstack-gate
|
|
||||||
EOF
|
|
||||||
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
|
|
||||||
git://git.openstack.org \
|
|
||||||
openstack-infra/devstack-gate
|
|
||||||
executable: /bin/bash
|
|
||||||
chdir: '{{ ansible_user_dir }}/workspace'
|
|
||||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
|
||||||
|
|
||||||
- shell:
|
|
||||||
cmd: |
|
|
||||||
set -e
|
|
||||||
set -x
|
|
||||||
cat << 'EOF' >>"/tmp/dg-local.conf"
|
|
||||||
[[local|localrc]]
|
|
||||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
|
||||||
|
|
||||||
EOF
|
|
||||||
executable: /bin/bash
|
|
||||||
chdir: '{{ ansible_user_dir }}/workspace'
|
|
||||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
|
||||||
|
|
||||||
- shell:
|
|
||||||
cmd: |
|
|
||||||
set -e
|
|
||||||
set -x
|
|
||||||
ENABLED_SERVICES=tempest
|
|
||||||
ENABLED_SERVICES+=,watcher-api,watcher-decision-engine,watcher-applier
|
|
||||||
export ENABLED_SERVICES
|
|
||||||
|
|
||||||
export PYTHONUNBUFFERED=true
|
|
||||||
export BRANCH_OVERRIDE=default
|
|
||||||
export PROJECTS="openstack/watcher $PROJECTS"
|
|
||||||
export DEVSTACK_PROJECT_FROM_GIT=python-watcherclient
|
|
||||||
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
|
|
||||||
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
|
|
||||||
fi
|
|
||||||
function post_test_hook {
|
|
||||||
# Configure and run functional tests
|
|
||||||
$BASE/new/python-watcherclient/watcherclient/tests/functional/hooks/post_test_hook.sh
|
|
||||||
}
|
|
||||||
export -f post_test_hook
|
|
||||||
|
|
||||||
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
|
|
||||||
./safe-devstack-vm-gate-wrap.sh
|
|
||||||
executable: /bin/bash
|
|
||||||
chdir: '{{ ansible_user_dir }}/workspace'
|
|
||||||
environment: '{{ zuul | zuul_legacy_vars }}'
|
|
||||||
14
playbooks/orchestrate-tempest.yaml
Normal file
14
playbooks/orchestrate-tempest.yaml
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
- hosts: all
|
||||||
|
# This is the default strategy, however since orchestrate-devstack requires
|
||||||
|
# "linear", it is safer to enforce it in case this is running in an
|
||||||
|
# environment configured with a different default strategy.
|
||||||
|
strategy: linear
|
||||||
|
roles:
|
||||||
|
- orchestrate-devstack
|
||||||
|
|
||||||
|
- hosts: tempest
|
||||||
|
roles:
|
||||||
|
- setup-tempest-run-dir
|
||||||
|
- setup-tempest-data-dir
|
||||||
|
- acl-devstack-files
|
||||||
|
- run-tempest
|
||||||
3
playbooks/pre.yaml
Normal file
3
playbooks/pre.yaml
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
- hosts: all
|
||||||
|
roles:
|
||||||
|
- add-hostnames-to-hosts
|
||||||
@@ -29,7 +29,7 @@ Useful links
|
|||||||
|
|
||||||
* How to install: https://docs.openstack.org/rally/latest/install_and_upgrade/install.html
|
* How to install: https://docs.openstack.org/rally/latest/install_and_upgrade/install.html
|
||||||
|
|
||||||
* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
|
* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/quick_start/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
|
||||||
|
|
||||||
* More about Rally: https://docs.openstack.org/rally/latest/
|
* More about Rally: https://docs.openstack.org/rally/latest/
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,4 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- Audits have 'name' field now, that is more friendly to end users.
|
||||||
|
Audit's name can't exceed 63 characters.
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
Adds audit scoper for storage data model, now watcher users can specify
|
||||||
|
audit scope for storage CDM in the same manner as compute scope.
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
Feature to exclude instances from audit scope based on project_id is added.
|
||||||
|
Now instances from particular project in OpenStack can be excluded from audit
|
||||||
|
defining scope in audit templates.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
Adds baremetal data model in Watcher
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- Added a way to check state of strategy before audit's execution.
|
||||||
|
Administrator can use "watcher strategy state <strategy_name>" command
|
||||||
|
to get information about metrics' availability, datasource's availability
|
||||||
|
and CDM's availability.
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- Watcher has a whole scope of the cluster, when building
|
||||||
|
compute CDM which includes all instances.
|
||||||
|
It filters excluded instances when migration during the
|
||||||
|
audit.
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
Added a strategy for one compute node maintenance,
|
||||||
|
without having the user's application been interrupted.
|
||||||
|
If given one backup node, the strategy will firstly
|
||||||
|
migrate all instances from the maintenance node to
|
||||||
|
the backup node. If the backup node is not provided,
|
||||||
|
it will migrate all instances, relying on nova-scheduler.
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- Watcher got an ability to calculate multiple global efficacy indicators
|
||||||
|
during audit's execution. Now global efficacy can be calculated for many
|
||||||
|
resource types (like volumes, instances, network) if strategy supports
|
||||||
|
efficacy indicators.
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- Added notifications about cancelling of action plan.
|
||||||
|
Now event based plugins know when action plan cancel
|
||||||
|
started and completed.
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
Instance cold migration logic is now replaced with using Nova migrate
|
||||||
|
Server(migrate Action) API which has host option since v2.56.
|
||||||
|
upgrade:
|
||||||
|
- |
|
||||||
|
Nova API version is now set to 2.56 by default. This needs the migrate
|
||||||
|
action of migration type cold with destination_node parameter to work.
|
||||||
|
fixes:
|
||||||
|
- |
|
||||||
|
The migrate action of migration type cold with destination_node parameter
|
||||||
|
was fixed. Before fixing, it booted an instance in the service project
|
||||||
|
as a migrated instance.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
Added storage capacity balance strategy.
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
Added strategy "Zone migration" and it's goal "Hardware maintenance".
|
||||||
|
The strategy migrates many instances and volumes efficiently with
|
||||||
|
minimum downtime automatically.
|
||||||
@@ -24,7 +24,6 @@
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
from watcher import version as watcher_version
|
|
||||||
|
|
||||||
# If extensions (or modules to document with autodoc) are in another directory,
|
# If extensions (or modules to document with autodoc) are in another directory,
|
||||||
# add these directories to sys.path here. If the directory is relative to the
|
# add these directories to sys.path here. If the directory is relative to the
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ Contents:
|
|||||||
:maxdepth: 1
|
:maxdepth: 1
|
||||||
|
|
||||||
unreleased
|
unreleased
|
||||||
|
queens
|
||||||
pike
|
pike
|
||||||
ocata
|
ocata
|
||||||
newton
|
newton
|
||||||
|
|||||||
@@ -1,26 +1,23 @@
|
|||||||
# Andi Chandler <andi@gowling.com>, 2016. #zanata
|
|
||||||
# Andi Chandler <andi@gowling.com>, 2017. #zanata
|
# Andi Chandler <andi@gowling.com>, 2017. #zanata
|
||||||
|
# Andi Chandler <andi@gowling.com>, 2018. #zanata
|
||||||
msgid ""
|
msgid ""
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Project-Id-Version: watcher 1.4.1.dev113\n"
|
"Project-Id-Version: watcher\n"
|
||||||
"Report-Msgid-Bugs-To: \n"
|
"Report-Msgid-Bugs-To: \n"
|
||||||
"POT-Creation-Date: 2017-10-23 04:03+0000\n"
|
"POT-Creation-Date: 2018-02-28 12:27+0000\n"
|
||||||
"MIME-Version: 1.0\n"
|
"MIME-Version: 1.0\n"
|
||||||
"Content-Type: text/plain; charset=UTF-8\n"
|
"Content-Type: text/plain; charset=UTF-8\n"
|
||||||
"Content-Transfer-Encoding: 8bit\n"
|
"Content-Transfer-Encoding: 8bit\n"
|
||||||
"PO-Revision-Date: 2017-10-21 06:22+0000\n"
|
"PO-Revision-Date: 2018-02-16 07:20+0000\n"
|
||||||
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
|
||||||
"Language-Team: English (United Kingdom)\n"
|
"Language-Team: English (United Kingdom)\n"
|
||||||
"Language: en-GB\n"
|
"Language: en_GB\n"
|
||||||
"X-Generator: Zanata 3.9.6\n"
|
"X-Generator: Zanata 4.3.3\n"
|
||||||
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
|
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
|
||||||
|
|
||||||
msgid "0.29.0"
|
msgid "0.29.0"
|
||||||
msgstr "0.29.0"
|
msgstr "0.29.0"
|
||||||
|
|
||||||
msgid "0.33.0"
|
|
||||||
msgstr "0.33.0"
|
|
||||||
|
|
||||||
msgid "0.34.0"
|
msgid "0.34.0"
|
||||||
msgstr "0.34.0"
|
msgstr "0.34.0"
|
||||||
|
|
||||||
@@ -39,6 +36,15 @@ msgstr "1.4.0"
|
|||||||
msgid "1.4.1"
|
msgid "1.4.1"
|
||||||
msgstr "1.4.1"
|
msgstr "1.4.1"
|
||||||
|
|
||||||
|
msgid "1.5.0"
|
||||||
|
msgstr "1.5.0"
|
||||||
|
|
||||||
|
msgid "1.6.0"
|
||||||
|
msgstr "1.6.0"
|
||||||
|
|
||||||
|
msgid "1.7.0"
|
||||||
|
msgstr "1.7.0"
|
||||||
|
|
||||||
msgid "Add a service supervisor to watch Watcher deamons."
|
msgid "Add a service supervisor to watch Watcher deamons."
|
||||||
msgstr "Add a service supervisor to watch Watcher daemons."
|
msgstr "Add a service supervisor to watch Watcher daemons."
|
||||||
|
|
||||||
@@ -74,17 +80,6 @@ msgstr ""
|
|||||||
msgid "Added SUSPENDED audit state"
|
msgid "Added SUSPENDED audit state"
|
||||||
msgstr "Added SUSPENDED audit state"
|
msgstr "Added SUSPENDED audit state"
|
||||||
|
|
||||||
msgid ""
|
|
||||||
"Added a generic scoring engine module, which will standardize interactions "
|
|
||||||
"with scoring engines through the common API. It is possible to use the "
|
|
||||||
"scoring engine by different Strategies, which improve the code and data "
|
|
||||||
"model re-use."
|
|
||||||
msgstr ""
|
|
||||||
"Added a generic scoring engine module, which will standardize interactions "
|
|
||||||
"with scoring engines through the common API. It is possible to use the "
|
|
||||||
"scoring engine by different Strategies, which improve the code and data "
|
|
||||||
"model re-use."
|
|
||||||
|
|
||||||
msgid ""
|
msgid ""
|
||||||
"Added a generic scoring engine module, which will standarize interactions "
|
"Added a generic scoring engine module, which will standarize interactions "
|
||||||
"with scoring engines through the common API. It is possible to use the "
|
"with scoring engines through the common API. It is possible to use the "
|
||||||
@@ -141,6 +136,17 @@ msgstr ""
|
|||||||
"Added a way to add a new action without having to amend the source code of "
|
"Added a way to add a new action without having to amend the source code of "
|
||||||
"the default planner."
|
"the default planner."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Added a way to check state of strategy before audit's execution. "
|
||||||
|
"Administrator can use \"watcher strategy state <strategy_name>\" command to "
|
||||||
|
"get information about metrics' availability, datasource's availability and "
|
||||||
|
"CDM's availability."
|
||||||
|
msgstr ""
|
||||||
|
"Added a way to check state of strategy before audit's execution. "
|
||||||
|
"Administrator can use \"watcher strategy state <strategy_name>\" command to "
|
||||||
|
"get information about metrics' availability, datasource's availability and "
|
||||||
|
"CDM's availability."
|
||||||
|
|
||||||
msgid ""
|
msgid ""
|
||||||
"Added a way to compare the efficacy of different strategies for a give "
|
"Added a way to compare the efficacy of different strategies for a give "
|
||||||
"optimization goal."
|
"optimization goal."
|
||||||
@@ -155,13 +161,6 @@ msgstr ""
|
|||||||
"Added a way to create periodic audit to be able to continuously optimise the "
|
"Added a way to create periodic audit to be able to continuously optimise the "
|
||||||
"cloud infrastructure."
|
"cloud infrastructure."
|
||||||
|
|
||||||
msgid ""
|
|
||||||
"Added a way to return the of available goals depending on which strategies "
|
|
||||||
"have been deployed on the node where the decision engine is running."
|
|
||||||
msgstr ""
|
|
||||||
"Added a way to return the of available goals depending on which strategies "
|
|
||||||
"have been deployed on the node where the decision engine is running."
|
|
||||||
|
|
||||||
msgid ""
|
msgid ""
|
||||||
"Added a way to return the of available goals depending on which strategies "
|
"Added a way to return the of available goals depending on which strategies "
|
||||||
"have been deployed on the node where the decison engine is running."
|
"have been deployed on the node where the decison engine is running."
|
||||||
@@ -195,13 +194,233 @@ msgstr ""
|
|||||||
"Added Gnocchi support as data source for metrics. Administrator can change "
|
"Added Gnocchi support as data source for metrics. Administrator can change "
|
||||||
"data source for each strategy using config file."
|
"data source for each strategy using config file."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Added notifications about cancelling of action plan. Now event based plugins "
|
||||||
|
"know when action plan cancel started and completed."
|
||||||
|
msgstr ""
|
||||||
|
"Added notifications about cancelling of action plan. Now event based plugins "
|
||||||
|
"know when action plan cancel started and completed."
|
||||||
|
|
||||||
msgid "Added policies to handle user rights to access Watcher API."
|
msgid "Added policies to handle user rights to access Watcher API."
|
||||||
msgstr "Added policies to handle user rights to access Watcher API."
|
msgstr "Added policies to handle user rights to access Watcher API."
|
||||||
|
|
||||||
#, fuzzy
|
msgid "Added storage capacity balance strategy."
|
||||||
|
msgstr "Added storage capacity balance strategy."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Added strategy \"Zone migration\" and it's goal \"Hardware maintenance\". "
|
||||||
|
"The strategy migrates many instances and volumes efficiently with minimum "
|
||||||
|
"downtime automatically."
|
||||||
|
msgstr ""
|
||||||
|
"Added strategy \"Zone migration\" and it's goal \"Hardware maintenance\". "
|
||||||
|
"The strategy migrates many instances and volumes efficiently with minimum "
|
||||||
|
"downtime automatically."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Added strategy to identify and migrate a Noisy Neighbor - a low priority VM "
|
||||||
|
"that negatively affects peformance of a high priority VM by over utilizing "
|
||||||
|
"Last Level Cache."
|
||||||
|
msgstr ""
|
||||||
|
"Added strategy to identify and migrate a Noisy Neighbour - a low priority VM "
|
||||||
|
"that negatively affects performance of a high priority VM by over utilising "
|
||||||
|
"Last Level Cache."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Added the functionality to filter out instances which have metadata field "
|
||||||
|
"'optimize' set to False. For now, this is only available for the "
|
||||||
|
"basic_consolidation strategy (if \"check_optimize_metadata\" configuration "
|
||||||
|
"option is enabled)."
|
||||||
|
msgstr ""
|
||||||
|
"Added the functionality to filter out instances which have metadata field "
|
||||||
|
"'optimize' set to False. For now, this is only available for the "
|
||||||
|
"basic_consolidation strategy (if \"check_optimize_metadata\" configuration "
|
||||||
|
"option is enabled)."
|
||||||
|
|
||||||
|
msgid "Added using of JSONSchema instead of voluptuous to validate Actions."
|
||||||
|
msgstr "Added using of JSONSchema instead of voluptuous to validate Actions."
|
||||||
|
|
||||||
|
msgid "Added volume migrate action"
|
||||||
|
msgstr "Added volume migrate action"
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Adds audit scoper for storage data model, now watcher users can specify "
|
||||||
|
"audit scope for storage CDM in the same manner as compute scope."
|
||||||
|
msgstr ""
|
||||||
|
"Adds audit scoper for storage data model, now watcher users can specify "
|
||||||
|
"audit scope for storage CDM in the same manner as compute scope."
|
||||||
|
|
||||||
|
msgid "Adds baremetal data model in Watcher"
|
||||||
|
msgstr "Adds baremetal data model in Watcher"
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Allow decision engine to pass strategy parameters, like optimization "
|
||||||
|
"threshold, to selected strategy, also strategy to provide parameters info to "
|
||||||
|
"end user."
|
||||||
|
msgstr ""
|
||||||
|
"Allow decision engine to pass strategy parameters, like optimisation "
|
||||||
|
"threshold, to selected strategy, also strategy to provide parameters info to "
|
||||||
|
"end user."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Audits have 'name' field now, that is more friendly to end users. Audit's "
|
||||||
|
"name can't exceed 63 characters."
|
||||||
|
msgstr ""
|
||||||
|
"Audits have 'name' field now, that is more friendly to end users. Audit's "
|
||||||
|
"name can't exceed 63 characters."
|
||||||
|
|
||||||
|
msgid "Centralize all configuration options for Watcher."
|
||||||
|
msgstr "Centralise all configuration options for Watcher."
|
||||||
|
|
||||||
msgid "Contents:"
|
msgid "Contents:"
|
||||||
msgstr "Contents:"
|
msgstr "Contents:"
|
||||||
|
|
||||||
#, fuzzy
|
msgid ""
|
||||||
|
"Copy all audit templates parameters into audit instead of having a reference "
|
||||||
|
"to the audit template."
|
||||||
|
msgstr ""
|
||||||
|
"Copy all audit templates parameters into audit instead of having a reference "
|
||||||
|
"to the audit template."
|
||||||
|
|
||||||
msgid "Current Series Release Notes"
|
msgid "Current Series Release Notes"
|
||||||
msgstr "Current Series Release Notes"
|
msgstr "Current Series Release Notes"
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Each CDM collector can have its own CDM scoper now. This changed Scope JSON "
|
||||||
|
"schema definition for the audit template POST data. Please see audit "
|
||||||
|
"template create help message in python-watcherclient."
|
||||||
|
msgstr ""
|
||||||
|
"Each CDM collector can have its own CDM scoper now. This changed Scope JSON "
|
||||||
|
"schema definition for the audit template POST data. Please see audit "
|
||||||
|
"template create help message in python-watcherclient."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Enhancement of vm_workload_consolidation strategy by using 'memory.resident' "
|
||||||
|
"metric in place of 'memory.usage', as memory.usage shows the memory usage "
|
||||||
|
"inside guest-os and memory.resident represents volume of RAM used by "
|
||||||
|
"instance on host machine."
|
||||||
|
msgstr ""
|
||||||
|
"Enhancement of vm_workload_consolidation strategy by using 'memory.resident' "
|
||||||
|
"metric in place of 'memory.usage', as memory.usage shows the memory usage "
|
||||||
|
"inside guest-os and memory.resident represents volume of RAM used by "
|
||||||
|
"instance on host machine."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Existing workload_balance strategy based on the VM workloads of CPU. This "
|
||||||
|
"feature improves the strategy. By the input parameter \"metrics\", it makes "
|
||||||
|
"decision to migrate a VM base on CPU or memory utilization."
|
||||||
|
msgstr ""
|
||||||
|
"Existing workload_balance strategy based on the VM workloads of CPU. This "
|
||||||
|
"feature improves the strategy. By the input parameter \"metrics\", it makes "
|
||||||
|
"decision to migrate a VM base on CPU or memory utilisation."
|
||||||
|
|
||||||
|
msgid "New Features"
|
||||||
|
msgstr "New Features"
|
||||||
|
|
||||||
|
msgid "Newton Series Release Notes"
|
||||||
|
msgstr "Newton Series Release Notes"
|
||||||
|
|
||||||
|
msgid "Ocata Series Release Notes"
|
||||||
|
msgstr "Ocata Series Release Notes"
|
||||||
|
|
||||||
|
msgid "Pike Series Release Notes"
|
||||||
|
msgstr "Pike Series Release Notes"
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Provide a notification mechanism into Watcher that supports versioning. "
|
||||||
|
"Whenever a Watcher object is created, updated or deleted, a versioned "
|
||||||
|
"notification will, if it's relevant, be automatically sent to notify in "
|
||||||
|
"order to allow an event-driven style of architecture within Watcher. "
|
||||||
|
"Moreover, it will also give other services and/or 3rd party softwares (e.g. "
|
||||||
|
"monitoring solutions or rules engines) the ability to react to such events."
|
||||||
|
msgstr ""
|
||||||
|
"Provide a notification mechanism into Watcher that supports versioning. "
|
||||||
|
"Whenever a Watcher object is created, updated or deleted, a versioned "
|
||||||
|
"notification will, if it's relevant, be automatically sent to notify in "
|
||||||
|
"order to allow an event-driven style of architecture within Watcher. "
|
||||||
|
"Moreover, it will also give other services and/or 3rd party software (e.g. "
|
||||||
|
"monitoring solutions or rules engines) the ability to react to such events."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Provides a generic way to define the scope of an audit. The set of audited "
|
||||||
|
"resources will be called \"Audit scope\" and will be defined in each audit "
|
||||||
|
"template (which contains the audit settings)."
|
||||||
|
msgstr ""
|
||||||
|
"Provides a generic way to define the scope of an audit. The set of audited "
|
||||||
|
"resources will be called \"Audit scope\" and will be defined in each audit "
|
||||||
|
"template (which contains the audit settings)."
|
||||||
|
|
||||||
|
msgid "Queens Series Release Notes"
|
||||||
|
msgstr "Queens Series Release Notes"
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"The graph model describes how VMs are associated to compute hosts. This "
|
||||||
|
"allows for seeing relationships upfront between the entities and hence can "
|
||||||
|
"be used to identify hot/cold spots in the data center and influence a "
|
||||||
|
"strategy decision."
|
||||||
|
msgstr ""
|
||||||
|
"The graph model describes how VMs are associated to compute hosts. This "
|
||||||
|
"allows for seeing relationships upfront between the entities and hence can "
|
||||||
|
"be used to identify hot/cold spots in the data centre and influence a "
|
||||||
|
"strategy decision."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"There is new ability to create Watcher continuous audits with cron interval. "
|
||||||
|
"It means you may use, for example, optional argument '--interval \"\\*/5 \\* "
|
||||||
|
"\\* \\* \\*\"' to launch audit every 5 minutes. These jobs are executed on a "
|
||||||
|
"best effort basis and therefore, we recommend you to use a minimal cron "
|
||||||
|
"interval of at least one minute."
|
||||||
|
msgstr ""
|
||||||
|
"There is new ability to create Watcher continuous audits with cron interval. "
|
||||||
|
"It means you may use, for example, optional argument '--interval \"\\*/5 \\* "
|
||||||
|
"\\* \\* \\*\"' to launch audit every 5 minutes. These jobs are executed on a "
|
||||||
|
"best effort basis and therefore, we recommend you to use a minimal cron "
|
||||||
|
"interval of at least one minute."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Watcher can continuously optimize the OpenStack cloud for a specific "
|
||||||
|
"strategy or goal by triggering an audit periodically which generates an "
|
||||||
|
"action plan and run it automatically."
|
||||||
|
msgstr ""
|
||||||
|
"Watcher can continuously optimise the OpenStack cloud for a specific "
|
||||||
|
"strategy or goal by triggering an audit periodically which generates an "
|
||||||
|
"action plan and run it automatically."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Watcher can now run specific actions in parallel improving the performances "
|
||||||
|
"dramatically when executing an action plan."
|
||||||
|
msgstr ""
|
||||||
|
"Watcher can now run specific actions in parallel improving the performance "
|
||||||
|
"dramatically when executing an action plan."
|
||||||
|
|
||||||
|
msgid "Watcher database can now be upgraded thanks to Alembic."
|
||||||
|
msgstr "Watcher database can now be upgraded thanks to Alembic."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Watcher got an ability to calculate multiple global efficacy indicators "
|
||||||
|
"during audit's execution. Now global efficacy can be calculated for many "
|
||||||
|
"resource types (like volumes, instances, network) if strategy supports "
|
||||||
|
"efficacy indicators."
|
||||||
|
msgstr ""
|
||||||
|
"Watcher got an ability to calculate multiple global efficacy indicators "
|
||||||
|
"during audit's execution. Now global efficacy can be calculated for many "
|
||||||
|
"resource types (like volumes, instances, network) if strategy supports "
|
||||||
|
"efficacy indicators."
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Watcher supports multiple metrics backend and relies on Ceilometer and "
|
||||||
|
"Monasca."
|
||||||
|
msgstr ""
|
||||||
|
"Watcher supports multiple metrics backend and relies on Ceilometer and "
|
||||||
|
"Monasca."
|
||||||
|
|
||||||
|
msgid "Welcome to watcher's Release Notes documentation!"
|
||||||
|
msgstr "Welcome to watcher's Release Notes documentation!"
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"all Watcher objects have been refactored to support OVO (oslo."
|
||||||
|
"versionedobjects) which was a prerequisite step in order to implement "
|
||||||
|
"versioned notifications."
|
||||||
|
msgstr ""
|
||||||
|
"all Watcher objects have been refactored to support OVO (oslo."
|
||||||
|
"versionedobjects) which was a prerequisite step in order to implement "
|
||||||
|
"versioned notifications."
|
||||||
|
|||||||
@@ -1,33 +0,0 @@
|
|||||||
# Gérald LONLAS <g.lonlas@gmail.com>, 2016. #zanata
|
|
||||||
msgid ""
|
|
||||||
msgstr ""
|
|
||||||
"Project-Id-Version: watcher 1.0.1.dev51\n"
|
|
||||||
"Report-Msgid-Bugs-To: \n"
|
|
||||||
"POT-Creation-Date: 2017-03-21 11:57+0000\n"
|
|
||||||
"MIME-Version: 1.0\n"
|
|
||||||
"Content-Type: text/plain; charset=UTF-8\n"
|
|
||||||
"Content-Transfer-Encoding: 8bit\n"
|
|
||||||
"PO-Revision-Date: 2016-10-22 06:44+0000\n"
|
|
||||||
"Last-Translator: Gérald LONLAS <g.lonlas@gmail.com>\n"
|
|
||||||
"Language-Team: French\n"
|
|
||||||
"Language: fr\n"
|
|
||||||
"X-Generator: Zanata 3.9.6\n"
|
|
||||||
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
|
|
||||||
|
|
||||||
msgid "0.29.0"
|
|
||||||
msgstr "0.29.0"
|
|
||||||
|
|
||||||
msgid "Contents:"
|
|
||||||
msgstr "Contenu :"
|
|
||||||
|
|
||||||
msgid "Current Series Release Notes"
|
|
||||||
msgstr "Note de la release actuelle"
|
|
||||||
|
|
||||||
msgid "New Features"
|
|
||||||
msgstr "Nouvelles fonctionnalités"
|
|
||||||
|
|
||||||
msgid "Newton Series Release Notes"
|
|
||||||
msgstr "Note de release pour Newton"
|
|
||||||
|
|
||||||
msgid "Welcome to watcher's Release Notes documentation!"
|
|
||||||
msgstr "Bienvenue dans la documentation de la note de Release de Watcher"
|
|
||||||
6
releasenotes/source/queens.rst
Normal file
6
releasenotes/source/queens.rst
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
===================================
|
||||||
|
Queens Series Release Notes
|
||||||
|
===================================
|
||||||
|
|
||||||
|
.. release-notes::
|
||||||
|
:branch: stable/queens
|
||||||
@@ -2,48 +2,48 @@
|
|||||||
# of appearance. Changing the order has an impact on the overall integration
|
# of appearance. Changing the order has an impact on the overall integration
|
||||||
# process, which may cause wedges in the gate later.
|
# process, which may cause wedges in the gate later.
|
||||||
|
|
||||||
apscheduler>=3.0.5 # MIT License
|
apscheduler>=3.5.1 # MIT License
|
||||||
enum34>=1.0.4;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
|
enum34>=1.1.6;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
|
||||||
jsonpatch!=1.20,>=1.16 # BSD
|
jsonpatch>=1.21 # BSD
|
||||||
keystoneauth1>=3.3.0 # Apache-2.0
|
keystoneauth1>=3.4.0 # Apache-2.0
|
||||||
jsonschema<3.0.0,>=2.6.0 # MIT
|
jsonschema<3.0.0,>=2.6.0 # MIT
|
||||||
keystonemiddleware>=4.17.0 # Apache-2.0
|
keystonemiddleware>=4.21.0 # Apache-2.0
|
||||||
lxml!=3.7.0,>=3.4.1 # BSD
|
lxml>=4.1.1 # BSD
|
||||||
croniter>=0.3.4 # MIT License
|
croniter>=0.3.20 # MIT License
|
||||||
oslo.concurrency>=3.20.0 # Apache-2.0
|
oslo.concurrency>=3.26.0 # Apache-2.0
|
||||||
oslo.cache>=1.26.0 # Apache-2.0
|
oslo.cache>=1.29.0 # Apache-2.0
|
||||||
oslo.config>=5.1.0 # Apache-2.0
|
oslo.config>=5.2.0 # Apache-2.0
|
||||||
oslo.context>=2.19.2 # Apache-2.0
|
oslo.context>=2.20.0 # Apache-2.0
|
||||||
oslo.db>=4.27.0 # Apache-2.0
|
oslo.db>=4.35.0 # Apache-2.0
|
||||||
oslo.i18n>=3.15.3 # Apache-2.0
|
oslo.i18n>=3.20.0 # Apache-2.0
|
||||||
oslo.log>=3.30.0 # Apache-2.0
|
oslo.log>=3.37.0 # Apache-2.0
|
||||||
oslo.messaging>=5.29.0 # Apache-2.0
|
oslo.messaging>=5.36.0 # Apache-2.0
|
||||||
oslo.policy>=1.23.0 # Apache-2.0
|
oslo.policy>=1.34.0 # Apache-2.0
|
||||||
oslo.reports>=1.18.0 # Apache-2.0
|
oslo.reports>=1.27.0 # Apache-2.0
|
||||||
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
|
oslo.serialization>=2.25.0 # Apache-2.0
|
||||||
oslo.service>=1.24.0 # Apache-2.0
|
oslo.service>=1.30.0 # Apache-2.0
|
||||||
oslo.utils>=3.31.0 # Apache-2.0
|
oslo.utils>=3.36.0 # Apache-2.0
|
||||||
oslo.versionedobjects>=1.28.0 # Apache-2.0
|
oslo.versionedobjects>=1.32.0 # Apache-2.0
|
||||||
PasteDeploy>=1.5.0 # MIT
|
PasteDeploy>=1.5.2 # MIT
|
||||||
pbr!=2.1.0,>=2.0.0 # Apache-2.0
|
pbr>=3.1.1 # Apache-2.0
|
||||||
pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
|
pecan>=1.2.1 # BSD
|
||||||
PrettyTable<0.8,>=0.7.1 # BSD
|
PrettyTable<0.8,>=0.7.2 # BSD
|
||||||
voluptuous>=0.8.9 # BSD License
|
voluptuous>=0.11.1 # BSD License
|
||||||
gnocchiclient>=3.3.1 # Apache-2.0
|
gnocchiclient>=7.0.1 # Apache-2.0
|
||||||
python-ceilometerclient>=2.5.0 # Apache-2.0
|
python-ceilometerclient>=2.9.0 # Apache-2.0
|
||||||
python-cinderclient>=3.2.0 # Apache-2.0
|
python-cinderclient>=3.5.0 # Apache-2.0
|
||||||
python-glanceclient>=2.8.0 # Apache-2.0
|
python-glanceclient>=2.9.1 # Apache-2.0
|
||||||
python-keystoneclient>=3.8.0 # Apache-2.0
|
python-keystoneclient>=3.15.0 # Apache-2.0
|
||||||
python-monascaclient>=1.7.0 # Apache-2.0
|
python-monascaclient>=1.10.0 # Apache-2.0
|
||||||
python-neutronclient>=6.3.0 # Apache-2.0
|
python-neutronclient>=6.7.0 # Apache-2.0
|
||||||
python-novaclient>=9.1.0 # Apache-2.0
|
python-novaclient>=10.1.0 # Apache-2.0
|
||||||
python-openstackclient>=3.12.0 # Apache-2.0
|
python-openstackclient>=3.14.0 # Apache-2.0
|
||||||
python-ironicclient>=1.14.0 # Apache-2.0
|
python-ironicclient>=2.3.0 # Apache-2.0
|
||||||
six>=1.10.0 # MIT
|
six>=1.11.0 # MIT
|
||||||
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
|
SQLAlchemy>=1.2.5 # MIT
|
||||||
stevedore>=1.20.0 # Apache-2.0
|
stevedore>=1.28.0 # Apache-2.0
|
||||||
taskflow>=2.7.0 # Apache-2.0
|
taskflow>=3.1.0 # Apache-2.0
|
||||||
WebOb>=1.7.1 # MIT
|
WebOb>=1.7.4 # MIT
|
||||||
WSME>=0.8.0 # MIT
|
WSME>=0.9.2 # MIT
|
||||||
networkx<2.0,>=1.10 # BSD
|
networkx>=1.11 # BSD
|
||||||
|
|
||||||
|
|||||||
16
roles/add-hostnames-to-hosts/tasks/main.yaml
Normal file
16
roles/add-hostnames-to-hosts/tasks/main.yaml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
- name: Set up the list of hostnames and addresses
|
||||||
|
set_fact:
|
||||||
|
hostname_addresses: >
|
||||||
|
{% set hosts = {} -%}
|
||||||
|
{% for host, vars in hostvars.items() -%}
|
||||||
|
{% set _ = hosts.update({vars['ansible_hostname']: vars['nodepool']['private_ipv4']}) -%}
|
||||||
|
{% endfor -%}
|
||||||
|
{{- hosts -}}
|
||||||
|
- name: Add inventory hostnames to the hosts file
|
||||||
|
become: yes
|
||||||
|
lineinfile:
|
||||||
|
dest: /etc/hosts
|
||||||
|
state: present
|
||||||
|
insertafter: EOF
|
||||||
|
line: "{{ item.value }} {{ item.key }}"
|
||||||
|
with_dict: "{{ hostname_addresses }}"
|
||||||
12
setup.cfg
12
setup.cfg
@@ -32,6 +32,12 @@ setup-hooks =
|
|||||||
oslo.config.opts =
|
oslo.config.opts =
|
||||||
watcher = watcher.conf.opts:list_opts
|
watcher = watcher.conf.opts:list_opts
|
||||||
|
|
||||||
|
oslo.policy.policies =
|
||||||
|
watcher = watcher.common.policies:list_rules
|
||||||
|
|
||||||
|
oslo.policy.enforcer =
|
||||||
|
watcher = watcher.common.policy:get_enforcer
|
||||||
|
|
||||||
console_scripts =
|
console_scripts =
|
||||||
watcher-api = watcher.cmd.api:main
|
watcher-api = watcher.cmd.api:main
|
||||||
watcher-db-manage = watcher.cmd.dbmanage:main
|
watcher-db-manage = watcher.cmd.dbmanage:main
|
||||||
@@ -51,6 +57,8 @@ watcher_goals =
|
|||||||
airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization
|
airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization
|
||||||
noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization
|
noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization
|
||||||
saving_energy = watcher.decision_engine.goal.goals:SavingEnergy
|
saving_energy = watcher.decision_engine.goal.goals:SavingEnergy
|
||||||
|
hardware_maintenance = watcher.decision_engine.goal.goals:HardwareMaintenance
|
||||||
|
cluster_maintaining = watcher.decision_engine.goal.goals:ClusterMaintaining
|
||||||
|
|
||||||
watcher_scoring_engines =
|
watcher_scoring_engines =
|
||||||
dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer
|
dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer
|
||||||
@@ -71,6 +79,9 @@ watcher_strategies =
|
|||||||
workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance
|
workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance
|
||||||
uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow
|
uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow
|
||||||
noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor
|
noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor
|
||||||
|
storage_capacity_balance = watcher.decision_engine.strategy.strategies.storage_capacity_balance:StorageCapacityBalance
|
||||||
|
zone_migration = watcher.decision_engine.strategy.strategies.zone_migration:ZoneMigration
|
||||||
|
host_maintenance = watcher.decision_engine.strategy.strategies.host_maintenance:HostMaintenance
|
||||||
|
|
||||||
watcher_actions =
|
watcher_actions =
|
||||||
migrate = watcher.applier.actions.migration:Migrate
|
migrate = watcher.applier.actions.migration:Migrate
|
||||||
@@ -91,6 +102,7 @@ watcher_planners =
|
|||||||
watcher_cluster_data_model_collectors =
|
watcher_cluster_data_model_collectors =
|
||||||
compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector
|
compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector
|
||||||
storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector
|
storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector
|
||||||
|
baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector
|
||||||
|
|
||||||
|
|
||||||
[pbr]
|
[pbr]
|
||||||
|
|||||||
@@ -2,25 +2,27 @@
|
|||||||
# of appearance. Changing the order has an impact on the overall integration
|
# of appearance. Changing the order has an impact on the overall integration
|
||||||
# process, which may cause wedges in the gate later.
|
# process, which may cause wedges in the gate later.
|
||||||
|
|
||||||
coverage!=4.4,>=4.0 # Apache-2.0
|
coverage!=4.4 # Apache-2.0
|
||||||
doc8>=0.6.0 # Apache-2.0
|
doc8 # Apache-2.0
|
||||||
freezegun>=0.3.6 # Apache-2.0
|
freezegun # Apache-2.0
|
||||||
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
|
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
|
||||||
mock>=2.0.0 # BSD
|
mock # BSD
|
||||||
oslotest>=1.10.0 # Apache-2.0
|
oslotest # Apache-2.0
|
||||||
os-testr>=1.0.0 # Apache-2.0
|
os-testr # Apache-2.0
|
||||||
testrepository>=0.0.18 # Apache-2.0/BSD
|
testrepository # Apache-2.0/BSD
|
||||||
testscenarios>=0.4 # Apache-2.0/BSD
|
testscenarios # Apache-2.0/BSD
|
||||||
testtools>=2.2.0 # MIT
|
testtools # MIT
|
||||||
|
|
||||||
# Doc requirements
|
# Doc requirements
|
||||||
openstackdocstheme>=1.17.0 # Apache-2.0
|
openstackdocstheme # Apache-2.0
|
||||||
sphinx>=1.6.2 # BSD
|
sphinx!=1.6.6,!=1.6.7 # BSD
|
||||||
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
|
sphinxcontrib-pecanwsme # Apache-2.0
|
||||||
|
|
||||||
|
# api-ref
|
||||||
|
os-api-ref # Apache-2.0
|
||||||
|
|
||||||
# releasenotes
|
# releasenotes
|
||||||
reno>=2.5.0 # Apache-2.0
|
reno # Apache-2.0
|
||||||
|
|
||||||
# bandit
|
# bandit
|
||||||
bandit>=1.1.0 # Apache-2.0
|
bandit>=1.1.0 # Apache-2.0
|
||||||
|
|||||||
13
tox.ini
13
tox.ini
@@ -46,12 +46,16 @@ sitepackages = False
|
|||||||
commands =
|
commands =
|
||||||
oslo-config-generator --config-file etc/watcher/oslo-config-generator/watcher.conf
|
oslo-config-generator --config-file etc/watcher/oslo-config-generator/watcher.conf
|
||||||
|
|
||||||
|
[testenv:genpolicy]
|
||||||
|
commands =
|
||||||
|
oslopolicy-sample-generator --config-file etc/watcher/oslo-policy-generator/watcher-policy-generator.conf
|
||||||
|
|
||||||
[flake8]
|
[flake8]
|
||||||
filename = *.py,app.wsgi
|
filename = *.py,app.wsgi
|
||||||
show-source=True
|
show-source=True
|
||||||
ignore= H105,E123,E226,N320,H202
|
ignore= H105,E123,E226,N320,H202
|
||||||
builtins= _
|
builtins= _
|
||||||
enable-extensions = H106,H203
|
enable-extensions = H106,H203,H904
|
||||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes
|
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes
|
||||||
|
|
||||||
[testenv:wheel]
|
[testenv:wheel]
|
||||||
@@ -72,3 +76,10 @@ commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasen
|
|||||||
[testenv:bandit]
|
[testenv:bandit]
|
||||||
deps = -r{toxinidir}/test-requirements.txt
|
deps = -r{toxinidir}/test-requirements.txt
|
||||||
commands = bandit -r watcher -x tests -n5 -ll -s B320
|
commands = bandit -r watcher -x tests -n5 -ll -s B320
|
||||||
|
|
||||||
|
[testenv:lower-constraints]
|
||||||
|
basepython = python3
|
||||||
|
deps =
|
||||||
|
-c{toxinidir}/lower-constraints.txt
|
||||||
|
-r{toxinidir}/test-requirements.txt
|
||||||
|
-r{toxinidir}/requirements.txt
|
||||||
|
|||||||
@@ -205,7 +205,7 @@ class ActionCollection(collection.Collection):
|
|||||||
collection = ActionCollection()
|
collection = ActionCollection()
|
||||||
collection.actions = [Action.convert_with_links(p, expand)
|
collection.actions = [Action.convert_with_links(p, expand)
|
||||||
for p in actions]
|
for p in actions]
|
||||||
|
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||||
return collection
|
return collection
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@@ -232,6 +232,10 @@ class ActionsController(rest.RestController):
|
|||||||
sort_key, sort_dir, expand=False,
|
sort_key, sort_dir, expand=False,
|
||||||
resource_url=None,
|
resource_url=None,
|
||||||
action_plan_uuid=None, audit_uuid=None):
|
action_plan_uuid=None, audit_uuid=None):
|
||||||
|
additional_fields = ['action_plan_uuid']
|
||||||
|
|
||||||
|
api_utils.validate_sort_key(sort_key, list(objects.Action.fields) +
|
||||||
|
additional_fields)
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
@@ -247,7 +251,10 @@ class ActionsController(rest.RestController):
|
|||||||
if audit_uuid:
|
if audit_uuid:
|
||||||
filters['audit_uuid'] = audit_uuid
|
filters['audit_uuid'] = audit_uuid
|
||||||
|
|
||||||
sort_db_key = sort_key
|
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||||
|
additional_fields)
|
||||||
|
sort_db_key = (sort_key if not need_api_sort
|
||||||
|
else None)
|
||||||
|
|
||||||
actions = objects.Action.list(pecan.request.context,
|
actions = objects.Action.list(pecan.request.context,
|
||||||
limit,
|
limit,
|
||||||
@@ -255,11 +262,15 @@ class ActionsController(rest.RestController):
|
|||||||
sort_dir=sort_dir,
|
sort_dir=sort_dir,
|
||||||
filters=filters)
|
filters=filters)
|
||||||
|
|
||||||
return ActionCollection.convert_with_links(actions, limit,
|
actions_collection = ActionCollection.convert_with_links(
|
||||||
url=resource_url,
|
actions, limit, url=resource_url, expand=expand,
|
||||||
expand=expand,
|
sort_key=sort_key, sort_dir=sort_dir)
|
||||||
sort_key=sort_key,
|
|
||||||
sort_dir=sort_dir)
|
if need_api_sort:
|
||||||
|
api_utils.make_api_sort(actions_collection.actions,
|
||||||
|
sort_key, sort_dir)
|
||||||
|
|
||||||
|
return actions_collection
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(ActionCollection, types.uuid, int,
|
@wsme_pecan.wsexpose(ActionCollection, types.uuid, int,
|
||||||
wtypes.text, wtypes.text, types.uuid,
|
wtypes.text, wtypes.text, types.uuid,
|
||||||
@@ -341,7 +352,7 @@ class ActionsController(rest.RestController):
|
|||||||
|
|
||||||
@wsme_pecan.wsexpose(Action, body=Action, status_code=201)
|
@wsme_pecan.wsexpose(Action, body=Action, status_code=201)
|
||||||
def post(self, action):
|
def post(self, action):
|
||||||
"""Create a new action.
|
"""Create a new action(forbidden).
|
||||||
|
|
||||||
:param action: a action within the request body.
|
:param action: a action within the request body.
|
||||||
"""
|
"""
|
||||||
@@ -364,7 +375,7 @@ class ActionsController(rest.RestController):
|
|||||||
@wsme.validate(types.uuid, [ActionPatchType])
|
@wsme.validate(types.uuid, [ActionPatchType])
|
||||||
@wsme_pecan.wsexpose(Action, types.uuid, body=[ActionPatchType])
|
@wsme_pecan.wsexpose(Action, types.uuid, body=[ActionPatchType])
|
||||||
def patch(self, action_uuid, patch):
|
def patch(self, action_uuid, patch):
|
||||||
"""Update an existing action.
|
"""Update an existing action(forbidden).
|
||||||
|
|
||||||
:param action_uuid: UUID of a action.
|
:param action_uuid: UUID of a action.
|
||||||
:param patch: a json PATCH document to apply to this action.
|
:param patch: a json PATCH document to apply to this action.
|
||||||
@@ -401,7 +412,7 @@ class ActionsController(rest.RestController):
|
|||||||
|
|
||||||
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
|
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
|
||||||
def delete(self, action_uuid):
|
def delete(self, action_uuid):
|
||||||
"""Delete a action.
|
"""Delete a action(forbidden).
|
||||||
|
|
||||||
:param action_uuid: UUID of a action.
|
:param action_uuid: UUID of a action.
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -305,17 +305,6 @@ class ActionPlanCollection(collection.Collection):
|
|||||||
ap_collection = ActionPlanCollection()
|
ap_collection = ActionPlanCollection()
|
||||||
ap_collection.action_plans = [ActionPlan.convert_with_links(
|
ap_collection.action_plans = [ActionPlan.convert_with_links(
|
||||||
p, expand) for p in rpc_action_plans]
|
p, expand) for p in rpc_action_plans]
|
||||||
|
|
||||||
if 'sort_key' in kwargs:
|
|
||||||
reverse = False
|
|
||||||
if kwargs['sort_key'] == 'audit_uuid':
|
|
||||||
if 'sort_dir' in kwargs:
|
|
||||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
|
||||||
ap_collection.action_plans = sorted(
|
|
||||||
ap_collection.action_plans,
|
|
||||||
key=lambda action_plan: action_plan.audit_uuid,
|
|
||||||
reverse=reverse)
|
|
||||||
|
|
||||||
ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs)
|
ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs)
|
||||||
return ap_collection
|
return ap_collection
|
||||||
|
|
||||||
@@ -331,20 +320,25 @@ class ActionPlansController(rest.RestController):
|
|||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(ActionPlansController, self).__init__()
|
super(ActionPlansController, self).__init__()
|
||||||
|
self.applier_client = rpcapi.ApplierAPI()
|
||||||
|
|
||||||
from_actionsPlans = False
|
from_actionsPlans = False
|
||||||
"""A flag to indicate if the requests to this controller are coming
|
"""A flag to indicate if the requests to this controller are coming
|
||||||
from the top-level resource ActionPlan."""
|
from the top-level resource ActionPlan."""
|
||||||
|
|
||||||
_custom_actions = {
|
_custom_actions = {
|
||||||
'detail': ['GET'],
|
'start': ['POST'],
|
||||||
|
'detail': ['GET']
|
||||||
}
|
}
|
||||||
|
|
||||||
def _get_action_plans_collection(self, marker, limit,
|
def _get_action_plans_collection(self, marker, limit,
|
||||||
sort_key, sort_dir, expand=False,
|
sort_key, sort_dir, expand=False,
|
||||||
resource_url=None, audit_uuid=None,
|
resource_url=None, audit_uuid=None,
|
||||||
strategy=None):
|
strategy=None):
|
||||||
|
additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name']
|
||||||
|
|
||||||
|
api_utils.validate_sort_key(
|
||||||
|
sort_key, list(objects.ActionPlan.fields) + additional_fields)
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
@@ -363,10 +357,10 @@ class ActionPlansController(rest.RestController):
|
|||||||
else:
|
else:
|
||||||
filters['strategy_name'] = strategy
|
filters['strategy_name'] = strategy
|
||||||
|
|
||||||
if sort_key == 'audit_uuid':
|
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||||
sort_db_key = None
|
additional_fields)
|
||||||
else:
|
sort_db_key = (sort_key if not need_api_sort
|
||||||
sort_db_key = sort_key
|
else None)
|
||||||
|
|
||||||
action_plans = objects.ActionPlan.list(
|
action_plans = objects.ActionPlan.list(
|
||||||
pecan.request.context,
|
pecan.request.context,
|
||||||
@@ -374,12 +368,15 @@ class ActionPlansController(rest.RestController):
|
|||||||
marker_obj, sort_key=sort_db_key,
|
marker_obj, sort_key=sort_db_key,
|
||||||
sort_dir=sort_dir, filters=filters)
|
sort_dir=sort_dir, filters=filters)
|
||||||
|
|
||||||
return ActionPlanCollection.convert_with_links(
|
action_plans_collection = ActionPlanCollection.convert_with_links(
|
||||||
action_plans, limit,
|
action_plans, limit, url=resource_url, expand=expand,
|
||||||
url=resource_url,
|
sort_key=sort_key, sort_dir=sort_dir)
|
||||||
expand=expand,
|
|
||||||
sort_key=sort_key,
|
if need_api_sort:
|
||||||
sort_dir=sort_dir)
|
api_utils.make_api_sort(action_plans_collection.action_plans,
|
||||||
|
sort_key, sort_dir)
|
||||||
|
|
||||||
|
return action_plans_collection
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
|
@wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text,
|
||||||
wtypes.text, types.uuid, wtypes.text)
|
wtypes.text, types.uuid, wtypes.text)
|
||||||
@@ -460,6 +457,15 @@ class ActionPlansController(rest.RestController):
|
|||||||
policy.enforce(context, 'action_plan:delete', action_plan,
|
policy.enforce(context, 'action_plan:delete', action_plan,
|
||||||
action='action_plan:delete')
|
action='action_plan:delete')
|
||||||
|
|
||||||
|
allowed_states = (ap_objects.State.SUCCEEDED,
|
||||||
|
ap_objects.State.RECOMMENDED,
|
||||||
|
ap_objects.State.FAILED,
|
||||||
|
ap_objects.State.SUPERSEDED,
|
||||||
|
ap_objects.State.CANCELLED)
|
||||||
|
if action_plan.state not in allowed_states:
|
||||||
|
raise exception.DeleteError(
|
||||||
|
state=action_plan.state)
|
||||||
|
|
||||||
action_plan.soft_delete()
|
action_plan.soft_delete()
|
||||||
|
|
||||||
@wsme.validate(types.uuid, [ActionPlanPatchType])
|
@wsme.validate(types.uuid, [ActionPlanPatchType])
|
||||||
@@ -531,7 +537,7 @@ class ActionPlansController(rest.RestController):
|
|||||||
if action_plan_to_update[field] != patch_val:
|
if action_plan_to_update[field] != patch_val:
|
||||||
action_plan_to_update[field] = patch_val
|
action_plan_to_update[field] = patch_val
|
||||||
|
|
||||||
if (field == 'state'and
|
if (field == 'state' and
|
||||||
patch_val == objects.action_plan.State.PENDING):
|
patch_val == objects.action_plan.State.PENDING):
|
||||||
launch_action_plan = True
|
launch_action_plan = True
|
||||||
|
|
||||||
@@ -548,11 +554,39 @@ class ActionPlansController(rest.RestController):
|
|||||||
a.save()
|
a.save()
|
||||||
|
|
||||||
if launch_action_plan:
|
if launch_action_plan:
|
||||||
applier_client = rpcapi.ApplierAPI()
|
self.applier_client.launch_action_plan(pecan.request.context,
|
||||||
applier_client.launch_action_plan(pecan.request.context,
|
action_plan.uuid)
|
||||||
action_plan.uuid)
|
|
||||||
|
|
||||||
action_plan_to_update = objects.ActionPlan.get_by_uuid(
|
action_plan_to_update = objects.ActionPlan.get_by_uuid(
|
||||||
pecan.request.context,
|
pecan.request.context,
|
||||||
action_plan_uuid)
|
action_plan_uuid)
|
||||||
return ActionPlan.convert_with_links(action_plan_to_update)
|
return ActionPlan.convert_with_links(action_plan_to_update)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(ActionPlan, types.uuid)
|
||||||
|
def start(self, action_plan_uuid, **kwargs):
|
||||||
|
"""Start an action_plan
|
||||||
|
|
||||||
|
:param action_plan_uuid: UUID of an action_plan.
|
||||||
|
"""
|
||||||
|
|
||||||
|
action_plan_to_start = api_utils.get_resource(
|
||||||
|
'ActionPlan', action_plan_uuid, eager=True)
|
||||||
|
context = pecan.request.context
|
||||||
|
|
||||||
|
policy.enforce(context, 'action_plan:start', action_plan_to_start,
|
||||||
|
action='action_plan:start')
|
||||||
|
|
||||||
|
if action_plan_to_start['state'] != \
|
||||||
|
objects.action_plan.State.RECOMMENDED:
|
||||||
|
raise Exception.StartError(
|
||||||
|
state=action_plan_to_start.state)
|
||||||
|
|
||||||
|
action_plan_to_start['state'] = objects.action_plan.State.PENDING
|
||||||
|
action_plan_to_start.save()
|
||||||
|
|
||||||
|
self.applier_client.launch_action_plan(pecan.request.context,
|
||||||
|
action_plan_uuid)
|
||||||
|
action_plan_to_start = objects.ActionPlan.get_by_uuid(
|
||||||
|
pecan.request.context, action_plan_uuid)
|
||||||
|
|
||||||
|
return ActionPlan.convert_with_links(action_plan_to_start)
|
||||||
|
|||||||
@@ -37,6 +37,8 @@ import wsme
|
|||||||
from wsme import types as wtypes
|
from wsme import types as wtypes
|
||||||
import wsmeext.pecan as wsme_pecan
|
import wsmeext.pecan as wsme_pecan
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher._i18n import _
|
from watcher._i18n import _
|
||||||
from watcher.api.controllers import base
|
from watcher.api.controllers import base
|
||||||
from watcher.api.controllers import link
|
from watcher.api.controllers import link
|
||||||
@@ -49,6 +51,8 @@ from watcher.common import utils
|
|||||||
from watcher.decision_engine import rpcapi
|
from watcher.decision_engine import rpcapi
|
||||||
from watcher import objects
|
from watcher import objects
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class AuditPostType(wtypes.Base):
|
class AuditPostType(wtypes.Base):
|
||||||
|
|
||||||
@@ -129,6 +133,11 @@ class AuditPostType(wtypes.Base):
|
|||||||
goal = objects.Goal.get(context, self.goal)
|
goal = objects.Goal.get(context, self.goal)
|
||||||
self.name = "%s-%s" % (goal.name,
|
self.name = "%s-%s" % (goal.name,
|
||||||
datetime.datetime.utcnow().isoformat())
|
datetime.datetime.utcnow().isoformat())
|
||||||
|
# No more than 63 characters
|
||||||
|
if len(self.name) > 63:
|
||||||
|
LOG.warning("Audit: %s length exceeds 63 characters",
|
||||||
|
self.name)
|
||||||
|
self.name = self.name[0:63]
|
||||||
|
|
||||||
return Audit(
|
return Audit(
|
||||||
name=self.name,
|
name=self.name,
|
||||||
@@ -166,10 +175,10 @@ class AuditPatchType(types.JsonPatchType):
|
|||||||
|
|
||||||
|
|
||||||
class Audit(base.APIBase):
|
class Audit(base.APIBase):
|
||||||
"""API representation of a audit.
|
"""API representation of an audit.
|
||||||
|
|
||||||
This class enforces type checking and value constraints, and converts
|
This class enforces type checking and value constraints, and converts
|
||||||
between the internal object model and the API representation of a audit.
|
between the internal object model and the API representation of an audit.
|
||||||
"""
|
"""
|
||||||
_goal_uuid = None
|
_goal_uuid = None
|
||||||
_goal_name = None
|
_goal_name = None
|
||||||
@@ -264,19 +273,19 @@ class Audit(base.APIBase):
|
|||||||
|
|
||||||
goal_uuid = wsme.wsproperty(
|
goal_uuid = wsme.wsproperty(
|
||||||
wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True)
|
wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True)
|
||||||
"""Goal UUID the audit template refers to"""
|
"""Goal UUID the audit refers to"""
|
||||||
|
|
||||||
goal_name = wsme.wsproperty(
|
goal_name = wsme.wsproperty(
|
||||||
wtypes.text, _get_goal_name, _set_goal_name, mandatory=False)
|
wtypes.text, _get_goal_name, _set_goal_name, mandatory=False)
|
||||||
"""The name of the goal this audit template refers to"""
|
"""The name of the goal this audit refers to"""
|
||||||
|
|
||||||
strategy_uuid = wsme.wsproperty(
|
strategy_uuid = wsme.wsproperty(
|
||||||
wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False)
|
wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False)
|
||||||
"""Strategy UUID the audit template refers to"""
|
"""Strategy UUID the audit refers to"""
|
||||||
|
|
||||||
strategy_name = wsme.wsproperty(
|
strategy_name = wsme.wsproperty(
|
||||||
wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False)
|
wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False)
|
||||||
"""The name of the strategy this audit template refers to"""
|
"""The name of the strategy this audit refers to"""
|
||||||
|
|
||||||
parameters = {wtypes.text: types.jsontype}
|
parameters = {wtypes.text: types.jsontype}
|
||||||
"""The strategy parameters for this audit"""
|
"""The strategy parameters for this audit"""
|
||||||
@@ -380,17 +389,6 @@ class AuditCollection(collection.Collection):
|
|||||||
collection = AuditCollection()
|
collection = AuditCollection()
|
||||||
collection.audits = [Audit.convert_with_links(p, expand)
|
collection.audits = [Audit.convert_with_links(p, expand)
|
||||||
for p in rpc_audits]
|
for p in rpc_audits]
|
||||||
|
|
||||||
if 'sort_key' in kwargs:
|
|
||||||
reverse = False
|
|
||||||
if kwargs['sort_key'] == 'goal_uuid':
|
|
||||||
if 'sort_dir' in kwargs:
|
|
||||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
|
||||||
collection.audits = sorted(
|
|
||||||
collection.audits,
|
|
||||||
key=lambda audit: audit.goal_uuid,
|
|
||||||
reverse=reverse)
|
|
||||||
|
|
||||||
collection.next = collection.get_next(limit, url=url, **kwargs)
|
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||||
return collection
|
return collection
|
||||||
|
|
||||||
@@ -405,6 +403,7 @@ class AuditsController(rest.RestController):
|
|||||||
"""REST controller for Audits."""
|
"""REST controller for Audits."""
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(AuditsController, self).__init__()
|
super(AuditsController, self).__init__()
|
||||||
|
self.dc_client = rpcapi.DecisionEngineAPI()
|
||||||
|
|
||||||
from_audits = False
|
from_audits = False
|
||||||
"""A flag to indicate if the requests to this controller are coming
|
"""A flag to indicate if the requests to this controller are coming
|
||||||
@@ -418,8 +417,14 @@ class AuditsController(rest.RestController):
|
|||||||
sort_key, sort_dir, expand=False,
|
sort_key, sort_dir, expand=False,
|
||||||
resource_url=None, goal=None,
|
resource_url=None, goal=None,
|
||||||
strategy=None):
|
strategy=None):
|
||||||
|
additional_fields = ["goal_uuid", "goal_name", "strategy_uuid",
|
||||||
|
"strategy_name"]
|
||||||
|
|
||||||
|
api_utils.validate_sort_key(
|
||||||
|
sort_key, list(objects.Audit.fields) + additional_fields)
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
marker_obj = None
|
marker_obj = None
|
||||||
if marker:
|
if marker:
|
||||||
marker_obj = objects.Audit.get_by_uuid(pecan.request.context,
|
marker_obj = objects.Audit.get_by_uuid(pecan.request.context,
|
||||||
@@ -440,23 +445,25 @@ class AuditsController(rest.RestController):
|
|||||||
# TODO(michaelgugino): add method to get goal by name.
|
# TODO(michaelgugino): add method to get goal by name.
|
||||||
filters['strategy_name'] = strategy
|
filters['strategy_name'] = strategy
|
||||||
|
|
||||||
if sort_key == 'goal_uuid':
|
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||||
sort_db_key = 'goal_id'
|
additional_fields)
|
||||||
elif sort_key == 'strategy_uuid':
|
sort_db_key = (sort_key if not need_api_sort
|
||||||
sort_db_key = 'strategy_id'
|
else None)
|
||||||
else:
|
|
||||||
sort_db_key = sort_key
|
|
||||||
|
|
||||||
audits = objects.Audit.list(pecan.request.context,
|
audits = objects.Audit.list(pecan.request.context,
|
||||||
limit,
|
limit,
|
||||||
marker_obj, sort_key=sort_db_key,
|
marker_obj, sort_key=sort_db_key,
|
||||||
sort_dir=sort_dir, filters=filters)
|
sort_dir=sort_dir, filters=filters)
|
||||||
|
|
||||||
return AuditCollection.convert_with_links(audits, limit,
|
audits_collection = AuditCollection.convert_with_links(
|
||||||
url=resource_url,
|
audits, limit, url=resource_url, expand=expand,
|
||||||
expand=expand,
|
sort_key=sort_key, sort_dir=sort_dir)
|
||||||
sort_key=sort_key,
|
|
||||||
sort_dir=sort_dir)
|
if need_api_sort:
|
||||||
|
api_utils.make_api_sort(audits_collection.audits, sort_key,
|
||||||
|
sort_dir)
|
||||||
|
|
||||||
|
return audits_collection
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text,
|
@wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text,
|
||||||
wtypes.text, wtypes.text, wtypes.text, int)
|
wtypes.text, wtypes.text, wtypes.text, int)
|
||||||
@@ -511,7 +518,7 @@ class AuditsController(rest.RestController):
|
|||||||
def get_one(self, audit):
|
def get_one(self, audit):
|
||||||
"""Retrieve information about the given audit.
|
"""Retrieve information about the given audit.
|
||||||
|
|
||||||
:param audit_uuid: UUID or name of an audit.
|
:param audit: UUID or name of an audit.
|
||||||
"""
|
"""
|
||||||
if self.from_audits:
|
if self.from_audits:
|
||||||
raise exception.OperationNotPermitted
|
raise exception.OperationNotPermitted
|
||||||
@@ -526,7 +533,7 @@ class AuditsController(rest.RestController):
|
|||||||
def post(self, audit_p):
|
def post(self, audit_p):
|
||||||
"""Create a new audit.
|
"""Create a new audit.
|
||||||
|
|
||||||
:param audit_p: a audit within the request body.
|
:param audit_p: an audit within the request body.
|
||||||
"""
|
"""
|
||||||
context = pecan.request.context
|
context = pecan.request.context
|
||||||
policy.enforce(context, 'audit:create',
|
policy.enforce(context, 'audit:create',
|
||||||
@@ -556,7 +563,7 @@ class AuditsController(rest.RestController):
|
|||||||
|
|
||||||
if no_schema and audit.parameters:
|
if no_schema and audit.parameters:
|
||||||
raise exception.Invalid(_('Specify parameters but no predefined '
|
raise exception.Invalid(_('Specify parameters but no predefined '
|
||||||
'strategy for audit template, or no '
|
'strategy for audit, or no '
|
||||||
'parameter spec in predefined strategy'))
|
'parameter spec in predefined strategy'))
|
||||||
|
|
||||||
audit_dict = audit.as_dict()
|
audit_dict = audit.as_dict()
|
||||||
@@ -569,8 +576,7 @@ class AuditsController(rest.RestController):
|
|||||||
|
|
||||||
# trigger decision-engine to run the audit
|
# trigger decision-engine to run the audit
|
||||||
if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value:
|
if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value:
|
||||||
dc_client = rpcapi.DecisionEngineAPI()
|
self.dc_client.trigger_audit(context, new_audit.uuid)
|
||||||
dc_client.trigger_audit(context, new_audit.uuid)
|
|
||||||
|
|
||||||
return Audit.convert_with_links(new_audit)
|
return Audit.convert_with_links(new_audit)
|
||||||
|
|
||||||
@@ -579,7 +585,7 @@ class AuditsController(rest.RestController):
|
|||||||
def patch(self, audit, patch):
|
def patch(self, audit, patch):
|
||||||
"""Update an existing audit.
|
"""Update an existing audit.
|
||||||
|
|
||||||
:param auditd: UUID or name of a audit.
|
:param audit: UUID or name of an audit.
|
||||||
:param patch: a json PATCH document to apply to this audit.
|
:param patch: a json PATCH document to apply to this audit.
|
||||||
"""
|
"""
|
||||||
if self.from_audits:
|
if self.from_audits:
|
||||||
@@ -633,7 +639,14 @@ class AuditsController(rest.RestController):
|
|||||||
context = pecan.request.context
|
context = pecan.request.context
|
||||||
audit_to_delete = api_utils.get_resource(
|
audit_to_delete = api_utils.get_resource(
|
||||||
'Audit', audit, eager=True)
|
'Audit', audit, eager=True)
|
||||||
policy.enforce(context, 'audit:update', audit_to_delete,
|
policy.enforce(context, 'audit:delete', audit_to_delete,
|
||||||
action='audit:update')
|
action='audit:delete')
|
||||||
|
|
||||||
|
initial_state = audit_to_delete.state
|
||||||
|
new_state = objects.audit.State.DELETED
|
||||||
|
if not objects.audit.AuditStateTransitionManager(
|
||||||
|
).check_transition(initial_state, new_state):
|
||||||
|
raise exception.DeleteError(
|
||||||
|
state=initial_state)
|
||||||
|
|
||||||
audit_to_delete.soft_delete()
|
audit_to_delete.soft_delete()
|
||||||
|
|||||||
@@ -474,9 +474,13 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
def _get_audit_templates_collection(self, filters, marker, limit,
|
def _get_audit_templates_collection(self, filters, marker, limit,
|
||||||
sort_key, sort_dir, expand=False,
|
sort_key, sort_dir, expand=False,
|
||||||
resource_url=None):
|
resource_url=None):
|
||||||
|
additional_fields = ["goal_uuid", "goal_name", "strategy_uuid",
|
||||||
|
"strategy_name"]
|
||||||
|
|
||||||
|
api_utils.validate_sort_key(
|
||||||
|
sort_key, list(objects.AuditTemplate.fields) + additional_fields)
|
||||||
api_utils.validate_search_filters(
|
api_utils.validate_search_filters(
|
||||||
filters, list(objects.audit_template.AuditTemplate.fields) +
|
filters, list(objects.AuditTemplate.fields) + additional_fields)
|
||||||
["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"])
|
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
@@ -486,19 +490,26 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
pecan.request.context,
|
pecan.request.context,
|
||||||
marker)
|
marker)
|
||||||
|
|
||||||
audit_templates = objects.AuditTemplate.list(
|
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||||
pecan.request.context,
|
additional_fields)
|
||||||
filters,
|
sort_db_key = (sort_key if not need_api_sort
|
||||||
limit,
|
else None)
|
||||||
marker_obj, sort_key=sort_key,
|
|
||||||
sort_dir=sort_dir)
|
|
||||||
|
|
||||||
return AuditTemplateCollection.convert_with_links(audit_templates,
|
audit_templates = objects.AuditTemplate.list(
|
||||||
limit,
|
pecan.request.context, filters, limit, marker_obj,
|
||||||
url=resource_url,
|
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||||
expand=expand,
|
|
||||||
sort_key=sort_key,
|
audit_templates_collection = \
|
||||||
sort_dir=sort_dir)
|
AuditTemplateCollection.convert_with_links(
|
||||||
|
audit_templates, limit, url=resource_url, expand=expand,
|
||||||
|
sort_key=sort_key, sort_dir=sort_dir)
|
||||||
|
|
||||||
|
if need_api_sort:
|
||||||
|
api_utils.make_api_sort(
|
||||||
|
audit_templates_collection.audit_templates, sort_key,
|
||||||
|
sort_dir)
|
||||||
|
|
||||||
|
return audit_templates_collection
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text,
|
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text,
|
||||||
types.uuid, int, wtypes.text, wtypes.text)
|
types.uuid, int, wtypes.text, wtypes.text)
|
||||||
@@ -677,8 +688,8 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
context = pecan.request.context
|
context = pecan.request.context
|
||||||
audit_template_to_delete = api_utils.get_resource('AuditTemplate',
|
audit_template_to_delete = api_utils.get_resource('AuditTemplate',
|
||||||
audit_template)
|
audit_template)
|
||||||
policy.enforce(context, 'audit_template:update',
|
policy.enforce(context, 'audit_template:delete',
|
||||||
audit_template_to_delete,
|
audit_template_to_delete,
|
||||||
action='audit_template:update')
|
action='audit_template:delete')
|
||||||
|
|
||||||
audit_template_to_delete.soft_delete()
|
audit_template_to_delete.soft_delete()
|
||||||
|
|||||||
@@ -130,17 +130,6 @@ class GoalCollection(collection.Collection):
|
|||||||
goal_collection = GoalCollection()
|
goal_collection = GoalCollection()
|
||||||
goal_collection.goals = [
|
goal_collection.goals = [
|
||||||
Goal.convert_with_links(g, expand) for g in goals]
|
Goal.convert_with_links(g, expand) for g in goals]
|
||||||
|
|
||||||
if 'sort_key' in kwargs:
|
|
||||||
reverse = False
|
|
||||||
if kwargs['sort_key'] == 'strategy':
|
|
||||||
if 'sort_dir' in kwargs:
|
|
||||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
|
||||||
goal_collection.goals = sorted(
|
|
||||||
goal_collection.goals,
|
|
||||||
key=lambda goal: goal.uuid,
|
|
||||||
reverse=reverse)
|
|
||||||
|
|
||||||
goal_collection.next = goal_collection.get_next(
|
goal_collection.next = goal_collection.get_next(
|
||||||
limit, url=url, **kwargs)
|
limit, url=url, **kwargs)
|
||||||
return goal_collection
|
return goal_collection
|
||||||
@@ -167,17 +156,19 @@ class GoalsController(rest.RestController):
|
|||||||
|
|
||||||
def _get_goals_collection(self, marker, limit, sort_key, sort_dir,
|
def _get_goals_collection(self, marker, limit, sort_key, sort_dir,
|
||||||
expand=False, resource_url=None):
|
expand=False, resource_url=None):
|
||||||
|
api_utils.validate_sort_key(
|
||||||
|
sort_key, list(objects.Goal.fields))
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
sort_db_key = (sort_key if sort_key in objects.Goal.fields
|
|
||||||
else None)
|
|
||||||
|
|
||||||
marker_obj = None
|
marker_obj = None
|
||||||
if marker:
|
if marker:
|
||||||
marker_obj = objects.Goal.get_by_uuid(
|
marker_obj = objects.Goal.get_by_uuid(
|
||||||
pecan.request.context, marker)
|
pecan.request.context, marker)
|
||||||
|
|
||||||
|
sort_db_key = (sort_key if sort_key in objects.Goal.fields
|
||||||
|
else None)
|
||||||
|
|
||||||
goals = objects.Goal.list(pecan.request.context, limit, marker_obj,
|
goals = objects.Goal.list(pecan.request.context, limit, marker_obj,
|
||||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||||
|
|
||||||
|
|||||||
@@ -123,17 +123,6 @@ class ScoringEngineCollection(collection.Collection):
|
|||||||
collection = ScoringEngineCollection()
|
collection = ScoringEngineCollection()
|
||||||
collection.scoring_engines = [ScoringEngine.convert_with_links(
|
collection.scoring_engines = [ScoringEngine.convert_with_links(
|
||||||
se, expand) for se in scoring_engines]
|
se, expand) for se in scoring_engines]
|
||||||
|
|
||||||
if 'sort_key' in kwargs:
|
|
||||||
reverse = False
|
|
||||||
if kwargs['sort_key'] == 'name':
|
|
||||||
if 'sort_dir' in kwargs:
|
|
||||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
|
||||||
collection.goals = sorted(
|
|
||||||
collection.scoring_engines,
|
|
||||||
key=lambda se: se.name,
|
|
||||||
reverse=reverse)
|
|
||||||
|
|
||||||
collection.next = collection.get_next(limit, url=url, **kwargs)
|
collection.next = collection.get_next(limit, url=url, **kwargs)
|
||||||
return collection
|
return collection
|
||||||
|
|
||||||
@@ -160,7 +149,8 @@ class ScoringEngineController(rest.RestController):
|
|||||||
def _get_scoring_engines_collection(self, marker, limit,
|
def _get_scoring_engines_collection(self, marker, limit,
|
||||||
sort_key, sort_dir, expand=False,
|
sort_key, sort_dir, expand=False,
|
||||||
resource_url=None):
|
resource_url=None):
|
||||||
|
api_utils.validate_sort_key(
|
||||||
|
sort_key, list(objects.ScoringEngine.fields))
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
@@ -171,7 +161,8 @@ class ScoringEngineController(rest.RestController):
|
|||||||
|
|
||||||
filters = {}
|
filters = {}
|
||||||
|
|
||||||
sort_db_key = sort_key
|
sort_db_key = (sort_key if sort_key in objects.ScoringEngine.fields
|
||||||
|
else None)
|
||||||
|
|
||||||
scoring_engines = objects.ScoringEngine.list(
|
scoring_engines = objects.ScoringEngine.list(
|
||||||
context=pecan.request.context,
|
context=pecan.request.context,
|
||||||
|
|||||||
@@ -154,17 +154,6 @@ class ServiceCollection(collection.Collection):
|
|||||||
service_collection = ServiceCollection()
|
service_collection = ServiceCollection()
|
||||||
service_collection.services = [
|
service_collection.services = [
|
||||||
Service.convert_with_links(g, expand) for g in services]
|
Service.convert_with_links(g, expand) for g in services]
|
||||||
|
|
||||||
if 'sort_key' in kwargs:
|
|
||||||
reverse = False
|
|
||||||
if kwargs['sort_key'] == 'service':
|
|
||||||
if 'sort_dir' in kwargs:
|
|
||||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
|
||||||
service_collection.services = sorted(
|
|
||||||
service_collection.services,
|
|
||||||
key=lambda service: service.id,
|
|
||||||
reverse=reverse)
|
|
||||||
|
|
||||||
service_collection.next = service_collection.get_next(
|
service_collection.next = service_collection.get_next(
|
||||||
limit, url=url, marker_field='id', **kwargs)
|
limit, url=url, marker_field='id', **kwargs)
|
||||||
return service_collection
|
return service_collection
|
||||||
@@ -191,17 +180,19 @@ class ServicesController(rest.RestController):
|
|||||||
|
|
||||||
def _get_services_collection(self, marker, limit, sort_key, sort_dir,
|
def _get_services_collection(self, marker, limit, sort_key, sort_dir,
|
||||||
expand=False, resource_url=None):
|
expand=False, resource_url=None):
|
||||||
|
api_utils.validate_sort_key(
|
||||||
|
sort_key, list(objects.Service.fields))
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
sort_db_key = (sort_key if sort_key in objects.Service.fields
|
|
||||||
else None)
|
|
||||||
|
|
||||||
marker_obj = None
|
marker_obj = None
|
||||||
if marker:
|
if marker:
|
||||||
marker_obj = objects.Service.get(
|
marker_obj = objects.Service.get(
|
||||||
pecan.request.context, marker)
|
pecan.request.context, marker)
|
||||||
|
|
||||||
|
sort_db_key = (sort_key if sort_key in objects.Service.fields
|
||||||
|
else None)
|
||||||
|
|
||||||
services = objects.Service.list(
|
services = objects.Service.list(
|
||||||
pecan.request.context, limit, marker_obj,
|
pecan.request.context, limit, marker_obj,
|
||||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||||
|
|||||||
@@ -41,6 +41,7 @@ from watcher.api.controllers.v1 import utils as api_utils
|
|||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import policy
|
from watcher.common import policy
|
||||||
from watcher.common import utils as common_utils
|
from watcher.common import utils as common_utils
|
||||||
|
from watcher.decision_engine import rpcapi
|
||||||
from watcher import objects
|
from watcher import objects
|
||||||
|
|
||||||
|
|
||||||
@@ -172,17 +173,6 @@ class StrategyCollection(collection.Collection):
|
|||||||
strategy_collection = StrategyCollection()
|
strategy_collection = StrategyCollection()
|
||||||
strategy_collection.strategies = [
|
strategy_collection.strategies = [
|
||||||
Strategy.convert_with_links(g, expand) for g in strategies]
|
Strategy.convert_with_links(g, expand) for g in strategies]
|
||||||
|
|
||||||
if 'sort_key' in kwargs:
|
|
||||||
reverse = False
|
|
||||||
if kwargs['sort_key'] == 'strategy':
|
|
||||||
if 'sort_dir' in kwargs:
|
|
||||||
reverse = True if kwargs['sort_dir'] == 'desc' else False
|
|
||||||
strategy_collection.strategies = sorted(
|
|
||||||
strategy_collection.strategies,
|
|
||||||
key=lambda strategy: strategy.uuid,
|
|
||||||
reverse=reverse)
|
|
||||||
|
|
||||||
strategy_collection.next = strategy_collection.get_next(
|
strategy_collection.next = strategy_collection.get_next(
|
||||||
limit, url=url, **kwargs)
|
limit, url=url, **kwargs)
|
||||||
return strategy_collection
|
return strategy_collection
|
||||||
@@ -205,32 +195,44 @@ class StrategiesController(rest.RestController):
|
|||||||
|
|
||||||
_custom_actions = {
|
_custom_actions = {
|
||||||
'detail': ['GET'],
|
'detail': ['GET'],
|
||||||
|
'state': ['GET'],
|
||||||
}
|
}
|
||||||
|
|
||||||
def _get_strategies_collection(self, filters, marker, limit, sort_key,
|
def _get_strategies_collection(self, filters, marker, limit, sort_key,
|
||||||
sort_dir, expand=False, resource_url=None):
|
sort_dir, expand=False, resource_url=None):
|
||||||
|
additional_fields = ["goal_uuid", "goal_name"]
|
||||||
|
|
||||||
|
api_utils.validate_sort_key(
|
||||||
|
sort_key, list(objects.Strategy.fields) + additional_fields)
|
||||||
api_utils.validate_search_filters(
|
api_utils.validate_search_filters(
|
||||||
filters, list(objects.strategy.Strategy.fields) +
|
filters, list(objects.Strategy.fields) + additional_fields)
|
||||||
["goal_uuid", "goal_name"])
|
|
||||||
limit = api_utils.validate_limit(limit)
|
limit = api_utils.validate_limit(limit)
|
||||||
api_utils.validate_sort_dir(sort_dir)
|
api_utils.validate_sort_dir(sort_dir)
|
||||||
|
|
||||||
sort_db_key = (sort_key if sort_key in objects.Strategy.fields
|
|
||||||
else None)
|
|
||||||
|
|
||||||
marker_obj = None
|
marker_obj = None
|
||||||
if marker:
|
if marker:
|
||||||
marker_obj = objects.Strategy.get_by_uuid(
|
marker_obj = objects.Strategy.get_by_uuid(
|
||||||
pecan.request.context, marker)
|
pecan.request.context, marker)
|
||||||
|
|
||||||
|
need_api_sort = api_utils.check_need_api_sort(sort_key,
|
||||||
|
additional_fields)
|
||||||
|
sort_db_key = (sort_key if not need_api_sort
|
||||||
|
else None)
|
||||||
|
|
||||||
strategies = objects.Strategy.list(
|
strategies = objects.Strategy.list(
|
||||||
pecan.request.context, limit, marker_obj, filters=filters,
|
pecan.request.context, limit, marker_obj, filters=filters,
|
||||||
sort_key=sort_db_key, sort_dir=sort_dir)
|
sort_key=sort_db_key, sort_dir=sort_dir)
|
||||||
|
|
||||||
return StrategyCollection.convert_with_links(
|
strategies_collection = StrategyCollection.convert_with_links(
|
||||||
strategies, limit, url=resource_url, expand=expand,
|
strategies, limit, url=resource_url, expand=expand,
|
||||||
sort_key=sort_key, sort_dir=sort_dir)
|
sort_key=sort_key, sort_dir=sort_dir)
|
||||||
|
|
||||||
|
if need_api_sort:
|
||||||
|
api_utils.make_api_sort(strategies_collection.strategies,
|
||||||
|
sort_key, sort_dir)
|
||||||
|
|
||||||
|
return strategies_collection
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text,
|
@wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text,
|
||||||
int, wtypes.text, wtypes.text)
|
int, wtypes.text, wtypes.text)
|
||||||
def get_all(self, goal=None, marker=None, limit=None,
|
def get_all(self, goal=None, marker=None, limit=None,
|
||||||
@@ -288,6 +290,26 @@ class StrategiesController(rest.RestController):
|
|||||||
return self._get_strategies_collection(
|
return self._get_strategies_collection(
|
||||||
filters, marker, limit, sort_key, sort_dir, expand, resource_url)
|
filters, marker, limit, sort_key, sort_dir, expand, resource_url)
|
||||||
|
|
||||||
|
@wsme_pecan.wsexpose(wtypes.text, wtypes.text)
|
||||||
|
def state(self, strategy):
|
||||||
|
"""Retrieve a inforamation about strategy requirements.
|
||||||
|
|
||||||
|
:param strategy: name of the strategy.
|
||||||
|
"""
|
||||||
|
context = pecan.request.context
|
||||||
|
policy.enforce(context, 'strategy:state', action='strategy:state')
|
||||||
|
parents = pecan.request.path.split('/')[:-1]
|
||||||
|
if parents[-2] != "strategies":
|
||||||
|
raise exception.HTTPNotFound
|
||||||
|
rpc_strategy = api_utils.get_resource('Strategy', strategy)
|
||||||
|
de_client = rpcapi.DecisionEngineAPI()
|
||||||
|
strategy_state = de_client.get_strategy_info(context,
|
||||||
|
rpc_strategy.name)
|
||||||
|
strategy_state.extend([{
|
||||||
|
'type': 'Name', 'state': rpc_strategy.name,
|
||||||
|
'mandatory': '', 'comment': ''}])
|
||||||
|
return strategy_state
|
||||||
|
|
||||||
@wsme_pecan.wsexpose(Strategy, wtypes.text)
|
@wsme_pecan.wsexpose(Strategy, wtypes.text)
|
||||||
def get_one(self, strategy):
|
def get_one(self, strategy):
|
||||||
"""Retrieve information about the given strategy.
|
"""Retrieve information about the given strategy.
|
||||||
|
|||||||
@@ -13,6 +13,8 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
from operator import attrgetter
|
||||||
|
|
||||||
import jsonpatch
|
import jsonpatch
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_utils import reflection
|
from oslo_utils import reflection
|
||||||
@@ -54,6 +56,13 @@ def validate_sort_dir(sort_dir):
|
|||||||
"'asc' or 'desc'") % sort_dir)
|
"'asc' or 'desc'") % sort_dir)
|
||||||
|
|
||||||
|
|
||||||
|
def validate_sort_key(sort_key, allowed_fields):
|
||||||
|
# Very lightweight validation for now
|
||||||
|
if sort_key not in allowed_fields:
|
||||||
|
raise wsme.exc.ClientSideError(
|
||||||
|
_("Invalid sort key: %s") % sort_key)
|
||||||
|
|
||||||
|
|
||||||
def validate_search_filters(filters, allowed_fields):
|
def validate_search_filters(filters, allowed_fields):
|
||||||
# Very lightweight validation for now
|
# Very lightweight validation for now
|
||||||
# todo: improve this (e.g. https://www.parse.com/docs/rest/guide/#queries)
|
# todo: improve this (e.g. https://www.parse.com/docs/rest/guide/#queries)
|
||||||
@@ -63,6 +72,19 @@ def validate_search_filters(filters, allowed_fields):
|
|||||||
_("Invalid filter: %s") % filter_name)
|
_("Invalid filter: %s") % filter_name)
|
||||||
|
|
||||||
|
|
||||||
|
def check_need_api_sort(sort_key, additional_fields):
|
||||||
|
return sort_key in additional_fields
|
||||||
|
|
||||||
|
|
||||||
|
def make_api_sort(sorting_list, sort_key, sort_dir):
|
||||||
|
# First sort by uuid field, than sort by sort_key
|
||||||
|
# sort() ensures stable sorting, so we could
|
||||||
|
# make lexicographical sort
|
||||||
|
reverse_direction = (sort_dir == 'desc')
|
||||||
|
sorting_list.sort(key=attrgetter('uuid'), reverse=reverse_direction)
|
||||||
|
sorting_list.sort(key=attrgetter(sort_key), reverse=reverse_direction)
|
||||||
|
|
||||||
|
|
||||||
def apply_jsonpatch(doc, patch):
|
def apply_jsonpatch(doc, patch):
|
||||||
for p in patch:
|
for p in patch:
|
||||||
if p['op'] == 'add' and p['path'].count('/') == 1:
|
if p['op'] == 'add' and p['path'].count('/') == 1:
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ class ContextHook(hooks.PecanHook):
|
|||||||
auth_url = headers.get('X-Auth-Url')
|
auth_url = headers.get('X-Auth-Url')
|
||||||
if auth_url is None:
|
if auth_url is None:
|
||||||
importutils.import_module('keystonemiddleware.auth_token')
|
importutils.import_module('keystonemiddleware.auth_token')
|
||||||
auth_url = cfg.CONF.keystone_authtoken.auth_uri
|
auth_url = cfg.CONF.keystone_authtoken.www_authenticate_uri
|
||||||
|
|
||||||
state.request.context = context.make_context(
|
state.request.context = context.make_context(
|
||||||
auth_token=auth_token,
|
auth_token=auth_token,
|
||||||
|
|||||||
@@ -50,6 +50,12 @@ class Migrate(base.BaseAction):
|
|||||||
source and the destination compute hostname (list of available compute
|
source and the destination compute hostname (list of available compute
|
||||||
hosts is returned by this command: ``nova service-list --binary
|
hosts is returned by this command: ``nova service-list --binary
|
||||||
nova-compute``).
|
nova-compute``).
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
Nova API version must be 2.56 or above if `destination_node` parameter
|
||||||
|
is given.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# input parameters constants
|
# input parameters constants
|
||||||
@@ -113,8 +119,10 @@ class Migrate(base.BaseAction):
|
|||||||
dest_hostname=destination)
|
dest_hostname=destination)
|
||||||
except nova_helper.nvexceptions.ClientException as e:
|
except nova_helper.nvexceptions.ClientException as e:
|
||||||
LOG.debug("Nova client exception occurred while live "
|
LOG.debug("Nova client exception occurred while live "
|
||||||
"migrating instance %s.Exception: %s" %
|
"migrating instance "
|
||||||
(self.instance_uuid, e))
|
"%(instance)s.Exception: %(exception)s",
|
||||||
|
{'instance': self.instance_uuid, 'exception': e})
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception(e)
|
LOG.exception(e)
|
||||||
LOG.critical("Unexpected error occurred. Migration failed for "
|
LOG.critical("Unexpected error occurred. Migration failed for "
|
||||||
|
|||||||
@@ -36,13 +36,16 @@ class VolumeMigrate(base.BaseAction):
|
|||||||
|
|
||||||
By using this action, you will be able to migrate cinder volume.
|
By using this action, you will be able to migrate cinder volume.
|
||||||
Migration type 'swap' can only be used for migrating attached volume.
|
Migration type 'swap' can only be used for migrating attached volume.
|
||||||
Migration type 'cold' can only be used for migrating detached volume.
|
Migration type 'migrate' can be used for migrating detached volume to
|
||||||
|
the pool of same volume type.
|
||||||
|
Migration type 'retype' can be used for changing volume type of
|
||||||
|
detached volume.
|
||||||
|
|
||||||
The action schema is::
|
The action schema is::
|
||||||
|
|
||||||
schema = Schema({
|
schema = Schema({
|
||||||
'resource_id': str, # should be a UUID
|
'resource_id': str, # should be a UUID
|
||||||
'migration_type': str, # choices -> "swap", "cold"
|
'migration_type': str, # choices -> "swap", "migrate","retype"
|
||||||
'destination_node': str,
|
'destination_node': str,
|
||||||
'destination_type': str,
|
'destination_type': str,
|
||||||
})
|
})
|
||||||
@@ -60,7 +63,8 @@ class VolumeMigrate(base.BaseAction):
|
|||||||
|
|
||||||
MIGRATION_TYPE = 'migration_type'
|
MIGRATION_TYPE = 'migration_type'
|
||||||
SWAP = 'swap'
|
SWAP = 'swap'
|
||||||
COLD = 'cold'
|
RETYPE = 'retype'
|
||||||
|
MIGRATE = 'migrate'
|
||||||
DESTINATION_NODE = "destination_node"
|
DESTINATION_NODE = "destination_node"
|
||||||
DESTINATION_TYPE = "destination_type"
|
DESTINATION_TYPE = "destination_type"
|
||||||
|
|
||||||
@@ -85,7 +89,7 @@ class VolumeMigrate(base.BaseAction):
|
|||||||
},
|
},
|
||||||
'migration_type': {
|
'migration_type': {
|
||||||
'type': 'string',
|
'type': 'string',
|
||||||
"enum": ["swap", "cold"]
|
"enum": ["swap", "retype", "migrate"]
|
||||||
},
|
},
|
||||||
'destination_node': {
|
'destination_node': {
|
||||||
"anyof": [
|
"anyof": [
|
||||||
@@ -127,20 +131,6 @@ class VolumeMigrate(base.BaseAction):
|
|||||||
def destination_type(self):
|
def destination_type(self):
|
||||||
return self.input_parameters.get(self.DESTINATION_TYPE)
|
return self.input_parameters.get(self.DESTINATION_TYPE)
|
||||||
|
|
||||||
def _cold_migrate(self, volume, dest_node, dest_type):
|
|
||||||
if not self.cinder_util.can_cold(volume, dest_node):
|
|
||||||
raise exception.Invalid(
|
|
||||||
message=(_("Invalid state for cold migration")))
|
|
||||||
|
|
||||||
if dest_node:
|
|
||||||
return self.cinder_util.migrate(volume, dest_node)
|
|
||||||
elif dest_type:
|
|
||||||
return self.cinder_util.retype(volume, dest_type)
|
|
||||||
else:
|
|
||||||
raise exception.Invalid(
|
|
||||||
message=(_("destination host or destination type is "
|
|
||||||
"required when migration type is cold")))
|
|
||||||
|
|
||||||
def _can_swap(self, volume):
|
def _can_swap(self, volume):
|
||||||
"""Judge volume can be swapped"""
|
"""Judge volume can be swapped"""
|
||||||
|
|
||||||
@@ -212,12 +202,14 @@ class VolumeMigrate(base.BaseAction):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
volume = self.cinder_util.get_volume(volume_id)
|
volume = self.cinder_util.get_volume(volume_id)
|
||||||
if self.migration_type == self.COLD:
|
if self.migration_type == self.SWAP:
|
||||||
return self._cold_migrate(volume, dest_node, dest_type)
|
|
||||||
elif self.migration_type == self.SWAP:
|
|
||||||
if dest_node:
|
if dest_node:
|
||||||
LOG.warning("dest_node is ignored")
|
LOG.warning("dest_node is ignored")
|
||||||
return self._swap_volume(volume, dest_type)
|
return self._swap_volume(volume, dest_type)
|
||||||
|
elif self.migration_type == self.RETYPE:
|
||||||
|
return self.cinder_util.retype(volume, dest_type)
|
||||||
|
elif self.migration_type == self.MIGRATE:
|
||||||
|
return self.cinder_util.migrate(volume, dest_node)
|
||||||
else:
|
else:
|
||||||
raise exception.Invalid(
|
raise exception.Invalid(
|
||||||
message=(_("Migration of type '%(migration_type)s' is not "
|
message=(_("Migration of type '%(migration_type)s' is not "
|
||||||
|
|||||||
@@ -40,10 +40,10 @@ def main():
|
|||||||
|
|
||||||
if host == '127.0.0.1':
|
if host == '127.0.0.1':
|
||||||
LOG.info('serving on 127.0.0.1:%(port)s, '
|
LOG.info('serving on 127.0.0.1:%(port)s, '
|
||||||
'view at %(protocol)s://127.0.0.1:%(port)s' %
|
'view at %(protocol)s://127.0.0.1:%(port)s',
|
||||||
dict(protocol=protocol, port=port))
|
dict(protocol=protocol, port=port))
|
||||||
else:
|
else:
|
||||||
LOG.info('serving on %(protocol)s://%(host)s:%(port)s' %
|
LOG.info('serving on %(protocol)s://%(host)s:%(port)s',
|
||||||
dict(protocol=protocol, host=host, port=port))
|
dict(protocol=protocol, host=host, port=port))
|
||||||
|
|
||||||
api_schedule = scheduling.APISchedulingService()
|
api_schedule = scheduling.APISchedulingService()
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ import sys
|
|||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.common import service as service
|
from watcher.common import service
|
||||||
from watcher import conf
|
from watcher import conf
|
||||||
from watcher.decision_engine import sync
|
from watcher.decision_engine import sync
|
||||||
|
|
||||||
|
|||||||
@@ -70,16 +70,18 @@ class CinderHelper(object):
|
|||||||
def get_volume_type_list(self):
|
def get_volume_type_list(self):
|
||||||
return self.cinder.volume_types.list()
|
return self.cinder.volume_types.list()
|
||||||
|
|
||||||
|
def get_volume_snapshots_list(self):
|
||||||
|
return self.cinder.volume_snapshots.list(
|
||||||
|
search_opts={'all_tenants': True})
|
||||||
|
|
||||||
def get_volume_type_by_backendname(self, backendname):
|
def get_volume_type_by_backendname(self, backendname):
|
||||||
|
"""Return a list of volume type"""
|
||||||
volume_type_list = self.get_volume_type_list()
|
volume_type_list = self.get_volume_type_list()
|
||||||
|
|
||||||
volume_type = [volume_type for volume_type in volume_type_list
|
volume_type = [volume_type.name for volume_type in volume_type_list
|
||||||
if volume_type.extra_specs.get(
|
if volume_type.extra_specs.get(
|
||||||
'volume_backend_name') == backendname]
|
'volume_backend_name') == backendname]
|
||||||
if volume_type:
|
return volume_type
|
||||||
return volume_type[0].name
|
|
||||||
else:
|
|
||||||
return ""
|
|
||||||
|
|
||||||
def get_volume(self, volume):
|
def get_volume(self, volume):
|
||||||
|
|
||||||
@@ -111,23 +113,6 @@ class CinderHelper(object):
|
|||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def can_cold(self, volume, host=None):
|
|
||||||
"""Judge volume can be migrated"""
|
|
||||||
can_cold = False
|
|
||||||
status = self.get_volume(volume).status
|
|
||||||
snapshot = self._has_snapshot(volume)
|
|
||||||
|
|
||||||
same_host = False
|
|
||||||
if host and getattr(volume, 'os-vol-host-attr:host') == host:
|
|
||||||
same_host = True
|
|
||||||
|
|
||||||
if (status == 'available' and
|
|
||||||
snapshot is False and
|
|
||||||
same_host is False):
|
|
||||||
can_cold = True
|
|
||||||
|
|
||||||
return can_cold
|
|
||||||
|
|
||||||
def get_deleting_volume(self, volume):
|
def get_deleting_volume(self, volume):
|
||||||
volume = self.get_volume(volume)
|
volume = self.get_volume(volume)
|
||||||
all_volume = self.get_volume_list()
|
all_volume = self.get_volume_list()
|
||||||
@@ -154,13 +139,13 @@ class CinderHelper(object):
|
|||||||
volume = self.get_volume(volume.id)
|
volume = self.get_volume(volume.id)
|
||||||
time.sleep(retry_interval)
|
time.sleep(retry_interval)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
LOG.debug("retry count: %s" % retry)
|
LOG.debug("retry count: %s", retry)
|
||||||
LOG.debug("Waiting to complete deletion of volume %s" % volume.id)
|
LOG.debug("Waiting to complete deletion of volume %s", volume.id)
|
||||||
if self._can_get_volume(volume.id):
|
if self._can_get_volume(volume.id):
|
||||||
LOG.error("Volume deletion error: %s" % volume.id)
|
LOG.error("Volume deletion error: %s", volume.id)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
LOG.debug("Volume %s was deleted successfully." % volume.id)
|
LOG.debug("Volume %s was deleted successfully.", volume.id)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def check_migrated(self, volume, retry_interval=10):
|
def check_migrated(self, volume, retry_interval=10):
|
||||||
@@ -194,8 +179,7 @@ class CinderHelper(object):
|
|||||||
LOG.error(error_msg)
|
LOG.error(error_msg)
|
||||||
return False
|
return False
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Volume migration succeeded : "
|
"Volume migration succeeded : volume %s is now on host '%s'.", (
|
||||||
"volume %s is now on host '%s'." % (
|
|
||||||
volume.id, host_name))
|
volume.id, host_name))
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@@ -204,13 +188,13 @@ class CinderHelper(object):
|
|||||||
volume = self.get_volume(volume)
|
volume = self.get_volume(volume)
|
||||||
dest_backend = self.backendname_from_poolname(dest_node)
|
dest_backend = self.backendname_from_poolname(dest_node)
|
||||||
dest_type = self.get_volume_type_by_backendname(dest_backend)
|
dest_type = self.get_volume_type_by_backendname(dest_backend)
|
||||||
if volume.volume_type != dest_type:
|
if volume.volume_type not in dest_type:
|
||||||
raise exception.Invalid(
|
raise exception.Invalid(
|
||||||
message=(_("Volume type must be same for migrating")))
|
message=(_("Volume type must be same for migrating")))
|
||||||
|
|
||||||
source_node = getattr(volume, 'os-vol-host-attr:host')
|
source_node = getattr(volume, 'os-vol-host-attr:host')
|
||||||
LOG.debug("Volume %s found on host '%s'."
|
LOG.debug("Volume %s found on host '%s'.",
|
||||||
% (volume.id, source_node))
|
(volume.id, source_node))
|
||||||
|
|
||||||
self.cinder.volumes.migrate_volume(
|
self.cinder.volumes.migrate_volume(
|
||||||
volume, dest_node, False, True)
|
volume, dest_node, False, True)
|
||||||
@@ -226,8 +210,8 @@ class CinderHelper(object):
|
|||||||
|
|
||||||
source_node = getattr(volume, 'os-vol-host-attr:host')
|
source_node = getattr(volume, 'os-vol-host-attr:host')
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Volume %s found on host '%s'." % (
|
"Volume %s found on host '%s'.",
|
||||||
volume.id, source_node))
|
(volume.id, source_node))
|
||||||
|
|
||||||
self.cinder.volumes.retype(
|
self.cinder.volumes.retype(
|
||||||
volume, dest_type, "on-demand")
|
volume, dest_type, "on-demand")
|
||||||
@@ -249,14 +233,14 @@ class CinderHelper(object):
|
|||||||
LOG.debug('Waiting volume creation of {0}'.format(new_volume))
|
LOG.debug('Waiting volume creation of {0}'.format(new_volume))
|
||||||
time.sleep(retry_interval)
|
time.sleep(retry_interval)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
LOG.debug("retry count: %s" % retry)
|
LOG.debug("retry count: %s", retry)
|
||||||
|
|
||||||
if getattr(new_volume, 'status') != 'available':
|
if getattr(new_volume, 'status') != 'available':
|
||||||
error_msg = (_("Failed to create volume '%(volume)s. ") %
|
error_msg = (_("Failed to create volume '%(volume)s. ") %
|
||||||
{'volume': new_volume.id})
|
{'volume': new_volume.id})
|
||||||
raise Exception(error_msg)
|
raise Exception(error_msg)
|
||||||
|
|
||||||
LOG.debug("Volume %s was created successfully." % new_volume)
|
LOG.debug("Volume %s was created successfully.", new_volume)
|
||||||
return new_volume
|
return new_volume
|
||||||
|
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
|
|||||||
@@ -62,6 +62,7 @@ class RequestContext(context.RequestContext):
|
|||||||
# safely ignore this as we don't use it.
|
# safely ignore this as we don't use it.
|
||||||
kwargs.pop('user_identity', None)
|
kwargs.pop('user_identity', None)
|
||||||
kwargs.pop('global_request_id', None)
|
kwargs.pop('global_request_id', None)
|
||||||
|
kwargs.pop('project', None)
|
||||||
if kwargs:
|
if kwargs:
|
||||||
LOG.warning('Arguments dropped when creating context: %s',
|
LOG.warning('Arguments dropped when creating context: %s',
|
||||||
str(kwargs))
|
str(kwargs))
|
||||||
|
|||||||
@@ -305,7 +305,7 @@ class ActionFilterCombinationProhibited(Invalid):
|
|||||||
|
|
||||||
|
|
||||||
class UnsupportedActionType(UnsupportedError):
|
class UnsupportedActionType(UnsupportedError):
|
||||||
msg_fmt = _("Provided %(action_type) is not supported yet")
|
msg_fmt = _("Provided %(action_type)s is not supported yet")
|
||||||
|
|
||||||
|
|
||||||
class EfficacyIndicatorNotFound(ResourceNotFound):
|
class EfficacyIndicatorNotFound(ResourceNotFound):
|
||||||
@@ -332,6 +332,14 @@ class PatchError(Invalid):
|
|||||||
msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")
|
msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")
|
||||||
|
|
||||||
|
|
||||||
|
class DeleteError(Invalid):
|
||||||
|
msg_fmt = _("Couldn't delete when state is '%(state)s'.")
|
||||||
|
|
||||||
|
|
||||||
|
class StartError(Invalid):
|
||||||
|
msg_fmt = _("Couldn't start when state is '%(state)s'.")
|
||||||
|
|
||||||
|
|
||||||
# decision engine
|
# decision engine
|
||||||
|
|
||||||
class WorkflowExecutionException(WatcherException):
|
class WorkflowExecutionException(WatcherException):
|
||||||
@@ -362,6 +370,14 @@ class ClusterEmpty(WatcherException):
|
|||||||
msg_fmt = _("The list of compute node(s) in the cluster is empty")
|
msg_fmt = _("The list of compute node(s) in the cluster is empty")
|
||||||
|
|
||||||
|
|
||||||
|
class ComputeClusterEmpty(WatcherException):
|
||||||
|
msg_fmt = _("The list of compute node(s) in the cluster is empty")
|
||||||
|
|
||||||
|
|
||||||
|
class StorageClusterEmpty(WatcherException):
|
||||||
|
msg_fmt = _("The list of storage node(s) in the cluster is empty")
|
||||||
|
|
||||||
|
|
||||||
class MetricCollectorNotDefined(WatcherException):
|
class MetricCollectorNotDefined(WatcherException):
|
||||||
msg_fmt = _("The metrics resource collector is not defined")
|
msg_fmt = _("The metrics resource collector is not defined")
|
||||||
|
|
||||||
@@ -405,6 +421,10 @@ class UnsupportedDataSource(UnsupportedError):
|
|||||||
"by strategy %(strategy)s")
|
"by strategy %(strategy)s")
|
||||||
|
|
||||||
|
|
||||||
|
class DataSourceNotAvailable(WatcherException):
|
||||||
|
msg_fmt = _("Datasource %(datasource)s is not available.")
|
||||||
|
|
||||||
|
|
||||||
class NoSuchMetricForHost(WatcherException):
|
class NoSuchMetricForHost(WatcherException):
|
||||||
msg_fmt = _("No %(metric)s metric for %(host)s found.")
|
msg_fmt = _("No %(metric)s metric for %(host)s found.")
|
||||||
|
|
||||||
@@ -469,6 +489,14 @@ class VolumeNotFound(StorageResourceNotFound):
|
|||||||
msg_fmt = _("The volume '%(name)s' could not be found")
|
msg_fmt = _("The volume '%(name)s' could not be found")
|
||||||
|
|
||||||
|
|
||||||
|
class BaremetalResourceNotFound(WatcherException):
|
||||||
|
msg_fmt = _("The baremetal resource '%(name)s' could not be found")
|
||||||
|
|
||||||
|
|
||||||
|
class IronicNodeNotFound(BaremetalResourceNotFound):
|
||||||
|
msg_fmt = _("The ironic node %(uuid)s could not be found")
|
||||||
|
|
||||||
|
|
||||||
class LoadingError(WatcherException):
|
class LoadingError(WatcherException):
|
||||||
msg_fmt = _("Error loading plugin '%(name)s'")
|
msg_fmt = _("Error loading plugin '%(name)s'")
|
||||||
|
|
||||||
@@ -488,3 +516,7 @@ class NegativeLimitError(WatcherException):
|
|||||||
class NotificationPayloadError(WatcherException):
|
class NotificationPayloadError(WatcherException):
|
||||||
_msg_fmt = _("Payload not populated when trying to send notification "
|
_msg_fmt = _("Payload not populated when trying to send notification "
|
||||||
"\"%(class_name)s\"")
|
"\"%(class_name)s\"")
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidPoolAttributeValue(Invalid):
|
||||||
|
msg_fmt = _("The %(name)s pool %(attribute)s is not integer")
|
||||||
|
|||||||
49
watcher/common/ironic_helper.py
Normal file
49
watcher/common/ironic_helper.py
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# -*- encoding: utf-8 -*-
|
||||||
|
# Copyright (c) 2017 ZTE Corporation
|
||||||
|
#
|
||||||
|
# Authors:Yumeng Bao <bao.yumeng@zte.com.cn>
|
||||||
|
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
from watcher.common import clients
|
||||||
|
from watcher.common import exception
|
||||||
|
from watcher.common import utils
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class IronicHelper(object):
|
||||||
|
|
||||||
|
def __init__(self, osc=None):
|
||||||
|
""":param osc: an OpenStackClients instance"""
|
||||||
|
self.osc = osc if osc else clients.OpenStackClients()
|
||||||
|
self.ironic = self.osc.ironic()
|
||||||
|
|
||||||
|
def get_ironic_node_list(self):
|
||||||
|
return self.ironic.node.list()
|
||||||
|
|
||||||
|
def get_ironic_node_by_uuid(self, node_uuid):
|
||||||
|
"""Get ironic node by node UUID"""
|
||||||
|
try:
|
||||||
|
node = self.ironic.node.get(utils.Struct(uuid=node_uuid))
|
||||||
|
if not node:
|
||||||
|
raise exception.IronicNodeNotFound(uuid=node_uuid)
|
||||||
|
except Exception as exc:
|
||||||
|
LOG.exception(exc)
|
||||||
|
raise exception.IronicNodeNotFound(uuid=node_uuid)
|
||||||
|
# We need to pass an object with an 'uuid' attribute to make it work
|
||||||
|
return node
|
||||||
@@ -17,9 +17,9 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
import random
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
from novaclient import api_versions
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
import cinderclient.exceptions as ciexceptions
|
import cinderclient.exceptions as ciexceptions
|
||||||
@@ -29,9 +29,12 @@ import novaclient.exceptions as nvexceptions
|
|||||||
from watcher.common import clients
|
from watcher.common import clients
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import utils
|
from watcher.common import utils
|
||||||
|
from watcher import conf
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
CONF = conf.CONF
|
||||||
|
|
||||||
|
|
||||||
class NovaHelper(object):
|
class NovaHelper(object):
|
||||||
|
|
||||||
@@ -52,14 +55,21 @@ class NovaHelper(object):
|
|||||||
return self.nova.hypervisors.get(utils.Struct(id=node_id))
|
return self.nova.hypervisors.get(utils.Struct(id=node_id))
|
||||||
|
|
||||||
def get_compute_node_by_hostname(self, node_hostname):
|
def get_compute_node_by_hostname(self, node_hostname):
|
||||||
"""Get compute node by ID (*not* UUID)"""
|
"""Get compute node by hostname"""
|
||||||
# We need to pass an object with an 'id' attribute to make it work
|
|
||||||
try:
|
try:
|
||||||
compute_nodes = self.nova.hypervisors.search(node_hostname)
|
hypervisors = [hv for hv in self.get_compute_node_list()
|
||||||
if len(compute_nodes) != 1:
|
if hv.service['host'] == node_hostname]
|
||||||
|
if len(hypervisors) != 1:
|
||||||
|
# TODO(hidekazu)
|
||||||
|
# this may occur if VMware vCenter driver is used
|
||||||
raise exception.ComputeNodeNotFound(name=node_hostname)
|
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||||
|
else:
|
||||||
|
compute_nodes = self.nova.hypervisors.search(
|
||||||
|
hypervisors[0].hypervisor_hostname)
|
||||||
|
if len(compute_nodes) != 1:
|
||||||
|
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||||
|
|
||||||
return self.get_compute_node_by_id(compute_nodes[0].id)
|
return self.get_compute_node_by_id(compute_nodes[0].id)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
LOG.exception(exc)
|
LOG.exception(exc)
|
||||||
raise exception.ComputeNodeNotFound(name=node_hostname)
|
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||||
@@ -67,6 +77,9 @@ class NovaHelper(object):
|
|||||||
def get_instance_list(self):
|
def get_instance_list(self):
|
||||||
return self.nova.servers.list(search_opts={'all_tenants': True})
|
return self.nova.servers.list(search_opts={'all_tenants': True})
|
||||||
|
|
||||||
|
def get_flavor_list(self):
|
||||||
|
return self.nova.flavors.list(**{'is_public': None})
|
||||||
|
|
||||||
def get_service(self, service_id):
|
def get_service(self, service_id):
|
||||||
return self.nova.services.find(id=service_id)
|
return self.nova.services.find(id=service_id)
|
||||||
|
|
||||||
@@ -96,7 +109,7 @@ class NovaHelper(object):
|
|||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
LOG.debug("confirm resize failed for the "
|
LOG.debug("confirm resize failed for the "
|
||||||
"instance %s" % instance.id)
|
"instance %s", instance.id)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def wait_for_volume_status(self, volume, status, timeout=60,
|
def wait_for_volume_status(self, volume, status, timeout=60,
|
||||||
@@ -120,240 +133,68 @@ class NovaHelper(object):
|
|||||||
return volume.status == status
|
return volume.status == status
|
||||||
|
|
||||||
def watcher_non_live_migrate_instance(self, instance_id, dest_hostname,
|
def watcher_non_live_migrate_instance(self, instance_id, dest_hostname,
|
||||||
keep_original_image_name=True,
|
|
||||||
retry=120):
|
retry=120):
|
||||||
"""This method migrates a given instance
|
"""This method migrates a given instance
|
||||||
|
|
||||||
using an image of this instance and creating a new instance
|
This method uses the Nova built-in migrate()
|
||||||
from this image. It saves some configuration information
|
action to do a migration of a given instance.
|
||||||
about the original instance : security group, list of networks,
|
For migrating a given dest_hostname, Nova API version
|
||||||
list of attached volumes, floating IP, ...
|
must be 2.56 or higher.
|
||||||
in order to apply the same settings to the new instance.
|
|
||||||
At the end of the process the original instance is deleted.
|
|
||||||
It returns True if the migration was successful,
|
It returns True if the migration was successful,
|
||||||
False otherwise.
|
False otherwise.
|
||||||
|
|
||||||
if destination hostname not given, this method calls nova api
|
|
||||||
to migrate the instance.
|
|
||||||
|
|
||||||
:param instance_id: the unique id of the instance to migrate.
|
:param instance_id: the unique id of the instance to migrate.
|
||||||
:param keep_original_image_name: flag indicating whether the
|
:param dest_hostname: the name of the destination compute node, if
|
||||||
image name from which the original instance was built must be
|
destination_node is None, nova scheduler choose
|
||||||
used as the name of the intermediate image used for migration.
|
the destination host
|
||||||
If this flag is False, a temporary image name is built
|
|
||||||
"""
|
"""
|
||||||
new_image_name = ""
|
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Trying a non-live migrate of instance '%s' " % instance_id)
|
"Trying a cold migrate of instance '%s' ", instance_id)
|
||||||
|
|
||||||
# Looking for the instance to migrate
|
# Looking for the instance to migrate
|
||||||
instance = self.find_instance(instance_id)
|
instance = self.find_instance(instance_id)
|
||||||
if not instance:
|
if not instance:
|
||||||
LOG.debug("Instance %s not found !" % instance_id)
|
LOG.debug("Instance %s not found !", instance_id)
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
# NOTE: If destination node is None call Nova API to migrate
|
|
||||||
# instance
|
|
||||||
host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
|
host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
"Instance %(instance)s found on host '%(host)s'.",
|
||||||
|
{'instance': instance_id, 'host': host_name})
|
||||||
|
|
||||||
if dest_hostname is None:
|
previous_status = getattr(instance, 'status')
|
||||||
previous_status = getattr(instance, 'status')
|
|
||||||
|
|
||||||
instance.migrate()
|
if (dest_hostname and
|
||||||
instance = self.nova.servers.get(instance_id)
|
not self._check_nova_api_version(self.nova, "2.56")):
|
||||||
while (getattr(instance, 'status') not in
|
LOG.error("For migrating a given dest_hostname,"
|
||||||
["VERIFY_RESIZE", "ERROR"] and retry):
|
"Nova API version must be 2.56 or higher")
|
||||||
instance = self.nova.servers.get(instance.id)
|
return False
|
||||||
time.sleep(2)
|
|
||||||
retry -= 1
|
|
||||||
new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
|
||||||
|
|
||||||
if (host_name != new_hostname and
|
instance.migrate(host=dest_hostname)
|
||||||
instance.status == 'VERIFY_RESIZE'):
|
instance = self.nova.servers.get(instance_id)
|
||||||
if not self.confirm_resize(instance, previous_status):
|
|
||||||
return False
|
while (getattr(instance, 'status') not in
|
||||||
LOG.debug(
|
["VERIFY_RESIZE", "ERROR"] and retry):
|
||||||
"cold migration succeeded : "
|
instance = self.nova.servers.get(instance.id)
|
||||||
"instance %s is now on host '%s'." % (
|
time.sleep(2)
|
||||||
instance_id, new_hostname))
|
retry -= 1
|
||||||
return True
|
new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||||
else:
|
|
||||||
LOG.debug(
|
if (host_name != new_hostname and
|
||||||
"cold migration for instance %s failed" % instance_id)
|
instance.status == 'VERIFY_RESIZE'):
|
||||||
|
if not self.confirm_resize(instance, previous_status):
|
||||||
return False
|
return False
|
||||||
|
LOG.debug(
|
||||||
if not keep_original_image_name:
|
"cold migration succeeded : "
|
||||||
# randrange gives you an integral value
|
"instance %(instance)s is now on host '%(host)s'.",
|
||||||
irand = random.randint(0, 1000)
|
{'instance': instance_id, 'host': new_hostname})
|
||||||
|
return True
|
||||||
# Building the temporary image name
|
|
||||||
# which will be used for the migration
|
|
||||||
new_image_name = "tmp-migrate-%s-%s" % (instance_id, irand)
|
|
||||||
else:
|
else:
|
||||||
# Get the image name of the current instance.
|
|
||||||
# We'll use the same name for the new instance.
|
|
||||||
imagedict = getattr(instance, "image")
|
|
||||||
image_id = imagedict["id"]
|
|
||||||
image = self.glance.images.get(image_id)
|
|
||||||
new_image_name = getattr(image, "name")
|
|
||||||
|
|
||||||
instance_name = getattr(instance, "name")
|
|
||||||
flavor_name = instance.flavor.get('original_name')
|
|
||||||
keypair_name = getattr(instance, "key_name")
|
|
||||||
|
|
||||||
addresses = getattr(instance, "addresses")
|
|
||||||
|
|
||||||
floating_ip = ""
|
|
||||||
network_names_list = []
|
|
||||||
|
|
||||||
for network_name, network_conf_obj in addresses.items():
|
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Extracting network configuration for network '%s'" %
|
"cold migration for instance %s failed", instance_id)
|
||||||
network_name)
|
|
||||||
|
|
||||||
network_names_list.append(network_name)
|
|
||||||
|
|
||||||
for net_conf_item in network_conf_obj:
|
|
||||||
if net_conf_item['OS-EXT-IPS:type'] == "floating":
|
|
||||||
floating_ip = net_conf_item['addr']
|
|
||||||
break
|
|
||||||
|
|
||||||
sec_groups_list = getattr(instance, "security_groups")
|
|
||||||
sec_groups = []
|
|
||||||
|
|
||||||
for sec_group_dict in sec_groups_list:
|
|
||||||
sec_groups.append(sec_group_dict['name'])
|
|
||||||
|
|
||||||
# Stopping the old instance properly so
|
|
||||||
# that no new data is sent to it and to its attached volumes
|
|
||||||
stopped_ok = self.stop_instance(instance_id)
|
|
||||||
|
|
||||||
if not stopped_ok:
|
|
||||||
LOG.debug("Could not stop instance: %s" % instance_id)
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# Building the temporary image which will be used
|
|
||||||
# to re-build the same instance on another target host
|
|
||||||
image_uuid = self.create_image_from_instance(instance_id,
|
|
||||||
new_image_name)
|
|
||||||
|
|
||||||
if not image_uuid:
|
|
||||||
LOG.debug(
|
|
||||||
"Could not build temporary image of instance: %s" %
|
|
||||||
instance_id)
|
|
||||||
return False
|
|
||||||
|
|
||||||
#
|
|
||||||
# We need to get the list of attached volumes and detach
|
|
||||||
# them from the instance in order to attache them later
|
|
||||||
# to the new instance
|
|
||||||
#
|
|
||||||
blocks = []
|
|
||||||
|
|
||||||
# Looks like this :
|
|
||||||
# os-extended-volumes:volumes_attached |
|
|
||||||
# [{u'id': u'c5c3245f-dd59-4d4f-8d3a-89d80135859a'}]
|
|
||||||
attached_volumes = getattr(instance,
|
|
||||||
"os-extended-volumes:volumes_attached")
|
|
||||||
|
|
||||||
for attached_volume in attached_volumes:
|
|
||||||
volume_id = attached_volume['id']
|
|
||||||
|
|
||||||
try:
|
|
||||||
volume = self.cinder.volumes.get(volume_id)
|
|
||||||
|
|
||||||
attachments_list = getattr(volume, "attachments")
|
|
||||||
|
|
||||||
device_name = attachments_list[0]['device']
|
|
||||||
# When a volume is attached to an instance
|
|
||||||
# it contains the following property :
|
|
||||||
# attachments = [{u'device': u'/dev/vdb',
|
|
||||||
# u'server_id': u'742cc508-a2f2-4769-a794-bcdad777e814',
|
|
||||||
# u'id': u'f6d62785-04b8-400d-9626-88640610f65e',
|
|
||||||
# u'host_name': None, u'volume_id':
|
|
||||||
# u'f6d62785-04b8-400d-9626-88640610f65e'}]
|
|
||||||
|
|
||||||
# boot_index indicates a number
|
|
||||||
# designating the boot order of the device.
|
|
||||||
# Use -1 for the boot volume,
|
|
||||||
# choose 0 for an attached volume.
|
|
||||||
block_device_mapping_v2_item = {"device_name": device_name,
|
|
||||||
"source_type": "volume",
|
|
||||||
"destination_type":
|
|
||||||
"volume",
|
|
||||||
"uuid": volume_id,
|
|
||||||
"boot_index": "0"}
|
|
||||||
|
|
||||||
blocks.append(
|
|
||||||
block_device_mapping_v2_item)
|
|
||||||
|
|
||||||
LOG.debug("Detaching volume %s from instance: %s" % (
|
|
||||||
volume_id, instance_id))
|
|
||||||
# volume.detach()
|
|
||||||
self.nova.volumes.delete_server_volume(instance_id,
|
|
||||||
volume_id)
|
|
||||||
|
|
||||||
if not self.wait_for_volume_status(volume, "available", 5,
|
|
||||||
10):
|
|
||||||
LOG.debug(
|
|
||||||
"Could not detach volume %s from instance: %s" % (
|
|
||||||
volume_id, instance_id))
|
|
||||||
return False
|
|
||||||
except ciexceptions.NotFound:
|
|
||||||
LOG.debug("Volume '%s' not found " % image_id)
|
|
||||||
return False
|
|
||||||
|
|
||||||
# We create the new instance from
|
|
||||||
# the intermediate image of the original instance
|
|
||||||
new_instance = self. \
|
|
||||||
create_instance(dest_hostname,
|
|
||||||
instance_name,
|
|
||||||
image_uuid,
|
|
||||||
flavor_name,
|
|
||||||
sec_groups,
|
|
||||||
network_names_list=network_names_list,
|
|
||||||
keypair_name=keypair_name,
|
|
||||||
create_new_floating_ip=False,
|
|
||||||
block_device_mapping_v2=blocks)
|
|
||||||
|
|
||||||
if not new_instance:
|
|
||||||
LOG.debug(
|
|
||||||
"Could not create new instance "
|
|
||||||
"for non-live migration of instance %s" % instance_id)
|
|
||||||
return False
|
|
||||||
|
|
||||||
try:
|
|
||||||
LOG.debug("Detaching floating ip '%s' from instance %s" % (
|
|
||||||
floating_ip, instance_id))
|
|
||||||
# We detach the floating ip from the current instance
|
|
||||||
instance.remove_floating_ip(floating_ip)
|
|
||||||
|
|
||||||
LOG.debug(
|
|
||||||
"Attaching floating ip '%s' to the new instance %s" % (
|
|
||||||
floating_ip, new_instance.id))
|
|
||||||
|
|
||||||
# We attach the same floating ip to the new instance
|
|
||||||
new_instance.add_floating_ip(floating_ip)
|
|
||||||
except Exception as e:
|
|
||||||
LOG.debug(e)
|
|
||||||
|
|
||||||
new_host_name = getattr(new_instance, "OS-EXT-SRV-ATTR:host")
|
|
||||||
|
|
||||||
# Deleting the old instance (because no more useful)
|
|
||||||
delete_ok = self.delete_instance(instance_id)
|
|
||||||
if not delete_ok:
|
|
||||||
LOG.debug("Could not delete instance: %s" % instance_id)
|
|
||||||
return False
|
|
||||||
|
|
||||||
LOG.debug(
|
|
||||||
"Instance %s has been successfully migrated "
|
|
||||||
"to new host '%s' and its new id is %s." % (
|
|
||||||
instance_id, new_host_name, new_instance.id))
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def resize_instance(self, instance_id, flavor, retry=120):
|
def resize_instance(self, instance_id, flavor, retry=120):
|
||||||
"""This method resizes given instance with specified flavor.
|
"""This method resizes given instance with specified flavor.
|
||||||
|
|
||||||
@@ -366,8 +207,10 @@ class NovaHelper(object):
|
|||||||
:param instance_id: the unique id of the instance to resize.
|
:param instance_id: the unique id of the instance to resize.
|
||||||
:param flavor: the name or ID of the flavor to resize to.
|
:param flavor: the name or ID of the flavor to resize to.
|
||||||
"""
|
"""
|
||||||
LOG.debug("Trying a resize of instance %s to flavor '%s'" % (
|
LOG.debug(
|
||||||
instance_id, flavor))
|
"Trying a resize of instance %(instance)s to "
|
||||||
|
"flavor '%(flavor)s'",
|
||||||
|
{'instance': instance_id, 'flavor': flavor})
|
||||||
|
|
||||||
# Looking for the instance to resize
|
# Looking for the instance to resize
|
||||||
instance = self.find_instance(instance_id)
|
instance = self.find_instance(instance_id)
|
||||||
@@ -384,17 +227,17 @@ class NovaHelper(object):
|
|||||||
"instance %s. Exception: %s", instance_id, e)
|
"instance %s. Exception: %s", instance_id, e)
|
||||||
|
|
||||||
if not flavor_id:
|
if not flavor_id:
|
||||||
LOG.debug("Flavor not found: %s" % flavor)
|
LOG.debug("Flavor not found: %s", flavor)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if not instance:
|
if not instance:
|
||||||
LOG.debug("Instance not found: %s" % instance_id)
|
LOG.debug("Instance not found: %s", instance_id)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
|
instance_status = getattr(instance, 'OS-EXT-STS:vm_state')
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Instance %s is in '%s' status." % (instance_id,
|
"Instance %(id)s is in '%(status)s' status.",
|
||||||
instance_status))
|
{'id': instance_id, 'status': instance_status})
|
||||||
|
|
||||||
instance.resize(flavor=flavor_id)
|
instance.resize(flavor=flavor_id)
|
||||||
while getattr(instance,
|
while getattr(instance,
|
||||||
@@ -432,17 +275,20 @@ class NovaHelper(object):
|
|||||||
destination_node is None, nova scheduler choose
|
destination_node is None, nova scheduler choose
|
||||||
the destination host
|
the destination host
|
||||||
"""
|
"""
|
||||||
LOG.debug("Trying to live migrate instance %s " % (instance_id))
|
LOG.debug(
|
||||||
|
"Trying a live migrate instance %(instance)s ",
|
||||||
|
{'instance': instance_id})
|
||||||
|
|
||||||
# Looking for the instance to migrate
|
# Looking for the instance to migrate
|
||||||
instance = self.find_instance(instance_id)
|
instance = self.find_instance(instance_id)
|
||||||
if not instance:
|
if not instance:
|
||||||
LOG.debug("Instance not found: %s" % instance_id)
|
LOG.debug("Instance not found: %s", instance_id)
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
"Instance %(instance)s found on host '%(host)s'.",
|
||||||
|
{'instance': instance_id, 'host': host_name})
|
||||||
|
|
||||||
# From nova api version 2.25(Mitaka release), the default value of
|
# From nova api version 2.25(Mitaka release), the default value of
|
||||||
# block_migration is None which is mapped to 'auto'.
|
# block_migration is None which is mapped to 'auto'.
|
||||||
@@ -464,7 +310,7 @@ class NovaHelper(object):
|
|||||||
if host_name != new_hostname and instance.status == 'ACTIVE':
|
if host_name != new_hostname and instance.status == 'ACTIVE':
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Live migration succeeded : "
|
"Live migration succeeded : "
|
||||||
"instance %s is now on host '%s'." % (
|
"instance %s is now on host '%s'.", (
|
||||||
instance_id, new_hostname))
|
instance_id, new_hostname))
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
@@ -475,7 +321,7 @@ class NovaHelper(object):
|
|||||||
and retry:
|
and retry:
|
||||||
instance = self.nova.servers.get(instance.id)
|
instance = self.nova.servers.get(instance.id)
|
||||||
if not getattr(instance, 'OS-EXT-STS:task_state'):
|
if not getattr(instance, 'OS-EXT-STS:task_state'):
|
||||||
LOG.debug("Instance task state: %s is null" % instance_id)
|
LOG.debug("Instance task state: %s is null", instance_id)
|
||||||
break
|
break
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
'Waiting the migration of {0} to {1}'.format(
|
'Waiting the migration of {0} to {1}'.format(
|
||||||
@@ -491,13 +337,13 @@ class NovaHelper(object):
|
|||||||
|
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Live migration succeeded : "
|
"Live migration succeeded : "
|
||||||
"instance %s is now on host '%s'." % (
|
"instance %(instance)s is now on host '%(host)s'.",
|
||||||
instance_id, host_name))
|
{'instance': instance_id, 'host': host_name})
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def abort_live_migrate(self, instance_id, source, destination, retry=240):
|
def abort_live_migrate(self, instance_id, source, destination, retry=240):
|
||||||
LOG.debug("Aborting live migration of instance %s" % instance_id)
|
LOG.debug("Aborting live migration of instance %s", instance_id)
|
||||||
migration = self.get_running_migration(instance_id)
|
migration = self.get_running_migration(instance_id)
|
||||||
if migration:
|
if migration:
|
||||||
migration_id = getattr(migration[0], "id")
|
migration_id = getattr(migration[0], "id")
|
||||||
@@ -510,7 +356,7 @@ class NovaHelper(object):
|
|||||||
LOG.exception(e)
|
LOG.exception(e)
|
||||||
else:
|
else:
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"No running migrations found for instance %s" % instance_id)
|
"No running migrations found for instance %s", instance_id)
|
||||||
|
|
||||||
while retry:
|
while retry:
|
||||||
instance = self.nova.servers.get(instance_id)
|
instance = self.nova.servers.get(instance_id)
|
||||||
@@ -534,24 +380,34 @@ class NovaHelper(object):
|
|||||||
"for the instance %s" % instance_id)
|
"for the instance %s" % instance_id)
|
||||||
|
|
||||||
def enable_service_nova_compute(self, hostname):
|
def enable_service_nova_compute(self, hostname):
|
||||||
if self.nova.services.enable(host=hostname,
|
if float(CONF.nova_client.api_version) < 2.53:
|
||||||
binary='nova-compute'). \
|
status = self.nova.services.enable(
|
||||||
status == 'enabled':
|
host=hostname, binary='nova-compute').status == 'enabled'
|
||||||
return True
|
|
||||||
else:
|
else:
|
||||||
return False
|
service_uuid = self.nova.services.list(host=hostname,
|
||||||
|
binary='nova-compute')[0].id
|
||||||
|
status = self.nova.services.enable(
|
||||||
|
service_uuid=service_uuid).status == 'enabled'
|
||||||
|
|
||||||
|
return status
|
||||||
|
|
||||||
def disable_service_nova_compute(self, hostname, reason=None):
|
def disable_service_nova_compute(self, hostname, reason=None):
|
||||||
if self.nova.services.disable_log_reason(host=hostname,
|
if float(CONF.nova_client.api_version) < 2.53:
|
||||||
binary='nova-compute',
|
status = self.nova.services.disable_log_reason(
|
||||||
reason=reason). \
|
host=hostname,
|
||||||
status == 'disabled':
|
binary='nova-compute',
|
||||||
return True
|
reason=reason).status == 'disabled'
|
||||||
else:
|
else:
|
||||||
return False
|
service_uuid = self.nova.services.list(host=hostname,
|
||||||
|
binary='nova-compute')[0].id
|
||||||
|
status = self.nova.services.disable_log_reason(
|
||||||
|
service_uuid=service_uuid,
|
||||||
|
reason=reason).status == 'disabled'
|
||||||
|
|
||||||
|
return status
|
||||||
|
|
||||||
def set_host_offline(self, hostname):
|
def set_host_offline(self, hostname):
|
||||||
# See API on http://developer.openstack.org/api-ref-compute-v2.1.html
|
# See API on https://developer.openstack.org/api-ref/compute/
|
||||||
# especially the PUT request
|
# especially the PUT request
|
||||||
# regarding this resource : /v2.1/os-hosts/{host_name}
|
# regarding this resource : /v2.1/os-hosts/{host_name}
|
||||||
#
|
#
|
||||||
@@ -575,7 +431,7 @@ class NovaHelper(object):
|
|||||||
host = self.nova.hosts.get(hostname)
|
host = self.nova.hosts.get(hostname)
|
||||||
|
|
||||||
if not host:
|
if not host:
|
||||||
LOG.debug("host not found: %s" % hostname)
|
LOG.debug("host not found: %s", hostname)
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
host[0].update(
|
host[0].update(
|
||||||
@@ -597,18 +453,19 @@ class NovaHelper(object):
|
|||||||
key-value pairs to associate to the image as metadata.
|
key-value pairs to associate to the image as metadata.
|
||||||
"""
|
"""
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Trying to create an image from instance %s ..." % instance_id)
|
"Trying to create an image from instance %s ...", instance_id)
|
||||||
|
|
||||||
# Looking for the instance
|
# Looking for the instance
|
||||||
instance = self.find_instance(instance_id)
|
instance = self.find_instance(instance_id)
|
||||||
|
|
||||||
if not instance:
|
if not instance:
|
||||||
LOG.debug("Instance not found: %s" % instance_id)
|
LOG.debug("Instance not found: %s", instance_id)
|
||||||
return None
|
return None
|
||||||
else:
|
else:
|
||||||
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Instance %s found on host '%s'." % (instance_id, host_name))
|
"Instance %(instance)s found on host '%(host)s'.",
|
||||||
|
{'instance': instance_id, 'host': host_name})
|
||||||
|
|
||||||
# We need to wait for an appropriate status
|
# We need to wait for an appropriate status
|
||||||
# of the instance before we can build an image from it
|
# of the instance before we can build an image from it
|
||||||
@@ -635,14 +492,15 @@ class NovaHelper(object):
|
|||||||
if not image:
|
if not image:
|
||||||
break
|
break
|
||||||
status = image.status
|
status = image.status
|
||||||
LOG.debug("Current image status: %s" % status)
|
LOG.debug("Current image status: %s", status)
|
||||||
|
|
||||||
if not image:
|
if not image:
|
||||||
LOG.debug("Image not found: %s" % image_uuid)
|
LOG.debug("Image not found: %s", image_uuid)
|
||||||
else:
|
else:
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Image %s successfully created for instance %s" % (
|
"Image %(image)s successfully created for "
|
||||||
image_uuid, instance_id))
|
"instance %(instance)s",
|
||||||
|
{'image': image_uuid, 'instance': instance_id})
|
||||||
return image_uuid
|
return image_uuid
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@@ -651,16 +509,16 @@ class NovaHelper(object):
|
|||||||
|
|
||||||
:param instance_id: the unique id of the instance to delete.
|
:param instance_id: the unique id of the instance to delete.
|
||||||
"""
|
"""
|
||||||
LOG.debug("Trying to remove instance %s ..." % instance_id)
|
LOG.debug("Trying to remove instance %s ...", instance_id)
|
||||||
|
|
||||||
instance = self.find_instance(instance_id)
|
instance = self.find_instance(instance_id)
|
||||||
|
|
||||||
if not instance:
|
if not instance:
|
||||||
LOG.debug("Instance not found: %s" % instance_id)
|
LOG.debug("Instance not found: %s", instance_id)
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
self.nova.servers.delete(instance_id)
|
self.nova.servers.delete(instance_id)
|
||||||
LOG.debug("Instance %s removed." % instance_id)
|
LOG.debug("Instance %s removed.", instance_id)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def stop_instance(self, instance_id):
|
def stop_instance(self, instance_id):
|
||||||
@@ -668,21 +526,21 @@ class NovaHelper(object):
|
|||||||
|
|
||||||
:param instance_id: the unique id of the instance to stop.
|
:param instance_id: the unique id of the instance to stop.
|
||||||
"""
|
"""
|
||||||
LOG.debug("Trying to stop instance %s ..." % instance_id)
|
LOG.debug("Trying to stop instance %s ...", instance_id)
|
||||||
|
|
||||||
instance = self.find_instance(instance_id)
|
instance = self.find_instance(instance_id)
|
||||||
|
|
||||||
if not instance:
|
if not instance:
|
||||||
LOG.debug("Instance not found: %s" % instance_id)
|
LOG.debug("Instance not found: %s", instance_id)
|
||||||
return False
|
return False
|
||||||
elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
|
elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped":
|
||||||
LOG.debug("Instance has been stopped: %s" % instance_id)
|
LOG.debug("Instance has been stopped: %s", instance_id)
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
self.nova.servers.stop(instance_id)
|
self.nova.servers.stop(instance_id)
|
||||||
|
|
||||||
if self.wait_for_instance_state(instance, "stopped", 8, 10):
|
if self.wait_for_instance_state(instance, "stopped", 8, 10):
|
||||||
LOG.debug("Instance %s stopped." % instance_id)
|
LOG.debug("Instance %s stopped.", instance_id)
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
@@ -723,11 +581,11 @@ class NovaHelper(object):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
while instance.status not in status_list and retry:
|
while instance.status not in status_list and retry:
|
||||||
LOG.debug("Current instance status: %s" % instance.status)
|
LOG.debug("Current instance status: %s", instance.status)
|
||||||
time.sleep(sleep)
|
time.sleep(sleep)
|
||||||
instance = self.nova.servers.get(instance.id)
|
instance = self.nova.servers.get(instance.id)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
LOG.debug("Current instance status: %s" % instance.status)
|
LOG.debug("Current instance status: %s", instance.status)
|
||||||
return instance.status in status_list
|
return instance.status in status_list
|
||||||
|
|
||||||
def create_instance(self, node_id, inst_name="test", image_id=None,
|
def create_instance(self, node_id, inst_name="test", image_id=None,
|
||||||
@@ -743,26 +601,26 @@ class NovaHelper(object):
|
|||||||
It returns the unique id of the created instance.
|
It returns the unique id of the created instance.
|
||||||
"""
|
"""
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Trying to create new instance '%s' "
|
"Trying to create new instance '%(inst)s' "
|
||||||
"from image '%s' with flavor '%s' ..." % (
|
"from image '%(image)s' with flavor '%(flavor)s' ...",
|
||||||
inst_name, image_id, flavor_name))
|
{'inst': inst_name, 'image': image_id, 'flavor': flavor_name})
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.nova.keypairs.findall(name=keypair_name)
|
self.nova.keypairs.findall(name=keypair_name)
|
||||||
except nvexceptions.NotFound:
|
except nvexceptions.NotFound:
|
||||||
LOG.debug("Key pair '%s' not found " % keypair_name)
|
LOG.debug("Key pair '%s' not found ", keypair_name)
|
||||||
return
|
return
|
||||||
|
|
||||||
try:
|
try:
|
||||||
image = self.glance.images.get(image_id)
|
image = self.glance.images.get(image_id)
|
||||||
except glexceptions.NotFound:
|
except glexceptions.NotFound:
|
||||||
LOG.debug("Image '%s' not found " % image_id)
|
LOG.debug("Image '%s' not found ", image_id)
|
||||||
return
|
return
|
||||||
|
|
||||||
try:
|
try:
|
||||||
flavor = self.nova.flavors.find(name=flavor_name)
|
flavor = self.nova.flavors.find(name=flavor_name)
|
||||||
except nvexceptions.NotFound:
|
except nvexceptions.NotFound:
|
||||||
LOG.debug("Flavor '%s' not found " % flavor_name)
|
LOG.debug("Flavor '%s' not found ", flavor_name)
|
||||||
return
|
return
|
||||||
|
|
||||||
# Make sure all security groups exist
|
# Make sure all security groups exist
|
||||||
@@ -770,7 +628,7 @@ class NovaHelper(object):
|
|||||||
group_id = self.get_security_group_id_from_name(sec_group_name)
|
group_id = self.get_security_group_id_from_name(sec_group_name)
|
||||||
|
|
||||||
if not group_id:
|
if not group_id:
|
||||||
LOG.debug("Security group '%s' not found " % sec_group_name)
|
LOG.debug("Security group '%s' not found ", sec_group_name)
|
||||||
return
|
return
|
||||||
|
|
||||||
net_list = list()
|
net_list = list()
|
||||||
@@ -779,7 +637,7 @@ class NovaHelper(object):
|
|||||||
nic_id = self.get_network_id_from_name(network_name)
|
nic_id = self.get_network_id_from_name(network_name)
|
||||||
|
|
||||||
if not nic_id:
|
if not nic_id:
|
||||||
LOG.debug("Network '%s' not found " % network_name)
|
LOG.debug("Network '%s' not found ", network_name)
|
||||||
return
|
return
|
||||||
net_obj = {"net-id": nic_id}
|
net_obj = {"net-id": nic_id}
|
||||||
net_list.append(net_obj)
|
net_list.append(net_obj)
|
||||||
@@ -805,14 +663,16 @@ class NovaHelper(object):
|
|||||||
if create_new_floating_ip and instance.status == 'ACTIVE':
|
if create_new_floating_ip and instance.status == 'ACTIVE':
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Creating a new floating IP"
|
"Creating a new floating IP"
|
||||||
" for instance '%s'" % instance.id)
|
" for instance '%s'", instance.id)
|
||||||
# Creating floating IP for the new instance
|
# Creating floating IP for the new instance
|
||||||
floating_ip = self.nova.floating_ips.create()
|
floating_ip = self.nova.floating_ips.create()
|
||||||
|
|
||||||
instance.add_floating_ip(floating_ip)
|
instance.add_floating_ip(floating_ip)
|
||||||
|
|
||||||
LOG.debug("Instance %s associated to Floating IP '%s'" % (
|
LOG.debug(
|
||||||
instance.id, floating_ip.ip))
|
"Instance %(instance)s associated to "
|
||||||
|
"Floating IP '%(ip)s'",
|
||||||
|
{'instance': instance.id, 'ip': floating_ip.ip})
|
||||||
|
|
||||||
return instance
|
return instance
|
||||||
|
|
||||||
@@ -886,7 +746,7 @@ class NovaHelper(object):
|
|||||||
LOG.debug('Waiting volume update to {0}'.format(new_volume))
|
LOG.debug('Waiting volume update to {0}'.format(new_volume))
|
||||||
time.sleep(retry_interval)
|
time.sleep(retry_interval)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
LOG.debug("retry count: %s" % retry)
|
LOG.debug("retry count: %s", retry)
|
||||||
if getattr(new_volume, 'status') != "in-use":
|
if getattr(new_volume, 'status') != "in-use":
|
||||||
LOG.error("Volume update retry timeout or error")
|
LOG.error("Volume update retry timeout or error")
|
||||||
return False
|
return False
|
||||||
@@ -894,5 +754,15 @@ class NovaHelper(object):
|
|||||||
host_name = getattr(new_volume, "os-vol-host-attr:host")
|
host_name = getattr(new_volume, "os-vol-host-attr:host")
|
||||||
LOG.debug(
|
LOG.debug(
|
||||||
"Volume update succeeded : "
|
"Volume update succeeded : "
|
||||||
"Volume %s is now on host '%s'." % (new_volume.id, host_name))
|
"Volume %s is now on host '%s'.",
|
||||||
|
(new_volume.id, host_name))
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
def _check_nova_api_version(self, client, version):
|
||||||
|
api_version = api_versions.APIVersion(version_str=version)
|
||||||
|
try:
|
||||||
|
api_versions.discover_version(client, api_version)
|
||||||
|
return True
|
||||||
|
except nvexceptions.UnsupportedVersion as e:
|
||||||
|
LOG.exception(e)
|
||||||
|
return False
|
||||||
|
|||||||
37
watcher/common/policies/__init__.py
Normal file
37
watcher/common/policies/__init__.py
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import itertools
|
||||||
|
|
||||||
|
from watcher.common.policies import action
|
||||||
|
from watcher.common.policies import action_plan
|
||||||
|
from watcher.common.policies import audit
|
||||||
|
from watcher.common.policies import audit_template
|
||||||
|
from watcher.common.policies import base
|
||||||
|
from watcher.common.policies import goal
|
||||||
|
from watcher.common.policies import scoring_engine
|
||||||
|
from watcher.common.policies import service
|
||||||
|
from watcher.common.policies import strategy
|
||||||
|
|
||||||
|
|
||||||
|
def list_rules():
|
||||||
|
return itertools.chain(
|
||||||
|
base.list_rules(),
|
||||||
|
action.list_rules(),
|
||||||
|
action_plan.list_rules(),
|
||||||
|
audit.list_rules(),
|
||||||
|
audit_template.list_rules(),
|
||||||
|
goal.list_rules(),
|
||||||
|
scoring_engine.list_rules(),
|
||||||
|
service.list_rules(),
|
||||||
|
strategy.list_rules(),
|
||||||
|
)
|
||||||
57
watcher/common/policies/action.py
Normal file
57
watcher/common/policies/action.py
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
from watcher.common.policies import base
|
||||||
|
|
||||||
|
ACTION = 'action:%s'


def _admin_get_rule(suffix, description, path):
    """Build an admin-only, GET-method policy rule for the action API.

    All action endpoints in this module are read-only, so the HTTP method
    is fixed to GET.
    """
    return policy.DocumentedRuleDefault(
        name=ACTION % suffix,
        check_str=base.RULE_ADMIN_API,
        description=description,
        operations=[{'path': path, 'method': 'GET'}],
    )


rules = [
    _admin_get_rule('detail',
                    'Retrieve a list of actions with detail.',
                    '/v1/actions/detail'),
    _admin_get_rule('get',
                    'Retrieve information about a given action.',
                    '/v1/actions/{action_id}'),
    _admin_get_rule('get_all',
                    'Retrieve a list of all actions.',
                    '/v1/actions'),
]


def list_rules():
    """Return the policy rules defined for the action API."""
    return rules
|
||||||
90
watcher/common/policies/action_plan.py
Normal file
90
watcher/common/policies/action_plan.py
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
from watcher.common.policies import base
|
||||||
|
|
||||||
|
ACTION_PLAN = 'action_plan:%s'

# Policy rules for the action plan API.  Every operation is restricted to
# the admin API role by default.
rules = [
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'delete',
        check_str=base.RULE_ADMIN_API,
        description='Delete an action plan.',
        operations=[
            {
                'path': '/v1/action_plans/{action_plan_uuid}',
                'method': 'DELETE'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'detail',
        check_str=base.RULE_ADMIN_API,
        description='Retrieve a list of action plans with detail.',
        operations=[
            {
                'path': '/v1/action_plans/detail',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'get',
        check_str=base.RULE_ADMIN_API,
        description='Get an action plan.',
        operations=[
            {
                'path': '/v1/action_plans/{action_plan_id}',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'get_all',
        check_str=base.RULE_ADMIN_API,
        description='Get all action plans.',
        operations=[
            {
                'path': '/v1/action_plans',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'update',
        check_str=base.RULE_ADMIN_API,
        # FIX: description read 'Update an action plans.' -- grammar error
        # in user-facing generated policy documentation.
        description='Update an action plan.',
        operations=[
            {
                'path': '/v1/action_plans/{action_plan_uuid}',
                'method': 'PATCH'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=ACTION_PLAN % 'start',
        check_str=base.RULE_ADMIN_API,
        # FIX: description read 'Start an action plans.' -- grammar error
        # in user-facing generated policy documentation.
        description='Start an action plan.',
        operations=[
            {
                'path': '/v1/action_plans/{action_plan_uuid}/action',
                'method': 'POST'
            }
        ]
    )
]


def list_rules():
    """Return the policy rules defined for the action plan API."""
    return rules
|
||||||
90
watcher/common/policies/audit.py
Normal file
90
watcher/common/policies/audit.py
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
from watcher.common.policies import base
|
||||||
|
|
||||||
|
AUDIT = 'audit:%s'


def _audit_rule(suffix, description, path, method):
    """Build an admin-only policy rule for a single audit API operation."""
    return policy.DocumentedRuleDefault(
        name=AUDIT % suffix,
        check_str=base.RULE_ADMIN_API,
        description=description,
        operations=[{'path': path, 'method': method}],
    )


# One rule per audit API operation; all default to the admin API role.
rules = [
    _audit_rule('create', 'Create a new audit.',
                '/v1/audits', 'POST'),
    _audit_rule('delete', 'Delete an audit.',
                '/v1/audits/{audit_uuid}', 'DELETE'),
    _audit_rule('detail', 'Retrieve audit list with details.',
                '/v1/audits/detail', 'GET'),
    _audit_rule('get', 'Get an audit.',
                '/v1/audits/{audit_uuid}', 'GET'),
    _audit_rule('get_all', 'Get all audits.',
                '/v1/audits', 'GET'),
    _audit_rule('update', 'Update an audit.',
                '/v1/audits/{audit_uuid}', 'PATCH'),
]


def list_rules():
    """Return the policy rules defined for the audit API."""
    return rules
|
||||||
90
watcher/common/policies/audit_template.py
Normal file
90
watcher/common/policies/audit_template.py
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
from watcher.common.policies import base
|
||||||
|
|
||||||
|
AUDIT_TEMPLATE = 'audit_template:%s'


def _template_rule(suffix, description, path, method):
    """Build an admin-only policy rule for one audit template operation."""
    return policy.DocumentedRuleDefault(
        name=AUDIT_TEMPLATE % suffix,
        check_str=base.RULE_ADMIN_API,
        description=description,
        operations=[{'path': path, 'method': method}],
    )


# One rule per audit template API operation; all default to admin-only.
rules = [
    _template_rule('create', 'Create an audit template.',
                   '/v1/audit_templates', 'POST'),
    _template_rule('delete', 'Delete an audit template.',
                   '/v1/audit_templates/{audit_template_uuid}', 'DELETE'),
    _template_rule('detail',
                   'Retrieve a list of audit templates with details.',
                   '/v1/audit_templates/detail', 'GET'),
    _template_rule('get', 'Get an audit template.',
                   '/v1/audit_templates/{audit_template_uuid}', 'GET'),
    _template_rule('get_all', 'Get a list of all audit templates.',
                   '/v1/audit_templates', 'GET'),
    _template_rule('update', 'Update an audit template.',
                   '/v1/audit_templates/{audit_template_uuid}', 'PATCH'),
]


def list_rules():
    """Return the policy rules defined for the audit template API."""
    return rules
|
||||||
32
watcher/common/policies/base.py
Normal file
32
watcher/common/policies/base.py
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
# Check-string fragments shared by every watcher policy module.
RULE_ADMIN_API = 'rule:admin_api'
ROLE_ADMIN_OR_ADMINISTRATOR = 'role:admin or role:administrator'
# oslo.policy's '!' check never matches, i.e. the action is always denied.
ALWAYS_DENY = '!'

rules = [
    # Referenced as ``rule:admin_api`` by the per-resource policy modules.
    policy.RuleDefault(
        name='admin_api',
        check_str=ROLE_ADMIN_OR_ADMINISTRATOR,
    ),
    # Passwords are never exposed by default.
    policy.RuleDefault(
        name='show_password',
        check_str=ALWAYS_DENY,
    ),
]


def list_rules():
    """Return the base policy rules shared across the watcher API."""
    return rules
|
||||||
57
watcher/common/policies/goal.py
Normal file
57
watcher/common/policies/goal.py
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
from watcher.common.policies import base
|
||||||
|
|
||||||
|
GOAL = 'goal:%s'

# Policy rules for the (read-only) goal API; all default to admin-only.
rules = [
    policy.DocumentedRuleDefault(
        name=GOAL % 'detail',
        check_str=base.RULE_ADMIN_API,
        description='Retrieve a list of goals with detail.',
        operations=[
            {
                'path': '/v1/goals/detail',
                # FIX: the method was 'DELETE', which is wrong for a
                # detail listing endpoint -- every other ``detail`` rule
                # in this package documents GET.
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=GOAL % 'get',
        check_str=base.RULE_ADMIN_API,
        description='Get a goal.',
        operations=[
            {
                'path': '/v1/goals/{goal_uuid}',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=GOAL % 'get_all',
        check_str=base.RULE_ADMIN_API,
        description='Get all goals.',
        operations=[
            {
                'path': '/v1/goals',
                'method': 'GET'
            }
        ]
    )
]


def list_rules():
    """Return the policy rules defined for the goal API."""
    return rules
|
||||||
66
watcher/common/policies/scoring_engine.py
Normal file
66
watcher/common/policies/scoring_engine.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
from watcher.common.policies import base
|
||||||
|
|
||||||
|
SCORING_ENGINE = 'scoring_engine:%s'


def _engine_rule(suffix, description, path):
    """Build an admin-only GET rule for one scoring engine operation.

    FIXME(lbragstad): Find someone from watcher to double check this
    information. These APIs aren't listed in watcher's API reference
    documentation.
    """
    return policy.DocumentedRuleDefault(
        name=SCORING_ENGINE % suffix,
        check_str=base.RULE_ADMIN_API,
        description=description,
        operations=[{'path': path, 'method': 'GET'}],
    )


rules = [
    _engine_rule('detail', 'List scoring engines with details.',
                 '/v1/scoring_engines/detail'),
    _engine_rule('get', 'Get a scoring engine.',
                 '/v1/scoring_engines/{scoring_engine_id}'),
    _engine_rule('get_all', 'Get all scoring engines.',
                 '/v1/scoring_engines'),
]


def list_rules():
    """Return the policy rules defined for the scoring engine API."""
    return rules
|
||||||
57
watcher/common/policies/service.py
Normal file
57
watcher/common/policies/service.py
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
from watcher.common.policies import base
|
||||||
|
|
||||||
|
SERVICE = 'service:%s'

# Policy rules for the (read-only) service API; all default to admin-only.
rules = [
    policy.DocumentedRuleDefault(
        name=SERVICE % 'detail',
        check_str=base.RULE_ADMIN_API,
        description='List services with detail.',
        operations=[
            {
                # FIX: the path was '/v1/services/', which duplicates the
                # ``get_all`` operation; every other ``detail`` rule in
                # this package documents the ``/detail`` sub-path.
                'path': '/v1/services/detail',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=SERVICE % 'get',
        check_str=base.RULE_ADMIN_API,
        description='Get a specific service.',
        operations=[
            {
                'path': '/v1/services/{service_id}',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=SERVICE % 'get_all',
        check_str=base.RULE_ADMIN_API,
        description='List all services.',
        operations=[
            {
                'path': '/v1/services/',
                'method': 'GET'
            }
        ]
    ),
]


def list_rules():
    """Return the policy rules defined for the service API."""
    return rules
|
||||||
68
watcher/common/policies/strategy.py
Normal file
68
watcher/common/policies/strategy.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from oslo_policy import policy
|
||||||
|
|
||||||
|
from watcher.common.policies import base
|
||||||
|
|
||||||
|
STRATEGY = 'strategy:%s'

# Policy rules for the (read-only) strategy API; all default to admin-only.
rules = [
    policy.DocumentedRuleDefault(
        name=STRATEGY % 'detail',
        check_str=base.RULE_ADMIN_API,
        description='List strategies with detail.',
        operations=[
            {
                'path': '/v1/strategies/detail',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=STRATEGY % 'get',
        check_str=base.RULE_ADMIN_API,
        description='Get a strategy.',
        operations=[
            {
                'path': '/v1/strategies/{strategy_uuid}',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=STRATEGY % 'get_all',
        check_str=base.RULE_ADMIN_API,
        description='List all strategies.',
        operations=[
            {
                'path': '/v1/strategies',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=STRATEGY % 'state',
        check_str=base.RULE_ADMIN_API,
        description='Get state of strategy.',
        operations=[
            {
                # FIX: path was '/v1/strategies{strategy_uuid}/state' --
                # missing the '/' separator before the UUID segment (cf.
                # the 'get' rule above).
                'path': '/v1/strategies/{strategy_uuid}/state',
                'method': 'GET'
            }
        ]
    )
]


def list_rules():
    """Return the policy rules defined for the strategy API."""
    return rules
|
||||||
@@ -15,11 +15,13 @@
|
|||||||
|
|
||||||
"""Policy Engine For Watcher."""
|
"""Policy Engine For Watcher."""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_policy import policy
|
from oslo_policy import policy
|
||||||
|
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
|
from watcher.common import policies
|
||||||
|
|
||||||
_ENFORCER = None
|
_ENFORCER = None
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@@ -56,6 +58,7 @@ def init(policy_file=None, rules=None,
|
|||||||
default_rule=default_rule,
|
default_rule=default_rule,
|
||||||
use_conf=use_conf,
|
use_conf=use_conf,
|
||||||
overwrite=overwrite)
|
overwrite=overwrite)
|
||||||
|
_ENFORCER.register_defaults(policies.list_rules())
|
||||||
return _ENFORCER
|
return _ENFORCER
|
||||||
|
|
||||||
|
|
||||||
@@ -92,3 +95,23 @@ def enforce(context, rule=None, target=None,
|
|||||||
'user_id': context.user_id}
|
'user_id': context.user_id}
|
||||||
return enforcer.enforce(rule, target, credentials,
|
return enforcer.enforce(rule, target, credentials,
|
||||||
do_raise=do_raise, exc=exc, *args, **kwargs)
|
do_raise=do_raise, exc=exc, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def get_enforcer():
    """Return the policy enforcer for oslopolicy CLI scripts.

    The CLI scripts pass 'output-file' and 'namespace' options on the
    command line; loading the Watcher config with those still in sys.argv
    would fail, so they (and their values) are stripped before cfg.CONF
    is populated.
    """
    cli_only_opts = ('namespace', 'output-file')
    conf_args = []
    argv = sys.argv
    # cfg.CONF expects the equivalent of sys.argv[1:], so skip argv[0].
    idx = 1
    while idx < len(argv):
        if argv[idx].strip('-') in cli_only_opts:
            idx += 2  # drop the option together with its value
        else:
            conf_args.append(argv[idx])
            idx += 1

    cfg.CONF(conf_args, project='watcher')
    init()
    return _ENFORCER
|
||||||
|
|||||||
@@ -69,7 +69,8 @@ _DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'qpid.messaging=INFO',
|
|||||||
'keystoneclient=INFO', 'stevedore=INFO',
|
'keystoneclient=INFO', 'stevedore=INFO',
|
||||||
'eventlet.wsgi.server=WARN', 'iso8601=WARN',
|
'eventlet.wsgi.server=WARN', 'iso8601=WARN',
|
||||||
'paramiko=WARN', 'requests=WARN', 'neutronclient=WARN',
|
'paramiko=WARN', 'requests=WARN', 'neutronclient=WARN',
|
||||||
'glanceclient=WARN', 'watcher.openstack.common=WARN']
|
'glanceclient=WARN', 'watcher.openstack.common=WARN',
|
||||||
|
'apscheduler=WARN']
|
||||||
|
|
||||||
Singleton = service.Singleton
|
Singleton = service.Singleton
|
||||||
|
|
||||||
@@ -288,7 +289,7 @@ class Service(service.ServiceBase):
|
|||||||
return api_manager_version
|
return api_manager_version
|
||||||
|
|
||||||
|
|
||||||
def launch(conf, service_, workers=1, restart_method='mutate'):
    """Launch *service_* via oslo.service with the given worker count.

    Thin pass-through to ``service.launch``; ``restart_method='mutate'``
    is forwarded as-is (presumably controlling SIGHUP behavior in
    oslo.service -- confirm against the oslo.service launcher docs).
    """
    return service.launch(conf, service_, workers, restart_method)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user