Compare commits
5 Commits
master
...
wallaby-eo
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ab6cd112d7 | ||
|
|
d60e2a96e4 | ||
|
|
adf4725f8c | ||
|
|
537823b216 | ||
|
|
281455a08a |
@@ -2,3 +2,4 @@
|
|||||||
host=review.opendev.org
|
host=review.opendev.org
|
||||||
port=29418
|
port=29418
|
||||||
project=openstack/watcher.git
|
project=openstack/watcher.git
|
||||||
|
defaultbranch=stable/wallaby
|
||||||
|
|||||||
@@ -1,62 +0,0 @@
|
|||||||
---
|
|
||||||
repos:
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
|
||||||
rev: v5.0.0
|
|
||||||
hooks:
|
|
||||||
# whitespace
|
|
||||||
- id: trailing-whitespace
|
|
||||||
- id: mixed-line-ending
|
|
||||||
args: ['--fix', 'lf']
|
|
||||||
exclude: '.*\.(svg)$'
|
|
||||||
- id: check-byte-order-marker
|
|
||||||
# file format and permissions
|
|
||||||
- id: check-ast
|
|
||||||
- id: debug-statements
|
|
||||||
- id: check-json
|
|
||||||
files: .*\.json$
|
|
||||||
- id: check-yaml
|
|
||||||
files: .*\.(yaml|yml)$
|
|
||||||
- id: check-executables-have-shebangs
|
|
||||||
- id: check-shebang-scripts-are-executable
|
|
||||||
# git
|
|
||||||
- id: check-added-large-files
|
|
||||||
- id: check-case-conflict
|
|
||||||
- id: detect-private-key
|
|
||||||
- id: check-merge-conflict
|
|
||||||
- repo: https://github.com/Lucas-C/pre-commit-hooks
|
|
||||||
rev: v1.5.5
|
|
||||||
hooks:
|
|
||||||
- id: remove-tabs
|
|
||||||
exclude: '.*\.(svg)$'
|
|
||||||
- repo: https://opendev.org/openstack/hacking
|
|
||||||
rev: 7.0.0
|
|
||||||
hooks:
|
|
||||||
- id: hacking
|
|
||||||
additional_dependencies: []
|
|
||||||
exclude: '^(doc|releasenotes|tools)/.*$'
|
|
||||||
- repo: https://github.com/PyCQA/bandit
|
|
||||||
rev: 1.8.3
|
|
||||||
hooks:
|
|
||||||
- id: bandit
|
|
||||||
args: ['-x', 'tests', '-s', 'B101,B311,B320']
|
|
||||||
- repo: https://github.com/hhatto/autopep8
|
|
||||||
rev: v2.3.2
|
|
||||||
hooks:
|
|
||||||
- id: autopep8
|
|
||||||
files: '^.*\.py$'
|
|
||||||
- repo: https://github.com/codespell-project/codespell
|
|
||||||
rev: v2.4.1
|
|
||||||
hooks:
|
|
||||||
- id: codespell
|
|
||||||
args: ['--ignore-words=doc/dictionary.txt']
|
|
||||||
- repo: https://github.com/sphinx-contrib/sphinx-lint
|
|
||||||
rev: v1.0.0
|
|
||||||
hooks:
|
|
||||||
- id: sphinx-lint
|
|
||||||
args: [--enable=default-role]
|
|
||||||
files: ^doc/|^releasenotes/|^api-guide/
|
|
||||||
types: [rst]
|
|
||||||
- repo: https://github.com/PyCQA/doc8
|
|
||||||
rev: v1.1.2
|
|
||||||
hooks:
|
|
||||||
- id: doc8
|
|
||||||
379
.zuul.yaml
379
.zuul.yaml
@@ -1,24 +1,107 @@
|
|||||||
|
- project:
|
||||||
|
queue: watcher
|
||||||
|
templates:
|
||||||
|
- check-requirements
|
||||||
|
- openstack-cover-jobs
|
||||||
|
- openstack-python3-wallaby-jobs
|
||||||
|
- publish-openstack-docs-pti
|
||||||
|
- release-notes-jobs-python3
|
||||||
|
check:
|
||||||
|
jobs:
|
||||||
|
- watcher-tempest-functional
|
||||||
|
- watcher-grenade
|
||||||
|
- watcher-tempest-strategies
|
||||||
|
- watcher-tempest-actuator
|
||||||
|
- watcherclient-tempest-functional
|
||||||
|
- watcher-tls-test
|
||||||
|
- watcher-tempest-functional-ipv6-only
|
||||||
|
gate:
|
||||||
|
jobs:
|
||||||
|
- watcher-tempest-functional
|
||||||
|
- watcher-tempest-functional-ipv6-only
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-dummy_optim
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_dummy_optim
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-actuator
|
name: watcher-tempest-actuator
|
||||||
parent: watcher-tempest-multinode
|
parent: watcher-tempest-multinode
|
||||||
vars:
|
vars:
|
||||||
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_actuator
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_actuator
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-basic_optim
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_basic_optim
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-vm_workload_consolidation
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_vm_workload_consolidation
|
||||||
|
devstack_local_conf:
|
||||||
|
test-config:
|
||||||
|
$WATCHER_CONFIG:
|
||||||
|
watcher_strategies.vm_workload_consolidation:
|
||||||
|
datasource: ceilometer
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-workload_balancing
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_workload_balancing
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-zone_migration
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_zone_migration
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-host_maintenance
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_host_maintenance
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-storage_balance
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_storage_balance
|
||||||
|
devstack_local_conf:
|
||||||
|
test-config:
|
||||||
|
$TEMPEST_CONFIG:
|
||||||
|
volume:
|
||||||
|
backend_names: ['BACKEND_1', 'BACKEND_2']
|
||||||
|
volume-feature-enabled:
|
||||||
|
multi_backend: true
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-strategies
|
name: watcher-tempest-strategies
|
||||||
parent: watcher-tempest-multinode
|
parent: watcher-tempest-multinode
|
||||||
vars:
|
vars:
|
||||||
tempest_concurrency: 1
|
tempest_concurrency: 1
|
||||||
# All tests inside watcher_tempest_plugin.tests.scenario with tag "strategy"
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies
|
||||||
# or test_execute_strategies file
|
|
||||||
# excluding tests with tag "real_load"
|
- job:
|
||||||
tempest_test_regex: (^watcher_tempest_plugin.tests.scenario)(.*\[.*\bstrategy\b.*\].*)|(^watcher_tempest_plugin.tests.scenario.test_execute_strategies)
|
name: watcher-tls-test
|
||||||
tempest_exclude_regex: .*\[.*\breal_load\b.*\].*
|
parent: watcher-tempest-multinode
|
||||||
|
group-vars:
|
||||||
|
subnode:
|
||||||
|
devstack_services:
|
||||||
|
tls-proxy: true
|
||||||
|
vars:
|
||||||
|
devstack_services:
|
||||||
|
tls-proxy: true
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-multinode
|
name: watcher-tempest-multinode
|
||||||
parent: watcher-tempest-functional
|
parent: watcher-tempest-functional
|
||||||
nodeset: openstack-two-node-noble
|
nodeset: openstack-two-node-focal
|
||||||
roles:
|
roles:
|
||||||
- zuul: openstack/tempest
|
- zuul: openstack/tempest
|
||||||
group-vars:
|
group-vars:
|
||||||
@@ -32,16 +115,12 @@
|
|||||||
period: 120
|
period: 120
|
||||||
watcher_cluster_data_model_collectors.storage:
|
watcher_cluster_data_model_collectors.storage:
|
||||||
period: 120
|
period: 120
|
||||||
$CINDER_CONF:
|
|
||||||
# enable notifications in compute node, by default they are only
|
|
||||||
# configured in the controller
|
|
||||||
oslo_messaging_notifications:
|
|
||||||
driver: messagingv2
|
|
||||||
devstack_services:
|
devstack_services:
|
||||||
watcher-api: false
|
watcher-api: false
|
||||||
watcher-decision-engine: true
|
watcher-decision-engine: true
|
||||||
watcher-applier: false
|
watcher-applier: false
|
||||||
c-bak: false
|
# We need to add TLS support for watcher plugin
|
||||||
|
tls-proxy: false
|
||||||
ceilometer: false
|
ceilometer: false
|
||||||
ceilometer-acompute: false
|
ceilometer-acompute: false
|
||||||
ceilometer-acentral: false
|
ceilometer-acentral: false
|
||||||
@@ -52,13 +131,6 @@
|
|||||||
rabbit: false
|
rabbit: false
|
||||||
mysql: false
|
mysql: false
|
||||||
vars:
|
vars:
|
||||||
devstack_localrc:
|
|
||||||
GNOCCHI_ARCHIVE_POLICY_TEMPEST: "ceilometer-low-rate"
|
|
||||||
CEILOMETER_PIPELINE_INTERVAL: 15
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: false
|
|
||||||
ceilometer-acentral: true
|
|
||||||
ceilometer-anotification: true
|
|
||||||
devstack_local_conf:
|
devstack_local_conf:
|
||||||
post-config:
|
post-config:
|
||||||
$WATCHER_CONF:
|
$WATCHER_CONF:
|
||||||
@@ -68,11 +140,6 @@
|
|||||||
period: 120
|
period: 120
|
||||||
watcher_cluster_data_model_collectors.storage:
|
watcher_cluster_data_model_collectors.storage:
|
||||||
period: 120
|
period: 120
|
||||||
$CINDER_CONF:
|
|
||||||
# enable notifications in compute node, by default they are only
|
|
||||||
# configured in the controller
|
|
||||||
oslo_messaging_notifications:
|
|
||||||
driver: messagingv2
|
|
||||||
test-config:
|
test-config:
|
||||||
$TEMPEST_CONFIG:
|
$TEMPEST_CONFIG:
|
||||||
compute:
|
compute:
|
||||||
@@ -83,10 +150,6 @@
|
|||||||
block_migration_for_live_migration: true
|
block_migration_for_live_migration: true
|
||||||
placement:
|
placement:
|
||||||
min_microversion: 1.29
|
min_microversion: 1.29
|
||||||
telemetry:
|
|
||||||
ceilometer_polling_interval: 15
|
|
||||||
optimize:
|
|
||||||
run_continuous_audit_tests: true
|
|
||||||
devstack_plugins:
|
devstack_plugins:
|
||||||
ceilometer: https://opendev.org/openstack/ceilometer
|
ceilometer: https://opendev.org/openstack/ceilometer
|
||||||
|
|
||||||
@@ -105,6 +168,7 @@
|
|||||||
devstack_plugins:
|
devstack_plugins:
|
||||||
watcher: https://opendev.org/openstack/watcher
|
watcher: https://opendev.org/openstack/watcher
|
||||||
devstack_services:
|
devstack_services:
|
||||||
|
tls-proxy: false
|
||||||
watcher-api: true
|
watcher-api: true
|
||||||
watcher-decision-engine: true
|
watcher-decision-engine: true
|
||||||
watcher-applier: true
|
watcher-applier: true
|
||||||
@@ -136,7 +200,7 @@
|
|||||||
- openstack/python-watcherclient
|
- openstack/python-watcherclient
|
||||||
- openstack/watcher-tempest-plugin
|
- openstack/watcher-tempest-plugin
|
||||||
vars: *base_vars
|
vars: *base_vars
|
||||||
irrelevant-files: &irrelevent_files
|
irrelevant-files:
|
||||||
- ^(test-|)requirements.txt$
|
- ^(test-|)requirements.txt$
|
||||||
- ^.*\.rst$
|
- ^.*\.rst$
|
||||||
- ^api-ref/.*$
|
- ^api-ref/.*$
|
||||||
@@ -149,257 +213,10 @@
|
|||||||
- ^tox.ini$
|
- ^tox.ini$
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-sg-core-tempest-base
|
# This job is used in python-watcherclient repo
|
||||||
parent: devstack-tempest
|
name: watcherclient-tempest-functional
|
||||||
nodeset: openstack-two-node-noble
|
parent: watcher-tempest-functional
|
||||||
description: |
|
timeout: 4200
|
||||||
This job is for testing watcher and sg-core/prometheus installation
|
|
||||||
abstract: true
|
|
||||||
pre-run:
|
|
||||||
- playbooks/generate_prometheus_config.yml
|
|
||||||
irrelevant-files: *irrelevent_files
|
|
||||||
timeout: 7800
|
|
||||||
required-projects: &base_sg_required_projects
|
|
||||||
- openstack/aodh
|
|
||||||
- openstack/ceilometer
|
|
||||||
- openstack/tempest
|
|
||||||
- openstack-k8s-operators/sg-core
|
|
||||||
- openstack/watcher
|
|
||||||
- openstack/python-watcherclient
|
|
||||||
- openstack/watcher-tempest-plugin
|
|
||||||
- openstack/devstack-plugin-prometheus
|
|
||||||
vars:
|
vars:
|
||||||
configure_swap_size: 8192
|
|
||||||
devstack_plugins:
|
|
||||||
ceilometer: https://opendev.org/openstack/ceilometer
|
|
||||||
aodh: https://opendev.org/openstack/aodh
|
|
||||||
sg-core: https://github.com/openstack-k8s-operators/sg-core
|
|
||||||
watcher: https://opendev.org/openstack/watcher
|
|
||||||
devstack-plugin-prometheus: https://opendev.org/openstack/devstack-plugin-prometheus
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: true
|
|
||||||
watcher-api: true
|
|
||||||
watcher-decision-engine: true
|
|
||||||
watcher-applier: true
|
|
||||||
tempest: true
|
|
||||||
# We do not need Swift in this job so disable it for speed
|
|
||||||
# Swift services
|
|
||||||
s-account: false
|
|
||||||
s-container: false
|
|
||||||
s-object: false
|
|
||||||
s-proxy: false
|
|
||||||
# Prometheus related service
|
|
||||||
prometheus: true
|
|
||||||
node_exporter: true
|
|
||||||
devstack_localrc:
|
|
||||||
CEILOMETER_BACKENDS: "sg-core"
|
|
||||||
CEILOMETER_PIPELINE_INTERVAL: 15
|
|
||||||
CEILOMETER_ALARM_THRESHOLD: 6000000000
|
|
||||||
PROMETHEUS_CONFIG_FILE: "/home/zuul/prometheus.yml"
|
|
||||||
devstack_local_conf:
|
|
||||||
post-config:
|
|
||||||
$WATCHER_CONF:
|
|
||||||
watcher_datasources:
|
|
||||||
datasources: prometheus
|
|
||||||
prometheus_client:
|
|
||||||
host: 127.0.0.1
|
|
||||||
port: 9090
|
|
||||||
watcher_cluster_data_model_collectors.compute:
|
|
||||||
period: 120
|
|
||||||
watcher_cluster_data_model_collectors.baremetal:
|
|
||||||
period: 120
|
|
||||||
watcher_cluster_data_model_collectors.storage:
|
|
||||||
period: 120
|
|
||||||
compute_model:
|
|
||||||
enable_extended_attributes: true
|
|
||||||
nova_client:
|
|
||||||
api_version: "2.96"
|
|
||||||
test-config:
|
|
||||||
$TEMPEST_CONFIG:
|
|
||||||
compute:
|
|
||||||
min_compute_nodes: 2
|
|
||||||
min_microversion: 2.56
|
|
||||||
compute-feature-enabled:
|
|
||||||
live_migration: true
|
|
||||||
block_migration_for_live_migration: true
|
|
||||||
placement:
|
|
||||||
min_microversion: 1.29
|
|
||||||
service_available:
|
|
||||||
sg_core: True
|
|
||||||
telemetry_services:
|
|
||||||
metric_backends: prometheus
|
|
||||||
telemetry:
|
|
||||||
disable_ssl_certificate_validation: True
|
|
||||||
ceilometer_polling_interval: 15
|
|
||||||
optimize:
|
|
||||||
datasource: prometheus
|
|
||||||
extended_attributes_nova_microversion: "2.96"
|
|
||||||
data_model_collectors_period: 120
|
|
||||||
run_continuous_audit_tests: true
|
|
||||||
tempest_plugins:
|
|
||||||
- watcher-tempest-plugin
|
|
||||||
# All tests inside watcher_tempest_plugin.tests.scenario with tag "strategy"
|
|
||||||
# and test_execute_strategies, test_data_model files
|
|
||||||
# excluding tests with tag "real_load"
|
|
||||||
tempest_test_regex: (watcher_tempest_plugin.tests.scenario)(.*\[.*\bstrategy\b.*\].*)|(watcher_tempest_plugin.tests.scenario.(test_execute_strategies|test_data_model))
|
|
||||||
tempest_exclude_regex: .*\[.*\breal_load\b.*\].*
|
|
||||||
tempest_concurrency: 1
|
tempest_concurrency: 1
|
||||||
tox_envlist: all
|
tempest_test_regex: watcher_tempest_plugin.tests.client_functional
|
||||||
zuul_copy_output:
|
|
||||||
/etc/prometheus/prometheus.yml: logs
|
|
||||||
group-vars:
|
|
||||||
subnode:
|
|
||||||
devstack_plugins:
|
|
||||||
ceilometer: https://opendev.org/openstack/ceilometer
|
|
||||||
devstack-plugin-prometheus: https://opendev.org/openstack/devstack-plugin-prometheus
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: true
|
|
||||||
sg-core: false
|
|
||||||
prometheus: false
|
|
||||||
node_exporter: true
|
|
||||||
devstack_localrc:
|
|
||||||
CEILOMETER_BACKEND: "none"
|
|
||||||
CEILOMETER_BACKENDS: "none"
|
|
||||||
devstack_local_conf:
|
|
||||||
post-config:
|
|
||||||
$WATCHER_CONF:
|
|
||||||
watcher_cluster_data_model_collectors.compute:
|
|
||||||
period: 120
|
|
||||||
watcher_cluster_data_model_collectors.baremetal:
|
|
||||||
period: 120
|
|
||||||
watcher_cluster_data_model_collectors.storage:
|
|
||||||
period: 120
|
|
||||||
|
|
||||||
- job:
|
|
||||||
name: watcher-prometheus-integration
|
|
||||||
parent: watcher-sg-core-tempest-base
|
|
||||||
vars:
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: false
|
|
||||||
node_exporter: false
|
|
||||||
group-vars:
|
|
||||||
subnode:
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: false
|
|
||||||
node_exporter: false
|
|
||||||
|
|
||||||
- job:
|
|
||||||
name: watcher-aetos-integration
|
|
||||||
parent: watcher-sg-core-tempest-base
|
|
||||||
description: |
|
|
||||||
This job tests Watcher with Aetos reverse-proxy for Prometheus
|
|
||||||
using Keystone authentication instead of direct Prometheus access.
|
|
||||||
required-projects:
|
|
||||||
- openstack/python-observabilityclient
|
|
||||||
- openstack/aetos
|
|
||||||
vars: &aetos_vars
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: false
|
|
||||||
node_exporter: false
|
|
||||||
devstack_plugins:
|
|
||||||
ceilometer: https://opendev.org/openstack/ceilometer
|
|
||||||
sg-core: https://github.com/openstack-k8s-operators/sg-core
|
|
||||||
watcher: https://opendev.org/openstack/watcher
|
|
||||||
devstack-plugin-prometheus: https://opendev.org/openstack/devstack-plugin-prometheus
|
|
||||||
aetos: https://opendev.org/openstack/aetos
|
|
||||||
devstack_local_conf:
|
|
||||||
post-config:
|
|
||||||
$WATCHER_CONF:
|
|
||||||
watcher_datasources:
|
|
||||||
datasources: aetos
|
|
||||||
aetos_client:
|
|
||||||
interface: public
|
|
||||||
region_name: RegionOne
|
|
||||||
fqdn_label: fqdn
|
|
||||||
instance_uuid_label: resource
|
|
||||||
test-config:
|
|
||||||
$TEMPEST_CONFIG:
|
|
||||||
optimize:
|
|
||||||
datasource: prometheus
|
|
||||||
group-vars:
|
|
||||||
subnode:
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: false
|
|
||||||
node_exporter: false
|
|
||||||
|
|
||||||
- job:
|
|
||||||
name: watcher-prometheus-integration-realdata
|
|
||||||
parent: watcher-sg-core-tempest-base
|
|
||||||
vars: &realdata_vars
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: true
|
|
||||||
node_exporter: true
|
|
||||||
devstack_localrc:
|
|
||||||
NODE_EXPORTER_COLLECTOR_EXCLUDE: ""
|
|
||||||
devstack_local_conf:
|
|
||||||
test-config:
|
|
||||||
$TEMPEST_CONFIG:
|
|
||||||
optimize:
|
|
||||||
datasource: ""
|
|
||||||
real_workload_period: 300
|
|
||||||
# All tests inside watcher_tempest_plugin.tests.scenario with tag "real_load"
|
|
||||||
tempest_test_regex: (^watcher_tempest_plugin.tests.scenario)(.*\[.*\breal_load\b.*\].*)
|
|
||||||
tempest_exclude_regex: ""
|
|
||||||
group-vars: &realdata_group_vars
|
|
||||||
subnode:
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: true
|
|
||||||
node_exporter: true
|
|
||||||
devstack_localrc:
|
|
||||||
NODE_EXPORTER_COLLECTOR_EXCLUDE: ""
|
|
||||||
|
|
||||||
- job:
|
|
||||||
name: watcher-prometheus-integration-threading
|
|
||||||
parent: watcher-prometheus-integration
|
|
||||||
vars:
|
|
||||||
devstack_localrc:
|
|
||||||
'SYSTEMD_ENV_VARS["watcher-decision-engine"]': OS_WATCHER_DISABLE_EVENTLET_PATCHING=true
|
|
||||||
|
|
||||||
- job:
|
|
||||||
name: openstack-tox-py312-threading
|
|
||||||
parent: openstack-tox-py312
|
|
||||||
description: |
|
|
||||||
Run tox with the py3-threading environment.
|
|
||||||
vars:
|
|
||||||
tox_envlist: py3-threading
|
|
||||||
|
|
||||||
- job:
|
|
||||||
name: watcher-aetos-integration-realdata
|
|
||||||
parent: watcher-aetos-integration
|
|
||||||
vars: *realdata_vars
|
|
||||||
group-vars: *realdata_group_vars
|
|
||||||
|
|
||||||
- project:
|
|
||||||
queue: watcher
|
|
||||||
templates:
|
|
||||||
- check-requirements
|
|
||||||
- openstack-cover-jobs
|
|
||||||
- openstack-python3-jobs
|
|
||||||
- publish-openstack-docs-pti
|
|
||||||
- release-notes-jobs-python3
|
|
||||||
check:
|
|
||||||
jobs:
|
|
||||||
- openstack-tox-py312-threading
|
|
||||||
- watcher-tempest-functional
|
|
||||||
- watcher-grenade
|
|
||||||
- watcher-tempest-strategies
|
|
||||||
- watcher-tempest-actuator
|
|
||||||
- python-watcherclient-functional:
|
|
||||||
files:
|
|
||||||
- ^watcher/api/*
|
|
||||||
- watcher-tempest-functional-ipv6-only
|
|
||||||
- watcher-prometheus-integration
|
|
||||||
- watcher-prometheus-integration-threading
|
|
||||||
- watcher-aetos-integration
|
|
||||||
gate:
|
|
||||||
jobs:
|
|
||||||
- watcher-tempest-functional
|
|
||||||
- watcher-tempest-functional-ipv6-only
|
|
||||||
experimental:
|
|
||||||
jobs:
|
|
||||||
- watcher-prometheus-integration-realdata
|
|
||||||
- watcher-aetos-integration-realdata
|
|
||||||
periodic-weekly:
|
|
||||||
jobs:
|
|
||||||
- watcher-prometheus-integration-realdata
|
|
||||||
- watcher-aetos-integration-realdata
|
|
||||||
|
|||||||
@@ -189,16 +189,6 @@ action_state:
|
|||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: string
|
type: string
|
||||||
action_status_message:
|
|
||||||
description: |
|
|
||||||
Message with additional information about the Action state.
|
|
||||||
This field can be set when transitioning an action to SKIPPED state,
|
|
||||||
or updated for actions that are already in SKIPPED state to provide
|
|
||||||
more detailed explanations, fix typos, or expand on initial reasons.
|
|
||||||
in: body
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
min_version: 1.5
|
|
||||||
action_type:
|
action_type:
|
||||||
description: |
|
description: |
|
||||||
Action type based on specific API action. Actions in Watcher are
|
Action type based on specific API action. Actions in Watcher are
|
||||||
@@ -240,13 +230,6 @@ actionplan_state:
|
|||||||
in: body
|
in: body
|
||||||
required: false
|
required: false
|
||||||
type: string
|
type: string
|
||||||
actionplan_status_message:
|
|
||||||
description: |
|
|
||||||
Message with additional information about the Action Plan state.
|
|
||||||
in: body
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
min_version: 1.5
|
|
||||||
|
|
||||||
# Audit
|
# Audit
|
||||||
audit_autotrigger:
|
audit_autotrigger:
|
||||||
@@ -337,13 +320,6 @@ audit_state:
|
|||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: string
|
type: string
|
||||||
audit_status_message:
|
|
||||||
description: |
|
|
||||||
Message with additional information about the Audit state.
|
|
||||||
in: body
|
|
||||||
required: false
|
|
||||||
type: string
|
|
||||||
min_version: 1.5
|
|
||||||
audit_strategy:
|
audit_strategy:
|
||||||
description: |
|
description: |
|
||||||
The UUID or name of the Strategy.
|
The UUID or name of the Strategy.
|
||||||
@@ -444,24 +420,12 @@ links:
|
|||||||
type: array
|
type: array
|
||||||
|
|
||||||
# Data Model Node
|
# Data Model Node
|
||||||
node_disabled_reason:
|
|
||||||
description: |
|
|
||||||
The Disabled Reason of the node.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
node_disk:
|
node_disk:
|
||||||
description: |
|
description: |
|
||||||
The Disk of the node(in GiB).
|
The Disk of the node(in GiB).
|
||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: integer
|
type: integer
|
||||||
node_disk_gb_reserved:
|
|
||||||
description: |
|
|
||||||
The Disk Reserved of the node (in GiB).
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: integer
|
|
||||||
node_disk_ratio:
|
node_disk_ratio:
|
||||||
description: |
|
description: |
|
||||||
The Disk Ratio of the node.
|
The Disk Ratio of the node.
|
||||||
@@ -480,12 +444,6 @@ node_memory:
|
|||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: integer
|
type: integer
|
||||||
node_memory_mb_reserved:
|
|
||||||
description: |
|
|
||||||
The Memory Reserved of the node(in MiB).
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: integer
|
|
||||||
node_memory_ratio:
|
node_memory_ratio:
|
||||||
description: |
|
description: |
|
||||||
The Memory Ratio of the node.
|
The Memory Ratio of the node.
|
||||||
@@ -498,12 +456,6 @@ node_state:
|
|||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: string
|
type: string
|
||||||
node_status:
|
|
||||||
description: |
|
|
||||||
The Status of the node.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
node_uuid:
|
node_uuid:
|
||||||
description: |
|
description: |
|
||||||
The Unique UUID of the node.
|
The Unique UUID of the node.
|
||||||
@@ -516,18 +468,13 @@ node_vcpu_ratio:
|
|||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: float
|
type: float
|
||||||
node_vcpu_reserved:
|
|
||||||
description: |
|
|
||||||
The Vcpu Reserved of the node.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: integer
|
|
||||||
node_vcpus:
|
node_vcpus:
|
||||||
description: |
|
description: |
|
||||||
The Vcpu of the node.
|
The Vcpu of the node.
|
||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: integer
|
type: integer
|
||||||
|
|
||||||
# Scoring Engine
|
# Scoring Engine
|
||||||
scoring_engine_description:
|
scoring_engine_description:
|
||||||
description: |
|
description: |
|
||||||
@@ -555,50 +502,18 @@ server_disk:
|
|||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: integer
|
type: integer
|
||||||
server_flavor_extra_specs:
|
|
||||||
description: |
|
|
||||||
The flavor extra specs of the server.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: JSON
|
|
||||||
min_version: 1.6
|
|
||||||
server_locked:
|
|
||||||
description: |
|
|
||||||
Whether the server is locked.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
server_memory:
|
server_memory:
|
||||||
description: |
|
description: |
|
||||||
The Memory of server.
|
The Memory of server.
|
||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: integer
|
type: integer
|
||||||
server_metadata:
|
|
||||||
description: |
|
|
||||||
The metadata associated with the server.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: JSON
|
|
||||||
server_name:
|
server_name:
|
||||||
description: |
|
description: |
|
||||||
The Name of the server.
|
The Name of the server.
|
||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: string
|
type: string
|
||||||
server_pinned_az:
|
|
||||||
description: |
|
|
||||||
The pinned availability zone of the server.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
min_version: 1.6
|
|
||||||
server_project_id:
|
|
||||||
description: |
|
|
||||||
The project ID of the server.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
server_state:
|
server_state:
|
||||||
description: |
|
description: |
|
||||||
The State of the server.
|
The State of the server.
|
||||||
@@ -617,12 +532,6 @@ server_vcpus:
|
|||||||
in: body
|
in: body
|
||||||
required: true
|
required: true
|
||||||
type: integer
|
type: integer
|
||||||
server_watcher_exclude:
|
|
||||||
description: |
|
|
||||||
Whether the server is excluded from the scope.
|
|
||||||
in: body
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
# Service
|
# Service
|
||||||
service_host:
|
service_host:
|
||||||
description: |
|
description: |
|
||||||
|
|||||||
@@ -1,12 +0,0 @@
|
|||||||
[
|
|
||||||
{
|
|
||||||
"op": "replace",
|
|
||||||
"value": "SKIPPED",
|
|
||||||
"path": "/state"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"op": "replace",
|
|
||||||
"value": "Skipping due to maintenance window",
|
|
||||||
"path": "/status_message"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
[
|
|
||||||
{
|
|
||||||
"op": "replace",
|
|
||||||
"value": "SKIPPED",
|
|
||||||
"path": "/state"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
{
|
|
||||||
"state": "SKIPPED",
|
|
||||||
"description": "Migrate instance to another compute node",
|
|
||||||
"parents": [
|
|
||||||
"b4529294-1de6-4302-b57a-9b5d5dc363c6"
|
|
||||||
],
|
|
||||||
"links": [
|
|
||||||
{
|
|
||||||
"rel": "self",
|
|
||||||
"href": "http://controller:9322/v1/actions/54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"rel": "bookmark",
|
|
||||||
"href": "http://controller:9322/actions/54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"action_plan_uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf",
|
|
||||||
"uuid": "54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a",
|
|
||||||
"deleted_at": null,
|
|
||||||
"updated_at": "2018-04-10T12:15:44.026973+00:00",
|
|
||||||
"input_parameters": {
|
|
||||||
"migration_type": "live",
|
|
||||||
"destination_node": "compute-2",
|
|
||||||
"resource_id": "a1b2c3d4-e5f6-7890-1234-567890abcdef"
|
|
||||||
},
|
|
||||||
"action_type": "migrate",
|
|
||||||
"created_at": "2018-04-10T11:59:12.725147+00:00",
|
|
||||||
"status_message": "Action skipped by user. Reason:Skipping due to maintenance window"
|
|
||||||
}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
[
|
|
||||||
{
|
|
||||||
"op": "replace",
|
|
||||||
"value": "Action skipped due to scheduled maintenance window",
|
|
||||||
"path": "/status_message"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
{
|
|
||||||
"state": "SKIPPED",
|
|
||||||
"description": "Migrate instance to another compute node",
|
|
||||||
"parents": [
|
|
||||||
"b4529294-1de6-4302-b57a-9b5d5dc363c6"
|
|
||||||
],
|
|
||||||
"links": [
|
|
||||||
{
|
|
||||||
"rel": "self",
|
|
||||||
"href": "http://controller:9322/v1/actions/54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"rel": "bookmark",
|
|
||||||
"href": "http://controller:9322/actions/54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"action_plan_uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf",
|
|
||||||
"uuid": "54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a",
|
|
||||||
"deleted_at": null,
|
|
||||||
"updated_at": "2018-04-10T12:20:15.123456+00:00",
|
|
||||||
"input_parameters": {
|
|
||||||
"migration_type": "live",
|
|
||||||
"destination_node": "compute-2",
|
|
||||||
"resource_id": "a1b2c3d4-e5f6-7890-1234-567890abcdef"
|
|
||||||
},
|
|
||||||
"action_type": "migrate",
|
|
||||||
"created_at": "2018-04-10T11:59:12.725147+00:00",
|
|
||||||
"status_message": "Action skipped by user. Reason: Action skipped due to scheduled maintenance window"
|
|
||||||
}
|
|
||||||
@@ -21,8 +21,7 @@
|
|||||||
"uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf",
|
"uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf",
|
||||||
"audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a",
|
"audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a",
|
||||||
"created_at": "2018-04-10T11:59:52.640067+00:00",
|
"created_at": "2018-04-10T11:59:52.640067+00:00",
|
||||||
"hostname": "controller",
|
"hostname": "controller"
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,6 +17,5 @@
|
|||||||
"strategy_name": "dummy_with_resize",
|
"strategy_name": "dummy_with_resize",
|
||||||
"uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf",
|
"uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf",
|
||||||
"audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a",
|
"audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a",
|
||||||
"hostname": "controller",
|
"hostname": "controller"
|
||||||
"status_message": null
|
}
|
||||||
}
|
|
||||||
@@ -24,8 +24,7 @@
|
|||||||
"duration": 3.2
|
"duration": 3.2
|
||||||
},
|
},
|
||||||
"action_type": "sleep",
|
"action_type": "sleep",
|
||||||
"created_at": "2018-03-26T11:56:08.235226+00:00",
|
"created_at": "2018-03-26T11:56:08.235226+00:00"
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -22,6 +22,5 @@
|
|||||||
"message": "Welcome"
|
"message": "Welcome"
|
||||||
},
|
},
|
||||||
"action_type": "nop",
|
"action_type": "nop",
|
||||||
"created_at": "2018-04-10T11:59:12.725147+00:00",
|
"created_at": "2018-04-10T11:59:12.725147+00:00"
|
||||||
"status_message": null
|
}
|
||||||
}
|
|
||||||
@@ -51,6 +51,5 @@
|
|||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"hostname": null,
|
"hostname": null,
|
||||||
"start_time": null,
|
"start_time": null,
|
||||||
"end_time": null,
|
"end_time": null
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -30,7 +30,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"auto_trigger": false,
|
"auto_trigger": false,
|
||||||
"force": false,
|
"force": false,
|
||||||
"uuid": "65a5da84-5819-4aea-8278-a28d2b489028",
|
"uuid": "65a5da84-5819-4aea-8278-a28d2b489028",
|
||||||
"goal_name": "workload_balancing",
|
"goal_name": "workload_balancing",
|
||||||
"scope": [],
|
"scope": [],
|
||||||
@@ -53,8 +53,7 @@
|
|||||||
"updated_at": "2018-04-06T09:44:01.604146+00:00",
|
"updated_at": "2018-04-06T09:44:01.604146+00:00",
|
||||||
"hostname": "controller",
|
"hostname": "controller",
|
||||||
"start_time": null,
|
"start_time": null,
|
||||||
"end_time": null,
|
"end_time": null
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -51,6 +51,5 @@
|
|||||||
"updated_at": "2018-04-06T11:54:01.266447+00:00",
|
"updated_at": "2018-04-06T11:54:01.266447+00:00",
|
||||||
"hostname": "controller",
|
"hostname": "controller",
|
||||||
"start_time": null,
|
"start_time": null,
|
||||||
"end_time": null,
|
"end_time": null
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,62 +1,38 @@
|
|||||||
{
|
{
|
||||||
"context": [
|
"context": [
|
||||||
{
|
{
|
||||||
"server_watcher_exclude": false,
|
"server_uuid": "1bf91464-9b41-428d-a11e-af691e5563bb",
|
||||||
"server_name": "chenke-test1",
|
"server_name": "chenke-test1",
|
||||||
"server_state": "active",
|
"server_vcpus": "1",
|
||||||
"server_memory": "512",
|
"server_memory": "512",
|
||||||
"server_disk": "1",
|
"server_disk": "1",
|
||||||
"server_vcpus": "1",
|
"server_state": "active",
|
||||||
"server_metadata": {},
|
"node_uuid": "253e5dd0-9384-41ab-af13-4f2c2ce26112",
|
||||||
"server_project_id": "baea342fc74b4a1785b4a40c69a8d958",
|
|
||||||
"server_locked":false,
|
|
||||||
"server_uuid": "1bf91464-9b41-428d-a11e-af691e5563bb",
|
|
||||||
"server_pinned_az": "nova",
|
|
||||||
"server_flavor_extra_specs": {
|
|
||||||
"hw_rng:allowed": true
|
|
||||||
},
|
|
||||||
"node_hostname": "localhost.localdomain",
|
"node_hostname": "localhost.localdomain",
|
||||||
"node_status": "enabled",
|
|
||||||
"node_disabled_reason": null,
|
|
||||||
"node_state": "up",
|
|
||||||
"node_memory": "16383",
|
|
||||||
"node_memory_mb_reserved": "512",
|
|
||||||
"node_disk": "37",
|
|
||||||
"node_disk_gb_reserved": "0",
|
|
||||||
"node_vcpus": "4",
|
"node_vcpus": "4",
|
||||||
"node_vcpu_reserved": "0",
|
|
||||||
"node_memory_ratio": "1.5",
|
|
||||||
"node_vcpu_ratio": "16.0",
|
"node_vcpu_ratio": "16.0",
|
||||||
|
"node_memory": "16383",
|
||||||
|
"node_memory_ratio": "1.5",
|
||||||
|
"node_disk": "37"
|
||||||
"node_disk_ratio": "1.0",
|
"node_disk_ratio": "1.0",
|
||||||
"node_uuid": "253e5dd0-9384-41ab-af13-4f2c2ce26112"
|
"node_state": "up",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"server_watcher_exclude": false,
|
"server_uuid": "e2cb5f6f-fa1d-4ba2-be1e-0bf02fa86ba4",
|
||||||
"server_name": "chenke-test2",
|
"server_name": "chenke-test2",
|
||||||
"server_state": "active",
|
"server_vcpus": "1",
|
||||||
"server_memory": "512",
|
"server_memory": "512",
|
||||||
"server_disk": "1",
|
"server_disk": "1",
|
||||||
"server_vcpus": "1",
|
"server_state": "active",
|
||||||
"server_metadata": {},
|
"node_uuid": "253e5dd0-9384-41ab-af13-4f2c2ce26112",
|
||||||
"server_project_id": "baea342fc74b4a1785b4a40c69a8d958",
|
|
||||||
"server_locked": false,
|
|
||||||
"server_uuid": "e2cb5f6f-fa1d-4ba2-be1e-0bf02fa86ba4",
|
|
||||||
"server_pinned_az": "nova",
|
|
||||||
"server_flavor_extra_specs": {},
|
|
||||||
"node_hostname": "localhost.localdomain",
|
"node_hostname": "localhost.localdomain",
|
||||||
"node_status": "enabled",
|
|
||||||
"node_disabled_reason": null,
|
|
||||||
"node_state": "up",
|
|
||||||
"node_memory": "16383",
|
|
||||||
"node_memory_mb_reserved": "512",
|
|
||||||
"node_disk": "37",
|
|
||||||
"node_disk_gb_reserved": "0",
|
|
||||||
"node_vcpus": "4",
|
"node_vcpus": "4",
|
||||||
"node_vcpu_reserved": "0",
|
|
||||||
"node_memory_ratio": "1.5",
|
|
||||||
"node_vcpu_ratio": "16.0",
|
"node_vcpu_ratio": "16.0",
|
||||||
|
"node_memory": "16383",
|
||||||
|
"node_memory_ratio": "1.5",
|
||||||
|
"node_disk": "37"
|
||||||
"node_disk_ratio": "1.0",
|
"node_disk_ratio": "1.0",
|
||||||
"node_uuid": "253e5dd0-9384-41ab-af13-4f2c2ce26112"
|
"node_state": "up",
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -139,7 +139,6 @@ Response
|
|||||||
- global_efficacy: actionplan_global_efficacy
|
- global_efficacy: actionplan_global_efficacy
|
||||||
- links: links
|
- links: links
|
||||||
- hostname: actionplan_hostname
|
- hostname: actionplan_hostname
|
||||||
- status_message: actionplan_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Action Plan:**
|
**Example JSON representation of an Action Plan:**
|
||||||
|
|
||||||
@@ -178,7 +177,6 @@ Response
|
|||||||
- global_efficacy: actionplan_global_efficacy
|
- global_efficacy: actionplan_global_efficacy
|
||||||
- links: links
|
- links: links
|
||||||
- hostname: actionplan_hostname
|
- hostname: actionplan_hostname
|
||||||
- status_message: actionplan_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Audit:**
|
**Example JSON representation of an Audit:**
|
||||||
|
|
||||||
@@ -235,7 +233,6 @@ version 1:
|
|||||||
- global_efficacy: actionplan_global_efficacy
|
- global_efficacy: actionplan_global_efficacy
|
||||||
- links: links
|
- links: links
|
||||||
- hostname: actionplan_hostname
|
- hostname: actionplan_hostname
|
||||||
- status_message: actionplan_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Action Plan:**
|
**Example JSON representation of an Action Plan:**
|
||||||
|
|
||||||
|
|||||||
@@ -23,9 +23,6 @@ following:
|
|||||||
|
|
||||||
- **PENDING** : the ``Action`` has not been executed yet by the
|
- **PENDING** : the ``Action`` has not been executed yet by the
|
||||||
``Watcher Applier``.
|
``Watcher Applier``.
|
||||||
- **SKIPPED** : the ``Action`` will not be executed because a predefined
|
|
||||||
skipping condition is found by ``Watcher Applier`` or is explicitly
|
|
||||||
skipped by the ``Administrator``.
|
|
||||||
- **ONGOING** : the ``Action`` is currently being processed by the
|
- **ONGOING** : the ``Action`` is currently being processed by the
|
||||||
``Watcher Applier``.
|
``Watcher Applier``.
|
||||||
- **SUCCEEDED** : the ``Action`` has been executed successfully
|
- **SUCCEEDED** : the ``Action`` has been executed successfully
|
||||||
@@ -114,7 +111,6 @@ Response
|
|||||||
- description: action_description
|
- description: action_description
|
||||||
- input_parameters: action_input_parameters
|
- input_parameters: action_input_parameters
|
||||||
- links: links
|
- links: links
|
||||||
- status_message: action_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Action:**
|
**Example JSON representation of an Action:**
|
||||||
|
|
||||||
@@ -152,111 +148,8 @@ Response
|
|||||||
- description: action_description
|
- description: action_description
|
||||||
- input_parameters: action_input_parameters
|
- input_parameters: action_input_parameters
|
||||||
- links: links
|
- links: links
|
||||||
- status_message: action_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Action:**
|
**Example JSON representation of an Action:**
|
||||||
|
|
||||||
.. literalinclude:: samples/actions-show-response.json
|
.. literalinclude:: samples/actions-show-response.json
|
||||||
:language: javascript
|
|
||||||
|
|
||||||
Skip Action
|
|
||||||
===========
|
|
||||||
|
|
||||||
.. rest_method:: PATCH /v1/actions/{action_ident}
|
|
||||||
|
|
||||||
Skips an Action resource by changing its state to SKIPPED.
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
Only Actions in PENDING state can be skipped. The Action must belong to
|
|
||||||
an Action Plan in RECOMMENDED or PENDING state. This operation requires
|
|
||||||
API microversion 1.5 or later.
|
|
||||||
|
|
||||||
Normal response codes: 200
|
|
||||||
|
|
||||||
Error codes: 400,404,403,409
|
|
||||||
|
|
||||||
Request
|
|
||||||
-------
|
|
||||||
|
|
||||||
.. rest_parameters:: parameters.yaml
|
|
||||||
|
|
||||||
- action_ident: action_ident
|
|
||||||
|
|
||||||
**Example Action skip request:**
|
|
||||||
|
|
||||||
.. literalinclude:: samples/action-skip-request.json
|
|
||||||
:language: javascript
|
|
||||||
|
|
||||||
**Example Action skip request with custom status message:**
|
|
||||||
|
|
||||||
.. literalinclude:: samples/action-skip-request-with-message.json
|
|
||||||
:language: javascript
|
|
||||||
|
|
||||||
Response
|
|
||||||
--------
|
|
||||||
|
|
||||||
.. rest_parameters:: parameters.yaml
|
|
||||||
|
|
||||||
- uuid: uuid
|
|
||||||
- action_type: action_type
|
|
||||||
- state: action_state
|
|
||||||
- action_plan_uuid: action_action_plan_uuid
|
|
||||||
- parents: action_parents
|
|
||||||
- description: action_description
|
|
||||||
- input_parameters: action_input_parameters
|
|
||||||
- links: links
|
|
||||||
- status_message: action_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of a skipped Action:**
|
|
||||||
|
|
||||||
.. literalinclude:: samples/action-skip-response.json
|
|
||||||
:language: javascript
|
|
||||||
|
|
||||||
Update Action Status Message
|
|
||||||
============================
|
|
||||||
|
|
||||||
.. rest_method:: PATCH /v1/actions/{action_ident}
|
|
||||||
|
|
||||||
Updates the status_message of an Action that is already in SKIPPED state.
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
The status_message field can only be updated for Actions that are currently
|
|
||||||
in SKIPPED state. This allows administrators to fix typos, provide more
|
|
||||||
detailed explanations, or expand on reasons that were initially omitted.
|
|
||||||
This operation requires API microversion 1.5 or later.
|
|
||||||
|
|
||||||
Normal response codes: 200
|
|
||||||
|
|
||||||
Error codes: 400,404,403,409
|
|
||||||
|
|
||||||
Request
|
|
||||||
-------
|
|
||||||
|
|
||||||
.. rest_parameters:: parameters.yaml
|
|
||||||
|
|
||||||
- action_ident: action_ident
|
|
||||||
|
|
||||||
**Example status_message update request for a SKIPPED action:**
|
|
||||||
|
|
||||||
.. literalinclude:: samples/action-update-status-message-request.json
|
|
||||||
:language: javascript
|
|
||||||
|
|
||||||
Response
|
|
||||||
--------
|
|
||||||
|
|
||||||
.. rest_parameters:: parameters.yaml
|
|
||||||
|
|
||||||
- uuid: uuid
|
|
||||||
- action_type: action_type
|
|
||||||
- state: action_state
|
|
||||||
- action_plan_uuid: action_action_plan_uuid
|
|
||||||
- parents: action_parents
|
|
||||||
- description: action_description
|
|
||||||
- input_parameters: action_input_parameters
|
|
||||||
- links: links
|
|
||||||
- status_message: action_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Action with updated status_message:**
|
|
||||||
|
|
||||||
.. literalinclude:: samples/action-update-status-message-response.json
|
|
||||||
:language: javascript
|
:language: javascript
|
||||||
@@ -85,7 +85,6 @@ version 1:
|
|||||||
- start_time: audit_starttime_resp
|
- start_time: audit_starttime_resp
|
||||||
- end_time: audit_endtime_resp
|
- end_time: audit_endtime_resp
|
||||||
- force: audit_force
|
- force: audit_force
|
||||||
- status_message: audit_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Audit:**
|
**Example JSON representation of an Audit:**
|
||||||
|
|
||||||
@@ -185,7 +184,6 @@ Response
|
|||||||
- start_time: audit_starttime_resp
|
- start_time: audit_starttime_resp
|
||||||
- end_time: audit_endtime_resp
|
- end_time: audit_endtime_resp
|
||||||
- force: audit_force
|
- force: audit_force
|
||||||
- status_message: audit_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Audit:**
|
**Example JSON representation of an Audit:**
|
||||||
|
|
||||||
@@ -233,7 +231,6 @@ Response
|
|||||||
- start_time: audit_starttime_resp
|
- start_time: audit_starttime_resp
|
||||||
- end_time: audit_endtime_resp
|
- end_time: audit_endtime_resp
|
||||||
- force: audit_force
|
- force: audit_force
|
||||||
- status_message: audit_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Audit:**
|
**Example JSON representation of an Audit:**
|
||||||
|
|
||||||
@@ -289,7 +286,6 @@ version 1:
|
|||||||
- start_time: audit_starttime_resp
|
- start_time: audit_starttime_resp
|
||||||
- end_time: audit_endtime_resp
|
- end_time: audit_endtime_resp
|
||||||
- force: audit_force
|
- force: audit_force
|
||||||
- status_message: audit_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Audit:**
|
**Example JSON representation of an Audit:**
|
||||||
|
|
||||||
@@ -345,7 +341,6 @@ Response
|
|||||||
- start_time: audit_starttime_resp
|
- start_time: audit_starttime_resp
|
||||||
- end_time: audit_endtime_resp
|
- end_time: audit_endtime_resp
|
||||||
- force: audit_force
|
- force: audit_force
|
||||||
- status_message: audit_status_message
|
|
||||||
|
|
||||||
**Example JSON representation of an Audit:**
|
**Example JSON representation of an Audit:**
|
||||||
|
|
||||||
|
|||||||
@@ -35,32 +35,21 @@ Response
|
|||||||
|
|
||||||
.. rest_parameters:: parameters.yaml
|
.. rest_parameters:: parameters.yaml
|
||||||
|
|
||||||
- server_watcher_exclude: server_watcher_exclude
|
- server_uuid: server_uuid
|
||||||
- server_name: server_name
|
- server_name: server_name
|
||||||
- server_state: server_state
|
- server_vcpus: server_vcpus
|
||||||
- server_memory: server_memory
|
- server_memory: server_memory
|
||||||
- server_disk: server_disk
|
- server_disk: server_disk
|
||||||
- server_vcpus: server_vcpus
|
- server_state: server_state
|
||||||
- server_metadata: server_metadata
|
|
||||||
- server_project_id: server_project_id
|
|
||||||
- server_locked: server_locked
|
|
||||||
- server_uuid: server_uuid
|
|
||||||
- server_pinned_az: server_pinned_az
|
|
||||||
- server_flavor_extra_specs: server_flavor_extra_specs
|
|
||||||
- node_hostname: node_hostname
|
|
||||||
- node_status: node_status
|
|
||||||
- node_disabled_reason: node_disabled_reason
|
|
||||||
- node_state: node_state
|
|
||||||
- node_memory: node_memory
|
|
||||||
- node_memory_mb_reserved: node_memory_mb_reserved
|
|
||||||
- node_disk: node_disk
|
|
||||||
- node_disk_gb_reserved: node_disk_gb_reserved
|
|
||||||
- node_vcpus: node_vcpus
|
|
||||||
- node_vcpu_reserved: node_vcpu_reserved
|
|
||||||
- node_memory_ratio: node_memory_ratio
|
|
||||||
- node_vcpu_ratio: node_vcpu_ratio
|
|
||||||
- node_disk_ratio: node_disk_ratio
|
|
||||||
- node_uuid: node_uuid
|
- node_uuid: node_uuid
|
||||||
|
- node_hostname: node_hostname
|
||||||
|
- node_vcpus: node_vcpus
|
||||||
|
- node_vcpu_ratio: node_vcpu_ratio
|
||||||
|
- node_memory: node_memory
|
||||||
|
- node_memory_ratio: node_memory_ratio
|
||||||
|
- node_disk: node_disk
|
||||||
|
- node_disk_ratio: node_disk_ratio
|
||||||
|
- node_state: node_state
|
||||||
|
|
||||||
**Example JSON representation of a Data Model:**
|
**Example JSON representation of a Data Model:**
|
||||||
|
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ Here are some examples of ``Goals``:
|
|||||||
- minimize the energy consumption
|
- minimize the energy consumption
|
||||||
- minimize the number of compute nodes (consolidation)
|
- minimize the number of compute nodes (consolidation)
|
||||||
- balance the workload among compute nodes
|
- balance the workload among compute nodes
|
||||||
- minimize the license cost (some software have a licensing model which is
|
- minimize the license cost (some softwares have a licensing model which is
|
||||||
based on the number of sockets or cores where the software is deployed)
|
based on the number of sockets or cores where the software is deployed)
|
||||||
- find the most appropriate moment for a planned maintenance on a
|
- find the most appropriate moment for a planned maintenance on a
|
||||||
given group of host (which may be an entire availability zone):
|
given group of host (which may be an entire availability zone):
|
||||||
@@ -123,4 +123,4 @@ Response
|
|||||||
**Example JSON representation of a Goal:**
|
**Example JSON representation of a Goal:**
|
||||||
|
|
||||||
.. literalinclude:: samples/goal-show-response.json
|
.. literalinclude:: samples/goal-show-response.json
|
||||||
:language: javascript
|
:language: javascript
|
||||||
23
bindep.txt
23
bindep.txt
@@ -1,23 +0,0 @@
|
|||||||
# This is a cross-platform list tracking distribution packages needed for install and tests;
|
|
||||||
# see https://docs.openstack.org/infra/bindep/ for additional information.
|
|
||||||
|
|
||||||
mysql [platform:rpm !platform:redhat test]
|
|
||||||
mysql-client [platform:dpkg !platform:debian test]
|
|
||||||
mysql-devel [platform:rpm !platform:redhat test]
|
|
||||||
mysql-server [!platform:redhat !platform:debian test]
|
|
||||||
mariadb-devel [platform:rpm platform:redhat test]
|
|
||||||
mariadb-server [platform:rpm platform:redhat platform:debian test]
|
|
||||||
python3-all [platform:dpkg test]
|
|
||||||
python3-all-dev [platform:dpkg test]
|
|
||||||
python3 [platform:rpm test]
|
|
||||||
python3-devel [platform:rpm test]
|
|
||||||
sqlite-devel [platform:rpm test]
|
|
||||||
# gettext and graphviz are needed by doc builds only.
|
|
||||||
gettext [doc]
|
|
||||||
graphviz [doc]
|
|
||||||
# fonts-freefont-otf is needed for pdf docs builds with the 'xelatex' engine
|
|
||||||
fonts-freefont-otf [pdf-docs]
|
|
||||||
texlive [pdf-docs]
|
|
||||||
texlive-latex-recommended [pdf-docs]
|
|
||||||
texlive-xetex [pdf-docs]
|
|
||||||
latexmk [pdf-docs]
|
|
||||||
42
devstack/files/apache-watcher-api.template
Normal file
42
devstack/files/apache-watcher-api.template
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
# This is an example Apache2 configuration file for using the
|
||||||
|
# Watcher API through mod_wsgi. This version assumes you are
|
||||||
|
# running devstack to configure the software.
|
||||||
|
|
||||||
|
Listen %WATCHER_SERVICE_PORT%
|
||||||
|
|
||||||
|
<VirtualHost *:%WATCHER_SERVICE_PORT%>
|
||||||
|
WSGIDaemonProcess watcher-api user=%USER% processes=%APIWORKERS% threads=1 display-name=%{GROUP}
|
||||||
|
WSGIScriptAlias / %WATCHER_WSGI_DIR%/app.wsgi
|
||||||
|
WSGIApplicationGroup %{GLOBAL}
|
||||||
|
WSGIProcessGroup watcher-api
|
||||||
|
WSGIPassAuthorization On
|
||||||
|
|
||||||
|
ErrorLogFormat "%M"
|
||||||
|
ErrorLog /var/log/%APACHE_NAME%/watcher-api.log
|
||||||
|
CustomLog /var/log/%APACHE_NAME%/watcher-api-access.log combined
|
||||||
|
|
||||||
|
|
||||||
|
<Directory %WATCHER_WSGI_DIR%>
|
||||||
|
WSGIProcessGroup watcher-api
|
||||||
|
WSGIApplicationGroup %{GLOBAL}
|
||||||
|
<IfVersion >= 2.4>
|
||||||
|
Require all granted
|
||||||
|
</IfVersion>
|
||||||
|
<IfVersion < 2.4>
|
||||||
|
Order allow,deny
|
||||||
|
Allow from all
|
||||||
|
</IfVersion>
|
||||||
|
</Directory>
|
||||||
|
</VirtualHost>
|
||||||
@@ -1,3 +1,5 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
# lib/watcher
|
# lib/watcher
|
||||||
# Functions to control the configuration and operation of the watcher services
|
# Functions to control the configuration and operation of the watcher services
|
||||||
|
|
||||||
@@ -36,6 +38,7 @@ GITBRANCH["python-watcherclient"]=${WATCHERCLIENT_BRANCH:-master}
|
|||||||
GITDIR["python-watcherclient"]=$DEST/python-watcherclient
|
GITDIR["python-watcherclient"]=$DEST/python-watcherclient
|
||||||
|
|
||||||
WATCHER_STATE_PATH=${WATCHER_STATE_PATH:=$DATA_DIR/watcher}
|
WATCHER_STATE_PATH=${WATCHER_STATE_PATH:=$DATA_DIR/watcher}
|
||||||
|
WATCHER_AUTH_CACHE_DIR=${WATCHER_AUTH_CACHE_DIR:-/var/cache/watcher}
|
||||||
|
|
||||||
WATCHER_CONF_DIR=/etc/watcher
|
WATCHER_CONF_DIR=/etc/watcher
|
||||||
WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf
|
WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf
|
||||||
@@ -55,16 +58,29 @@ else
|
|||||||
WATCHER_BIN_DIR=$(get_python_exec_prefix)
|
WATCHER_BIN_DIR=$(get_python_exec_prefix)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
WATCHER_UWSGI=watcher.wsgi.api:application
|
# There are 2 modes, which is "uwsgi" which runs with an apache
|
||||||
|
# proxy uwsgi in front of it, or "mod_wsgi", which runs in
|
||||||
|
# apache. mod_wsgi is deprecated, don't use it.
|
||||||
|
WATCHER_USE_WSGI_MODE=${WATCHER_USE_WSGI_MODE:-$WSGI_MODE}
|
||||||
|
WATCHER_UWSGI=$WATCHER_BIN_DIR/watcher-api-wsgi
|
||||||
WATCHER_UWSGI_CONF=$WATCHER_CONF_DIR/watcher-uwsgi.ini
|
WATCHER_UWSGI_CONF=$WATCHER_CONF_DIR/watcher-uwsgi.ini
|
||||||
WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/var/www/watcher}
|
|
||||||
|
if is_suse; then
|
||||||
|
WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/srv/www/htdocs/watcher}
|
||||||
|
else
|
||||||
|
WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/var/www/watcher}
|
||||||
|
fi
|
||||||
# Public facing bits
|
# Public facing bits
|
||||||
WATCHER_SERVICE_HOST=${WATCHER_SERVICE_HOST:-$SERVICE_HOST}
|
WATCHER_SERVICE_HOST=${WATCHER_SERVICE_HOST:-$SERVICE_HOST}
|
||||||
WATCHER_SERVICE_PORT=${WATCHER_SERVICE_PORT:-9322}
|
WATCHER_SERVICE_PORT=${WATCHER_SERVICE_PORT:-9322}
|
||||||
WATCHER_SERVICE_PORT_INT=${WATCHER_SERVICE_PORT_INT:-19322}
|
WATCHER_SERVICE_PORT_INT=${WATCHER_SERVICE_PORT_INT:-19322}
|
||||||
WATCHER_SERVICE_PROTOCOL=${WATCHER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
|
WATCHER_SERVICE_PROTOCOL=${WATCHER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
|
||||||
|
|
||||||
WATCHER_API_URL="$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST/infra-optim"
|
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
|
||||||
|
WATCHER_API_URL="$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST/infra-optim"
|
||||||
|
else
|
||||||
|
WATCHER_API_URL="$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT"
|
||||||
|
fi
|
||||||
|
|
||||||
# Entry Points
|
# Entry Points
|
||||||
# ------------
|
# ------------
|
||||||
@@ -87,8 +103,12 @@ function _cleanup_watcher_apache_wsgi {
|
|||||||
# cleanup_watcher() - Remove residual data files, anything left over from previous
|
# cleanup_watcher() - Remove residual data files, anything left over from previous
|
||||||
# runs that a clean run would need to clean up
|
# runs that a clean run would need to clean up
|
||||||
function cleanup_watcher {
|
function cleanup_watcher {
|
||||||
sudo rm -rf $WATCHER_STATE_PATH
|
sudo rm -rf $WATCHER_STATE_PATH $WATCHER_AUTH_CACHE_DIR
|
||||||
remove_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI"
|
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
|
||||||
|
remove_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI"
|
||||||
|
else
|
||||||
|
_cleanup_watcher_apache_wsgi
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# configure_watcher() - Set config files, create data dirs, etc
|
# configure_watcher() - Set config files, create data dirs, etc
|
||||||
@@ -137,6 +157,31 @@ function create_watcher_accounts {
|
|||||||
"$WATCHER_API_URL"
|
"$WATCHER_API_URL"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# _config_watcher_apache_wsgi() - Set WSGI config files of watcher
|
||||||
|
function _config_watcher_apache_wsgi {
|
||||||
|
local watcher_apache_conf
|
||||||
|
if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then
|
||||||
|
local service_port=$WATCHER_SERVICE_PORT
|
||||||
|
if is_service_enabled tls-proxy; then
|
||||||
|
service_port=$WATCHER_SERVICE_PORT_INT
|
||||||
|
service_protocol="http"
|
||||||
|
fi
|
||||||
|
sudo mkdir -p $WATCHER_WSGI_DIR
|
||||||
|
sudo cp $WATCHER_DIR/watcher/api/app.wsgi $WATCHER_WSGI_DIR/app.wsgi
|
||||||
|
watcher_apache_conf=$(apache_site_config_for watcher-api)
|
||||||
|
sudo cp $WATCHER_DEVSTACK_FILES_DIR/apache-watcher-api.template $watcher_apache_conf
|
||||||
|
sudo sed -e "
|
||||||
|
s|%WATCHER_SERVICE_PORT%|$service_port|g;
|
||||||
|
s|%WATCHER_WSGI_DIR%|$WATCHER_WSGI_DIR|g;
|
||||||
|
s|%USER%|$STACK_USER|g;
|
||||||
|
s|%APIWORKERS%|$API_WORKERS|g;
|
||||||
|
s|%APACHE_NAME%|$APACHE_NAME|g;
|
||||||
|
" -i $watcher_apache_conf
|
||||||
|
enable_apache_site watcher-api
|
||||||
|
fi
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
# create_watcher_conf() - Create a new watcher.conf file
|
# create_watcher_conf() - Create a new watcher.conf file
|
||||||
function create_watcher_conf {
|
function create_watcher_conf {
|
||||||
# (Re)create ``watcher.conf``
|
# (Re)create ``watcher.conf``
|
||||||
@@ -154,16 +199,21 @@ function create_watcher_conf {
|
|||||||
iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)"
|
iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)"
|
||||||
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT_INT"
|
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT_INT"
|
||||||
# iniset $WATCHER_CONF api enable_ssl_api "True"
|
# iniset $WATCHER_CONF api enable_ssl_api "True"
|
||||||
|
else
|
||||||
|
if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then
|
||||||
|
iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)"
|
||||||
|
iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_YAML
|
iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_YAML
|
||||||
|
|
||||||
iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2"
|
iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2"
|
||||||
|
|
||||||
configure_keystone_authtoken_middleware $WATCHER_CONF watcher
|
configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR
|
||||||
configure_keystone_authtoken_middleware $WATCHER_CONF watcher "watcher_clients_auth"
|
configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR "watcher_clients_auth"
|
||||||
|
|
||||||
if is_fedora; then
|
if is_fedora || is_suse; then
|
||||||
# watcher defaults to /usr/local/bin, but fedora and suse pip like to
|
# watcher defaults to /usr/local/bin, but fedora and suse pip like to
|
||||||
# install things in /usr/bin
|
# install things in /usr/bin
|
||||||
iniset $WATCHER_CONF DEFAULT bindir "/usr/bin"
|
iniset $WATCHER_CONF DEFAULT bindir "/usr/bin"
|
||||||
@@ -181,8 +231,12 @@ function create_watcher_conf {
|
|||||||
# Format logging
|
# Format logging
|
||||||
setup_logging $WATCHER_CONF
|
setup_logging $WATCHER_CONF
|
||||||
|
|
||||||
write_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" "/infra-optim" "" "watcher-api"
|
#config apache files
|
||||||
|
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
|
||||||
|
write_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" "/infra-optim"
|
||||||
|
else
|
||||||
|
_config_watcher_apache_wsgi
|
||||||
|
fi
|
||||||
# Register SSL certificates if provided
|
# Register SSL certificates if provided
|
||||||
if is_ssl_enabled_service watcher; then
|
if is_ssl_enabled_service watcher; then
|
||||||
ensure_certificates WATCHER
|
ensure_certificates WATCHER
|
||||||
@@ -194,6 +248,13 @@ function create_watcher_conf {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# create_watcher_cache_dir() - Part of the init_watcher() process
|
||||||
|
function create_watcher_cache_dir {
|
||||||
|
# Create cache dir
|
||||||
|
sudo install -d -o $STACK_USER $WATCHER_AUTH_CACHE_DIR
|
||||||
|
rm -rf $WATCHER_AUTH_CACHE_DIR/*
|
||||||
|
}
|
||||||
|
|
||||||
# init_watcher() - Initialize databases, etc.
|
# init_watcher() - Initialize databases, etc.
|
||||||
function init_watcher {
|
function init_watcher {
|
||||||
# clean up from previous (possibly aborted) runs
|
# clean up from previous (possibly aborted) runs
|
||||||
@@ -205,6 +266,7 @@ function init_watcher {
|
|||||||
# Create watcher schema
|
# Create watcher schema
|
||||||
$WATCHER_BIN_DIR/watcher-db-manage --config-file $WATCHER_CONF upgrade
|
$WATCHER_BIN_DIR/watcher-db-manage --config-file $WATCHER_CONF upgrade
|
||||||
fi
|
fi
|
||||||
|
create_watcher_cache_dir
|
||||||
}
|
}
|
||||||
|
|
||||||
# install_watcherclient() - Collect source and prepare
|
# install_watcherclient() - Collect source and prepare
|
||||||
@@ -213,15 +275,15 @@ function install_watcherclient {
|
|||||||
git_clone_by_name "python-watcherclient"
|
git_clone_by_name "python-watcherclient"
|
||||||
setup_dev_lib "python-watcherclient"
|
setup_dev_lib "python-watcherclient"
|
||||||
fi
|
fi
|
||||||
if [[ "$GLOBAL_VENV" == "True" ]]; then
|
|
||||||
sudo ln -sf /opt/stack/data/venv/bin/watcher /usr/local/bin
|
|
||||||
fi
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# install_watcher() - Collect source and prepare
|
# install_watcher() - Collect source and prepare
|
||||||
function install_watcher {
|
function install_watcher {
|
||||||
git_clone $WATCHER_REPO $WATCHER_DIR $WATCHER_BRANCH
|
git_clone $WATCHER_REPO $WATCHER_DIR $WATCHER_BRANCH
|
||||||
setup_develop $WATCHER_DIR
|
setup_develop $WATCHER_DIR
|
||||||
|
if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then
|
||||||
|
install_apache_wsgi
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# start_watcher_api() - Start the API process ahead of other things
|
# start_watcher_api() - Start the API process ahead of other things
|
||||||
@@ -235,10 +297,19 @@ function start_watcher_api {
|
|||||||
service_port=$WATCHER_SERVICE_PORT_INT
|
service_port=$WATCHER_SERVICE_PORT_INT
|
||||||
service_protocol="http"
|
service_protocol="http"
|
||||||
fi
|
fi
|
||||||
run_process "watcher-api" "$(which uwsgi) --procname-prefix watcher-api --ini $WATCHER_UWSGI_CONF"
|
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
|
||||||
watcher_url=$service_protocol://$SERVICE_HOST/infra-optim
|
run_process "watcher-api" "$(which uwsgi) --procname-prefix watcher-api --ini $WATCHER_UWSGI_CONF"
|
||||||
# TODO(sean-k-mooney): we should probably check that we can hit
|
watcher_url=$service_protocol://$SERVICE_HOST/infra-optim
|
||||||
# the microversion endpoint and get a valid response.
|
else
|
||||||
|
watcher_url=$service_protocol://$SERVICE_HOST:$service_port
|
||||||
|
enable_apache_site watcher-api
|
||||||
|
restart_apache_server
|
||||||
|
# Start proxies if enabled
|
||||||
|
if is_service_enabled tls-proxy; then
|
||||||
|
start_tls_proxy watcher '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
echo "Waiting for watcher-api to start..."
|
echo "Waiting for watcher-api to start..."
|
||||||
if ! wait_for_service $SERVICE_TIMEOUT $watcher_url; then
|
if ! wait_for_service $SERVICE_TIMEOUT $watcher_url; then
|
||||||
die $LINENO "watcher-api did not start"
|
die $LINENO "watcher-api did not start"
|
||||||
@@ -256,25 +327,17 @@ function start_watcher {
|
|||||||
|
|
||||||
# stop_watcher() - Stop running processes (non-screen)
|
# stop_watcher() - Stop running processes (non-screen)
|
||||||
function stop_watcher {
|
function stop_watcher {
|
||||||
stop_process watcher-api
|
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
|
||||||
|
stop_process watcher-api
|
||||||
|
else
|
||||||
|
disable_apache_site watcher-api
|
||||||
|
restart_apache_server
|
||||||
|
fi
|
||||||
for serv in watcher-decision-engine watcher-applier; do
|
for serv in watcher-decision-engine watcher-applier; do
|
||||||
stop_process $serv
|
stop_process $serv
|
||||||
done
|
done
|
||||||
}
|
}
|
||||||
|
|
||||||
# configure_tempest_for_watcher() - Configure Tempest for watcher
|
|
||||||
function configure_tempest_for_watcher {
|
|
||||||
# Set default microversion for watcher-tempest-plugin
|
|
||||||
# Please make sure to update this when the microversion is updated, otherwise
|
|
||||||
# new tests may be skipped.
|
|
||||||
TEMPEST_WATCHER_MIN_MICROVERSION=${TEMPEST_WATCHER_MIN_MICROVERSION:-"1.0"}
|
|
||||||
TEMPEST_WATCHER_MAX_MICROVERSION=${TEMPEST_WATCHER_MAX_MICROVERSION:-"1.6"}
|
|
||||||
|
|
||||||
# Set microversion options in tempest.conf
|
|
||||||
iniset $TEMPEST_CONFIG optimize min_microversion $TEMPEST_WATCHER_MIN_MICROVERSION
|
|
||||||
iniset $TEMPEST_CONFIG optimize max_microversion $TEMPEST_WATCHER_MAX_MICROVERSION
|
|
||||||
}
|
|
||||||
|
|
||||||
# Restore xtrace
|
# Restore xtrace
|
||||||
$_XTRACE_WATCHER
|
$_XTRACE_WATCHER
|
||||||
|
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ GLANCE_HOSTPORT=${SERVICE_HOST}:9292
|
|||||||
DATABASE_TYPE=mysql
|
DATABASE_TYPE=mysql
|
||||||
|
|
||||||
# Enable services (including neutron)
|
# Enable services (including neutron)
|
||||||
ENABLED_SERVICES=n-cpu,n-api-meta,c-vol,q-agt,placement-client,node-exporter
|
ENABLED_SERVICES=n-cpu,n-api-meta,c-vol,q-agt,placement-client
|
||||||
|
|
||||||
NOVA_VNC_ENABLED=True
|
NOVA_VNC_ENABLED=True
|
||||||
NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html"
|
NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html"
|
||||||
@@ -42,10 +42,6 @@ disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api
|
|||||||
LOGFILE=$DEST/logs/stack.sh.log
|
LOGFILE=$DEST/logs/stack.sh.log
|
||||||
LOGDAYS=2
|
LOGDAYS=2
|
||||||
|
|
||||||
CEILOMETER_BACKEND="none"
|
|
||||||
CEILOMETER_BACKENDS="none"
|
|
||||||
enable_plugin devstack-plugin-prometheus https://opendev.org/openstack/devstack-plugin-prometheus
|
|
||||||
|
|
||||||
[[post-config|$NOVA_CONF]]
|
[[post-config|$NOVA_CONF]]
|
||||||
[DEFAULT]
|
[DEFAULT]
|
||||||
compute_monitors=cpu.virt_driver
|
compute_monitors=cpu.virt_driver
|
||||||
|
|||||||
@@ -18,10 +18,6 @@ NETWORK_GATEWAY=10.254.1.1 # Change this for your network
|
|||||||
|
|
||||||
MULTI_HOST=1
|
MULTI_HOST=1
|
||||||
|
|
||||||
CEILOMETER_ALARM_THRESHOLD="6000000000"
|
|
||||||
CEILOMETER_BACKENDS="sg-core"
|
|
||||||
CEILOMETER_PIPELINE_INTERVAL="15"
|
|
||||||
|
|
||||||
|
|
||||||
#Set this to FALSE if do not want to run watcher-api behind mod-wsgi
|
#Set this to FALSE if do not want to run watcher-api behind mod-wsgi
|
||||||
#WATCHER_USE_MOD_WSGI=TRUE
|
#WATCHER_USE_MOD_WSGI=TRUE
|
||||||
@@ -44,10 +40,8 @@ disable_service ceilometer-acompute
|
|||||||
# Enable the ceilometer api explicitly(bug:1667678)
|
# Enable the ceilometer api explicitly(bug:1667678)
|
||||||
enable_service ceilometer-api
|
enable_service ceilometer-api
|
||||||
|
|
||||||
enable_service prometheus
|
# Enable the Gnocchi plugin
|
||||||
enable_plugin aodh https://opendev.org/openstack/aodh
|
enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi
|
||||||
enable_plugin devstack-plugin-prometheus https://opendev.org/openstack/devstack-plugin-prometheus
|
|
||||||
enable_plugin sg-core https://github.com/openstack-k8s-operators/sg-core main
|
|
||||||
|
|
||||||
LOGFILE=$DEST/logs/stack.sh.log
|
LOGFILE=$DEST/logs/stack.sh.log
|
||||||
LOGDAYS=2
|
LOGDAYS=2
|
||||||
@@ -61,42 +55,3 @@ compute_monitors=cpu.virt_driver
|
|||||||
# can change this to just versioned when ceilometer handles versioned
|
# can change this to just versioned when ceilometer handles versioned
|
||||||
# notifications from nova: https://bugs.launchpad.net/ceilometer/+bug/1665449
|
# notifications from nova: https://bugs.launchpad.net/ceilometer/+bug/1665449
|
||||||
notification_format=both
|
notification_format=both
|
||||||
|
|
||||||
[[post-config|$WATCHER_CONF]]
|
|
||||||
[prometheus_client]
|
|
||||||
host = 127.0.0.1
|
|
||||||
port = 9090
|
|
||||||
|
|
||||||
[watcher_cluster_data_model_collectors.baremetal]
|
|
||||||
period = 120
|
|
||||||
|
|
||||||
[watcher_cluster_data_model_collectors.compute]
|
|
||||||
period = 120
|
|
||||||
|
|
||||||
[watcher_cluster_data_model_collectors.storage]
|
|
||||||
period = 120
|
|
||||||
|
|
||||||
[watcher_datasources]
|
|
||||||
datasources = prometheus
|
|
||||||
|
|
||||||
[[test-config|$TEMPEST_CONFIG]]
|
|
||||||
[optimize]
|
|
||||||
datasource = prometheus
|
|
||||||
|
|
||||||
[service_available]
|
|
||||||
sg_core = True
|
|
||||||
|
|
||||||
[telemetry]
|
|
||||||
ceilometer_polling_interval = 15
|
|
||||||
disable_ssl_certificate_validation = True
|
|
||||||
|
|
||||||
[telemetry_services]
|
|
||||||
metric_backends = prometheus
|
|
||||||
|
|
||||||
[compute]
|
|
||||||
min_compute_nodes = 2
|
|
||||||
min_microversion = 2.56
|
|
||||||
|
|
||||||
[compute-feature-enabled]
|
|
||||||
block_migration_for_live_migration = True
|
|
||||||
live_migration = True
|
|
||||||
|
|||||||
@@ -1,53 +0,0 @@
|
|||||||
# Sample ``local.conf`` for compute node for Watcher development
|
|
||||||
# NOTE: Copy this file to the root DevStack directory for it to work properly.
|
|
||||||
|
|
||||||
[[local|localrc]]
|
|
||||||
|
|
||||||
ADMIN_PASSWORD=nomoresecrete
|
|
||||||
DATABASE_PASSWORD=stackdb
|
|
||||||
RABBIT_PASSWORD=stackqueue
|
|
||||||
SERVICE_PASSWORD=$ADMIN_PASSWORD
|
|
||||||
SERVICE_TOKEN=azertytoken
|
|
||||||
|
|
||||||
HOST_IP=192.168.42.2 # Change this to this compute node's IP address
|
|
||||||
#HOST_IPV6=2001:db8::7
|
|
||||||
FLAT_INTERFACE=eth0
|
|
||||||
|
|
||||||
FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is
|
|
||||||
NETWORK_GATEWAY=10.254.1.1 # Change this for your network
|
|
||||||
|
|
||||||
MULTI_HOST=1
|
|
||||||
|
|
||||||
SERVICE_HOST=192.168.42.1 # Change this to the IP of your controller node
|
|
||||||
MYSQL_HOST=$SERVICE_HOST
|
|
||||||
RABBIT_HOST=$SERVICE_HOST
|
|
||||||
GLANCE_HOSTPORT=${SERVICE_HOST}:9292
|
|
||||||
|
|
||||||
DATABASE_TYPE=mysql
|
|
||||||
|
|
||||||
# Enable services (including neutron)
|
|
||||||
ENABLED_SERVICES=n-cpu,n-api-meta,c-vol,q-agt,placement-client
|
|
||||||
|
|
||||||
NOVA_VNC_ENABLED=True
|
|
||||||
NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html"
|
|
||||||
VNCSERVER_LISTEN=0.0.0.0
|
|
||||||
VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP # or HOST_IPV6
|
|
||||||
|
|
||||||
NOVA_INSTANCES_PATH=/opt/stack/data/instances
|
|
||||||
|
|
||||||
# Enable the Ceilometer plugin for the compute agent
|
|
||||||
enable_plugin ceilometer https://opendev.org/openstack/ceilometer
|
|
||||||
disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api
|
|
||||||
|
|
||||||
LOGFILE=$DEST/logs/stack.sh.log
|
|
||||||
LOGDAYS=2
|
|
||||||
|
|
||||||
[[post-config|$NOVA_CONF]]
|
|
||||||
[DEFAULT]
|
|
||||||
compute_monitors=cpu.virt_driver
|
|
||||||
[notifications]
|
|
||||||
# Enable both versioned and unversioned notifications. Watcher only
|
|
||||||
# uses versioned notifications but ceilometer uses unversioned. We
|
|
||||||
# can change this to just versioned when ceilometer handles versioned
|
|
||||||
# notifications from nova: https://bugs.launchpad.net/ceilometer/+bug/1665449
|
|
||||||
notification_format=both
|
|
||||||
@@ -1,57 +0,0 @@
|
|||||||
# Sample ``local.conf`` for controller node for Watcher development
|
|
||||||
# NOTE: Copy this file to the root DevStack directory for it to work properly.
|
|
||||||
|
|
||||||
[[local|localrc]]
|
|
||||||
|
|
||||||
ADMIN_PASSWORD=nomoresecrete
|
|
||||||
DATABASE_PASSWORD=stackdb
|
|
||||||
RABBIT_PASSWORD=stackqueue
|
|
||||||
SERVICE_PASSWORD=$ADMIN_PASSWORD
|
|
||||||
SERVICE_TOKEN=azertytoken
|
|
||||||
|
|
||||||
HOST_IP=192.168.42.1 # Change this to your controller node IP address
|
|
||||||
#HOST_IPV6=2001:db8::7
|
|
||||||
FLAT_INTERFACE=eth0
|
|
||||||
|
|
||||||
FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is
|
|
||||||
NETWORK_GATEWAY=10.254.1.1 # Change this for your network
|
|
||||||
|
|
||||||
MULTI_HOST=1
|
|
||||||
|
|
||||||
|
|
||||||
#Set this to FALSE if do not want to run watcher-api behind mod-wsgi
|
|
||||||
#WATCHER_USE_MOD_WSGI=TRUE
|
|
||||||
|
|
||||||
# This is the controller node, so disable nova-compute
|
|
||||||
disable_service n-cpu
|
|
||||||
|
|
||||||
# Enable the Watcher Dashboard plugin
|
|
||||||
enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard
|
|
||||||
|
|
||||||
# Enable the Watcher plugin
|
|
||||||
enable_plugin watcher https://opendev.org/openstack/watcher
|
|
||||||
|
|
||||||
# Enable the Ceilometer plugin
|
|
||||||
enable_plugin ceilometer https://opendev.org/openstack/ceilometer
|
|
||||||
|
|
||||||
# This is the controller node, so disable the ceilometer compute agent
|
|
||||||
disable_service ceilometer-acompute
|
|
||||||
|
|
||||||
# Enable the ceilometer api explicitly(bug:1667678)
|
|
||||||
enable_service ceilometer-api
|
|
||||||
|
|
||||||
# Enable the Gnocchi plugin
|
|
||||||
enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi
|
|
||||||
|
|
||||||
LOGFILE=$DEST/logs/stack.sh.log
|
|
||||||
LOGDAYS=2
|
|
||||||
|
|
||||||
[[post-config|$NOVA_CONF]]
|
|
||||||
[DEFAULT]
|
|
||||||
compute_monitors=cpu.virt_driver
|
|
||||||
[notifications]
|
|
||||||
# Enable both versioned and unversioned notifications. Watcher only
|
|
||||||
# uses versioned notifications but ceilometer uses unversioned. We
|
|
||||||
# can change this to just versioned when ceilometer handles versioned
|
|
||||||
# notifications from nova: https://bugs.launchpad.net/ceilometer/+bug/1665449
|
|
||||||
notification_format=both
|
|
||||||
@@ -1,3 +1,5 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
# plugin.sh - DevStack plugin script to install watcher
|
# plugin.sh - DevStack plugin script to install watcher
|
||||||
|
|
||||||
# Save trace setting
|
# Save trace setting
|
||||||
@@ -36,9 +38,6 @@ if is_service_enabled watcher-api watcher-decision-engine watcher-applier; then
|
|||||||
# Start the watcher components
|
# Start the watcher components
|
||||||
echo_summary "Starting watcher"
|
echo_summary "Starting watcher"
|
||||||
start_watcher
|
start_watcher
|
||||||
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
|
|
||||||
echo_summary "Configuring tempest for watcher"
|
|
||||||
configure_tempest_for_watcher
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "$1" == "unstack" ]]; then
|
if [[ "$1" == "unstack" ]]; then
|
||||||
|
|||||||
@@ -1,16 +0,0 @@
|
|||||||
global:
|
|
||||||
scrape_interval: 10s
|
|
||||||
scrape_configs:
|
|
||||||
- job_name: "node"
|
|
||||||
static_configs:
|
|
||||||
- targets: ["controller:3000"]
|
|
||||||
- targets: ["controller:9100"]
|
|
||||||
labels:
|
|
||||||
fqdn: "controller" # change the hostname here to your controller hostname
|
|
||||||
- targets: ["compute-1:9100"]
|
|
||||||
labels:
|
|
||||||
fqdn: "compute-1" # change the hostname here to your fist compute hostname
|
|
||||||
- targets: ["compute-2:9100"]
|
|
||||||
labels:
|
|
||||||
fqdn: "compute-2" # change the hostname her to your secondd compute hostname
|
|
||||||
# add as many blocks as compute nodes you have
|
|
||||||
@@ -1,3 +1,5 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
# ``upgrade-watcher``
|
# ``upgrade-watcher``
|
||||||
|
|
||||||
function configure_watcher_upgrade {
|
function configure_watcher_upgrade {
|
||||||
|
|||||||
@@ -70,7 +70,7 @@ then write_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" "/infra-optim"
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Migrate the database
|
# Migrate the database
|
||||||
$WATCHER_BIN_DIR/watcher-db-manage upgrade || die $LINO "DB migration error"
|
watcher-db-manage upgrade || die $LINO "DB migration error"
|
||||||
|
|
||||||
start_watcher
|
start_watcher
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +0,0 @@
|
|||||||
thirdparty
|
|
||||||
assertin
|
|
||||||
notin
|
|
||||||
|
|
||||||
@@ -52,7 +52,7 @@ class BaseWatcherDirective(rst.Directive):
|
|||||||
obj_raw_docstring = obj.__init__.__doc__
|
obj_raw_docstring = obj.__init__.__doc__
|
||||||
|
|
||||||
if not obj_raw_docstring:
|
if not obj_raw_docstring:
|
||||||
# Raise a warning to make the tests fail with doc8
|
# Raise a warning to make the tests fail wit doc8
|
||||||
raise self.error("No docstring available for %s!" % obj)
|
raise self.error("No docstring available for %s!" % obj)
|
||||||
|
|
||||||
obj_docstring = inspect.cleandoc(obj_raw_docstring)
|
obj_docstring = inspect.cleandoc(obj_raw_docstring)
|
||||||
|
|||||||
@@ -14,7 +14,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "CANCELLED",
|
"state": "CANCELLED",
|
||||||
"status_message": null,
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -25,7 +24,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "CANCELLING",
|
"state": "CANCELLING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -24,7 +24,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "FAILED",
|
"state": "FAILED",
|
||||||
"status_message": null,
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -35,7 +34,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "CANCELLING",
|
"state": "CANCELLING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -14,7 +14,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "CANCELLING",
|
"state": "CANCELLING",
|
||||||
"status_message": null,
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -25,7 +24,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "CANCELLING",
|
"state": "CANCELLING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -13,7 +13,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "PENDING",
|
"state": "PENDING",
|
||||||
"status_message": null,
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -24,7 +23,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -13,7 +13,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "DELETED",
|
"state": "DELETED",
|
||||||
"status_message": null,
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -24,7 +23,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -14,7 +14,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "SUCCEEDED",
|
"state": "SUCCEEDED",
|
||||||
"status_message": null,
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -25,7 +24,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -24,7 +24,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "FAILED",
|
"state": "FAILED",
|
||||||
"status_message": "Action execution failed",
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -35,7 +34,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -14,7 +14,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -25,7 +24,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -18,12 +18,10 @@
|
|||||||
"watcher_object.name": "ActionStateUpdatePayload",
|
"watcher_object.name": "ActionStateUpdatePayload",
|
||||||
"watcher_object.data": {
|
"watcher_object.data": {
|
||||||
"old_state": "PENDING",
|
"old_state": "PENDING",
|
||||||
"state": "ONGOING",
|
"state": "ONGOING"
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"action_plan": {
|
"action_plan": {
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
@@ -34,7 +32,6 @@
|
|||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
"audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d",
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"deleted_at": null
|
"deleted_at": null
|
||||||
|
|||||||
@@ -21,7 +21,6 @@
|
|||||||
"scope": [],
|
"scope": [],
|
||||||
"audit_type": "ONESHOT",
|
"audit_type": "ONESHOT",
|
||||||
"state": "SUCCEEDED",
|
"state": "SUCCEEDED",
|
||||||
"status_message": null,
|
|
||||||
"parameters": {},
|
"parameters": {},
|
||||||
"interval": null,
|
"interval": null,
|
||||||
"updated_at": null
|
"updated_at": null
|
||||||
@@ -30,7 +29,6 @@
|
|||||||
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061",
|
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061",
|
||||||
"fault": null,
|
"fault": null,
|
||||||
"state": "CANCELLED",
|
"state": "CANCELLED",
|
||||||
"status_message": null,
|
|
||||||
"global_efficacy": [],
|
"global_efficacy": [],
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"strategy": {
|
"strategy": {
|
||||||
|
|||||||
@@ -52,15 +52,13 @@
|
|||||||
"scope": [],
|
"scope": [],
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"audit_type": "ONESHOT",
|
"audit_type": "ONESHOT",
|
||||||
"status_message": null,
|
|
||||||
"interval": null,
|
"interval": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"state": "SUCCEEDED"
|
"state": "SUCCEEDED"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"global_efficacy": [],
|
"global_efficacy": [],
|
||||||
"state": "CANCELLING",
|
"state": "CANCELLING"
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"timestamp": "2016-10-18 09:52:05.219414"
|
"timestamp": "2016-10-18 09:52:05.219414"
|
||||||
|
|||||||
@@ -21,7 +21,6 @@
|
|||||||
"scope": [],
|
"scope": [],
|
||||||
"audit_type": "ONESHOT",
|
"audit_type": "ONESHOT",
|
||||||
"state": "SUCCEEDED",
|
"state": "SUCCEEDED",
|
||||||
"status_message": null,
|
|
||||||
"parameters": {},
|
"parameters": {},
|
||||||
"interval": null,
|
"interval": null,
|
||||||
"updated_at": null
|
"updated_at": null
|
||||||
@@ -30,7 +29,6 @@
|
|||||||
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061",
|
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061",
|
||||||
"fault": null,
|
"fault": null,
|
||||||
"state": "CANCELLING",
|
"state": "CANCELLING",
|
||||||
"status_message": null,
|
|
||||||
"global_efficacy": [],
|
"global_efficacy": [],
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"strategy": {
|
"strategy": {
|
||||||
|
|||||||
@@ -33,7 +33,6 @@
|
|||||||
"interval": null,
|
"interval": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"state": "PENDING",
|
"state": "PENDING",
|
||||||
"status_message": null,
|
|
||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"updated_at": null
|
"updated_at": null
|
||||||
},
|
},
|
||||||
@@ -44,7 +43,6 @@
|
|||||||
"global_efficacy": {},
|
"global_efficacy": {},
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"state": "RECOMMENDED",
|
"state": "RECOMMENDED",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null
|
"updated_at": null
|
||||||
},
|
},
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
|
|||||||
@@ -18,7 +18,6 @@
|
|||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"state": "PENDING",
|
"state": "PENDING",
|
||||||
"status_message": null,
|
|
||||||
"created_at": "2016-10-18T09:52:05Z",
|
"created_at": "2016-10-18T09:52:05Z",
|
||||||
"parameters": {}
|
"parameters": {}
|
||||||
},
|
},
|
||||||
@@ -44,8 +43,7 @@
|
|||||||
"watcher_object.name": "StrategyPayload",
|
"watcher_object.name": "StrategyPayload",
|
||||||
"watcher_object.namespace": "watcher"
|
"watcher_object.namespace": "watcher"
|
||||||
},
|
},
|
||||||
"state": "DELETED",
|
"state": "DELETED"
|
||||||
"status_message": null
|
|
||||||
},
|
},
|
||||||
"watcher_object.version": "1.0",
|
"watcher_object.version": "1.0",
|
||||||
"watcher_object.name": "ActionPlanDeletePayload",
|
"watcher_object.name": "ActionPlanDeletePayload",
|
||||||
|
|||||||
@@ -22,7 +22,6 @@
|
|||||||
"scope": [],
|
"scope": [],
|
||||||
"audit_type": "ONESHOT",
|
"audit_type": "ONESHOT",
|
||||||
"state": "SUCCEEDED",
|
"state": "SUCCEEDED",
|
||||||
"status_message": null,
|
|
||||||
"parameters": {},
|
"parameters": {},
|
||||||
"interval": null,
|
"interval": null,
|
||||||
"updated_at": null
|
"updated_at": null
|
||||||
@@ -31,7 +30,6 @@
|
|||||||
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061",
|
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061",
|
||||||
"fault": null,
|
"fault": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"global_efficacy": [],
|
"global_efficacy": [],
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"strategy": {
|
"strategy": {
|
||||||
|
|||||||
@@ -55,13 +55,11 @@
|
|||||||
"audit_type": "ONESHOT",
|
"audit_type": "ONESHOT",
|
||||||
"interval": null,
|
"interval": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"state": "PENDING",
|
"state": "PENDING"
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"global_efficacy": [],
|
"global_efficacy": [],
|
||||||
"state": "ONGOING",
|
"state": "ONGOING"
|
||||||
"status_message": null
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"timestamp": "2016-10-18 09:52:05.219414"
|
"timestamp": "2016-10-18 09:52:05.219414"
|
||||||
|
|||||||
@@ -22,7 +22,6 @@
|
|||||||
"scope": [],
|
"scope": [],
|
||||||
"audit_type": "ONESHOT",
|
"audit_type": "ONESHOT",
|
||||||
"state": "PENDING",
|
"state": "PENDING",
|
||||||
"status_message": null,
|
|
||||||
"parameters": {},
|
"parameters": {},
|
||||||
"interval": null,
|
"interval": null,
|
||||||
"updated_at": null
|
"updated_at": null
|
||||||
@@ -31,7 +30,6 @@
|
|||||||
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061",
|
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061",
|
||||||
"fault": null,
|
"fault": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"global_efficacy": [],
|
"global_efficacy": [],
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"strategy": {
|
"strategy": {
|
||||||
|
|||||||
@@ -16,7 +16,6 @@
|
|||||||
"interval": null,
|
"interval": null,
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "PENDING",
|
"state": "PENDING",
|
||||||
"status_message": null,
|
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"parameters": {}
|
"parameters": {}
|
||||||
},
|
},
|
||||||
@@ -36,7 +35,6 @@
|
|||||||
"watcher_object.name": "ActionPlanStateUpdatePayload"
|
"watcher_object.name": "ActionPlanStateUpdatePayload"
|
||||||
},
|
},
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3",
|
||||||
"strategy": {
|
"strategy": {
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
"para1": 3.2
|
"para1": 3.2
|
||||||
},
|
},
|
||||||
"state": "PENDING",
|
"state": "PENDING",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a",
|
"goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a",
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
"para1": 3.2
|
"para1": 3.2
|
||||||
},
|
},
|
||||||
"state": "DELETED",
|
"state": "DELETED",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a",
|
"goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a",
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
"para1": 3.2
|
"para1": 3.2
|
||||||
},
|
},
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"fault": null,
|
"fault": null,
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
"para1": 3.2
|
"para1": 3.2
|
||||||
},
|
},
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"fault": {
|
"fault": {
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
"para1": 3.2
|
"para1": 3.2
|
||||||
},
|
},
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"fault": null,
|
"fault": null,
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
"para1": 3.2
|
"para1": 3.2
|
||||||
},
|
},
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"fault": null,
|
"fault": null,
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
"para1": 3.2
|
"para1": 3.2
|
||||||
},
|
},
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"fault": {
|
"fault": {
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
"para1": 3.2
|
"para1": 3.2
|
||||||
},
|
},
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"deleted_at": null,
|
"deleted_at": null,
|
||||||
"fault": null,
|
"fault": null,
|
||||||
|
|||||||
@@ -70,7 +70,6 @@
|
|||||||
"interval": null,
|
"interval": null,
|
||||||
"updated_at": null,
|
"updated_at": null,
|
||||||
"state": "ONGOING",
|
"state": "ONGOING",
|
||||||
"status_message": null,
|
|
||||||
"audit_type": "ONESHOT"
|
"audit_type": "ONESHOT"
|
||||||
},
|
},
|
||||||
"watcher_object.namespace": "watcher",
|
"watcher_object.namespace": "watcher",
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
sphinx>=2.1.1 # BSD
|
# The order of packages is significant, because pip processes them in the order
|
||||||
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
|
# of appearance. Changing the order has an impact on the overall integration
|
||||||
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
|
# process, which may cause wedges in the gate later.
|
||||||
sphinxcontrib-apidoc>=0.2.0 # BSD
|
|
||||||
# openstack
|
|
||||||
os-api-ref>=1.4.0 # Apache-2.0
|
|
||||||
openstackdocstheme>=2.2.1 # Apache-2.0
|
openstackdocstheme>=2.2.1 # Apache-2.0
|
||||||
# releasenotes
|
sphinx>=2.0.0,!=2.1.0 # BSD
|
||||||
|
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
|
||||||
|
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
|
||||||
reno>=3.1.0 # Apache-2.0
|
reno>=3.1.0 # Apache-2.0
|
||||||
|
sphinxcontrib-apidoc>=0.2.0 # BSD
|
||||||
|
os-api-ref>=1.4.0 # Apache-2.0
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ own sections. However, the base *GMR* consists of several sections:
|
|||||||
|
|
||||||
Package
|
Package
|
||||||
Shows information about the package to which this process belongs, including
|
Shows information about the package to which this process belongs, including
|
||||||
version information.
|
version informations.
|
||||||
|
|
||||||
Threads
|
Threads
|
||||||
Shows stack traces and thread ids for each of the threads within this
|
Shows stack traces and thread ids for each of the threads within this
|
||||||
|
|||||||
@@ -285,7 +285,7 @@ Audit and interval (in case of CONTINUOUS type). There is three types of Audit:
|
|||||||
ONESHOT, CONTINUOUS and EVENT. ONESHOT Audit is launched once and if it
|
ONESHOT, CONTINUOUS and EVENT. ONESHOT Audit is launched once and if it
|
||||||
succeeded executed new action plan list will be provided; CONTINUOUS Audit
|
succeeded executed new action plan list will be provided; CONTINUOUS Audit
|
||||||
creates action plans with specified interval (in seconds or cron format, cron
|
creates action plans with specified interval (in seconds or cron format, cron
|
||||||
interval can be used like: ``*/5 * * * *``), if action plan
|
inteval can be used like: `*/5 * * * *`), if action plan
|
||||||
has been created, all previous action plans get CANCELLED state;
|
has been created, all previous action plans get CANCELLED state;
|
||||||
EVENT audit is launched when receiving webhooks API.
|
EVENT audit is launched when receiving webhooks API.
|
||||||
|
|
||||||
@@ -384,9 +384,7 @@ following methods of the :ref:`Action <action_definition>` handler:
|
|||||||
|
|
||||||
- **preconditions()**: this method will make sure that all conditions are met
|
- **preconditions()**: this method will make sure that all conditions are met
|
||||||
before executing the action (for example, it makes sure that an instance
|
before executing the action (for example, it makes sure that an instance
|
||||||
still exists before trying to migrate it). If action specific preconditions
|
still exists before trying to migrate it).
|
||||||
are not met in this phase, the Action is set to **SKIPPED** state and will
|
|
||||||
not be executed.
|
|
||||||
- **execute()**: this method is what triggers real commands on other
|
- **execute()**: this method is what triggers real commands on other
|
||||||
OpenStack services (such as Nova, ...) in order to change target resource
|
OpenStack services (such as Nova, ...) in order to change target resource
|
||||||
state. If the action is successfully executed, a notification message is
|
state. If the action is successfully executed, a notification message is
|
||||||
@@ -481,39 +479,6 @@ change to a new value:
|
|||||||
.. image:: ./images/action_plan_state_machine.png
|
.. image:: ./images/action_plan_state_machine.png
|
||||||
:width: 100%
|
:width: 100%
|
||||||
|
|
||||||
.. _action_state_machine:
|
|
||||||
|
|
||||||
Action State Machine
|
|
||||||
-------------------------
|
|
||||||
|
|
||||||
An :ref:`Action <action_definition>` has a life-cycle and its current state may
|
|
||||||
be one of the following:
|
|
||||||
|
|
||||||
- **PENDING** : the :ref:`Action <action_definition>` has not been executed
|
|
||||||
yet by the :ref:`Watcher Applier <watcher_applier_definition>`
|
|
||||||
- **SKIPPED** : the :ref:`Action <action_definition>` will not be executed
|
|
||||||
because a predefined skipping condition is found by
|
|
||||||
:ref:`Watcher Applier <watcher_applier_definition>` or is explicitly
|
|
||||||
skipped by the :ref:`Administrator <administrator_definition>`.
|
|
||||||
- **ONGOING** : the :ref:`Action <action_definition>` is currently being
|
|
||||||
processed by the :ref:`Watcher Applier <watcher_applier_definition>`
|
|
||||||
- **SUCCEEDED** : the :ref:`Action <action_definition>` has been executed
|
|
||||||
successfully
|
|
||||||
- **FAILED** : an error occurred while trying to execute the
|
|
||||||
:ref:`Action <action_definition>`
|
|
||||||
- **DELETED** : the :ref:`Action <action_definition>` is still stored in the
|
|
||||||
:ref:`Watcher database <watcher_database_definition>` but is not returned
|
|
||||||
any more through the Watcher APIs.
|
|
||||||
- **CANCELLED** : the :ref:`Action <action_definition>` was in **PENDING** or
|
|
||||||
**ONGOING** state and was cancelled by the
|
|
||||||
:ref:`Administrator <administrator_definition>`
|
|
||||||
|
|
||||||
The following diagram shows the different possible states of an
|
|
||||||
:ref:`Action <action_definition>` and what event makes the state change
|
|
||||||
change to a new value:
|
|
||||||
|
|
||||||
.. image:: ./images/action_state_machine.png
|
|
||||||
:width: 100%
|
|
||||||
|
|
||||||
|
|
||||||
.. _Watcher API: https://docs.openstack.org/api-ref/resource-optimization/
|
.. _Watcher API: https://docs.openstack.org/api-ref/resource-optimization/
|
||||||
|
|||||||
22
doc/source/conf.py
Normal file → Executable file
22
doc/source/conf.py
Normal file → Executable file
@@ -56,8 +56,8 @@ source_suffix = '.rst'
|
|||||||
master_doc = 'index'
|
master_doc = 'index'
|
||||||
|
|
||||||
# General information about the project.
|
# General information about the project.
|
||||||
project = 'Watcher'
|
project = u'Watcher'
|
||||||
copyright = 'OpenStack Foundation'
|
copyright = u'OpenStack Foundation'
|
||||||
|
|
||||||
# A list of ignored prefixes for module index sorting.
|
# A list of ignored prefixes for module index sorting.
|
||||||
modindex_common_prefix = ['watcher.']
|
modindex_common_prefix = ['watcher.']
|
||||||
@@ -91,14 +91,14 @@ pygments_style = 'native'
|
|||||||
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
|
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
|
||||||
|
|
||||||
man_pages = [
|
man_pages = [
|
||||||
('man/watcher-api', 'watcher-api', 'Watcher API Server',
|
('man/watcher-api', 'watcher-api', u'Watcher API Server',
|
||||||
['OpenStack'], 1),
|
[u'OpenStack'], 1),
|
||||||
('man/watcher-applier', 'watcher-applier', 'Watcher Applier',
|
('man/watcher-applier', 'watcher-applier', u'Watcher Applier',
|
||||||
['OpenStack'], 1),
|
[u'OpenStack'], 1),
|
||||||
('man/watcher-db-manage', 'watcher-db-manage',
|
('man/watcher-db-manage', 'watcher-db-manage',
|
||||||
'Watcher Db Management Utility', ['OpenStack'], 1),
|
u'Watcher Db Management Utility', [u'OpenStack'], 1),
|
||||||
('man/watcher-decision-engine', 'watcher-decision-engine',
|
('man/watcher-decision-engine', 'watcher-decision-engine',
|
||||||
'Watcher Decision Engine', ['OpenStack'], 1),
|
u'Watcher Decision Engine', [u'OpenStack'], 1),
|
||||||
]
|
]
|
||||||
|
|
||||||
# -- Options for HTML output --------------------------------------------------
|
# -- Options for HTML output --------------------------------------------------
|
||||||
@@ -115,7 +115,7 @@ html_theme = 'openstackdocs'
|
|||||||
htmlhelp_basename = '%sdoc' % project
|
htmlhelp_basename = '%sdoc' % project
|
||||||
|
|
||||||
|
|
||||||
# openstackdocstheme options
|
#openstackdocstheme options
|
||||||
openstackdocs_repo_name = 'openstack/watcher'
|
openstackdocs_repo_name = 'openstack/watcher'
|
||||||
openstackdocs_pdf_link = True
|
openstackdocs_pdf_link = True
|
||||||
openstackdocs_auto_name = False
|
openstackdocs_auto_name = False
|
||||||
@@ -128,8 +128,8 @@ openstackdocs_bug_tag = ''
|
|||||||
latex_documents = [
|
latex_documents = [
|
||||||
('index',
|
('index',
|
||||||
'doc-watcher.tex',
|
'doc-watcher.tex',
|
||||||
'Watcher Documentation',
|
u'Watcher Documentation',
|
||||||
'OpenStack Foundation', 'manual'),
|
u'OpenStack Foundation', 'manual'),
|
||||||
]
|
]
|
||||||
|
|
||||||
# If false, no module index is generated.
|
# If false, no module index is generated.
|
||||||
|
|||||||
@@ -194,14 +194,11 @@ The configuration file is organized into the following sections:
|
|||||||
* ``[watcher_applier]`` - Watcher Applier module configuration
|
* ``[watcher_applier]`` - Watcher Applier module configuration
|
||||||
* ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration
|
* ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration
|
||||||
* ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration
|
* ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration
|
||||||
|
* ``[ceilometer_client]`` - Ceilometer client configuration
|
||||||
* ``[cinder_client]`` - Cinder client configuration
|
* ``[cinder_client]`` - Cinder client configuration
|
||||||
* ``[glance_client]`` - Glance client configuration
|
* ``[glance_client]`` - Glance client configuration
|
||||||
* ``[gnocchi_client]`` - Gnocchi client configuration
|
|
||||||
* ``[ironic_client]`` - Ironic client configuration
|
|
||||||
* ``[keystone_client]`` - Keystone client configuration
|
|
||||||
* ``[nova_client]`` - Nova client configuration
|
* ``[nova_client]`` - Nova client configuration
|
||||||
* ``[neutron_client]`` - Neutron client configuration
|
* ``[neutron_client]`` - Neutron client configuration
|
||||||
* ``[placement_client]`` - Placement client configuration
|
|
||||||
|
|
||||||
The Watcher configuration file is expected to be named
|
The Watcher configuration file is expected to be named
|
||||||
``watcher.conf``. When starting Watcher, you can specify a different
|
``watcher.conf``. When starting Watcher, you can specify a different
|
||||||
@@ -375,7 +372,7 @@ You can configure and install Ceilometer by following the documentation below :
|
|||||||
#. https://docs.openstack.org/ceilometer/latest
|
#. https://docs.openstack.org/ceilometer/latest
|
||||||
|
|
||||||
The built-in strategy 'basic_consolidation' provided by watcher requires
|
The built-in strategy 'basic_consolidation' provided by watcher requires
|
||||||
"**compute.node.cpu.percent**" and "**cpu**" measurements to be collected
|
"**compute.node.cpu.percent**" and "**cpu_util**" measurements to be collected
|
||||||
by Ceilometer.
|
by Ceilometer.
|
||||||
The measurements available depend on the hypervisors that OpenStack manages on
|
The measurements available depend on the hypervisors that OpenStack manages on
|
||||||
the specific implementation.
|
the specific implementation.
|
||||||
@@ -429,38 +426,20 @@ Configure Cinder Notifications
|
|||||||
|
|
||||||
Watcher can also consume notifications generated by the Cinder services, in
|
Watcher can also consume notifications generated by the Cinder services, in
|
||||||
order to build or update, in real time, its cluster data model related to
|
order to build or update, in real time, its cluster data model related to
|
||||||
storage resources.
|
storage resources. To do so, you have to update the Cinder configuration
|
||||||
|
file on controller and volume nodes, in order to let Watcher receive Cinder
|
||||||
|
notifications in a dedicated ``watcher_notifications`` channel.
|
||||||
|
|
||||||
Cinder emits notifications on the ``notifications`` topic, in the openstack
|
* In the file ``/etc/cinder/cinder.conf``, update the section
|
||||||
control exchange (as it can be seen in the `Cinder conf`_).
|
``[oslo_messaging_notifications]``, by redefining the list of topics
|
||||||
|
into which Cinder services will publish events ::
|
||||||
* In the file ``/etc/cinder/cinder.conf``, the value of driver in the section
|
|
||||||
``[oslo_messaging_notifications]`` can't be noop.
|
|
||||||
|
|
||||||
[oslo_messaging_notifications]
|
[oslo_messaging_notifications]
|
||||||
driver = messagingv2
|
driver = messagingv2
|
||||||
|
topics = notifications,watcher_notifications
|
||||||
|
|
||||||
.. _`Cinder conf`: https://docs.openstack.org/cinder/latest/configuration/block-storage/samples/cinder.conf.html
|
* Restart the Cinder services.
|
||||||
|
|
||||||
Configure Watcher listening to the Notifications
|
|
||||||
================================================
|
|
||||||
|
|
||||||
To consume either Cinder or Nova notifications, (or both), Watcher must be
|
|
||||||
configured to listen to the notifications topics that Cinder and Nova emit.
|
|
||||||
|
|
||||||
Use the `notification_topics`_ config option to indicate to Watcher that it
|
|
||||||
should listen to the correct topics. By default, Cinder emits notifications
|
|
||||||
on ``openstack.notifications``, while Nova emits notifications on
|
|
||||||
``nova.versioned_notifications``. The Watcher conf should have the topics for
|
|
||||||
the desired notifications, below is an example for both Cinder and Nova::
|
|
||||||
|
|
||||||
[watcher_decision_engine]
|
|
||||||
|
|
||||||
...
|
|
||||||
|
|
||||||
notification_topics = nova.versioned_notifications,openstack.notifications
|
|
||||||
|
|
||||||
.. _`notification_topics`: https://docs.openstack.org/watcher/latest/configuration/watcher.html#watcher_decision_engine.notification_topics
|
|
||||||
|
|
||||||
Workers
|
Workers
|
||||||
=======
|
=======
|
||||||
|
|||||||
@@ -52,43 +52,18 @@ types of concurrency used in various services of Watcher.
|
|||||||
.. _wait_for_any: https://docs.openstack.org/futurist/latest/reference/index.html#waiters
|
.. _wait_for_any: https://docs.openstack.org/futurist/latest/reference/index.html#waiters
|
||||||
|
|
||||||
|
|
||||||
Concurrency modes
|
|
||||||
#################
|
|
||||||
|
|
||||||
Evenlet has been the main concurrency library within the OpenStack community
|
|
||||||
for the last 10 years since the removal of twisted. Over the last few years,
|
|
||||||
the maintenance of eventlet has decreased and the efforts to remove the GIL
|
|
||||||
from Python (PEP 703), have fundamentally changed how concurrency is making
|
|
||||||
eventlet no longer viable. While transitioning to a new native thread
|
|
||||||
solution, Watcher services will be supporting both modes, with the usage of
|
|
||||||
native threading mode initially classified as ``experimental``.
|
|
||||||
|
|
||||||
It is possible to enable the new native threading mode by setting the following
|
|
||||||
environment variable in the corresponding service configuration:
|
|
||||||
|
|
||||||
.. code:: bash
|
|
||||||
|
|
||||||
OS_WATCHER_DISABLE_EVENTLET_PATCHING=true
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
|
|
||||||
The only service that supports two different concurrency modes is the
|
|
||||||
``decision engine``.
|
|
||||||
|
|
||||||
Decision engine concurrency
|
Decision engine concurrency
|
||||||
***************************
|
***************************
|
||||||
|
|
||||||
The concurrency in the decision engine is governed by two independent
|
The concurrency in the decision engine is governed by two independent
|
||||||
threadpools. These threadpools can be configured as GreenThreadPoolExecutor_
|
threadpools. Both of these threadpools are GreenThreadPoolExecutor_ from the
|
||||||
or ThreadPoolExecutor_, both from the futurist_ library, depending on the
|
futurist_ library. One of these is used automatically and most contributors
|
||||||
service configuration. One of these is used automatically and most contributors
|
|
||||||
will not interact with it while developing new features. The other threadpool
|
will not interact with it while developing new features. The other threadpool
|
||||||
can frequently be used while developing new features or updating existing ones.
|
can frequently be used while developing new features or updating existing ones.
|
||||||
It is known as the DecisionEngineThreadpool and allows to achieve performance
|
It is known as the DecisionEngineThreadpool and allows to achieve performance
|
||||||
improvements in network or I/O bound operations.
|
improvements in network or I/O bound operations.
|
||||||
|
|
||||||
.. _GreenThreadPoolExecutor: https://docs.openstack.org/futurist/latest/reference/index.html#futurist.GreenThreadPoolExecutor
|
.. _GreenThreadPoolExecutor: https://docs.openstack.org/futurist/latest/reference/index.html#executors
|
||||||
.. _ThreadPoolExecutor: https://docs.openstack.org/futurist/latest/reference/index.html#futurist.ThreadPoolExecutor
|
|
||||||
|
|
||||||
AuditEndpoint
|
AuditEndpoint
|
||||||
#############
|
#############
|
||||||
@@ -246,7 +221,7 @@ workflow engine can halt or take other actions while the action plan is being
|
|||||||
executed based on the success or failure of individual actions. However, the
|
executed based on the success or failure of individual actions. However, the
|
||||||
base workflow engine simply uses these notifies to store the result of
|
base workflow engine simply uses these notifies to store the result of
|
||||||
individual actions in the database. Additionally, since taskflow uses a graph
|
individual actions in the database. Additionally, since taskflow uses a graph
|
||||||
flow if any of the tasks would fail all children of this tasks not be executed
|
flow if any of the tasks would fail all childs of this tasks not be executed
|
||||||
while ``do_revert`` will be triggered for all parents.
|
while ``do_revert`` will be triggered for all parents.
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ multinode environment to use.
|
|||||||
You can set up the Watcher services quickly and easily using a Watcher
|
You can set up the Watcher services quickly and easily using a Watcher
|
||||||
DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin
|
DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin
|
||||||
model. To enable the Watcher plugin with DevStack, add the following to the
|
model. To enable the Watcher plugin with DevStack, add the following to the
|
||||||
``[[local|localrc]]`` section of your controller's ``local.conf`` to enable the
|
`[[local|localrc]]` section of your controller's `local.conf` to enable the
|
||||||
Watcher plugin::
|
Watcher plugin::
|
||||||
|
|
||||||
enable_plugin watcher https://opendev.org/openstack/watcher
|
enable_plugin watcher https://opendev.org/openstack/watcher
|
||||||
@@ -31,104 +31,64 @@ Quick Devstack Instructions with Datasources
|
|||||||
============================================
|
============================================
|
||||||
|
|
||||||
Watcher requires a datasource to collect metrics from compute nodes and
|
Watcher requires a datasource to collect metrics from compute nodes and
|
||||||
instances in order to execute most strategies. To enable this two possible
|
instances in order to execute most strategies. To enable this a
|
||||||
examples of ``[[local|localrc]]`` to setup DevStack for some of the
|
`[[local|localrc]]` to setup DevStack for some of the supported datasources
|
||||||
supported datasources is provided. These examples specify the minimal
|
is provided. These examples specify the minimal configuration parameters to
|
||||||
configuration parameters to get both Watcher and the datasource working
|
get both Watcher and the datasource working but can be expanded is desired.
|
||||||
but can be expanded is desired.
|
|
||||||
The first example configures watcher to user prometheus as a datasource, while
|
|
||||||
the second example show how to use gnocchi as the datasource. The procedure is
|
|
||||||
equivalent, it just requires using the ``local.conf.controller`` and
|
|
||||||
``local.conf.compute`` in the first example and
|
|
||||||
``local_gnocchi.conf.controller`` and ``local_gnocchi.conf.compute`` in the
|
|
||||||
second.
|
|
||||||
|
|
||||||
Prometheus
|
|
||||||
----------
|
|
||||||
|
|
||||||
With the Prometheus datasource most of the metrics for compute nodes and
|
|
||||||
instances will work with the provided configuration but metrics that
|
|
||||||
require Ironic such as ``host_airflow and`` ``host_power`` will still be
|
|
||||||
unavailable as well as ``instance_l3_cpu_cache``
|
|
||||||
|
|
||||||
.. code-block:: ini
|
|
||||||
|
|
||||||
[[local|localrc]]
|
|
||||||
|
|
||||||
enable_plugin watcher https://opendev.org/openstack/watcher
|
|
||||||
enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard
|
|
||||||
enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git
|
|
||||||
enable_plugin aodh https://opendev.org/openstack/aodh
|
|
||||||
enable_plugin devstack-plugin-prometheus https://opendev.org/openstack/devstack-plugin-prometheus
|
|
||||||
enable_plugin sg-core https://github.com/openstack-k8s-operators/sg-core main
|
|
||||||
|
|
||||||
|
|
||||||
CEILOMETER_BACKEND=sg-core
|
|
||||||
[[post-config|$NOVA_CONF]]
|
|
||||||
[DEFAULT]
|
|
||||||
compute_monitors=cpu.virt_driver
|
|
||||||
|
|
||||||
Gnocchi
|
Gnocchi
|
||||||
-------
|
-------
|
||||||
|
|
||||||
With the Gnocchi datasource most of the metrics for compute nodes and
|
With the Gnocchi datasource most of the metrics for compute nodes and
|
||||||
instances will work with the provided configuration but metrics that
|
instances will work with the provided configuration but metrics that
|
||||||
require Ironic such as ``host_airflow and`` ``host_power`` will still be
|
require Ironic such as `host_airflow and` `host_power` will still be
|
||||||
unavailable as well as ``instance_l3_cpu_cache``
|
unavailable as well as `instance_l3_cpu_cache`::
|
||||||
|
|
||||||
.. code-block:: ini
|
[[local|localrc]]
|
||||||
|
enable_plugin watcher https://opendev.org/openstack/watcher
|
||||||
|
|
||||||
[[local|localrc]]
|
enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git
|
||||||
|
CEILOMETER_BACKEND=gnocchi
|
||||||
|
|
||||||
enable_plugin watcher https://opendev.org/openstack/watcher
|
enable_plugin aodh https://opendev.org/openstack/aodh
|
||||||
enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard
|
enable_plugin panko https://opendev.org/openstack/panko
|
||||||
enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git
|
|
||||||
enable_plugin aodh https://opendev.org/openstack/aodh
|
|
||||||
enable_plugin panko https://opendev.org/openstack/panko
|
|
||||||
|
|
||||||
CEILOMETER_BACKEND=gnocchi
|
[[post-config|$NOVA_CONF]]
|
||||||
[[post-config|$NOVA_CONF]]
|
[DEFAULT]
|
||||||
[DEFAULT]
|
compute_monitors=cpu.virt_driver
|
||||||
compute_monitors=cpu.virt_driver
|
|
||||||
|
|
||||||
Detailed DevStack Instructions
|
Detailed DevStack Instructions
|
||||||
==============================
|
==============================
|
||||||
|
|
||||||
#. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack).
|
#. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack).
|
||||||
One of these servers will be the controller node while the others will be
|
One of these servers will be the controller node while the others will be
|
||||||
compute nodes. N is preferably >= 3 so that you have at least 2 compute
|
compute nodes. N is preferably >= 3 so that you have at least 2 compute
|
||||||
nodes, but in order to stand up the Watcher services only 1 server is
|
nodes, but in order to stand up the Watcher services only 1 server is
|
||||||
needed (i.e., no computes are needed if you want to just experiment with
|
needed (i.e., no computes are needed if you want to just experiment with
|
||||||
the Watcher services). These servers can be VMs running on your local
|
the Watcher services). These servers can be VMs running on your local
|
||||||
machine via VirtualBox if you prefer. DevStack currently recommends that
|
machine via VirtualBox if you prefer. DevStack currently recommends that
|
||||||
you use Ubuntu 16.04 LTS. The servers should also have connections to the
|
you use Ubuntu 16.04 LTS. The servers should also have connections to the
|
||||||
same network such that they are all able to communicate with one another.
|
same network such that they are all able to communicate with one another.
|
||||||
|
|
||||||
#. For each server, clone the DevStack repository and create the stack user
|
#. For each server, clone the DevStack repository and create the stack user::
|
||||||
|
|
||||||
.. code-block:: bash
|
sudo apt-get update
|
||||||
|
sudo apt-get install git
|
||||||
sudo apt-get update
|
git clone https://opendev.org/openstack/devstack.git
|
||||||
sudo apt-get install git
|
sudo ./devstack/tools/create-stack-user.sh
|
||||||
git clone https://opendev.org/openstack/devstack.git
|
|
||||||
sudo ./devstack/tools/create-stack-user.sh
|
|
||||||
|
|
||||||
Now you have a stack user that is used to run the DevStack processes. You
|
Now you have a stack user that is used to run the DevStack processes. You
|
||||||
may want to give your stack user a password to allow SSH via a password
|
may want to give your stack user a password to allow SSH via a password::
|
||||||
|
|
||||||
.. code-block:: bash
|
sudo passwd stack
|
||||||
|
|
||||||
sudo passwd stack
|
#. Switch to the stack user and clone the DevStack repo again::
|
||||||
|
|
||||||
#. Switch to the stack user and clone the DevStack repo again
|
sudo su stack
|
||||||
|
cd ~
|
||||||
|
git clone https://opendev.org/openstack/devstack.git
|
||||||
|
|
||||||
.. code-block:: bash
|
#. For each compute node, copy the provided `local.conf.compute`_ example file
|
||||||
|
|
||||||
sudo su stack
|
|
||||||
cd ~
|
|
||||||
git clone https://opendev.org/openstack/devstack.git
|
|
||||||
|
|
||||||
#. For each compute node, copy the provided `local.conf.compute`_
|
|
||||||
(`local_gnocchi.conf.compute`_ if deploying with gnocchi) example file
|
|
||||||
to the compute node's system at ~/devstack/local.conf. Make sure the
|
to the compute node's system at ~/devstack/local.conf. Make sure the
|
||||||
HOST_IP and SERVICE_HOST values are changed appropriately - i.e., HOST_IP
|
HOST_IP and SERVICE_HOST values are changed appropriately - i.e., HOST_IP
|
||||||
is set to the IP address of the compute node and SERVICE_HOST is set to the
|
is set to the IP address of the compute node and SERVICE_HOST is set to the
|
||||||
@@ -144,47 +104,29 @@ Detailed DevStack Instructions
|
|||||||
to configure similar configuration options for the projects providing those
|
to configure similar configuration options for the projects providing those
|
||||||
metrics.
|
metrics.
|
||||||
|
|
||||||
#. For the controller node, copy the provided `local.conf.controller`_
|
#. For the controller node, copy the provided `local.conf.controller`_ example
|
||||||
(`local_gnocchi.conf.controller`_ if deploying with gnocchi) example
|
|
||||||
file to the controller node's system at ~/devstack/local.conf. Make sure
|
file to the controller node's system at ~/devstack/local.conf. Make sure
|
||||||
the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP
|
the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP
|
||||||
address of the controller node.
|
address of the controller node.
|
||||||
|
|
||||||
.. NOTE::
|
Note: if you want to use another Watcher git repository (such as a local
|
||||||
if you want to use another Watcher git repository (such as a local
|
one), then change the enable plugin line::
|
||||||
one), then change the enable plugin line
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
enable_plugin watcher <your_local_git_repo> [optional_branch]
|
|
||||||
|
|
||||||
|
enable_plugin watcher <your_local_git_repo> [optional_branch]
|
||||||
|
|
||||||
If you do this, then the Watcher DevStack plugin will try to pull the
|
If you do this, then the Watcher DevStack plugin will try to pull the
|
||||||
python-watcherclient repo from ``<your_local_git_repo>/../``, so either make
|
python-watcherclient repo from <your_local_git_repo>/../, so either make
|
||||||
sure that is also available or specify WATCHERCLIENT_REPO in the ``local.conf``
|
sure that is also available or specify WATCHERCLIENT_REPO in the local.conf
|
||||||
file.
|
file.
|
||||||
|
|
||||||
.. NOTE::
|
Note: if you want to use a specific branch, specify WATCHER_BRANCH in the
|
||||||
if you want to use a specific branch, specify WATCHER_BRANCH in the
|
local.conf file. By default it will use the master branch.
|
||||||
local.conf file. By default it will use the master branch.
|
|
||||||
|
|
||||||
.. Note::
|
Note: watcher-api will default run under apache/httpd, set the variable
|
||||||
watcher-api will default run under apache/httpd, set the variable
|
WATCHER_USE_MOD_WSGI=FALSE if you do not wish to run under apache/httpd.
|
||||||
WATCHER_USE_MOD_WSGI=FALSE if you do not wish to run under apache/httpd.
|
For development environment it is suggested to set WATHCER_USE_MOD_WSGI
|
||||||
For development environment it is suggested to set WATHCER_USE_MOD_WSGI
|
to FALSE. For Production environment it is suggested to keep it at the
|
||||||
to FALSE. For Production environment it is suggested to keep it at the
|
default TRUE value.
|
||||||
default TRUE value.
|
|
||||||
|
|
||||||
#. If you want to use prometheus as a datasource, you need to provide a
|
|
||||||
Prometheus configuration with the compute nodes set as targets, so
|
|
||||||
it can consume their node-exporter metrics (if you are deploying watcher
|
|
||||||
with gnocchi as datasource you can skip this step altogether). Copy the
|
|
||||||
provided `prometheus.yml`_ example file and set the appropriate hostnames
|
|
||||||
for all the compute nodes (the example configures 2 of them plus the
|
|
||||||
controller, but you should add all of them if using more than 2 compute
|
|
||||||
nodes). Set the value of ``PROMETHEUS_CONFIG_FILE`` to the path of the
|
|
||||||
file you created in the local.conf file (the sample local.conf file uses
|
|
||||||
``$DEST`` as the default value for the prometheus config path).
|
|
||||||
|
|
||||||
#. Start stacking from the controller node::
|
#. Start stacking from the controller node::
|
||||||
|
|
||||||
@@ -192,15 +134,11 @@ Detailed DevStack Instructions
|
|||||||
|
|
||||||
#. Start stacking on each of the compute nodes using the same command.
|
#. Start stacking on each of the compute nodes using the same command.
|
||||||
|
|
||||||
.. seealso::
|
#. Configure the environment for live migration via NFS. See the
|
||||||
Configure the environment for live migration via NFS. See the
|
`Multi-Node DevStack Environment`_ section for more details.
|
||||||
`Multi-Node DevStack Environment`_ section for more details.
|
|
||||||
|
|
||||||
.. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller
|
.. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller
|
||||||
.. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute
|
.. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute
|
||||||
.. _local_gnocchi.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local_gnocchi.conf.controller
|
|
||||||
.. _local_gnocchi.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local_gnocchi.conf.compute
|
|
||||||
.. _prometheus.yml: https://github.com/openstack/watcher/tree/master/devstack/prometheus.yml
|
|
||||||
|
|
||||||
Multi-Node DevStack Environment
|
Multi-Node DevStack Environment
|
||||||
===============================
|
===============================
|
||||||
@@ -209,19 +147,60 @@ Since deploying Watcher with only a single compute node is not very useful, a
|
|||||||
few tips are given here for enabling a multi-node environment with live
|
few tips are given here for enabling a multi-node environment with live
|
||||||
migration.
|
migration.
|
||||||
|
|
||||||
.. NOTE::
|
Configuring NFS Server
|
||||||
|
----------------------
|
||||||
|
|
||||||
Nova supports live migration with local block storage so by default NFS
|
If you would like to use live migration for shared storage, then the controller
|
||||||
is not required and is considered an advance configuration.
|
can serve as the NFS server if needed::
|
||||||
The minimum requirements for live migration are:
|
|
||||||
|
|
||||||
- all hostnames are resolvable on each host
|
sudo apt-get install nfs-kernel-server
|
||||||
- all hosts have a passwordless ssh key that is trusted by the other hosts
|
sudo mkdir -p /nfs/instances
|
||||||
- all hosts have a known_hosts file that lists each hosts
|
sudo chown stack:stack /nfs/instances
|
||||||
|
|
||||||
If these requirements are met live migration will be possible.
|
Add an entry to `/etc/exports` with the appropriate gateway and netmask
|
||||||
Shared storage such as ceph, booting form cinder volume or nfs are recommend
|
information::
|
||||||
when testing evacuate if you want to preserve vm data.
|
|
||||||
|
/nfs/instances <gateway>/<netmask>(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)
|
||||||
|
|
||||||
|
Export the NFS directories::
|
||||||
|
|
||||||
|
sudo exportfs -ra
|
||||||
|
|
||||||
|
Make sure the NFS server is running::
|
||||||
|
|
||||||
|
sudo service nfs-kernel-server status
|
||||||
|
|
||||||
|
If the server is not running, then start it::
|
||||||
|
|
||||||
|
sudo service nfs-kernel-server start
|
||||||
|
|
||||||
|
Configuring NFS on Compute Node
|
||||||
|
-------------------------------
|
||||||
|
|
||||||
|
Each compute node needs to use the NFS server to hold the instance data::
|
||||||
|
|
||||||
|
sudo apt-get install rpcbind nfs-common
|
||||||
|
mkdir -p /opt/stack/data/instances
|
||||||
|
sudo mount <nfs-server-ip>:/nfs/instances /opt/stack/data/instances
|
||||||
|
|
||||||
|
If you would like to have the NFS directory automatically mounted on reboot,
|
||||||
|
then add the following to `/etc/fstab`::
|
||||||
|
|
||||||
|
<nfs-server-ip>:/nfs/instances /opt/stack/data/instances nfs auto 0 0
|
||||||
|
|
||||||
|
Edit `/etc/libvirt/libvirtd.conf` to make sure the following values are set::
|
||||||
|
|
||||||
|
listen_tls = 0
|
||||||
|
listen_tcp = 1
|
||||||
|
auth_tcp = "none"
|
||||||
|
|
||||||
|
Edit `/etc/default/libvirt-bin`::
|
||||||
|
|
||||||
|
libvirtd_opts="-d -l"
|
||||||
|
|
||||||
|
Restart the libvirt service::
|
||||||
|
|
||||||
|
sudo service libvirt-bin restart
|
||||||
|
|
||||||
Setting up SSH keys between compute nodes to enable live migration
|
Setting up SSH keys between compute nodes to enable live migration
|
||||||
------------------------------------------------------------------
|
------------------------------------------------------------------
|
||||||
@@ -250,91 +229,22 @@ must exist in every other compute node's stack user's authorized_keys file and
|
|||||||
every compute node's public ECDSA key needs to be in every other compute
|
every compute node's public ECDSA key needs to be in every other compute
|
||||||
node's root user's known_hosts file.
|
node's root user's known_hosts file.
|
||||||
|
|
||||||
Configuring NFS Server (ADVANCED)
|
Disable serial console
|
||||||
---------------------------------
|
----------------------
|
||||||
|
|
||||||
If you would like to use live migration for shared storage, then the controller
|
Serial console needs to be disabled for live migration to work.
|
||||||
can serve as the NFS server if needed
|
|
||||||
|
|
||||||
.. code-block:: bash
|
On both the controller and compute node, in /etc/nova/nova.conf
|
||||||
|
|
||||||
sudo apt-get install nfs-kernel-server
|
[serial_console]
|
||||||
sudo mkdir -p /nfs/instances
|
enabled = False
|
||||||
sudo chown stack:stack /nfs/instances
|
|
||||||
|
|
||||||
Add an entry to ``/etc/exports`` with the appropriate gateway and netmask
|
Alternatively, in devstack's local.conf:
|
||||||
information
|
|
||||||
|
|
||||||
|
[[post-config|$NOVA_CONF]]
|
||||||
|
[serial_console]
|
||||||
|
#enabled=false
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
/nfs/instances <gateway>/<netmask>(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)
|
|
||||||
|
|
||||||
Export the NFS directories
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo exportfs -ra
|
|
||||||
|
|
||||||
Make sure the NFS server is running
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo service nfs-kernel-server status
|
|
||||||
|
|
||||||
If the server is not running, then start it
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo service nfs-kernel-server start
|
|
||||||
|
|
||||||
Configuring NFS on Compute Node (ADVANCED)
|
|
||||||
------------------------------------------
|
|
||||||
|
|
||||||
Each compute node needs to use the NFS server to hold the instance data
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo apt-get install rpcbind nfs-common
|
|
||||||
mkdir -p /opt/stack/data/instances
|
|
||||||
sudo mount <nfs-server-ip>:/nfs/instances /opt/stack/data/instances
|
|
||||||
|
|
||||||
If you would like to have the NFS directory automatically mounted on reboot,
|
|
||||||
then add the following to ``/etc/fstab``
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
<nfs-server-ip>:/nfs/instances /opt/stack/data/instances nfs auto 0 0
|
|
||||||
|
|
||||||
Configuring libvirt to listen on tcp (ADVANCED)
|
|
||||||
-----------------------------------------------
|
|
||||||
|
|
||||||
.. NOTE::
|
|
||||||
|
|
||||||
By default nova will use ssh as a transport for live migration
|
|
||||||
if you have a low bandwidth connection you can use tcp instead
|
|
||||||
however this is generally not recommended.
|
|
||||||
|
|
||||||
|
|
||||||
Edit ``/etc/libvirt/libvirtd.conf`` to make sure the following values are set
|
|
||||||
|
|
||||||
.. code-block:: ini
|
|
||||||
|
|
||||||
listen_tls = 0
|
|
||||||
listen_tcp = 1
|
|
||||||
auth_tcp = "none"
|
|
||||||
|
|
||||||
Edit ``/etc/default/libvirt-bin``
|
|
||||||
|
|
||||||
.. code-block:: ini
|
|
||||||
|
|
||||||
libvirtd_opts="-d -l"
|
|
||||||
|
|
||||||
Restart the libvirt service
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo service libvirt-bin restart
|
|
||||||
|
|
||||||
VNC server configuration
|
VNC server configuration
|
||||||
------------------------
|
------------------------
|
||||||
@@ -342,18 +252,13 @@ VNC server configuration
|
|||||||
The VNC server listening parameter needs to be set to any address so
|
The VNC server listening parameter needs to be set to any address so
|
||||||
that the server can accept connections from all of the compute nodes.
|
that the server can accept connections from all of the compute nodes.
|
||||||
|
|
||||||
On both the controller and compute node, in ``/etc/nova/nova.conf``
|
On both the controller and compute node, in /etc/nova/nova.conf
|
||||||
|
|
||||||
.. code-block:: ini
|
vncserver_listen = 0.0.0.0
|
||||||
|
|
||||||
[vnc]
|
Alternatively, in devstack's local.conf:
|
||||||
server_listen = "0.0.0.0"
|
|
||||||
|
|
||||||
Alternatively, in devstack's ``local.conf``:
|
VNCSERVER_LISTEN=0.0.0.0
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
VNCSERVER_LISTEN="0.0.0.0"
|
|
||||||
|
|
||||||
|
|
||||||
Environment final checkup
|
Environment final checkup
|
||||||
|
|||||||
@@ -43,7 +43,7 @@ different version of the above, please document your configuration here!
|
|||||||
Getting the latest code
|
Getting the latest code
|
||||||
=======================
|
=======================
|
||||||
|
|
||||||
Make a clone of the code from our ``Git repository``:
|
Make a clone of the code from our `Git repository`:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
@@ -72,9 +72,9 @@ These dependencies can be installed from PyPi_ using the Python tool pip_.
|
|||||||
.. _PyPi: https://pypi.org/
|
.. _PyPi: https://pypi.org/
|
||||||
.. _pip: https://pypi.org/project/pip
|
.. _pip: https://pypi.org/project/pip
|
||||||
|
|
||||||
However, your system *may* need additional dependencies that ``pip`` (and by
|
However, your system *may* need additional dependencies that `pip` (and by
|
||||||
extension, PyPi) cannot satisfy. These dependencies should be installed
|
extension, PyPi) cannot satisfy. These dependencies should be installed
|
||||||
prior to using ``pip``, and the installation method may vary depending on
|
prior to using `pip`, and the installation method may vary depending on
|
||||||
your platform.
|
your platform.
|
||||||
|
|
||||||
* Ubuntu 16.04::
|
* Ubuntu 16.04::
|
||||||
@@ -141,7 +141,7 @@ forget to activate it:
|
|||||||
|
|
||||||
$ workon watcher
|
$ workon watcher
|
||||||
|
|
||||||
You should then be able to ``import watcher`` using Python without issue:
|
You should then be able to `import watcher` using Python without issue:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
|||||||
@@ -10,4 +10,3 @@ Contribution Guide
|
|||||||
devstack
|
devstack
|
||||||
testing
|
testing
|
||||||
rally_link
|
rally_link
|
||||||
release-guide
|
|
||||||
|
|||||||
@@ -300,6 +300,6 @@ Using that you can now query the values for that specific metric:
|
|||||||
.. code-block:: py
|
.. code-block:: py
|
||||||
|
|
||||||
avg_meter = self.datasource_backend.statistic_aggregation(
|
avg_meter = self.datasource_backend.statistic_aggregation(
|
||||||
instance.uuid, 'instance_cpu_usage', self.periods['instance'],
|
instance.uuid, 'cpu_util', self.periods['instance'],
|
||||||
self.granularity,
|
self.granularity,
|
||||||
aggregation=self.aggregation_method['instance'])
|
aggregation=self.aggregation_method['instance'])
|
||||||
|
|||||||
@@ -1,462 +0,0 @@
|
|||||||
..
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
not use this file except in compliance with the License. You may obtain
|
|
||||||
a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
License for the specific language governing permissions and limitations
|
|
||||||
under the License.
|
|
||||||
|
|
||||||
Chronological Release Liaison Guide
|
|
||||||
====================================
|
|
||||||
|
|
||||||
This is a reference guide that a release liaison may use as an aid, if
|
|
||||||
they choose.
|
|
||||||
|
|
||||||
Watcher uses the `Distributed Project Leadership (DPL)`__ model where
|
|
||||||
traditional release liaison responsibilities are distributed among various
|
|
||||||
liaisons. The release liaison is responsible for requesting releases,
|
|
||||||
reviewing Feature Freeze Exception (FFE) requests, and coordinating
|
|
||||||
release-related activities with the team.
|
|
||||||
|
|
||||||
.. __: https://governance.openstack.org/tc/reference/distributed-project-leadership.html
|
|
||||||
|
|
||||||
How to Use This Guide
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
This guide is organized chronologically to follow the OpenStack release
|
|
||||||
cycle from PTG planning through post-release activities. You can use it
|
|
||||||
in two ways:
|
|
||||||
|
|
||||||
**For New Release Liaisons**
|
|
||||||
Read through the entire guide to understand the full release cycle,
|
|
||||||
then bookmark it for reference during your term.
|
|
||||||
|
|
||||||
**For Experienced Release Liaisons**
|
|
||||||
Jump directly to the relevant section for your current phase in the
|
|
||||||
release cycle. Each major section corresponds to a specific time period.
|
|
||||||
|
|
||||||
**Key Navigation Tips**
|
|
||||||
* The :ref:`glossary` defines all acronyms and terminology used
|
|
||||||
* Time-sensitive activities are clearly marked by milestone phases
|
|
||||||
* DPL coordination notes indicate when team collaboration is required
|
|
||||||
|
|
||||||
DPL Liaison Coordination
|
|
||||||
-------------------------
|
|
||||||
|
|
||||||
Under the DPL model, the release liaison coordinates with other project
|
|
||||||
liaisons and the broader team for effective release management. The release
|
|
||||||
liaison has authority for release-specific decisions (FFE approvals, release
|
|
||||||
timing, etc.) while major process changes and strategic decisions require
|
|
||||||
team consensus.
|
|
||||||
|
|
||||||
This coordination approach ensures that:
|
|
||||||
|
|
||||||
* Release activities are properly managed by a dedicated liaison
|
|
||||||
* Team input is gathered for significant decisions
|
|
||||||
* Other liaisons are informed of release-related developments that may
|
|
||||||
affect their areas
|
|
||||||
* Release processes remain responsive while maintaining team alignment
|
|
||||||
|
|
||||||
Project Context
|
|
||||||
---------------
|
|
||||||
|
|
||||||
* Coordinate with the watcher meeting (chair rotates each meeting, with
|
|
||||||
volunteers requested at the end of each meeting)
|
|
||||||
|
|
||||||
* Meeting etherpad: https://etherpad.opendev.org/p/openstack-watcher-irc-meeting
|
|
||||||
* IRC channel: #openstack-watcher
|
|
||||||
|
|
||||||
* Get acquainted with the release schedule
|
|
||||||
|
|
||||||
* Example: https://releases.openstack.org/<current-release>/schedule.html
|
|
||||||
|
|
||||||
* Familiarize with Watcher project repositories and tracking:
|
|
||||||
|
|
||||||
Watcher Main Repository
|
|
||||||
`Primary codebase for the Watcher service <https://opendev.org/openstack/watcher>`__
|
|
||||||
|
|
||||||
Watcher Dashboard
|
|
||||||
`Horizon plugin for Watcher UI <https://opendev.org/openstack/watcher-dashboard>`__
|
|
||||||
|
|
||||||
Watcher Tempest Plugin
|
|
||||||
`Integration tests <https://opendev.org/openstack/watcher-tempest-plugin>`__ (follows tempest cycle)
|
|
||||||
|
|
||||||
Python Watcher Client
|
|
||||||
`Command-line client and Python library <https://opendev.org/openstack/python-watcherclient>`__
|
|
||||||
|
|
||||||
Watcher Specifications
|
|
||||||
`Design specifications <https://opendev.org/openstack/watcher-specs>`__ (not released)
|
|
||||||
|
|
||||||
Watcher Launchpad (Main)
|
|
||||||
`Primary bug and feature tracking <https://launchpad.net/watcher>`__
|
|
||||||
|
|
||||||
Watcher Dashboard Launchpad
|
|
||||||
`Dashboard-specific tracking <https://launchpad.net/watcher-dashboard/>`__
|
|
||||||
|
|
||||||
Watcher Tempest Plugin Launchpad
|
|
||||||
`Test plugin tracking <https://launchpad.net/watcher-tempest-plugin>`__
|
|
||||||
|
|
||||||
Python Watcher Client Launchpad
|
|
||||||
`Client library tracking <https://launchpad.net/python-watcherclient>`__
|
|
||||||
|
|
||||||
Project Team Gathering
|
|
||||||
----------------------
|
|
||||||
|
|
||||||
Event Liaison Coordination
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
* Work with the project team to select an event liaison for PTG coordination.
|
|
||||||
The event liaison is responsible for:
|
|
||||||
|
|
||||||
* Reserving sufficient space at PTG for the project team's meetings
|
|
||||||
* Putting out an agenda for team meetings
|
|
||||||
* Ensuring meetings are organized and facilitated
|
|
||||||
* Documenting meeting results
|
|
||||||
|
|
||||||
* If no event liaison is selected, these duties revert to the release liaison.
|
|
||||||
|
|
||||||
* Monitor for OpenStack Events team queries on the mailing list requesting
|
|
||||||
event liaison volunteers - teams not responding may lose event
|
|
||||||
representation.
|
|
||||||
|
|
||||||
PTG Planning and Execution
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
* Create PTG planning etherpad, retrospective etherpad and alert about it in
|
|
||||||
watcher meeting and dev mailing list
|
|
||||||
|
|
||||||
* Example: https://etherpad.opendev.org/p/apr2025-ptg-watcher
|
|
||||||
|
|
||||||
* Run sessions at the PTG (if no event liaison is selected)
|
|
||||||
|
|
||||||
* Do a retro of the previous cycle
|
|
||||||
|
|
||||||
* Coordinate with team to establish agreement on the agenda for this release:
|
|
||||||
|
|
||||||
Review Days Planning
|
|
||||||
Determine number of review days allocated for specs and implementation work
|
|
||||||
|
|
||||||
Freeze Dates Coordination
|
|
||||||
Define Spec approval and Feature freeze dates through team collaboration
|
|
||||||
|
|
||||||
Release Schedule Modifications
|
|
||||||
Modify the OpenStack release schedule if needed by proposing new dates
|
|
||||||
(Example: https://review.opendev.org/c/openstack/releases/+/877094)
|
|
||||||
|
|
||||||
* Discuss the implications of the `SLURP or non-SLURP`__ current release
|
|
||||||
|
|
||||||
.. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html
|
|
||||||
|
|
||||||
* Sign up for group photo at the PTG (if applicable)
|
|
||||||
|
|
||||||
|
|
||||||
After PTG
|
|
||||||
---------
|
|
||||||
|
|
||||||
* Send PTG session summaries to the dev mailing list
|
|
||||||
|
|
||||||
* Add `RFE bugs`__ if you have action items that are simple to do but
|
|
||||||
without a owner yet.
|
|
||||||
|
|
||||||
* Update IRC #openstack-watcher channel topic to point to new
|
|
||||||
development-planning etherpad.
|
|
||||||
|
|
||||||
.. __: https://bugs.launchpad.net/watcher/+bugs?field.tag=rfe
|
|
||||||
|
|
||||||
A few weeks before milestone 1
|
|
||||||
------------------------------
|
|
||||||
|
|
||||||
* Plan a spec review day
|
|
||||||
|
|
||||||
* Periodically check the series goals others have proposed in the “Set series
|
|
||||||
goals” link:
|
|
||||||
|
|
||||||
* Example: https://blueprints.launchpad.net/watcher/<current-release>/+setgoals
|
|
||||||
|
|
||||||
Milestone 1
|
|
||||||
-----------
|
|
||||||
|
|
||||||
* Release watcher and python-watcherclient via the openstack/releases repo.
|
|
||||||
Watcher follows the `cycle-with-intermediary`__ release model:
|
|
||||||
|
|
||||||
.. __: https://releases.openstack.org/reference/release_models.html#cycle-with-intermediary
|
|
||||||
|
|
||||||
* Create actual releases (not just launchpad bookkeeping) at milestone points
|
|
||||||
* No launchpad milestone releases are created for intermediary releases
|
|
||||||
* When releasing the first version of a library for the cycle,
|
|
||||||
bump
|
|
||||||
the minor version to leave room for future stable branch
|
|
||||||
releases
|
|
||||||
|
|
||||||
* Release stable branches of watcher
|
|
||||||
|
|
||||||
Stable Branch Release Process
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
Prepare the stable branch for evaluation:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
git checkout <stable branch>
|
|
||||||
git log --no-merges <last tag>..
|
|
||||||
|
|
||||||
Analyze commits to determine version bump according to semantic versioning.
|
|
||||||
|
|
||||||
Semantic Versioning Guidelines
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
Choose version bump based on changes since last release:
|
|
||||||
|
|
||||||
Major Version (X)
|
|
||||||
Backward-incompatible changes that break existing APIs
|
|
||||||
|
|
||||||
Minor Version (Y)
|
|
||||||
New features that maintain backward compatibility
|
|
||||||
|
|
||||||
Patch Version (Z)
|
|
||||||
Bug fixes that maintain backward compatibility
|
|
||||||
|
|
||||||
Release Command Usage
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
Generate the release using OpenStack tooling:
|
|
||||||
|
|
||||||
* Use the `new-release command
|
|
||||||
<https://releases.openstack.org/reference/using.html#using-new-release-command>`__
|
|
||||||
* Propose the release with version according to chosen semver format
|
|
||||||
(x.y.z)
|
|
||||||
|
|
||||||
Summit
|
|
||||||
------
|
|
||||||
|
|
||||||
``Responsibility Precedence for Summit Activities:``
|
|
||||||
|
|
||||||
1. ``Project Update/Onboarding Liaisons`` (if appointed):
|
|
||||||
|
|
||||||
* ``Project Update Liaison``: responsible for giving the project update
|
|
||||||
showcasing team's achievements for the cycle to the community
|
|
||||||
* ``Project Onboarding Liaison``: responsible for giving/facilitating
|
|
||||||
onboarding sessions during events for the project's community
|
|
||||||
|
|
||||||
2. ``Event Liaison`` (if no Project Update/Onboarding liaisons exist):
|
|
||||||
|
|
||||||
* Coordinates all Summit activities including project updates and onboarding
|
|
||||||
|
|
||||||
3. ``Release Liaison`` (if no Event Liaison is appointed):
|
|
||||||
|
|
||||||
* Work with the team to ensure Summit activities are properly handled:
|
|
||||||
|
|
||||||
* Prepare the project update presentation
|
|
||||||
* Prepare the on-boarding session materials
|
|
||||||
* Prepare the operator meet-and-greet session
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
|
|
||||||
The team can choose to not have a Summit presence if desired.
|
|
||||||
|
|
||||||
A few weeks before milestone 2
|
|
||||||
------------------------------
|
|
||||||
|
|
||||||
* Plan a spec review day (optional)
|
|
||||||
|
|
||||||
Milestone 2
|
|
||||||
-----------
|
|
||||||
|
|
||||||
* Spec freeze (unless changed by team agreement at PTG)
|
|
||||||
|
|
||||||
* Release watcher and python-watcherclient (if needed)
|
|
||||||
|
|
||||||
* Stable branch releases of watcher
|
|
||||||
|
|
||||||
|
|
||||||
Shortly after spec freeze
|
|
||||||
-------------------------
|
|
||||||
|
|
||||||
* Create a blueprint status etherpad to help track, especially non-priority
|
|
||||||
blueprint work, to help things get done by Feature Freeze (FF). Example:
|
|
||||||
|
|
||||||
* https://etherpad.opendev.org/p/watcher-<release>-blueprint-status
|
|
||||||
|
|
||||||
* Create or review a patch to add the next release’s specs directory so people
|
|
||||||
can propose specs for next release after spec freeze for current release
|
|
||||||
|
|
||||||
Milestone 3
|
|
||||||
-----------
|
|
||||||
|
|
||||||
* Feature freeze day
|
|
||||||
|
|
||||||
* Client library freeze, release python-watcherclient
|
|
||||||
|
|
||||||
* Close out all blueprints, including “catch all” blueprints like mox,
|
|
||||||
versioned notifications
|
|
||||||
|
|
||||||
* Stable branch releases of watcher
|
|
||||||
|
|
||||||
* Start writing the `cycle highlights
|
|
||||||
<https://docs.openstack.org/project-team-guide/release-management.html#cycle-highlights>`__
|
|
||||||
|
|
||||||
Week following milestone 3
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
* If warranted, announce the FFE (feature freeze exception process) to
|
|
||||||
have people propose FFE requests to a special etherpad where they will
|
|
||||||
be reviewed.
|
|
||||||
FFE requests should first be discussed in the IRC meeting with the
|
|
||||||
requester present.
|
|
||||||
The release liaison has final decision on granting exceptions.
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
|
|
||||||
if there is only a short time between FF and RC1 (lately it’s been 2
|
|
||||||
weeks), then the only likely candidates will be low-risk things that are
|
|
||||||
almost done. In general Feature Freeze exceptions should not be granted,
|
|
||||||
instead features should be deferred and reproposed for the next
|
|
||||||
development
|
|
||||||
cycle. FFE never extend beyond RC1.
|
|
||||||
|
|
||||||
* Mark the max microversion for the release in the
|
|
||||||
:doc:`/contributor/api_microversion_history`
|
|
||||||
|
|
||||||
A few weeks before RC
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
* Update the release status etherpad with RC1 todos and keep track
|
|
||||||
of them in meetings
|
|
||||||
|
|
||||||
* Go through the bug list and identify any rc-potential bugs and tag them
|
|
||||||
|
|
||||||
RC
|
|
||||||
--
|
|
||||||
|
|
||||||
* Follow the standard OpenStack release checklist process
|
|
||||||
|
|
||||||
* If we want to drop backward-compat RPC code, we have to do a major RPC
|
|
||||||
version bump and coordinate it just before the major release:
|
|
||||||
|
|
||||||
* https://wiki.openstack.org/wiki/RpcMajorVersionUpdates
|
|
||||||
|
|
||||||
* Example: https://review.opendev.org/541035
|
|
||||||
|
|
||||||
* “Merge latest translations" means translation patches
|
|
||||||
|
|
||||||
* Check for translations with:
|
|
||||||
|
|
||||||
* https://review.opendev.org/#/q/status:open+project:openstack/watcher+branch:master+topic:zanata/translations
|
|
||||||
|
|
||||||
* Should NOT plan to have more than one RC if possible. RC2 should only happen
|
|
||||||
if there was a mistake and something was missed for RC, or a new regression
|
|
||||||
was discovered
|
|
||||||
|
|
||||||
* Write the reno prelude for the release GA
|
|
||||||
|
|
||||||
* Example: https://review.opendev.org/644412
|
|
||||||
|
|
||||||
* Push the cycle-highlights in marketing-friendly sentences and propose to the
|
|
||||||
openstack/releases repo. Usually based on reno prelude but made more readable
|
|
||||||
and friendly
|
|
||||||
|
|
||||||
* Example: https://review.opendev.org/644697
|
|
||||||
|
|
||||||
Immediately after RC
|
|
||||||
--------------------
|
|
||||||
|
|
||||||
* Look for bot proposed changes to reno and stable/<cycle>
|
|
||||||
|
|
||||||
* Create the launchpad series for the next cycle
|
|
||||||
|
|
||||||
* Set the development focus of the project to the new cycle series
|
|
||||||
|
|
||||||
* Set the status of the new series to “active development”
|
|
||||||
|
|
||||||
* Set the last series status to “current stable branch release”
|
|
||||||
|
|
||||||
* Set the previous to last series status to “supported”
|
|
||||||
|
|
||||||
* Repeat launchpad steps ^ for all watcher deliverables.
|
|
||||||
|
|
||||||
* Make sure the specs directory for the next cycle gets created so people can
|
|
||||||
start proposing new specs
|
|
||||||
|
|
||||||
* Make sure to move implemented specs from the previous release
|
|
||||||
|
|
||||||
* Move implemented specs manually (TODO: add tox command in future)
|
|
||||||
|
|
||||||
* Remove template files:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
rm doc/source/specs/<release>/index.rst
|
|
||||||
rm doc/source/specs/<release>/template.rst
|
|
||||||
|
|
||||||
* Ensure liaison handoff: either transition to new release liaison or confirm
|
|
||||||
reappointment for next cycle
|
|
||||||
|
|
||||||
.. _glossary:
|
|
||||||
|
|
||||||
Glossary
|
|
||||||
--------
|
|
||||||
|
|
||||||
DPL
|
|
||||||
Distributed Project Leadership - A governance model where traditional PTL
|
|
||||||
responsibilities are distributed among various specialized liaisons.
|
|
||||||
|
|
||||||
FFE
|
|
||||||
Feature Freeze Exception - A request to add a feature after the feature
|
|
||||||
freeze deadline. Should be used sparingly for low-risk, nearly
|
|
||||||
complete features.
|
|
||||||
|
|
||||||
GA
|
|
||||||
General Availability - The final release of a software version for
|
|
||||||
production use.
|
|
||||||
|
|
||||||
PTG
|
|
||||||
Project Team Gathering - A collaborative event where OpenStack project
|
|
||||||
teams meet to plan and coordinate development activities.
|
|
||||||
|
|
||||||
RC
|
|
||||||
Release Candidate - A pre-release version that is potentially the final
|
|
||||||
version, pending testing and bug fixes.
|
|
||||||
|
|
||||||
RFE
|
|
||||||
Request for Enhancement - A type of bug report requesting a new feature
|
|
||||||
or enhancement to existing functionality.
|
|
||||||
|
|
||||||
SLURP
|
|
||||||
Skip Level Upgrade Release Process - An extended maintenance release
|
|
||||||
that allows skipping intermediate versions during upgrades.
|
|
||||||
|
|
||||||
Summit
|
|
||||||
OpenStack Summit - A conference where the OpenStack community gathers
|
|
||||||
for presentations, discussions, and project updates.
|
|
||||||
|
|
||||||
Miscellaneous Notes
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
How to track launchpad blueprint approvals
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
Core team approves blueprints through team consensus. The release liaison
|
|
||||||
ensures launchpad status is updated correctly after core team approval:
|
|
||||||
|
|
||||||
* Set the approver as the core team member who approved the spec
|
|
||||||
|
|
||||||
* Set the Direction => Approved and Definition => Approved and make sure the
|
|
||||||
Series goal is set to the current release. If code is already proposed, set
|
|
||||||
Implementation => Needs Code Review
|
|
||||||
|
|
||||||
* Optional: add a comment to the Whiteboard explaining the approval,
|
|
||||||
with a date
|
|
||||||
(launchpad does not record approval dates). For example: “We discussed this
|
|
||||||
in the team meeting and agreed to approve this for <release>. -- <nick>
|
|
||||||
<YYYYMMDD>”
|
|
||||||
|
|
||||||
How to complete a launchpad blueprint
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
* Set Implementation => Implemented. The completion date will be recorded by
|
|
||||||
launchpad
|
|
||||||
@@ -1,157 +0,0 @@
|
|||||||
================
|
|
||||||
Aetos datasource
|
|
||||||
================
|
|
||||||
|
|
||||||
Synopsis
|
|
||||||
--------
|
|
||||||
The Aetos datasource allows Watcher to use an Aetos reverse proxy server as the
|
|
||||||
source for collected metrics used by the Watcher decision engine. Aetos is a
|
|
||||||
multi-tenant aware reverse proxy that sits in front of a Prometheus server and
|
|
||||||
provides Keystone authentication and role-based access control. The Aetos
|
|
||||||
datasource uses Keystone service discovery to locate the Aetos endpoint and
|
|
||||||
requires authentication via Keystone tokens.
|
|
||||||
|
|
||||||
Requirements
|
|
||||||
-------------
|
|
||||||
The Aetos datasource has the following requirements:
|
|
||||||
|
|
||||||
* An Aetos reverse proxy server deployed in front of Prometheus
|
|
||||||
* Aetos service registered in Keystone with service type 'metric-storage'
|
|
||||||
* Valid Keystone credentials for Watcher with admin or service role
|
|
||||||
* Prometheus metrics with appropriate labels (same as direct Prometheus access)
|
|
||||||
|
|
||||||
Like the Prometheus datasource, it is required that Prometheus metrics contain
|
|
||||||
a label to identify the hostname of the exporter from which the metric was
|
|
||||||
collected. This is used to match against the Watcher cluster model
|
|
||||||
``ComputeNode.hostname``. The default for this label is ``fqdn`` and in the
|
|
||||||
prometheus scrape configs would look like:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
scrape_configs:
|
|
||||||
- job_name: node
|
|
||||||
static_configs:
|
|
||||||
- targets: ['10.1.2.3:9100']
|
|
||||||
labels:
|
|
||||||
fqdn: "testbox.controlplane.domain"
|
|
||||||
|
|
||||||
This default can be overridden when a deployer uses a different label to
|
|
||||||
identify the exporter host (for example ``hostname`` or ``host``, or any other
|
|
||||||
label, as long as it identifies the host).
|
|
||||||
|
|
||||||
Internally this label is used in creating ``fqdn_instance_labels``, containing
|
|
||||||
the list of values assigned to the label in the Prometheus targets.
|
|
||||||
The elements of the resulting fqdn_instance_labels are expected to match the
|
|
||||||
``ComputeNode.hostname`` used in the Watcher decision engine cluster model.
|
|
||||||
An example ``fqdn_instance_labels`` is the following:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
[
|
|
||||||
'ena.controlplane.domain',
|
|
||||||
'dio.controlplane.domain',
|
|
||||||
'tria.controlplane.domain',
|
|
||||||
]
|
|
||||||
|
|
||||||
For instance metrics, it is required that Prometheus contains a label
|
|
||||||
with the uuid of the OpenStack instance in each relevant metric. By default,
|
|
||||||
the datasource will look for the label ``resource``. The
|
|
||||||
``instance_uuid_label`` config option in watcher.conf allows deployers to
|
|
||||||
override this default to any other label name that stores the ``uuid``.
|
|
||||||
|
|
||||||
Limitations
|
|
||||||
-----------
|
|
||||||
The Aetos datasource shares the same limitations as the Prometheus datasource:
|
|
||||||
|
|
||||||
The current implementation doesn't support the ``statistic_series`` function of
|
|
||||||
the Watcher ``class DataSourceBase``. It is expected that the
|
|
||||||
``statistic_aggregation`` function (which is implemented) is sufficient in
|
|
||||||
providing the **current** state of the managed resources in the cluster.
|
|
||||||
The ``statistic_aggregation`` function defaults to querying back 300 seconds,
|
|
||||||
starting from the present time (the time period is a function parameter and
|
|
||||||
can be set to a value as required). Implementing the ``statistic_series`` can
|
|
||||||
always be re-visited if the requisite interest and work cycles are volunteered
|
|
||||||
by the interested parties.
|
|
||||||
|
|
||||||
One further note about a limitation in the implemented
|
|
||||||
``statistic_aggregation`` function. This function is defined with a
|
|
||||||
``granularity`` parameter, to be used when querying whichever of the Watcher
|
|
||||||
``DataSourceBase`` metrics providers. In the case of Aetos (like Prometheus),
|
|
||||||
we do not fetch and then process individual metrics across the specified time
|
|
||||||
period. Instead we use the PromQL querying operators and functions, so that the
|
|
||||||
server itself will process the request across the specified parameters and
|
|
||||||
then return the result. So ``granularity`` parameter is redundant and remains
|
|
||||||
unused for the Aetos implementation of ``statistic_aggregation``. The
|
|
||||||
granularity of the data fetched by Prometheus server is specified in
|
|
||||||
configuration as the server ``scrape_interval`` (current default 15 seconds).
|
|
||||||
|
|
||||||
Additionally, there is a slight performance impact compared to direct
|
|
||||||
Prometheus access. Since Aetos acts as a reverse proxy in front of Prometheus,
|
|
||||||
there is an additional step for each request, resulting in slightly longer
|
|
||||||
delays.
|
|
||||||
|
|
||||||
Configuration
|
|
||||||
-------------
|
|
||||||
A deployer must set the ``datasources`` parameter to include ``aetos``
|
|
||||||
under the watcher_datasources section of watcher.conf (or add ``aetos`` in
|
|
||||||
datasources for a specific strategy if preferred, e.g. under the
|
|
||||||
``[watcher_strategies.workload_stabilization]`` section).
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
Having both Prometheus and Aetos datasources configured at the same time
|
|
||||||
is not supported and will result in a configuration error. Allowing this
|
|
||||||
can be investigated in the future if a need or a proper use case is
|
|
||||||
identified.
|
|
||||||
|
|
||||||
The watcher.conf configuration file is also used to set the parameter values
|
|
||||||
required by the Watcher Aetos data source. The configuration can be
|
|
||||||
added under the ``[aetos_client]`` section and the available options are
|
|
||||||
duplicated below from the code as they are self documenting:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
cfg.StrOpt('interface',
|
|
||||||
default='public',
|
|
||||||
choices=['internal', 'public', 'admin'],
|
|
||||||
help="Type of endpoint to use in keystoneclient."),
|
|
||||||
cfg.StrOpt('region_name',
|
|
||||||
help="Region in Identity service catalog to use for "
|
|
||||||
"communication with the OpenStack service."),
|
|
||||||
cfg.StrOpt('fqdn_label',
|
|
||||||
default='fqdn',
|
|
||||||
help="The label that Prometheus uses to store the fqdn of "
|
|
||||||
"exporters. Defaults to 'fqdn'."),
|
|
||||||
cfg.StrOpt('instance_uuid_label',
|
|
||||||
default='resource',
|
|
||||||
help="The label that Prometheus uses to store the uuid of "
|
|
||||||
"OpenStack instances. Defaults to 'resource'."),
|
|
||||||
|
|
||||||
|
|
||||||
Authentication and Service Discovery
|
|
||||||
------------------------------------
|
|
||||||
Unlike the Prometheus datasource which requires explicit host and port
|
|
||||||
configuration, the Aetos datasource uses Keystone service discovery to
|
|
||||||
automatically locate the Aetos endpoint. The datasource:
|
|
||||||
|
|
||||||
1. Uses the configured Keystone credentials to authenticate
|
|
||||||
2. Searches the service catalog for a service with type 'metric-storage'
|
|
||||||
3. Uses the discovered endpoint URL to connect to Aetos
|
|
||||||
4. Attaches a Keystone token to each request for authentication
|
|
||||||
|
|
||||||
If the Aetos service is not registered in Keystone, the datasource will
|
|
||||||
fail to initialize and prevent the decision engine from starting.
|
|
||||||
|
|
||||||
So a sample watcher.conf configured to use the Aetos datasource would look
|
|
||||||
like the following:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
[watcher_datasources]
|
|
||||||
|
|
||||||
datasources = aetos
|
|
||||||
|
|
||||||
[aetos_client]
|
|
||||||
|
|
||||||
interface = public
|
|
||||||
region_name = RegionOne
|
|
||||||
fqdn_label = fqdn
|
|
||||||
@@ -90,15 +90,15 @@ parameter will need to specify the type of http protocol and the use of
|
|||||||
plain text http is strongly discouraged due to the transmission of the access
|
plain text http is strongly discouraged due to the transmission of the access
|
||||||
token. Additionally the path to the proxy interface needs to be supplied as
|
token. Additionally the path to the proxy interface needs to be supplied as
|
||||||
well in case Grafana is placed in a sub directory of the web server. An example
|
well in case Grafana is placed in a sub directory of the web server. An example
|
||||||
would be: ``https://mygrafana.org/api/datasource/proxy/`` where
|
would be: `https://mygrafana.org/api/datasource/proxy/` where
|
||||||
``/api/datasource/proxy`` is the default path without any subdirectories.
|
`/api/datasource/proxy` is the default path without any subdirectories.
|
||||||
Likewise, this parameter can not be placed in the yaml.
|
Likewise, this parameter can not be placed in the yaml.
|
||||||
|
|
||||||
To prevent many errors from occurring and potentially filling the log files it
|
To prevent many errors from occurring and potentially filling the log files it
|
||||||
is advised to specify the desired datasource in the configuration as it would
|
is advised to specify the desired datasource in the configuration as it would
|
||||||
prevent the datasource manager from having to iterate and try possible
|
prevent the datasource manager from having to iterate and try possible
|
||||||
datasources with the launch of each audit. To do this specify
|
datasources with the launch of each audit. To do this specify `datasources` in
|
||||||
``datasources`` in the ``[watcher_datasources]`` group.
|
the `[watcher_datasources]` group.
|
||||||
|
|
||||||
The current configuration that is required to be placed in the traditional
|
The current configuration that is required to be placed in the traditional
|
||||||
configuration file would look like the following:
|
configuration file would look like the following:
|
||||||
@@ -120,7 +120,7 @@ traditional configuration file or in the yaml, however, it is not advised to
|
|||||||
mix and match but in the case it does occur the yaml would override the
|
mix and match but in the case it does occur the yaml would override the
|
||||||
settings from the traditional configuration file. All five of these parameters
|
settings from the traditional configuration file. All five of these parameters
|
||||||
are dictionaries mapping specific metrics to a configuration parameter. For
|
are dictionaries mapping specific metrics to a configuration parameter. For
|
||||||
instance the ``project_id_map`` will specify the specific project id in Grafana
|
instance the `project_id_map` will specify the specific project id in Grafana
|
||||||
to be used. The parameters are named as follows:
|
to be used. The parameters are named as follows:
|
||||||
|
|
||||||
* project_id_map
|
* project_id_map
|
||||||
@@ -149,10 +149,10 @@ project_id
|
|||||||
|
|
||||||
The project id's can only be determined by someone with the admin role in
|
The project id's can only be determined by someone with the admin role in
|
||||||
Grafana as that role is required to open the list of projects. The list of
|
Grafana as that role is required to open the list of projects. The list of
|
||||||
projects can be found on ``/datasources`` in the web interface but
|
projects can be found on `/datasources` in the web interface but
|
||||||
unfortunately it does not immediately display the project id. To display
|
unfortunately it does not immediately display the project id. To display
|
||||||
the id one can best hover the mouse over the projects and the url will show the
|
the id one can best hover the mouse over the projects and the url will show the
|
||||||
project id's for example ``/datasources/edit/7563``. Alternatively the entire
|
project id's for example `/datasources/edit/7563`. Alternatively the entire
|
||||||
list of projects can be retrieved using the `REST api`_. To easily make
|
list of projects can be retrieved using the `REST api`_. To easily make
|
||||||
requests to the REST api a tool such as Postman can be used.
|
requests to the REST api a tool such as Postman can be used.
|
||||||
|
|
||||||
@@ -239,24 +239,18 @@ conversion from bytes to megabytes.
|
|||||||
|
|
||||||
SELECT value/1000000 FROM memory...
|
SELECT value/1000000 FROM memory...
|
||||||
|
|
||||||
Queries will be formatted using the .format string method within Python.
|
Queries will be formatted using the .format string method within Python. This
|
||||||
This format will currently have five attributes exposed to it labeled
|
format will currently have five attributes exposed to it labeled `{0}` to
|
||||||
``{0}`` through ``{4}``.
|
`{4}`. Every occurrence of these characters within the string will be replaced
|
||||||
Every occurrence of these characters within the string will be replaced
|
|
||||||
with the specific attribute.
|
with the specific attribute.
|
||||||
|
|
||||||
{0}
|
- {0} is the aggregate typically `mean`, `min`, `max` but `count` is also
|
||||||
is the aggregate typically ``mean``, ``min``, ``max`` but ``count``
|
supported.
|
||||||
is also supported.
|
- {1} is the attribute as specified in the attribute parameter.
|
||||||
{1}
|
- {2} is the period of time to aggregate data over in seconds.
|
||||||
is the attribute as specified in the attribute parameter.
|
- {3} is the granularity or the interval between data points in seconds.
|
||||||
{2}
|
- {4} is translator specific and in the case of InfluxDB it will be used for
|
||||||
is the period of time to aggregate data over in seconds.
|
retention_periods.
|
||||||
{3}
|
|
||||||
is the granularity or the interval between data points in seconds.
|
|
||||||
{4}
|
|
||||||
is translator specific and in the case of InfluxDB it will be used for
|
|
||||||
retention_periods.
|
|
||||||
|
|
||||||
**InfluxDB**
|
**InfluxDB**
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,6 @@
|
|||||||
Datasources
|
Datasources
|
||||||
===========
|
===========
|
||||||
|
|
||||||
.. note::
|
|
||||||
The Monasca datasource is deprecated for removal and optional. To use it, install the optional extra:
|
|
||||||
``pip install watcher[monasca]``. If Monasca is configured without installing the extra, Watcher will raise
|
|
||||||
an error guiding you to install the client.
|
|
||||||
|
|
||||||
.. toctree::
|
.. toctree::
|
||||||
:glob:
|
:glob:
|
||||||
:maxdepth: 1
|
:maxdepth: 1
|
||||||
|
|||||||
@@ -1,140 +0,0 @@
|
|||||||
=====================
|
|
||||||
Prometheus datasource
|
|
||||||
=====================
|
|
||||||
|
|
||||||
Synopsis
|
|
||||||
--------
|
|
||||||
The Prometheus datasource allows Watcher to use a Prometheus server as the
|
|
||||||
source for collected metrics used by the Watcher decision engine. At minimum
|
|
||||||
deployers must configure the ``host`` and ``port`` at which the Prometheus
|
|
||||||
server is listening.
|
|
||||||
|
|
||||||
Requirements
|
|
||||||
-------------
|
|
||||||
It is required that Prometheus metrics contain a label to identify the hostname
|
|
||||||
of the exporter from which the metric was collected. This is used to match
|
|
||||||
against the Watcher cluster model ``ComputeNode.hostname``. The default for
|
|
||||||
this label is ``fqdn`` and in the prometheus scrape configs would look like:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
scrape_configs:
|
|
||||||
- job_name: node
|
|
||||||
static_configs:
|
|
||||||
- targets: ['10.1.2.3:9100']
|
|
||||||
labels:
|
|
||||||
fqdn: "testbox.controlplane.domain"
|
|
||||||
|
|
||||||
This default can be overridden when a deployer uses a different label to
|
|
||||||
identify the exporter host (for example ``hostname`` or ``host``, or any other
|
|
||||||
label, as long as it identifies the host).
|
|
||||||
|
|
||||||
Internally this label is used in creating ``fqdn_instance_labels``, containing
|
|
||||||
the list of values assigned to the label in the Prometheus targets.
|
|
||||||
The elements of the resulting fqdn_instance_labels are expected to match the
|
|
||||||
``ComputeNode.hostname`` used in the Watcher decision engine cluster model.
|
|
||||||
An example ``fqdn_instance_labels`` is the following:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
[
|
|
||||||
'ena.controlplane.domain',
|
|
||||||
'dio.controlplane.domain',
|
|
||||||
'tria.controlplane.domain',
|
|
||||||
]
|
|
||||||
|
|
||||||
For instance metrics, it is required that Prometheus contains a label
|
|
||||||
with the uuid of the OpenStack instance in each relevant metric. By default,
|
|
||||||
the datasource will look for the label ``resource``. The
|
|
||||||
``instance_uuid_label`` config option in watcher.conf allows deployers to
|
|
||||||
override this default to any other label name that stores the ``uuid``.
|
|
||||||
|
|
||||||
Limitations
|
|
||||||
-----------
|
|
||||||
The current implementation doesn't support the ``statistic_series`` function of
|
|
||||||
the Watcher ``class DataSourceBase``. It is expected that the
|
|
||||||
``statistic_aggregation`` function (which is implemented) is sufficient in
|
|
||||||
providing the **current** state of the managed resources in the cluster.
|
|
||||||
The ``statistic_aggregation`` function defaults to querying back 300 seconds,
|
|
||||||
starting from the present time (the time period is a function parameter and
|
|
||||||
can be set to a value as required). Implementing the ``statistic_series`` can
|
|
||||||
always be re-visited if the requisite interest and work cycles are volunteered
|
|
||||||
by the interested parties.
|
|
||||||
|
|
||||||
One further note about a limitation in the implemented
|
|
||||||
``statistic_aggregation`` function. This function is defined with a
|
|
||||||
``granularity`` parameter, to be used when querying whichever of the Watcher
|
|
||||||
``DataSourceBase`` metrics providers. In the case of Prometheus, we do not
|
|
||||||
fetch and then process individual metrics across the specified time period.
|
|
||||||
Instead we use the PromQL querying operators and functions, so that the
|
|
||||||
server itself will process the request across the specified parameters and
|
|
||||||
then return the result. So ``granularity`` parameter is redundant and remains
|
|
||||||
unused for the Prometheus implementation of ``statistic_aggregation``. The
|
|
||||||
granularity of the data fetched by Prometheus server is specified in
|
|
||||||
configuration as the server ``scrape_interval`` (current default 15 seconds).
|
|
||||||
|
|
||||||
Configuration
|
|
||||||
-------------
|
|
||||||
A deployer must set the ``datasources`` parameter to include ``prometheus``
|
|
||||||
under the watcher_datasources section of watcher.conf (or add ``prometheus`` in
|
|
||||||
datasources for a specific strategy if preferred, e.g. under the
|
|
||||||
``[watcher_strategies.workload_stabilization]`` section).
|
|
||||||
|
|
||||||
The watcher.conf configuration file is also used to set the parameter values
|
|
||||||
required by the Watcher Prometheus data source. The configuration can be
|
|
||||||
added under the ``[prometheus_client]`` section and the available options are
|
|
||||||
duplicated below from the code as they are self documenting:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
cfg.StrOpt('host',
|
|
||||||
help="The hostname or IP address for the prometheus server."),
|
|
||||||
cfg.StrOpt('port',
|
|
||||||
help="The port number used by the prometheus server."),
|
|
||||||
cfg.StrOpt('fqdn_label',
|
|
||||||
default="fqdn",
|
|
||||||
help="The label that Prometheus uses to store the fqdn of "
|
|
||||||
"exporters. Defaults to 'fqdn'."),
|
|
||||||
cfg.StrOpt('instance_uuid_label',
|
|
||||||
default="resource",
|
|
||||||
help="The label that Prometheus uses to store the uuid of "
|
|
||||||
"OpenStack instances. Defaults to 'resource'."),
|
|
||||||
cfg.StrOpt('username',
|
|
||||||
help="The basic_auth username to use to authenticate with the "
|
|
||||||
"Prometheus server."),
|
|
||||||
cfg.StrOpt('password',
|
|
||||||
secret=True,
|
|
||||||
help="The basic_auth password to use to authenticate with the "
|
|
||||||
"Prometheus server."),
|
|
||||||
cfg.StrOpt('cafile',
|
|
||||||
help="Path to the CA certificate for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
cfg.StrOpt('certfile',
|
|
||||||
help="Path to the client certificate for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
cfg.StrOpt('keyfile',
|
|
||||||
help="Path to the client key for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
|
|
||||||
The ``host`` and ``port`` are **required** configuration options which have
|
|
||||||
no set default. These specify the hostname (or IP) and port at which
|
|
||||||
the Prometheus server is listening. The ``fqdn_label`` allows deployers to
|
|
||||||
override the required metric label used to match Prometheus node exporters
|
|
||||||
against the Watcher ComputeNodes in the Watcher decision engine cluster data
|
|
||||||
model. The default is ``fqdn`` and deployers can specify any other value
|
|
||||||
(e.g. if they have an equivalent but different label such as ``host``).
|
|
||||||
|
|
||||||
So a sample watcher.conf configured to use the Prometheus server at
|
|
||||||
``10.2.3.4:9090`` would look like the following:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
[watcher_datasources]
|
|
||||||
|
|
||||||
datasources = prometheus
|
|
||||||
|
|
||||||
[prometheus_client]
|
|
||||||
|
|
||||||
host = 10.2.3.4
|
|
||||||
port = 9090
|
|
||||||
fqdn_label = fqdn
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
@startuml
|
|
||||||
|
|
||||||
skinparam ArrowColor DarkRed
|
|
||||||
skinparam StateBorderColor DarkRed
|
|
||||||
skinparam StateBackgroundColor LightYellow
|
|
||||||
skinparam Shadowing true
|
|
||||||
|
|
||||||
[*] --> PENDING: The Watcher Planner\ncreates the Action
|
|
||||||
PENDING --> SKIPPED: The Action detects skipping condition\n in pre_condition or was\n skipped by cloud Admin.
|
|
||||||
PENDING --> FAILED: The Action fails unexpectedly\n in pre_condition.
|
|
||||||
PENDING --> ONGOING: The Watcher Applier starts executing\n the action.
|
|
||||||
ONGOING --> FAILED: Something failed while executing\nthe Action in the Watcher Applier
|
|
||||||
ONGOING --> SUCCEEDED: The Watcher Applier executed\nthe Action successfully
|
|
||||||
FAILED --> DELETED : Administrator removes\nAction Plan
|
|
||||||
SUCCEEDED --> DELETED : Administrator removes\n the Action
|
|
||||||
ONGOING --> CANCELLED : The Action was cancelled\n as part of an Action Plan cancellation.
|
|
||||||
PENDING --> CANCELLED : The Action was cancelled\n as part of an Action Plan cancellation.
|
|
||||||
CANCELLED --> DELETED
|
|
||||||
FAILED --> DELETED
|
|
||||||
SKIPPED --> DELETED
|
|
||||||
DELETED --> [*]
|
|
||||||
|
|
||||||
@enduml
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 75 KiB |
@@ -42,7 +42,6 @@ specific prior release.
|
|||||||
user/index
|
user/index
|
||||||
configuration/index
|
configuration/index
|
||||||
contributor/plugin/index
|
contributor/plugin/index
|
||||||
integrations/index
|
|
||||||
man/index
|
man/index
|
||||||
|
|
||||||
.. toctree::
|
.. toctree::
|
||||||
|
|||||||
@@ -9,7 +9,7 @@
|
|||||||
...
|
...
|
||||||
connection = mysql+pymysql://watcher:WATCHER_DBPASS@controller/watcher?charset=utf8
|
connection = mysql+pymysql://watcher:WATCHER_DBPASS@controller/watcher?charset=utf8
|
||||||
|
|
||||||
* In the ``[DEFAULT]`` section, configure the transport url for RabbitMQ message broker.
|
* In the `[DEFAULT]` section, configure the transport url for RabbitMQ message broker.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
|
|
||||||
@@ -20,7 +20,7 @@
|
|||||||
|
|
||||||
Replace the RABBIT_PASS with the password you chose for OpenStack user in RabbitMQ.
|
Replace the RABBIT_PASS with the password you chose for OpenStack user in RabbitMQ.
|
||||||
|
|
||||||
* In the ``[keystone_authtoken]`` section, configure Identity service access.
|
* In the `[keystone_authtoken]` section, configure Identity service access.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
|
|
||||||
@@ -39,7 +39,7 @@
|
|||||||
Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service.
|
Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service.
|
||||||
|
|
||||||
* Watcher interacts with other OpenStack projects via project clients, in order to instantiate these
|
* Watcher interacts with other OpenStack projects via project clients, in order to instantiate these
|
||||||
clients, Watcher requests new session from Identity service. In the ``[watcher_clients_auth]`` section,
|
clients, Watcher requests new session from Identity service. In the `[watcher_clients_auth]` section,
|
||||||
configure the identity service access to interact with other OpenStack project clients.
|
configure the identity service access to interact with other OpenStack project clients.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
@@ -56,7 +56,7 @@
|
|||||||
|
|
||||||
Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service.
|
Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service.
|
||||||
|
|
||||||
* In the ``[api]`` section, configure host option.
|
* In the `[api]` section, configure host option.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
|
|
||||||
@@ -66,7 +66,7 @@
|
|||||||
|
|
||||||
Replace controller with the IP address of the management network interface on your controller node, typically 10.0.0.11 for the first node in the example architecture.
|
Replace controller with the IP address of the management network interface on your controller node, typically 10.0.0.11 for the first node in the example architecture.
|
||||||
|
|
||||||
* In the ``[oslo_messaging_notifications]`` section, configure the messaging driver.
|
* In the `[oslo_messaging_notifications]` section, configure the messaging driver.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
|
|
||||||
|
|||||||
@@ -1,126 +0,0 @@
|
|||||||
============
|
|
||||||
Integrations
|
|
||||||
============
|
|
||||||
|
|
||||||
The following table provides an Integration status with different services
|
|
||||||
which Watcher interacts with. Some integrations are marked as Supported,
|
|
||||||
while others as Experimental due to the lack of testing and proper
|
|
||||||
documentation.
|
|
||||||
|
|
||||||
Integration Status Matrix
|
|
||||||
-------------------------
|
|
||||||
|
|
||||||
.. list-table::
|
|
||||||
:widths: 20 20 20 20
|
|
||||||
:header-rows: 1
|
|
||||||
|
|
||||||
* - Service Name
|
|
||||||
- Integration Status
|
|
||||||
- Documentation
|
|
||||||
- Testing
|
|
||||||
* - :ref:`Cinder <cinder_integration>`
|
|
||||||
- Supported
|
|
||||||
- Minimal
|
|
||||||
- Unit
|
|
||||||
* - :ref:`Glance <glance_integration>`
|
|
||||||
- Experimental
|
|
||||||
- Missing
|
|
||||||
- None
|
|
||||||
* - :ref:`Ironic <ironic_integration>`
|
|
||||||
- Experimental
|
|
||||||
- Minimal
|
|
||||||
- Unit
|
|
||||||
* - :ref:`Keystone <keystone_integration>`
|
|
||||||
- Supported
|
|
||||||
- Minimal
|
|
||||||
- Integration
|
|
||||||
* - :ref:`MAAS <maas_integration>`
|
|
||||||
- Experimental
|
|
||||||
- Missing
|
|
||||||
- Unit
|
|
||||||
* - :ref:`Neutron <neutron_integration>`
|
|
||||||
- Experimental
|
|
||||||
- Missing
|
|
||||||
- Unit
|
|
||||||
* - :ref:`Nova <nova_integration>`
|
|
||||||
- Supported
|
|
||||||
- Minimal
|
|
||||||
- Unit and Integration
|
|
||||||
* - :ref:`Placement <placement_integration>`
|
|
||||||
- Supported
|
|
||||||
- Minimal
|
|
||||||
- Unit and Integration
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
Minimal documentation covers only basic configuration and, if available,
|
|
||||||
how to enable notifications.
|
|
||||||
|
|
||||||
.. _cinder_integration:
|
|
||||||
|
|
||||||
Cinder
|
|
||||||
^^^^^^
|
|
||||||
The OpenStack Block Storage service integration includes a cluster data
|
|
||||||
model collector that creates an in-memory representation of the storage
|
|
||||||
resources, strategies that propose solutions based on storage capacity
|
|
||||||
and Actions that perform volume migration.
|
|
||||||
|
|
||||||
.. _glance_integration:
|
|
||||||
|
|
||||||
Glance
|
|
||||||
^^^^^^
|
|
||||||
The Image service integration is consumed by Nova Helper to create instances
|
|
||||||
from images, which was used by older releases of Watcher to cold migrate
|
|
||||||
instances. This procedure is not used by Watcher anymore and this integration
|
|
||||||
is classified as Experimental and may be removed in future releases.
|
|
||||||
|
|
||||||
.. _ironic_integration:
|
|
||||||
|
|
||||||
Ironic
|
|
||||||
^^^^^^
|
|
||||||
The Bare Metal service integration includes a data model collector that
|
|
||||||
creates an in-memory representation of Ironic resources and Actions that
|
|
||||||
allows the management of the power state of nodes. This integration is
|
|
||||||
classified as Experimental and may be removed in future releases.
|
|
||||||
|
|
||||||
.. _keystone_integration:
|
|
||||||
|
|
||||||
Keystone
|
|
||||||
^^^^^^^^
|
|
||||||
The Identity service integration includes authentication with other services
|
|
||||||
and retrieving information about domains, projects and users.
|
|
||||||
|
|
||||||
.. _maas_integration:
|
|
||||||
|
|
||||||
MAAS (Metal As A Service)
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
This integration allows managing bare metal servers of a MAAS service,
|
|
||||||
which includes Actions that manage the power state of nodes. This
|
|
||||||
integration is classified as Experimental and may be removed in future
|
|
||||||
releases.
|
|
||||||
|
|
||||||
.. _neutron_integration:
|
|
||||||
|
|
||||||
Neutron
|
|
||||||
^^^^^^^
|
|
||||||
Neutron integration is currently consumed by Nova Helper to create instances,
|
|
||||||
which was used by older releases of Watcher to cold migrate instances. This
|
|
||||||
procedure is not used by Watcher anymore and this integration is classified
|
|
||||||
as Experimental and may be removed in future releases.
|
|
||||||
|
|
||||||
.. _nova_integration:
|
|
||||||
|
|
||||||
Nova
|
|
||||||
^^^^
|
|
||||||
Nova service integration includes a cluster data model collector that creates
|
|
||||||
an in-memory representation of the compute resources available in the cloud,
|
|
||||||
strategies that propose solutions based on available resources and Actions
|
|
||||||
that perform instance migrations.
|
|
||||||
|
|
||||||
.. _placement_integration:
|
|
||||||
|
|
||||||
Placement
|
|
||||||
^^^^^^^^^
|
|
||||||
Placement integration allows Watcher to track resource provider inventories
|
|
||||||
and usage information, building an in-memory representation of those resources
|
|
||||||
that can be used by strategies when calculating new solutions.
|
|
||||||
|
|
||||||
@@ -48,7 +48,7 @@
|
|||||||
logging configuration to any other existing logging
|
logging configuration to any other existing logging
|
||||||
options. Please see the Python logging module documentation
|
options. Please see the Python logging module documentation
|
||||||
for details on logging configuration files. The log-config
|
for details on logging configuration files. The log-config
|
||||||
name for this option is deprecated.
|
name for this option is depcrecated.
|
||||||
|
|
||||||
**--log-format FORMAT**
|
**--log-format FORMAT**
|
||||||
A logging.Formatter log message format string which may use any
|
A logging.Formatter log message format string which may use any
|
||||||
|
|||||||
@@ -26,7 +26,8 @@ metric service name plugins comment
|
|||||||
``compute_monitors`` option
|
``compute_monitors`` option
|
||||||
to ``cpu.virt_driver`` in
|
to ``cpu.virt_driver`` in
|
||||||
the nova.conf.
|
the nova.conf.
|
||||||
``cpu`` ceilometer_ none
|
``cpu_util`` ceilometer_ none cpu_util has been removed
|
||||||
|
since Stein.
|
||||||
============================ ============ ======= ===========================
|
============================ ============ ======= ===========================
|
||||||
|
|
||||||
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
||||||
|
|||||||
@@ -11,6 +11,10 @@ Synopsis
|
|||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.host_maintenance.HostMaintenance
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.host_maintenance.HostMaintenance
|
||||||
|
|
||||||
|
Requirements
|
||||||
|
------------
|
||||||
|
|
||||||
|
None.
|
||||||
|
|
||||||
Metrics
|
Metrics
|
||||||
*******
|
*******
|
||||||
@@ -52,29 +56,15 @@ Configuration
|
|||||||
|
|
||||||
Strategy parameters are:
|
Strategy parameters are:
|
||||||
|
|
||||||
========================== ======== ========================== ==========
|
==================== ====== ====================================
|
||||||
parameter type description required
|
parameter type default Value description
|
||||||
========================== ======== ========================== ==========
|
==================== ====== ====================================
|
||||||
``maintenance_node`` String The name of the Required
|
``maintenance_node`` String The name of the compute node which
|
||||||
compute node
|
need maintenance. Required.
|
||||||
which needs maintenance.
|
``backup_node`` String The name of the compute node which
|
||||||
``backup_node`` String The name of the compute Optional
|
will backup the maintenance node.
|
||||||
node which will backup
|
Optional.
|
||||||
the maintenance node.
|
==================== ====== ====================================
|
||||||
``disable_live_migration`` Boolean False: Active instances Optional
|
|
||||||
will be live migrated.
|
|
||||||
True: Active instances
|
|
||||||
will be cold migrated
|
|
||||||
if cold migration is
|
|
||||||
not disabled. Otherwise,
|
|
||||||
they will be stopped.
|
|
||||||
False by default.
|
|
||||||
``disable_cold_migration`` Boolean False: Inactive instances Optional
|
|
||||||
will be cold migrated.
|
|
||||||
True: Inactive instances
|
|
||||||
will not be cold migrated.
|
|
||||||
False by default.
|
|
||||||
========================== ======== ========================== ==========
|
|
||||||
|
|
||||||
Efficacy Indicator
|
Efficacy Indicator
|
||||||
------------------
|
------------------
|
||||||
@@ -90,46 +80,13 @@ to: https://specs.openstack.org/openstack/watcher-specs/specs/queens/approved/cl
|
|||||||
How to use it ?
|
How to use it ?
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
Run an audit using Host Maintenance strategy.
|
|
||||||
Executing the actions will move the servers from compute01 host
|
|
||||||
to a host determined by the Nova scheduler service.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ openstack optimize audit create \
|
|
||||||
-g cluster_maintaining -s host_maintenance \
|
|
||||||
-p maintenance_node=compute01
|
|
||||||
|
|
||||||
Run an audit using Host Maintenance strategy with a backup node specified.
|
|
||||||
Executing the actions will move the servers from compute01 host
|
|
||||||
to compute02 host.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
.. code-block:: shell
|
||||||
|
|
||||||
$ openstack optimize audit create \
|
$ openstack optimize audit create \
|
||||||
-g cluster_maintaining -s host_maintenance \
|
-g cluster_maintaining -s host_maintenance \
|
||||||
-p maintenance_node=compute01 \
|
-p maintenance_node=compute01 \
|
||||||
-p backup_node=compute02
|
-p backup_node=compute02 \
|
||||||
|
--auto-trigger
|
||||||
Run an audit using Host Maintenance strategy with migration disabled.
|
|
||||||
This will only stop active instances on compute01, useful for maintenance
|
|
||||||
scenarios where operators do not want to migrate workloads to other hosts.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ openstack optimize audit create \
|
|
||||||
-g cluster_maintaining -s host_maintenance \
|
|
||||||
-p maintenance_node=compute01 \
|
|
||||||
-p disable_live_migration=True \
|
|
||||||
-p disable_cold_migration=True
|
|
||||||
|
|
||||||
Note that after executing this strategy, the *maintenance_node* will be
|
|
||||||
marked as disabled, with the reason set to ``watcher_maintaining``.
|
|
||||||
To enable the node again:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ openstack compute service set --enable compute01
|
|
||||||
|
|
||||||
External Links
|
External Links
|
||||||
--------------
|
--------------
|
||||||
|
|||||||
@@ -6,53 +6,3 @@ Strategies
|
|||||||
:maxdepth: 1
|
:maxdepth: 1
|
||||||
|
|
||||||
./*
|
./*
|
||||||
|
|
||||||
Strategies status matrix
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
.. list-table::
|
|
||||||
:widths: 33 33 34
|
|
||||||
:header-rows: 1
|
|
||||||
|
|
||||||
* - Strategy Name
|
|
||||||
- Status
|
|
||||||
- Testing
|
|
||||||
* - :doc:`actuation`
|
|
||||||
- Experimental
|
|
||||||
- Unit, Integration
|
|
||||||
* - :doc:`basic-server-consolidation`
|
|
||||||
- Experimental
|
|
||||||
- Missing
|
|
||||||
* - :doc:`host_maintenance`
|
|
||||||
- Supported
|
|
||||||
- Unit, Integration
|
|
||||||
* - :doc:`node_resource_consolidation`
|
|
||||||
- Supported
|
|
||||||
- Unit, Integration
|
|
||||||
* - :doc:`noisy_neighbor`
|
|
||||||
- Deprecated
|
|
||||||
- Unit
|
|
||||||
* - :doc:`outlet_temp_control`
|
|
||||||
- Experimental
|
|
||||||
- Unit
|
|
||||||
* - :doc:`saving_energy`
|
|
||||||
- Experimental
|
|
||||||
- Unit
|
|
||||||
* - :doc:`storage_capacity_balance`
|
|
||||||
- Experimental
|
|
||||||
- Unit
|
|
||||||
* - :doc:`uniform_airflow`
|
|
||||||
- Experimental
|
|
||||||
- Unit
|
|
||||||
* - :doc:`vm_workload_consolidation`
|
|
||||||
- Supported
|
|
||||||
- Unit, Integration
|
|
||||||
* - :doc:`workload-stabilization`
|
|
||||||
- Experimental
|
|
||||||
- Missing
|
|
||||||
* - :doc:`workload_balance`
|
|
||||||
- Supported
|
|
||||||
- Unit, Integration
|
|
||||||
* - :doc:`zone_migration`
|
|
||||||
- Supported (Instance migrations), Experimental (Volume migration)
|
|
||||||
- Unit, Some Integration
|
|
||||||
|
|||||||
@@ -89,9 +89,9 @@ step 2: Create audit to do optimization
|
|||||||
.. code-block:: shell
|
.. code-block:: shell
|
||||||
|
|
||||||
$ openstack optimize audittemplate create \
|
$ openstack optimize audittemplate create \
|
||||||
saving_energy_template1 saving_energy --strategy saving_energy
|
at1 saving_energy --strategy saving_energy
|
||||||
|
|
||||||
$ openstack optimize audit create -a saving_energy_audit1 \
|
$ openstack optimize audit create -a at1 \
|
||||||
-p free_used_percent=20.0
|
-p free_used_percent=20.0
|
||||||
|
|
||||||
External Links
|
External Links
|
||||||
|
|||||||
@@ -35,11 +35,6 @@ power ceilometer_ kwapi_ one point every 60s
|
|||||||
|
|
||||||
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
||||||
.. _monasca: https://github.com/openstack/monasca-agent/blob/master/docs/Libvirt.md
|
.. _monasca: https://github.com/openstack/monasca-agent/blob/master/docs/Libvirt.md
|
||||||
|
|
||||||
.. note::
|
|
||||||
The Monasca datasource is deprecated for removal and optional. If a strategy requires Monasca metrics,
|
|
||||||
ensure the Monasca optional extra is installed: ``pip install watcher[monasca]``.
|
|
||||||
|
|
||||||
.. _kwapi: https://kwapi.readthedocs.io/en/latest/index.html
|
.. _kwapi: https://kwapi.readthedocs.io/en/latest/index.html
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -22,19 +22,14 @@ The *vm_workload_consolidation* strategy requires the following metrics:
|
|||||||
============================ ============ ======= =========================
|
============================ ============ ======= =========================
|
||||||
metric service name plugins comment
|
metric service name plugins comment
|
||||||
============================ ============ ======= =========================
|
============================ ============ ======= =========================
|
||||||
``cpu`` ceilometer_ none
|
``cpu_util`` ceilometer_ none cpu_util has been removed
|
||||||
|
since Stein.
|
||||||
``memory.resident`` ceilometer_ none
|
``memory.resident`` ceilometer_ none
|
||||||
``memory`` ceilometer_ none
|
``memory`` ceilometer_ none
|
||||||
``disk.root.size`` ceilometer_ none
|
``disk.root.size`` ceilometer_ none
|
||||||
``compute.node.cpu.percent`` ceilometer_ none (optional) need to set the
|
|
||||||
``compute_monitors`` option
|
|
||||||
to ``cpu.virt_driver`` in the
|
|
||||||
nova.conf.
|
|
||||||
``hardware.memory.used`` ceilometer_ SNMP_ (optional)
|
|
||||||
============================ ============ ======= =========================
|
============================ ============ ======= =========================
|
||||||
|
|
||||||
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
||||||
.. _SNMP: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#snmp-based-meters
|
|
||||||
|
|
||||||
Cluster data model
|
Cluster data model
|
||||||
******************
|
******************
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
===============================
|
=============================================
|
||||||
Workload Stabilization Strategy
|
Watcher Overload standard deviation algorithm
|
||||||
===============================
|
=============================================
|
||||||
|
|
||||||
Synopsis
|
Synopsis
|
||||||
--------
|
--------
|
||||||
@@ -19,20 +19,21 @@ Metrics
|
|||||||
|
|
||||||
The *workload_stabilization* strategy requires the following metrics:
|
The *workload_stabilization* strategy requires the following metrics:
|
||||||
|
|
||||||
============================ ==================================================
|
============================ ============ ======= =============================
|
||||||
metric description
|
metric service name plugins comment
|
||||||
============================ ==================================================
|
============================ ============ ======= =============================
|
||||||
``instance_ram_usage`` ram memory usage in an instance as float in
|
``compute.node.cpu.percent`` ceilometer_ none need to set the
|
||||||
megabytes
|
``compute_monitors`` option
|
||||||
``instance_cpu_usage`` cpu usage in an instance as float ranging between
|
to ``cpu.virt_driver`` in the
|
||||||
0 and 100 representing the total cpu usage as
|
nova.conf.
|
||||||
percentage
|
``hardware.memory.used`` ceilometer_ SNMP_
|
||||||
``host_ram_usage`` ram memory usage in a compute node as float in
|
``cpu_util`` ceilometer_ none cpu_util has been removed
|
||||||
megabytes
|
since Stein.
|
||||||
``host_cpu_usage`` cpu usage in a compute node as float ranging
|
``memory.resident`` ceilometer_ none
|
||||||
between 0 and 100 representing the total cpu
|
============================ ============ ======= =============================
|
||||||
usage as percentage
|
|
||||||
============================ ==================================================
|
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
||||||
|
.. _SNMP: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#snmp-based-meters
|
||||||
|
|
||||||
Cluster data model
|
Cluster data model
|
||||||
******************
|
******************
|
||||||
@@ -68,49 +69,23 @@ Configuration
|
|||||||
|
|
||||||
Strategy parameters are:
|
Strategy parameters are:
|
||||||
|
|
||||||
====================== ====== =================== =============================
|
==================== ====== ===================== =============================
|
||||||
parameter type default Value description
|
parameter type default Value description
|
||||||
====================== ====== =================== =============================
|
==================== ====== ===================== =============================
|
||||||
``metrics`` array |metrics| Metrics used as rates of
|
``metrics`` array |metrics| Metrics used as rates of
|
||||||
cluster loads.
|
cluster loads.
|
||||||
``thresholds`` object |thresholds| Dict where key is a metric
|
``thresholds`` object |thresholds| Dict where key is a metric
|
||||||
and value is a trigger value.
|
and value is a trigger value.
|
||||||
The strategy will only
|
|
||||||
look for an action plan when
|
``weights`` object |weights| These weights used to
|
||||||
the standard deviation for
|
|
||||||
the usage of one of the
|
|
||||||
resources included in the
|
|
||||||
metrics, taken as a
|
|
||||||
normalized usage between
|
|
||||||
0 and 1 among the hosts is
|
|
||||||
higher than the threshold.
|
|
||||||
The value of a perfectly
|
|
||||||
balanced cluster for the
|
|
||||||
standard deviation would be
|
|
||||||
0, while in a totally
|
|
||||||
unbalanced one would be 0.5,
|
|
||||||
which should be the maximum
|
|
||||||
value.
|
|
||||||
``weights`` object |weights| These weights are used to
|
|
||||||
calculate common standard
|
calculate common standard
|
||||||
deviation when optimizing
|
deviation. Name of weight
|
||||||
the resources usage.
|
contains meter name and
|
||||||
Name of weight contains meter
|
_weight suffix.
|
||||||
name and _weight suffix.
|
``instance_metrics`` object |instance_metrics| Mapping to get hardware
|
||||||
Higher values imply the
|
statistics using instance
|
||||||
metric will be prioritized
|
metrics.
|
||||||
when calculating an optimal
|
``host_choice`` string retry Method of host's choice.
|
||||||
resulting cluster
|
|
||||||
distribution.
|
|
||||||
``instance_metrics`` object |instance_metrics| This parameter represents
|
|
||||||
the compute node metrics
|
|
||||||
representing compute resource
|
|
||||||
usage for the instances
|
|
||||||
resource indicated in the
|
|
||||||
metrics parameter.
|
|
||||||
``host_choice`` string retry Method of host’s choice when
|
|
||||||
analyzing destination for
|
|
||||||
instances.
|
|
||||||
There are cycle, retry and
|
There are cycle, retry and
|
||||||
fullsearch methods. Cycle
|
fullsearch methods. Cycle
|
||||||
will iterate hosts in cycle.
|
will iterate hosts in cycle.
|
||||||
@@ -119,49 +94,32 @@ parameter type default Value description
|
|||||||
retry_count option).
|
retry_count option).
|
||||||
Fullsearch will return each
|
Fullsearch will return each
|
||||||
host from list.
|
host from list.
|
||||||
``retry_count`` number 1 Count of random returned
|
``retry_count`` number 1 Count of random returned
|
||||||
hosts.
|
hosts.
|
||||||
``periods`` object |periods| Time, in seconds, to get
|
``periods`` object |periods| These periods are used to get
|
||||||
statistical values for
|
statistic aggregation for
|
||||||
resources usage for instance
|
instance and host metrics.
|
||||||
and host metrics.
|
The period is simply a
|
||||||
Watcher will use the last
|
repeating interval of time
|
||||||
period to calculate resource
|
into which the samples are
|
||||||
usage.
|
grouped for aggregation.
|
||||||
``granularity`` number 300 NOT RECOMMENDED TO MODIFY:
|
Watcher uses only the last
|
||||||
The time between two measures
|
period of all received ones.
|
||||||
in an aggregated timeseries
|
==================== ====== ===================== =============================
|
||||||
of a metric.
|
|
||||||
``aggregation_method`` object |aggn_method| NOT RECOMMENDED TO MODIFY:
|
|
||||||
Function used to aggregate
|
|
||||||
multiple measures into an
|
|
||||||
aggregated value.
|
|
||||||
====================== ====== =================== =============================
|
|
||||||
|
|
||||||
.. |metrics| replace:: ["instance_cpu_usage", "instance_ram_usage"]
|
.. |metrics| replace:: ["cpu_util", "memory.resident"]
|
||||||
.. |thresholds| replace:: {"instance_cpu_usage": 0.2, "instance_ram_usage": 0.2}
|
.. |thresholds| replace:: {"cpu_util": 0.2, "memory.resident": 0.2}
|
||||||
.. |weights| replace:: {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0}
|
.. |weights| replace:: {"cpu_util_weight": 1.0, "memory.resident_weight": 1.0}
|
||||||
.. |instance_metrics| replace:: {"instance_cpu_usage": "host_cpu_usage", "instance_ram_usage": "host_ram_usage"}
|
.. |instance_metrics| replace:: {"cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used"}
|
||||||
.. |periods| replace:: {"instance": 720, "node": 600}
|
.. |periods| replace:: {"instance": 720, "node": 600}
|
||||||
.. |aggn_method| replace:: {"instance": 'mean', "compute_node": 'mean'}
|
|
||||||
|
|
||||||
|
|
||||||
Efficacy Indicator
|
Efficacy Indicator
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
Global efficacy indicator:
|
|
||||||
|
|
||||||
.. watcher-func::
|
.. watcher-func::
|
||||||
:format: literal_block
|
:format: literal_block
|
||||||
|
|
||||||
watcher.decision_engine.goal.efficacy.specs.WorkloadBalancing.get_global_efficacy_indicator
|
watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator
|
||||||
|
|
||||||
Other efficacy indicators of the goal are:
|
|
||||||
|
|
||||||
- ``instance_migrations_count``: The number of VM migrations to be performed
|
|
||||||
- ``instances_count``: The total number of audited instances in strategy
|
|
||||||
- ``standard_deviation_after_audit``: The value of resulted standard deviation
|
|
||||||
- ``standard_deviation_before_audit``: The value of original standard deviation
|
|
||||||
|
|
||||||
Algorithm
|
Algorithm
|
||||||
---------
|
---------
|
||||||
@@ -178,10 +136,10 @@ How to use it ?
|
|||||||
at1 workload_balancing --strategy workload_stabilization
|
at1 workload_balancing --strategy workload_stabilization
|
||||||
|
|
||||||
$ openstack optimize audit create -a at1 \
|
$ openstack optimize audit create -a at1 \
|
||||||
-p thresholds='{"instance_ram_usage": 0.05}' \
|
-p thresholds='{"memory.resident": 0.05}' \
|
||||||
-p metrics='["instance_ram_usage"]'
|
-p metrics='["memory.resident"]'
|
||||||
|
|
||||||
External Links
|
External Links
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
None
|
- `Watcher Overload standard deviation algorithm spec <https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/sd-strategy.html>`_
|
||||||
|
|||||||
@@ -11,35 +11,26 @@ Synopsis
|
|||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance.WorkloadBalance
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance.WorkloadBalance
|
||||||
|
|
||||||
|
Requirements
|
||||||
|
------------
|
||||||
|
|
||||||
|
None.
|
||||||
|
|
||||||
Metrics
|
Metrics
|
||||||
*******
|
*******
|
||||||
|
|
||||||
The ``workload_balance`` strategy requires the following metrics:
|
The *workload_balance* strategy requires the following metrics:
|
||||||
|
|
||||||
======================= ============ ======= =========== ======================
|
======================= ============ ======= =========================
|
||||||
metric service name plugins unit comment
|
metric service name plugins comment
|
||||||
======================= ============ ======= =========== ======================
|
======================= ============ ======= =========================
|
||||||
``cpu`` ceilometer_ none percentage CPU of the instance.
|
``cpu_util`` ceilometer_ none cpu_util has been removed
|
||||||
Used to calculate the
|
since Stein.
|
||||||
threshold
|
``memory.resident`` ceilometer_ none
|
||||||
``memory.resident`` ceilometer_ none MB RAM of the instance.
|
======================= ============ ======= =========================
|
||||||
Used to calculate the
|
|
||||||
threshold
|
|
||||||
======================= ============ ======= =========== ======================
|
|
||||||
|
|
||||||
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
|
||||||
|
|
||||||
.. note::
|
|
||||||
* The parameters above reference the instance CPU or RAM usage, but
|
|
||||||
the threshold calculation is based on the CPU/RAM usage on the
|
|
||||||
hypervisor.
|
|
||||||
* The RAM usage can be calculated based on the RAM consumed by the instance,
|
|
||||||
and the available RAM on the hypervisor.
|
|
||||||
* The CPU percentage calculation relies on the CPU load, but also on the
|
|
||||||
number of CPUs on the hypervisor.
|
|
||||||
* The host memory metric is calculated by summing the RAM usage of each
|
|
||||||
instance on the host. This measure is close to the real usage, but is
|
|
||||||
not the exact usage on the host.
|
|
||||||
|
|
||||||
Cluster data model
|
Cluster data model
|
||||||
******************
|
******************
|
||||||
@@ -74,28 +65,15 @@ Configuration
|
|||||||
|
|
||||||
Strategy parameters are:
|
Strategy parameters are:
|
||||||
|
|
||||||
================ ====== ==================== ==================================
|
============== ====== ============= ====================================
|
||||||
parameter type default value description
|
parameter type default Value description
|
||||||
================ ====== ==================== ==================================
|
============== ====== ============= ====================================
|
||||||
``metrics`` String instance_cpu_usage Workload balance base on cpu or
|
``metrics`` String 'cpu_util' Workload balance base on cpu or ram
|
||||||
ram utilization. Choices:
|
utilization. choice: ['cpu_util',
|
||||||
['instance_cpu_usage',
|
'memory.resident']
|
||||||
'instance_ram_usage']
|
``threshold`` Number 25.0 Workload threshold for migration
|
||||||
``threshold`` Number 25.0 Workload threshold for migration.
|
``period`` Number 300 Aggregate time period of ceilometer
|
||||||
Used for both the source and the
|
============== ====== ============= ====================================
|
||||||
destination calculations.
|
|
||||||
Threshold is always a percentage.
|
|
||||||
``period`` Number 300 Aggregate time period of
|
|
||||||
ceilometer
|
|
||||||
``granularity`` Number 300 The time between two measures in
|
|
||||||
an aggregated timeseries of a
|
|
||||||
metric.
|
|
||||||
This parameter is only used
|
|
||||||
with the Gnocchi data source,
|
|
||||||
and it must match to any of the
|
|
||||||
valid archive policies for the
|
|
||||||
metric.
|
|
||||||
================ ====== ==================== ==================================
|
|
||||||
|
|
||||||
Efficacy Indicator
|
Efficacy Indicator
|
||||||
------------------
|
------------------
|
||||||
@@ -111,35 +89,13 @@ to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented
|
|||||||
How to use it ?
|
How to use it ?
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
Create an audit template using the Workload Balancing strategy.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
.. code-block:: shell
|
||||||
|
|
||||||
$ openstack optimize audittemplate create \
|
$ openstack optimize audittemplate create \
|
||||||
at1 workload_balancing --strategy workload_balance
|
at1 workload_balancing --strategy workload_balance
|
||||||
|
|
||||||
Run an audit using the Workload Balance strategy. The result of
|
|
||||||
the audit should be an action plan to move VMs from any host
|
|
||||||
where the CPU usage is over the threshold of 26%, to a host
|
|
||||||
where the utilization of CPU is under the threshold.
|
|
||||||
The measurements of CPU utilization are taken from the configured
|
|
||||||
datasource plugin with an aggregate period of 310.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ openstack optimize audit create -a at1 -p threshold=26.0 \
|
$ openstack optimize audit create -a at1 -p threshold=26.0 \
|
||||||
-p period=310 -p metrics=instance_cpu_usage
|
-p period=310 -p metrics=cpu_util
|
||||||
|
|
||||||
Run an audit using the Workload Balance strategy to
|
|
||||||
obtain a plan to balance VMs over hosts with a threshold of 20%.
|
|
||||||
In this case, the stipulation of the CPU utilization metric
|
|
||||||
measurement is a combination of period and granularity.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ openstack optimize audit create -a at1 \
|
|
||||||
-p granularity=30 -p threshold=20 -p period=300 \
|
|
||||||
-p metrics=instance_cpu_usage --auto-trigger
|
|
||||||
|
|
||||||
External Links
|
External Links
|
||||||
--------------
|
--------------
|
||||||
|
|||||||
@@ -11,13 +11,6 @@ Synopsis
|
|||||||
|
|
||||||
.. watcher-term:: watcher.decision_engine.strategy.strategies.zone_migration.ZoneMigration
|
.. watcher-term:: watcher.decision_engine.strategy.strategies.zone_migration.ZoneMigration
|
||||||
|
|
||||||
.. note::
|
|
||||||
The term ``Zone`` in the strategy name is not a reference to
|
|
||||||
`Openstack availability zones <https://docs.openstack.org/nova/latest/admin/availability-zones.html>`_
|
|
||||||
but rather a user-defined set of Compute nodes and storage pools.
|
|
||||||
Currently, migrations across actual availability zones are not fully tested
|
|
||||||
and might not work in all cluster configurations.
|
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
------------
|
------------
|
||||||
|
|
||||||
@@ -66,83 +59,66 @@ Configuration
|
|||||||
|
|
||||||
Strategy parameters are:
|
Strategy parameters are:
|
||||||
|
|
||||||
======================== ======== ======== ========= ==========================
|
======================== ======== ============= ==============================
|
||||||
parameter type default required description
|
parameter type default Value description
|
||||||
======================== ======== ======== ========= ==========================
|
======================== ======== ============= ==============================
|
||||||
``compute_nodes`` array None Optional Compute nodes to migrate.
|
``compute_nodes`` array None Compute nodes to migrate.
|
||||||
``storage_pools`` array None Optional Storage pools to migrate.
|
``storage_pools`` array None Storage pools to migrate.
|
||||||
``parallel_total`` integer 6 Optional The number of actions to
|
``parallel_total`` integer 6 The number of actions to be
|
||||||
be run in parallel in
|
run in parallel in total.
|
||||||
total.
|
``parallel_per_node`` integer 2 The number of actions to be
|
||||||
``parallel_per_node`` integer 2 Optional The number of actions to
|
run in parallel per compute
|
||||||
be run in parallel per
|
node.
|
||||||
compute node in one
|
``parallel_per_pool`` integer 2 The number of actions to be
|
||||||
action plan.
|
run in parallel per storage
|
||||||
``parallel_per_pool`` integer 2 Optional The number of actions to
|
pool.
|
||||||
be run in parallel per
|
``priority`` object None List prioritizes instances
|
||||||
storage pool.
|
and volumes.
|
||||||
``priority`` object None Optional List prioritizes instances
|
``with_attached_volume`` boolean False False: Instances will migrate
|
||||||
and volumes.
|
after all volumes migrate.
|
||||||
``with_attached_volume`` boolean False Optional False: Instances will
|
True: An instance will migrate
|
||||||
migrate after all volumes
|
after the attached volumes
|
||||||
migrate.
|
migrate.
|
||||||
True: An instance will
|
======================== ======== ============= ==============================
|
||||||
migrate after the
|
|
||||||
attached volumes migrate.
|
|
||||||
======================== ======== ======== ========= ==========================
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
* All parameters in the table above have defaults and therefore the
|
|
||||||
user can create an audit without specifying a value. However,
|
|
||||||
if **only** defaults parameters are used, there will be nothing
|
|
||||||
actionable for the audit.
|
|
||||||
* ``parallel_*`` parameters are not in reference to concurrency,
|
|
||||||
but rather on limiting the amount of actions to be added to the action
|
|
||||||
plan
|
|
||||||
* ``compute_nodes``, ``storage_pools``, and ``priority`` are optional
|
|
||||||
parameters, however, if they are passed they **require** the parameters
|
|
||||||
in the tables below:
|
|
||||||
|
|
||||||
The elements of compute_nodes array are:
|
The elements of compute_nodes array are:
|
||||||
|
|
||||||
============= ======= ======== ========= ========================
|
============= ======= =============== =============================
|
||||||
parameter type default required description
|
parameter type default Value description
|
||||||
============= ======= ======== ========= ========================
|
============= ======= =============== =============================
|
||||||
``src_node`` string None Required Compute node from which
|
``src_node`` string None Compute node from which
|
||||||
instances migrate.
|
instances migrate(mandatory).
|
||||||
``dst_node`` string None Optional Compute node to which
|
``dst_node`` string None Compute node to which
|
||||||
instances migrate.
|
instances migrate.
|
||||||
If omitted, nova will
|
============= ======= =============== =============================
|
||||||
choose the destination
|
|
||||||
node automatically.
|
|
||||||
============= ======= ======== ========= ========================
|
|
||||||
|
|
||||||
The elements of storage_pools array are:
|
The elements of storage_pools array are:
|
||||||
|
|
||||||
============= ======= ======== ========= ========================
|
============= ======= =============== ==============================
|
||||||
parameter type default required description
|
parameter type default Value description
|
||||||
============= ======= ======== ========= ========================
|
============= ======= =============== ==============================
|
||||||
``src_pool`` string None Required Storage pool from which
|
``src_pool`` string None Storage pool from which
|
||||||
volumes migrate.
|
volumes migrate(mandatory).
|
||||||
``dst_pool`` string None Optional Storage pool to which
|
``dst_pool`` string None Storage pool to which
|
||||||
volumes migrate.
|
volumes migrate.
|
||||||
``src_type`` string None Optional Source volume type.
|
``src_type`` string None Source volume type(mandatory).
|
||||||
``dst_type`` string None Required Destination volume type
|
``dst_type`` string None Destination volume type
|
||||||
============= ======= ======== ========= ========================
|
(mandatory).
|
||||||
|
============= ======= =============== ==============================
|
||||||
|
|
||||||
The elements of priority object are:
|
The elements of priority object are:
|
||||||
|
|
||||||
================ ======= ======== ========= =====================
|
================ ======= =============== ======================
|
||||||
parameter type default Required description
|
parameter type default Value description
|
||||||
================ ======= ======== ========= =====================
|
================ ======= =============== ======================
|
||||||
``project`` array None Optional Project names.
|
``project`` array None Project names.
|
||||||
``compute_node`` array None Optional Compute node names.
|
``compute_node`` array None Compute node names.
|
||||||
``storage_pool`` array None Optional Storage pool names.
|
``storage_pool`` array None Storage pool names.
|
||||||
``compute`` enum None Optional Instance attributes.
|
``compute`` enum None Instance attributes.
|
||||||
|compute|
|
|compute|
|
||||||
``storage`` enum None Optional Volume attributes.
|
``storage`` enum None Volume attributes.
|
||||||
|storage|
|
|storage|
|
||||||
================ ======= ======== ========= =====================
|
================ ======= =============== ======================
|
||||||
|
|
||||||
.. |compute| replace:: ["vcpu_num", "mem_size", "disk_size", "created_at"]
|
.. |compute| replace:: ["vcpu_num", "mem_size", "disk_size", "created_at"]
|
||||||
.. |storage| replace:: ["size", "created_at"]
|
.. |storage| replace:: ["size", "created_at"]
|
||||||
@@ -150,26 +126,11 @@ parameter type default Required description
|
|||||||
Efficacy Indicator
|
Efficacy Indicator
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
The efficacy indicators for action plans built from the command line
|
|
||||||
are:
|
|
||||||
|
|
||||||
.. watcher-func::
|
.. watcher-func::
|
||||||
:format: literal_block
|
:format: literal_block
|
||||||
|
|
||||||
watcher.decision_engine.goal.efficacy.specs.HardwareMaintenance.get_global_efficacy_indicator
|
watcher.decision_engine.goal.efficacy.specs.HardwareMaintenance.get_global_efficacy_indicator
|
||||||
|
|
||||||
In **Horizon**, these indictors are shown with alternative text.
|
|
||||||
|
|
||||||
* ``live_migrate_instance_count`` is shown as
|
|
||||||
``The number of instances actually live migrated`` in Horizon
|
|
||||||
* ``planned_live_migrate_instance_count`` is shown as
|
|
||||||
``The number of instances planned to live migrate`` in Horizon
|
|
||||||
* ``planned_live_migration_instance_count`` refers to the instances planned
|
|
||||||
to live migrate in the action plan.
|
|
||||||
* ``live_migrate_instance_count`` tracks all the instances that could be
|
|
||||||
migrated according to the audit input.
|
|
||||||
|
|
||||||
|
|
||||||
Algorithm
|
Algorithm
|
||||||
---------
|
---------
|
||||||
|
|
||||||
@@ -187,19 +148,6 @@ How to use it ?
|
|||||||
$ openstack optimize audit create -a at1 \
|
$ openstack optimize audit create -a at1 \
|
||||||
-p compute_nodes='[{"src_node": "s01", "dst_node": "d01"}]'
|
-p compute_nodes='[{"src_node": "s01", "dst_node": "d01"}]'
|
||||||
|
|
||||||
.. note::
|
|
||||||
* The Cinder model collector is not enabled by default.
|
|
||||||
If the Cinder model collector is not enabled while deploying Watcher,
|
|
||||||
the model will become outdated and cause errors eventually.
|
|
||||||
See the `Configuration option to enable the storage collector <https://docs.openstack.org/watcher/latest/configuration/watcher.html#collector.collector_plugins>`_ documentation.
|
|
||||||
|
|
||||||
Support caveats
|
|
||||||
---------------
|
|
||||||
|
|
||||||
This strategy offers the option to perform both Instance migrations and
|
|
||||||
Volume migrations. Currently, Instance migrations are ready for production
|
|
||||||
use while Volume migrations remain experimental.
|
|
||||||
|
|
||||||
External Links
|
External Links
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
|
|||||||
@@ -1,430 +0,0 @@
|
|||||||
..
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
not use this file except in compliance with the License. You may obtain
|
|
||||||
a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
License for the specific language governing permissions and limitations
|
|
||||||
under the License.
|
|
||||||
|
|
||||||
|
|
||||||
=======================
|
|
||||||
Using Continuous Audit
|
|
||||||
=======================
|
|
||||||
|
|
||||||
Continuous audits allow Watcher to continuously monitor and optimize your
|
|
||||||
OpenStack infrastructure based on predefined schedules or intervals. This guide
|
|
||||||
demonstrates how to set up and use continuous audits with the dummy strategy,
|
|
||||||
which is useful for testing, development, and understanding the continuous
|
|
||||||
audit workflow. However, this doc is valid for any other combination of
|
|
||||||
strategy and goal.
|
|
||||||
|
|
||||||
Overview
|
|
||||||
========
|
|
||||||
|
|
||||||
A continuous audit differs from a oneshot audit in that it runs repeatedly
|
|
||||||
at specified intervals. It supports both time-based intervals
|
|
||||||
(in seconds) and cron-like expressions for more complex scheduling patterns.
|
|
||||||
|
|
||||||
The dummy strategy is a test strategy that doesn't perform actual optimization
|
|
||||||
but creates sample actions (nop and sleep) to demonstrate the complete audit
|
|
||||||
workflow. It's ideal for:
|
|
||||||
|
|
||||||
- Testing continuous audit functionality
|
|
||||||
- Development and debugging
|
|
||||||
- Learning how Watcher works
|
|
||||||
|
|
||||||
Prerequisites
|
|
||||||
=============
|
|
||||||
|
|
||||||
Before setting up continuous audits, ensure:
|
|
||||||
|
|
||||||
1. Watcher services are running and configured properly
|
|
||||||
2. You have administrator access to OpenStack
|
|
||||||
|
|
||||||
|
|
||||||
You can verify the services are running:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize service list
|
|
||||||
+----+-------------------------+------------+--------+
|
|
||||||
| ID | Name | Host | Status |
|
|
||||||
+----+-------------------------+------------+--------+
|
|
||||||
| 1 | watcher-decision-engine | controller | ACTIVE |
|
|
||||||
| 2 | watcher-applier | controller | ACTIVE |
|
|
||||||
+----+-------------------------+------------+--------+
|
|
||||||
|
|
||||||
|
|
||||||
Continuous Audit State Machine
|
|
||||||
==============================
|
|
||||||
|
|
||||||
You can view the Audit state machine diagram in the Watcher documentation:
|
|
||||||
`Audit State Machine`_
|
|
||||||
|
|
||||||
.. _Audit State Machine: https://docs.openstack.org/watcher/latest/architecture.html#audit-state-machine
|
|
||||||
|
|
||||||
|
|
||||||
Transitions:
|
|
||||||
|
|
||||||
- An audit is created and enters the **PENDING** state.
|
|
||||||
|
|
||||||
- When the scheduled time arrives, a **PENDING** audit becomes **ONGOING**.
|
|
||||||
|
|
||||||
- A continuous audit remains in the **ONGOING** state across executions.
|
|
||||||
It does not switch to **SUCCEEDED** after each run.
|
|
||||||
|
|
||||||
- If an execution fails, the audit transitions to **FAILED** and is no longer
|
|
||||||
executed.
|
|
||||||
|
|
||||||
- Each execution produces a new action plan. When a new action plan is created
|
|
||||||
by the same continuous audit, previous **RECOMMENDED** action plans are moved
|
|
||||||
to **CANCELLED**. Only the latest action plan remains in **RECOMMENDED**.
|
|
||||||
|
|
||||||
- An administrator can **CANCEL** an audit that is **PENDING** or **ONGOING**.
|
|
||||||
|
|
||||||
- An administrator can **SUSPEND** an **ONGOING** audit.
|
|
||||||
|
|
||||||
- A **SUSPENDED** audit can be resumed by an administrator, at which point it
|
|
||||||
becomes **ONGOING** again.
|
|
||||||
|
|
||||||
- An administrator can **DELETE** an audit only when its state is
|
|
||||||
**SUCCEEDED**, **FAILED**, or **CANCELLED**.
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
|
|
||||||
You can enable the auto-trigger option if you want to automatically apply action
|
|
||||||
plans generated by continuous audits as soon as they are created.
|
|
||||||
Depending on the environment, continuous audits are often good candidates for
|
|
||||||
auto-trigger.
|
|
||||||
|
|
||||||
|
|
||||||
Create a Continuous Audit
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
Create a continuous audit that will run at regular intervals. You can specify
|
|
||||||
the interval in seconds or use cron-like expressions.
|
|
||||||
|
|
||||||
Using Time Interval (seconds)
|
|
||||||
------------------------------
|
|
||||||
|
|
||||||
This example creates a continuous audit that runs every 5 minutes indefinitely
|
|
||||||
(300 seconds):
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit create \
|
|
||||||
--goal dummy \
|
|
||||||
--strategy dummy \
|
|
||||||
--audit_type CONTINUOUS \
|
|
||||||
--interval 300 \
|
|
||||||
--name "continuous-dummy-5min"
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| Field | Value |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| UUID | 7607cf57-ea05-4e1a-b8d7-34e570f95132 |
|
|
||||||
| Name | continuous-dummy-5min |
|
|
||||||
| Created At | 2025-08-12T07:26:18.496536+00:00 |
|
|
||||||
| Updated At | None |
|
|
||||||
| Deleted At | None |
|
|
||||||
| State | PENDING |
|
|
||||||
| Audit Type | CONTINUOUS |
|
|
||||||
| Parameters | {'para1': 3.2, 'para2': 'hello'} |
|
|
||||||
| Interval | 300 |
|
|
||||||
| Goal | dummy |
|
|
||||||
| Strategy | dummy |
|
|
||||||
| Audit Scope | [] |
|
|
||||||
| Auto Trigger | False |
|
|
||||||
| Next Run Time | None |
|
|
||||||
| Hostname | None |
|
|
||||||
| Start Time | None |
|
|
||||||
| End Time | None |
|
|
||||||
| Force | False |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
|
|
||||||
|
|
||||||
Using Cron Expression
|
|
||||||
----------------------
|
|
||||||
|
|
||||||
For more complex scheduling, you can use cron-like expressions. This example
|
|
||||||
runs the audit every hour at the 15-minute mark:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit create \
|
|
||||||
--goal dummy \
|
|
||||||
--strategy dummy \
|
|
||||||
--audit_type CONTINUOUS \
|
|
||||||
--interval "15 * * * *" \
|
|
||||||
--name "continuous-dummy-hourly"
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| Field | Value |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| UUID | 9cbce4f1-eb75-405a-8f4e-108eb08fdd0a |
|
|
||||||
| Name | continuous-dummy-hourly |
|
|
||||||
| Created At | 2025-08-12T07:32:31.469309+00:00 |
|
|
||||||
| Updated At | None |
|
|
||||||
| Deleted At | None |
|
|
||||||
| State | PENDING |
|
|
||||||
| Audit Type | CONTINUOUS |
|
|
||||||
| Parameters | {'para1': 3.2, 'para2': 'hello'} |
|
|
||||||
| Interval | 15 * * * * |
|
|
||||||
| Goal | dummy |
|
|
||||||
| Strategy | dummy |
|
|
||||||
| Audit Scope | [] |
|
|
||||||
| Auto Trigger | False |
|
|
||||||
| Next Run Time | None |
|
|
||||||
| Hostname | None |
|
|
||||||
| Start Time | None |
|
|
||||||
| End Time | None |
|
|
||||||
| Force | False |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
|
|
||||||
Time Constraints via start_time and end_time
|
|
||||||
--------------------------------------------
|
|
||||||
|
|
||||||
We can limit when the continuous audit runs by setting start and end times
|
|
||||||
in a time-interval schedule. The interval can passed in seconds or cron expression.
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
|
|
||||||
Start and End Time are interpreted in the timezone configured on the host where the
|
|
||||||
Watcher Decision Engine service is running. We can provide ``start_time`` and
|
|
||||||
``end_time`` in ISO 8601 format, for example ``'2025-08-13T14:30:00'``.
|
|
||||||
|
|
||||||
|
|
||||||
The example below creates a continuous audit that runs from 12:00 to 13:00
|
|
||||||
with a 5 minute interval.
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit create \
|
|
||||||
--goal dummy \
|
|
||||||
--strategy dummy \
|
|
||||||
--audit_type CONTINUOUS \
|
|
||||||
--interval 300 \
|
|
||||||
--start-time "$(date -d 'today 12:00' +%Y-%m-%dT%H:%M:%S)" \
|
|
||||||
--end-time "$(date -d 'today 13:00' +%Y-%m-%dT%H:%M:%S)" \
|
|
||||||
--name "continuous-dummy-5min"
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| Field | Value |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| UUID | dadd279b-1e3d-4c38-aba6-4a730a78589b |
|
|
||||||
| Name | continuous-dummy-5min |
|
|
||||||
| Created At | 2025-08-12T08:36:42.924460+00:00 |
|
|
||||||
| Updated At | None |
|
|
||||||
| Deleted At | None |
|
|
||||||
| State | PENDING |
|
|
||||||
| Audit Type | CONTINUOUS |
|
|
||||||
| Parameters | {'para1': 3.2, 'para2': 'hello'} |
|
|
||||||
| Interval | 300 |
|
|
||||||
| Goal | dummy |
|
|
||||||
| Strategy | dummy |
|
|
||||||
| Audit Scope | [] |
|
|
||||||
| Auto Trigger | False |
|
|
||||||
| Next Run Time | None |
|
|
||||||
| Hostname | None |
|
|
||||||
| Start Time | 2025-08-12T12:00:00 |
|
|
||||||
| End Time | 2025-08-12T13:00:00 |
|
|
||||||
| Force | False |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
|
|
||||||
Monitoring Continuous Audit Execution
|
|
||||||
======================================
|
|
||||||
|
|
||||||
Create a continuous audit
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
Create a continuous audit with 5 second interval:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit create \
|
|
||||||
--goal dummy \
|
|
||||||
--strategy dummy \
|
|
||||||
--audit_type CONTINUOUS \
|
|
||||||
--interval 5 \
|
|
||||||
--name "continuous-dummy-5sec"
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| Field | Value |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| UUID | 7d1f1961-41a6-47ae-a94a-cf5e43174fbd |
|
|
||||||
| Name | continuous-dummy-5sec |
|
|
||||||
| Created At | 2025-08-12T09:27:33.592575+00:00 |
|
|
||||||
| Updated At | None |
|
|
||||||
| Deleted At | None |
|
|
||||||
| State | PENDING |
|
|
||||||
| Audit Type | CONTINUOUS |
|
|
||||||
| Parameters | {'para1': 3.2, 'para2': 'hello'} |
|
|
||||||
| Interval | 5 |
|
|
||||||
| Goal | dummy |
|
|
||||||
| Strategy | dummy |
|
|
||||||
| Audit Scope | [] |
|
|
||||||
| Auto Trigger | False |
|
|
||||||
| Next Run Time | None |
|
|
||||||
| Hostname | None |
|
|
||||||
| Start Time | None |
|
|
||||||
| End Time | None |
|
|
||||||
| Force | False |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
|
|
||||||
Once created, the continuous audit will be automatically scheduled and executed
|
|
||||||
by the Watcher Decision Engine. You can monitor its progress:
|
|
||||||
|
|
||||||
Check Audit Status
|
|
||||||
------------------
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit show 7d1f1961-41a6-47ae-a94a-cf5e43174fbd
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| Field | Value |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
| UUID | 7d1f1961-41a6-47ae-a94a-cf5e43174fbd |
|
|
||||||
| Name | continuous-dummy-5sec |
|
|
||||||
| Created At | 2025-08-12T09:27:34+00:00 |
|
|
||||||
| Updated At | 2025-08-12T09:28:28+00:00 |
|
|
||||||
| Deleted At | None |
|
|
||||||
| State | ONGOING |
|
|
||||||
| Audit Type | CONTINUOUS |
|
|
||||||
| Parameters | {'para1': 3.2, 'para2': 'hello'} |
|
|
||||||
| Interval | 5 |
|
|
||||||
| Goal | dummy |
|
|
||||||
| Strategy | dummy |
|
|
||||||
| Audit Scope | [] |
|
|
||||||
| Auto Trigger | False |
|
|
||||||
| Next Run Time | 2025-08-12T09:28:33 |
|
|
||||||
| Hostname | chkumar-devstack-1 |
|
|
||||||
| Start Time | None |
|
|
||||||
| End Time | None |
|
|
||||||
| Force | False |
|
|
||||||
+---------------+--------------------------------------+
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
|
|
||||||
The *Next Run Time* is the next time the audit will run. It is calculated based on the
|
|
||||||
interval and the start and end times.
|
|
||||||
|
|
||||||
|
|
||||||
List Generated Action Plans
|
|
||||||
---------------------------
|
|
||||||
|
|
||||||
Each execution of the continuous audit generates a new action plan:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize actionplan list --audit 7d1f1961-41a6-47ae-a94a-cf5e43174fbd
|
|
||||||
+--------------------------------------+--------------------------------------+-------------+
|
|
||||||
| UUID | Audit | State |
|
|
||||||
+--------------------------------------+--------------------------------------+-------------+
|
|
||||||
| b301dd17-a139-4a45-ade2-b2c2ddf006ef | 7d1f1961-41a6-47ae-a94a-cf5e43174fbd | CANCELLED |
|
|
||||||
| 22a5bc60-adef-447a-aa27-731b4f5f7ee3 | 7d1f1961-41a6-47ae-a94a-cf5e43174fbd | RECOMMENDED |
|
|
||||||
+--------------------------------------+--------------------------------------+-------------+
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
|
|
||||||
In continuous audits, when a new action plan is generated, previous
|
|
||||||
RECOMMENDED action plans are automatically set to CANCELLED state to
|
|
||||||
avoid conflicts.
|
|
||||||
|
|
||||||
|
|
||||||
Manage Continuous Audits
|
|
||||||
========================
|
|
||||||
|
|
||||||
Stop a Continuous Audit
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
To stop a continuous audit, update its state:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit update 550e8400-e29b-41d4-a716-446655440000 replace state=CANCELLED
|
|
||||||
|
|
||||||
|
|
||||||
Modify Audit Interval
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
You can change the interval of a running continuous audit:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit update 550e8400-e29b-41d4-a716-446655440000 replace interval=900
|
|
||||||
|
|
||||||
The Decision Engine will automatically reschedule the audit with the new
|
|
||||||
interval.
|
|
||||||
|
|
||||||
Modify End Time
|
|
||||||
---------------
|
|
||||||
|
|
||||||
You can change the end time of a running continuous audit:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit update 550e8400-e29b-41d4-a716-446655440000 replace end_time=2025-08-12T14:00:00
|
|
||||||
|
|
||||||
Delete a Continuous Audit
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
In order to delete a continuous audit, the audit state must be
|
|
||||||
SUCCEEDED, FAILED, or CANCELLED.
|
|
||||||
An audit with PENDING or ONGOING state cannot be deleted.
|
|
||||||
|
|
||||||
To delete an ongoing or pending continuous audit, update its state to
|
|
||||||
CANCELLED:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit update 550e8400-e29b-41d4-a716-446655440000 replace state=CANCELLED
|
|
||||||
|
|
||||||
|
|
||||||
Then, delete the audit:
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
$ openstack optimize audit delete 550e8400-e29b-41d4-a716-446655440000
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Configuration Reference
|
|
||||||
========================
|
|
||||||
|
|
||||||
Continuous Audit Intervals
|
|
||||||
---------------------------
|
|
||||||
|
|
||||||
**Numeric Intervals (seconds):**
|
|
||||||
|
|
||||||
- Minimum recommended: 60 seconds
|
|
||||||
- Common values: 300 (5 min), 600 (10 min), 1800 (30 min), 3600 (1 hour)
|
|
||||||
|
|
||||||
**Cron Expressions (5 format fields):**
|
|
||||||
|
|
||||||
See the `POSIX crontab specification <https://pubs.opengroup.org/onlinepubs/9799919799/utilities/crontab.html>`_.
|
|
||||||
|
|
||||||
- ``0 * * * *``: Every hour at minute 0
|
|
||||||
- ``*/15 * * * *``: Every 15 minutes
|
|
||||||
- ``0 9-17 * * 1-5``: Every hour during business hours (9 AM - 5 PM, Mon-Fri)
|
|
||||||
- ``30 2 * * *``: Daily at 2:30 AM
|
|
||||||
|
|
||||||
|
|
||||||
Decision Engine Configuration
|
|
||||||
-----------------------------
|
|
||||||
|
|
||||||
The continuous audit polling interval is configured in ``watcher.conf``:
|
|
||||||
|
|
||||||
.. code-block:: ini
|
|
||||||
|
|
||||||
[watcher_decision_engine]
|
|
||||||
# Interval for checking continuous audits (seconds)
|
|
||||||
continuous_audit_interval = 30
|
|
||||||
|
|
||||||
Spec Linked with Continuous Audit
|
|
||||||
=================================
|
|
||||||
|
|
||||||
- `Watcher Continuous Optimization <https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/continuously-optimization.html>`_
|
|
||||||
- `Cron-based continuous audits <https://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/cron-based-continuous-audits.html>`_
|
|
||||||
- `Add the start and end time for CONTINUOUS audit <https://specs.openstack.org/openstack/watcher-specs/specs/stein/implemented/add-start-end-time-for-continuous-audit.html>`_
|
|
||||||
|
|
||||||
@@ -8,4 +8,3 @@ User Guide
|
|||||||
ways-to-install
|
ways-to-install
|
||||||
user-guide
|
user-guide
|
||||||
event_type_audit
|
event_type_audit
|
||||||
continuous_type_audit
|
|
||||||
|
|||||||
@@ -132,8 +132,8 @@ audit) that you want to use.
|
|||||||
$ openstack optimize audit create -a <your_audit_template>
|
$ openstack optimize audit create -a <your_audit_template>
|
||||||
|
|
||||||
If your_audit_template was created by --strategy <your_strategy>, and it
|
If your_audit_template was created by --strategy <your_strategy>, and it
|
||||||
defines some parameters (command ``watcher strategy show`` to check parameters
|
defines some parameters (command `watcher strategy show` to check parameters
|
||||||
format), your can append ``-p`` to input required parameters:
|
format), your can append `-p` to input required parameters:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
|
|||||||
@@ -9,8 +9,6 @@ namespace = oslo.concurrency
|
|||||||
namespace = oslo.db
|
namespace = oslo.db
|
||||||
namespace = oslo.log
|
namespace = oslo.log
|
||||||
namespace = oslo.messaging
|
namespace = oslo.messaging
|
||||||
namespace = oslo.middleware.cors
|
|
||||||
namespace = oslo.middleware.http_proxy_to_wsgi
|
|
||||||
namespace = oslo.policy
|
namespace = oslo.policy
|
||||||
namespace = oslo.reports
|
namespace = oslo.reports
|
||||||
namespace = oslo.service.periodic_task
|
namespace = oslo.service.periodic_task
|
||||||
|
|||||||
@@ -1,9 +0,0 @@
|
|||||||
---
|
|
||||||
- hosts: all
|
|
||||||
tasks:
|
|
||||||
- name: Generate prometheus.yml config file
|
|
||||||
delegate_to: controller
|
|
||||||
template:
|
|
||||||
src: "templates/prometheus.yml.j2"
|
|
||||||
dest: "/home/zuul/prometheus.yml"
|
|
||||||
mode: "0644"
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
global:
|
|
||||||
scrape_interval: 10s
|
|
||||||
scrape_configs:
|
|
||||||
- job_name: "node"
|
|
||||||
static_configs:
|
|
||||||
- targets: ["localhost:3000"]
|
|
||||||
{% if 'compute' in groups %}
|
|
||||||
{% for host in groups['compute'] %}
|
|
||||||
- targets: ["{{ hostvars[host]['ansible_fqdn'] }}:9100"]
|
|
||||||
labels:
|
|
||||||
fqdn: "{{ hostvars[host]['ansible_fqdn'] }}"
|
|
||||||
{% endfor %}
|
|
||||||
{% endif %}
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
[build-system]
|
|
||||||
requires = ["pbr>=6.0.0", "setuptools>=64.0.0"]
|
|
||||||
build-backend = "pbr.build"
|
|
||||||
@@ -1,8 +1,7 @@
|
|||||||
Rally job
|
Rally job
|
||||||
=========
|
=========
|
||||||
|
|
||||||
We provide, with Watcher, a Rally plugin you can use to benchmark
|
We provide, with Watcher, a Rally plugin you can use to benchmark the optimization service.
|
||||||
the optimization service.
|
|
||||||
|
|
||||||
To launch this task with configured Rally you just need to run:
|
To launch this task with configured Rally you just need to run:
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user