Compare commits
11 Commits
14.0.0.0rc
...
13.1.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
456f42e1b9 | ||
|
|
b5b1bc5473 | ||
|
|
c049a533e3 | ||
|
|
dbc06d1504 | ||
|
|
54b3b58428 | ||
|
|
8b0f1dbf66 | ||
|
|
0af13220da | ||
|
|
f85521f3c6 | ||
|
|
238bb50f53 | ||
|
|
db85f32675 | ||
|
|
f6015fd625 |
@@ -2,3 +2,4 @@
|
|||||||
host=review.opendev.org
|
host=review.opendev.org
|
||||||
port=29418
|
port=29418
|
||||||
project=openstack/watcher.git
|
project=openstack/watcher.git
|
||||||
|
defaultbranch=stable/2024.2
|
||||||
|
|||||||
@@ -1,62 +0,0 @@
|
|||||||
---
|
|
||||||
repos:
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
|
||||||
rev: v5.0.0
|
|
||||||
hooks:
|
|
||||||
# whitespace
|
|
||||||
- id: trailing-whitespace
|
|
||||||
- id: mixed-line-ending
|
|
||||||
args: ['--fix', 'lf']
|
|
||||||
exclude: '.*\.(svg)$'
|
|
||||||
- id: check-byte-order-marker
|
|
||||||
# file format and permissions
|
|
||||||
- id: check-ast
|
|
||||||
- id: debug-statements
|
|
||||||
- id: check-json
|
|
||||||
files: .*\.json$
|
|
||||||
- id: check-yaml
|
|
||||||
files: .*\.(yaml|yml)$
|
|
||||||
- id: check-executables-have-shebangs
|
|
||||||
- id: check-shebang-scripts-are-executable
|
|
||||||
# git
|
|
||||||
- id: check-added-large-files
|
|
||||||
- id: check-case-conflict
|
|
||||||
- id: detect-private-key
|
|
||||||
- id: check-merge-conflict
|
|
||||||
- repo: https://github.com/Lucas-C/pre-commit-hooks
|
|
||||||
rev: v1.5.5
|
|
||||||
hooks:
|
|
||||||
- id: remove-tabs
|
|
||||||
exclude: '.*\.(svg)$'
|
|
||||||
- repo: https://opendev.org/openstack/hacking
|
|
||||||
rev: 7.0.0
|
|
||||||
hooks:
|
|
||||||
- id: hacking
|
|
||||||
additional_dependencies: []
|
|
||||||
exclude: '^(doc|releasenotes|tools)/.*$'
|
|
||||||
- repo: https://github.com/PyCQA/bandit
|
|
||||||
rev: 1.7.6
|
|
||||||
hooks:
|
|
||||||
- id: bandit
|
|
||||||
args: ['-x', 'tests', '-s', 'B101,B311,B320']
|
|
||||||
- repo: https://github.com/hhatto/autopep8
|
|
||||||
rev: v2.3.1
|
|
||||||
hooks:
|
|
||||||
- id: autopep8
|
|
||||||
files: '^.*\.py$'
|
|
||||||
- repo: https://github.com/codespell-project/codespell
|
|
||||||
rev: v2.3.0
|
|
||||||
hooks:
|
|
||||||
- id: codespell
|
|
||||||
args: ['--ignore-words=doc/dictionary.txt']
|
|
||||||
- repo: https://github.com/sphinx-contrib/sphinx-lint
|
|
||||||
rev: v1.0.0
|
|
||||||
hooks:
|
|
||||||
- id: sphinx-lint
|
|
||||||
args: [--enable=default-role]
|
|
||||||
files: ^doc/|releasenotes|api-guide
|
|
||||||
types: [rst]
|
|
||||||
- repo: https://github.com/PyCQA/doc8
|
|
||||||
rev: v1.1.2
|
|
||||||
hooks:
|
|
||||||
- id: doc8
|
|
||||||
194
.zuul.yaml
194
.zuul.yaml
@@ -9,36 +9,88 @@
|
|||||||
check:
|
check:
|
||||||
jobs:
|
jobs:
|
||||||
- watcher-tempest-functional
|
- watcher-tempest-functional
|
||||||
- watcher-tempest-functional-jammy
|
|
||||||
- watcher-grenade
|
- watcher-grenade
|
||||||
- watcher-tempest-strategies
|
- watcher-tempest-strategies
|
||||||
- watcher-tempest-actuator
|
- watcher-tempest-actuator
|
||||||
- watcherclient-tempest-functional
|
- watcherclient-tempest-functional
|
||||||
- watcher-tempest-functional-ipv6-only
|
- watcher-tempest-functional-ipv6-only
|
||||||
- watcher-prometheus-integration
|
|
||||||
gate:
|
gate:
|
||||||
jobs:
|
jobs:
|
||||||
- watcher-tempest-functional
|
- watcher-tempest-functional
|
||||||
- watcher-tempest-functional-jammy
|
|
||||||
- watcher-tempest-functional-ipv6-only
|
- watcher-tempest-functional-ipv6-only
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-dummy_optim
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_dummy_optim
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-actuator
|
name: watcher-tempest-actuator
|
||||||
parent: watcher-tempest-multinode
|
parent: watcher-tempest-multinode
|
||||||
vars:
|
vars:
|
||||||
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_actuator
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_actuator
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-basic_optim
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_basic_optim
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-vm_workload_consolidation
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_vm_workload_consolidation
|
||||||
|
devstack_local_conf:
|
||||||
|
test-config:
|
||||||
|
$WATCHER_CONFIG:
|
||||||
|
watcher_strategies.vm_workload_consolidation:
|
||||||
|
datasource: ceilometer
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-workload_balancing
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_workload_balancing
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-zone_migration
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_zone_migration
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-host_maintenance
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_host_maintenance
|
||||||
|
|
||||||
|
- job:
|
||||||
|
name: watcher-tempest-storage_balance
|
||||||
|
parent: watcher-tempest-multinode
|
||||||
|
vars:
|
||||||
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_storage_balance
|
||||||
|
devstack_local_conf:
|
||||||
|
test-config:
|
||||||
|
$TEMPEST_CONFIG:
|
||||||
|
volume:
|
||||||
|
backend_names: ['BACKEND_1', 'BACKEND_2']
|
||||||
|
volume-feature-enabled:
|
||||||
|
multi_backend: true
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-strategies
|
name: watcher-tempest-strategies
|
||||||
parent: watcher-tempest-multinode
|
parent: watcher-tempest-multinode
|
||||||
vars:
|
vars:
|
||||||
tempest_concurrency: 1
|
tempest_concurrency: 1
|
||||||
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies
|
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies
|
||||||
|
tempest_exclude_regex: .*\[.*\breal_load\b.*\].*
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-multinode
|
name: watcher-tempest-multinode
|
||||||
parent: watcher-tempest-functional
|
parent: watcher-tempest-functional
|
||||||
nodeset: openstack-two-node-noble
|
nodeset: openstack-two-node-jammy
|
||||||
roles:
|
roles:
|
||||||
- zuul: openstack/tempest
|
- zuul: openstack/tempest
|
||||||
group-vars:
|
group-vars:
|
||||||
@@ -119,17 +171,6 @@
|
|||||||
zuul_copy_output:
|
zuul_copy_output:
|
||||||
/etc/hosts: logs
|
/etc/hosts: logs
|
||||||
|
|
||||||
# TODO(gmann): As per the 2025.1 testing runtime, we need to run at least
|
|
||||||
# one job on jammy. This job can be removed in the next cycle(2025.2)
|
|
||||||
- job:
|
|
||||||
name: watcher-tempest-functional-jammy
|
|
||||||
description: This is integrated job testing on Ubuntu jammy(22.04)
|
|
||||||
parent: watcher-tempest-functional
|
|
||||||
nodeset: openstack-single-node-jammy
|
|
||||||
vars:
|
|
||||||
<<: *base_vars
|
|
||||||
python_version: '3.9'
|
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: watcher-tempest-functional-ipv6-only
|
name: watcher-tempest-functional-ipv6-only
|
||||||
parent: devstack-tempest-ipv6
|
parent: devstack-tempest-ipv6
|
||||||
@@ -146,7 +187,7 @@
|
|||||||
- openstack/python-watcherclient
|
- openstack/python-watcherclient
|
||||||
- openstack/watcher-tempest-plugin
|
- openstack/watcher-tempest-plugin
|
||||||
vars: *base_vars
|
vars: *base_vars
|
||||||
irrelevant-files: &irrelevent_files
|
irrelevant-files:
|
||||||
- ^(test-|)requirements.txt$
|
- ^(test-|)requirements.txt$
|
||||||
- ^.*\.rst$
|
- ^.*\.rst$
|
||||||
- ^api-ref/.*$
|
- ^api-ref/.*$
|
||||||
@@ -166,124 +207,3 @@
|
|||||||
vars:
|
vars:
|
||||||
tempest_concurrency: 1
|
tempest_concurrency: 1
|
||||||
tempest_test_regex: watcher_tempest_plugin.tests.client_functional
|
tempest_test_regex: watcher_tempest_plugin.tests.client_functional
|
||||||
|
|
||||||
- job:
|
|
||||||
name: watcher-sg-core-tempest-base
|
|
||||||
parent: devstack-tempest
|
|
||||||
nodeset: openstack-two-node-noble
|
|
||||||
description: |
|
|
||||||
This job is for testing watcher and sg-core/prometheus installation
|
|
||||||
abstract: true
|
|
||||||
pre-run:
|
|
||||||
- playbooks/generate_prometheus_config.yml
|
|
||||||
irrelevant-files: *irrelevent_files
|
|
||||||
timeout: 7800
|
|
||||||
required-projects: &base_sg_required_projects
|
|
||||||
- openstack/aodh
|
|
||||||
- openstack/ceilometer
|
|
||||||
- openstack/tempest
|
|
||||||
- openstack-k8s-operators/sg-core
|
|
||||||
- openstack/watcher
|
|
||||||
- openstack/python-watcherclient
|
|
||||||
- openstack/watcher-tempest-plugin
|
|
||||||
- openstack/devstack-plugin-prometheus
|
|
||||||
vars:
|
|
||||||
configure_swap_size: 8192
|
|
||||||
devstack_plugins:
|
|
||||||
ceilometer: https://opendev.org/openstack/ceilometer
|
|
||||||
aodh: https://opendev.org/openstack/aodh
|
|
||||||
sg-core: https://github.com/openstack-k8s-operators/sg-core
|
|
||||||
watcher: https://opendev.org/openstack/watcher
|
|
||||||
devstack-plugin-prometheus: https://opendev.org/openstack/devstack-plugin-prometheus
|
|
||||||
devstack_services:
|
|
||||||
watcher-api: true
|
|
||||||
watcher-decision-engine: true
|
|
||||||
watcher-applier: true
|
|
||||||
tempest: true
|
|
||||||
# We do not need Swift in this job so disable it for speed
|
|
||||||
# Swift services
|
|
||||||
s-account: false
|
|
||||||
s-container: false
|
|
||||||
s-object: false
|
|
||||||
s-proxy: false
|
|
||||||
# Prometheus related service
|
|
||||||
prometheus: true
|
|
||||||
node_exporter: true
|
|
||||||
devstack_localrc:
|
|
||||||
CEILOMETER_BACKENDS: "sg-core"
|
|
||||||
CEILOMETER_PIPELINE_INTERVAL: 15
|
|
||||||
CEILOMETER_ALARM_THRESHOLD: 6000000000
|
|
||||||
NODE_EXPORTER_ENABLE: false
|
|
||||||
PROMETHEUS_ENABLE: false
|
|
||||||
PROMETHEUS_SERVICE_SCRAPE_TARGETS: "sg-core,node-exporter"
|
|
||||||
PROMETHEUS_CONFIG_FILE: "/home/zuul/prometheus.yml"
|
|
||||||
devstack_local_conf:
|
|
||||||
post-config:
|
|
||||||
$WATCHER_CONF:
|
|
||||||
watcher_datasources:
|
|
||||||
datasources: prometheus
|
|
||||||
prometheus_client:
|
|
||||||
host: 127.0.0.1
|
|
||||||
port: 9090
|
|
||||||
watcher_cluster_data_model_collectors.compute:
|
|
||||||
period: 120
|
|
||||||
watcher_cluster_data_model_collectors.baremetal:
|
|
||||||
period: 120
|
|
||||||
watcher_cluster_data_model_collectors.storage:
|
|
||||||
period: 120
|
|
||||||
test-config:
|
|
||||||
$TEMPEST_CONFIG:
|
|
||||||
compute:
|
|
||||||
min_compute_nodes: 2
|
|
||||||
min_microversion: 2.56
|
|
||||||
compute-feature-enabled:
|
|
||||||
live_migration: true
|
|
||||||
block_migration_for_live_migration: true
|
|
||||||
placement:
|
|
||||||
min_microversion: 1.29
|
|
||||||
service_available:
|
|
||||||
sg_core: True
|
|
||||||
telemetry_services:
|
|
||||||
metric_backends: prometheus
|
|
||||||
telemetry:
|
|
||||||
disable_ssl_certificate_validation: True
|
|
||||||
ceilometer_polling_interval: 15
|
|
||||||
optimize:
|
|
||||||
datasource: prometheus
|
|
||||||
tempest_plugins:
|
|
||||||
- watcher-tempest-plugin
|
|
||||||
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies
|
|
||||||
tempest_concurrency: 1
|
|
||||||
tox_envlist: all
|
|
||||||
zuul_copy_output:
|
|
||||||
/etc/prometheus/prometheus.yml: logs
|
|
||||||
group-vars:
|
|
||||||
subnode:
|
|
||||||
devstack_plugins:
|
|
||||||
ceilometer: https://opendev.org/openstack/ceilometer
|
|
||||||
sg-core: https://github.com/openstack-k8s-operators/sg-core
|
|
||||||
devstack-plugin-prometheus: https://opendev.org/openstack/devstack-plugin-prometheus
|
|
||||||
devstack_services:
|
|
||||||
ceilometer-acompute: true
|
|
||||||
sg-core: false
|
|
||||||
prometheus: false
|
|
||||||
node_exporter: true
|
|
||||||
devstack_localrc:
|
|
||||||
CEILOMETER_BACKEND: "none"
|
|
||||||
CEILOMETER_BACKENDS: "none"
|
|
||||||
# sg_core related var
|
|
||||||
NODE_EXPORTER_ENABLE: false
|
|
||||||
PROMETHEUS_ENABLE: false
|
|
||||||
devstack_local_conf:
|
|
||||||
post-config:
|
|
||||||
$WATCHER_CONF:
|
|
||||||
watcher_cluster_data_model_collectors.compute:
|
|
||||||
period: 120
|
|
||||||
watcher_cluster_data_model_collectors.baremetal:
|
|
||||||
period: 120
|
|
||||||
watcher_cluster_data_model_collectors.storage:
|
|
||||||
period: 120
|
|
||||||
|
|
||||||
- job:
|
|
||||||
name: watcher-prometheus-integration
|
|
||||||
parent: watcher-sg-core-tempest-base
|
|
||||||
|
|||||||
@@ -30,7 +30,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"auto_trigger": false,
|
"auto_trigger": false,
|
||||||
"force": false,
|
"force": false,
|
||||||
"uuid": "65a5da84-5819-4aea-8278-a28d2b489028",
|
"uuid": "65a5da84-5819-4aea-8278-a28d2b489028",
|
||||||
"goal_name": "workload_balancing",
|
"goal_name": "workload_balancing",
|
||||||
"scope": [],
|
"scope": [],
|
||||||
|
|||||||
@@ -13,9 +13,9 @@
|
|||||||
"node_vcpu_ratio": "16.0",
|
"node_vcpu_ratio": "16.0",
|
||||||
"node_memory": "16383",
|
"node_memory": "16383",
|
||||||
"node_memory_ratio": "1.5",
|
"node_memory_ratio": "1.5",
|
||||||
"node_disk": "37",
|
"node_disk": "37"
|
||||||
"node_disk_ratio": "1.0",
|
"node_disk_ratio": "1.0",
|
||||||
"node_state": "up"
|
"node_state": "up",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"server_uuid": "e2cb5f6f-fa1d-4ba2-be1e-0bf02fa86ba4",
|
"server_uuid": "e2cb5f6f-fa1d-4ba2-be1e-0bf02fa86ba4",
|
||||||
@@ -30,9 +30,9 @@
|
|||||||
"node_vcpu_ratio": "16.0",
|
"node_vcpu_ratio": "16.0",
|
||||||
"node_memory": "16383",
|
"node_memory": "16383",
|
||||||
"node_memory_ratio": "1.5",
|
"node_memory_ratio": "1.5",
|
||||||
"node_disk": "37",
|
"node_disk": "37"
|
||||||
"node_disk_ratio": "1.0",
|
"node_disk_ratio": "1.0",
|
||||||
"node_state": "up"
|
"node_state": "up",
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ Here are some examples of ``Goals``:
|
|||||||
- minimize the energy consumption
|
- minimize the energy consumption
|
||||||
- minimize the number of compute nodes (consolidation)
|
- minimize the number of compute nodes (consolidation)
|
||||||
- balance the workload among compute nodes
|
- balance the workload among compute nodes
|
||||||
- minimize the license cost (some software have a licensing model which is
|
- minimize the license cost (some softwares have a licensing model which is
|
||||||
based on the number of sockets or cores where the software is deployed)
|
based on the number of sockets or cores where the software is deployed)
|
||||||
- find the most appropriate moment for a planned maintenance on a
|
- find the most appropriate moment for a planned maintenance on a
|
||||||
given group of host (which may be an entire availability zone):
|
given group of host (which may be an entire availability zone):
|
||||||
@@ -123,4 +123,4 @@ Response
|
|||||||
**Example JSON representation of a Goal:**
|
**Example JSON representation of a Goal:**
|
||||||
|
|
||||||
.. literalinclude:: samples/goal-show-response.json
|
.. literalinclude:: samples/goal-show-response.json
|
||||||
:language: javascript
|
:language: javascript
|
||||||
@@ -1,3 +1,5 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
# lib/watcher
|
# lib/watcher
|
||||||
# Functions to control the configuration and operation of the watcher services
|
# Functions to control the configuration and operation of the watcher services
|
||||||
|
|
||||||
@@ -36,6 +38,7 @@ GITBRANCH["python-watcherclient"]=${WATCHERCLIENT_BRANCH:-master}
|
|||||||
GITDIR["python-watcherclient"]=$DEST/python-watcherclient
|
GITDIR["python-watcherclient"]=$DEST/python-watcherclient
|
||||||
|
|
||||||
WATCHER_STATE_PATH=${WATCHER_STATE_PATH:=$DATA_DIR/watcher}
|
WATCHER_STATE_PATH=${WATCHER_STATE_PATH:=$DATA_DIR/watcher}
|
||||||
|
WATCHER_AUTH_CACHE_DIR=${WATCHER_AUTH_CACHE_DIR:-/var/cache/watcher}
|
||||||
|
|
||||||
WATCHER_CONF_DIR=/etc/watcher
|
WATCHER_CONF_DIR=/etc/watcher
|
||||||
WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf
|
WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf
|
||||||
@@ -100,7 +103,7 @@ function _cleanup_watcher_apache_wsgi {
|
|||||||
# cleanup_watcher() - Remove residual data files, anything left over from previous
|
# cleanup_watcher() - Remove residual data files, anything left over from previous
|
||||||
# runs that a clean run would need to clean up
|
# runs that a clean run would need to clean up
|
||||||
function cleanup_watcher {
|
function cleanup_watcher {
|
||||||
sudo rm -rf $WATCHER_STATE_PATH
|
sudo rm -rf $WATCHER_STATE_PATH $WATCHER_AUTH_CACHE_DIR
|
||||||
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
|
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
|
||||||
remove_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI"
|
remove_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI"
|
||||||
else
|
else
|
||||||
@@ -207,8 +210,8 @@ function create_watcher_conf {
|
|||||||
|
|
||||||
iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2"
|
iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2"
|
||||||
|
|
||||||
configure_keystone_authtoken_middleware $WATCHER_CONF watcher
|
configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR
|
||||||
configure_keystone_authtoken_middleware $WATCHER_CONF watcher "watcher_clients_auth"
|
configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR "watcher_clients_auth"
|
||||||
|
|
||||||
if is_fedora || is_suse; then
|
if is_fedora || is_suse; then
|
||||||
# watcher defaults to /usr/local/bin, but fedora and suse pip like to
|
# watcher defaults to /usr/local/bin, but fedora and suse pip like to
|
||||||
@@ -245,6 +248,13 @@ function create_watcher_conf {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# create_watcher_cache_dir() - Part of the init_watcher() process
|
||||||
|
function create_watcher_cache_dir {
|
||||||
|
# Create cache dir
|
||||||
|
sudo install -d -o $STACK_USER $WATCHER_AUTH_CACHE_DIR
|
||||||
|
rm -rf $WATCHER_AUTH_CACHE_DIR/*
|
||||||
|
}
|
||||||
|
|
||||||
# init_watcher() - Initialize databases, etc.
|
# init_watcher() - Initialize databases, etc.
|
||||||
function init_watcher {
|
function init_watcher {
|
||||||
# clean up from previous (possibly aborted) runs
|
# clean up from previous (possibly aborted) runs
|
||||||
@@ -256,6 +266,7 @@ function init_watcher {
|
|||||||
# Create watcher schema
|
# Create watcher schema
|
||||||
$WATCHER_BIN_DIR/watcher-db-manage --config-file $WATCHER_CONF upgrade
|
$WATCHER_BIN_DIR/watcher-db-manage --config-file $WATCHER_CONF upgrade
|
||||||
fi
|
fi
|
||||||
|
create_watcher_cache_dir
|
||||||
}
|
}
|
||||||
|
|
||||||
# install_watcherclient() - Collect source and prepare
|
# install_watcherclient() - Collect source and prepare
|
||||||
@@ -330,6 +341,19 @@ function stop_watcher {
|
|||||||
done
|
done
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# configure_tempest_for_watcher() - Configure Tempest for watcher
|
||||||
|
function configure_tempest_for_watcher {
|
||||||
|
# Set default microversion for watcher-tempest-plugin
|
||||||
|
# Please make sure to update this when the microversion is updated, otherwise
|
||||||
|
# new tests may be skipped.
|
||||||
|
TEMPEST_WATCHER_MIN_MICROVERSION=${TEMPEST_WATCHER_MIN_MICROVERSION:-"1.0"}
|
||||||
|
TEMPEST_WATCHER_MAX_MICROVERSION=${TEMPEST_WATCHER_MAX_MICROVERSION:-"1.4"}
|
||||||
|
|
||||||
|
# Set microversion options in tempest.conf
|
||||||
|
iniset $TEMPEST_CONFIG optimize min_microversion $TEMPEST_WATCHER_MIN_MICROVERSION
|
||||||
|
iniset $TEMPEST_CONFIG optimize max_microversion $TEMPEST_WATCHER_MAX_MICROVERSION
|
||||||
|
}
|
||||||
|
|
||||||
# Restore xtrace
|
# Restore xtrace
|
||||||
$_XTRACE_WATCHER
|
$_XTRACE_WATCHER
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
# plugin.sh - DevStack plugin script to install watcher
|
# plugin.sh - DevStack plugin script to install watcher
|
||||||
|
|
||||||
# Save trace setting
|
# Save trace setting
|
||||||
@@ -36,6 +38,9 @@ if is_service_enabled watcher-api watcher-decision-engine watcher-applier; then
|
|||||||
# Start the watcher components
|
# Start the watcher components
|
||||||
echo_summary "Starting watcher"
|
echo_summary "Starting watcher"
|
||||||
start_watcher
|
start_watcher
|
||||||
|
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
|
||||||
|
echo_summary "Configuring tempest for watcher"
|
||||||
|
configure_tempest_for_watcher
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "$1" == "unstack" ]]; then
|
if [[ "$1" == "unstack" ]]; then
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
# ``upgrade-watcher``
|
# ``upgrade-watcher``
|
||||||
|
|
||||||
function configure_watcher_upgrade {
|
function configure_watcher_upgrade {
|
||||||
|
|||||||
@@ -1,4 +0,0 @@
|
|||||||
thirdparty
|
|
||||||
assertin
|
|
||||||
notin
|
|
||||||
|
|
||||||
@@ -52,7 +52,7 @@ class BaseWatcherDirective(rst.Directive):
|
|||||||
obj_raw_docstring = obj.__init__.__doc__
|
obj_raw_docstring = obj.__init__.__doc__
|
||||||
|
|
||||||
if not obj_raw_docstring:
|
if not obj_raw_docstring:
|
||||||
# Raise a warning to make the tests fail with doc8
|
# Raise a warning to make the tests fail wit doc8
|
||||||
raise self.error("No docstring available for %s!" % obj)
|
raise self.error("No docstring available for %s!" % obj)
|
||||||
|
|
||||||
obj_docstring = inspect.cleandoc(obj_raw_docstring)
|
obj_docstring = inspect.cleandoc(obj_raw_docstring)
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
sphinx>=2.1.1 # BSD
|
# The order of packages is significant, because pip processes them in the order
|
||||||
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
|
# of appearance. Changing the order has an impact on the overall integration
|
||||||
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
|
# process, which may cause wedges in the gate later.
|
||||||
sphinxcontrib-apidoc>=0.2.0 # BSD
|
|
||||||
# openstack
|
|
||||||
os-api-ref>=1.4.0 # Apache-2.0
|
|
||||||
openstackdocstheme>=2.2.1 # Apache-2.0
|
openstackdocstheme>=2.2.1 # Apache-2.0
|
||||||
# releasenotes
|
sphinx>=2.0.0,!=2.1.0 # BSD
|
||||||
|
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
|
||||||
|
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
|
||||||
reno>=3.1.0 # Apache-2.0
|
reno>=3.1.0 # Apache-2.0
|
||||||
|
sphinxcontrib-apidoc>=0.2.0 # BSD
|
||||||
|
os-api-ref>=1.4.0 # Apache-2.0
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ own sections. However, the base *GMR* consists of several sections:
|
|||||||
|
|
||||||
Package
|
Package
|
||||||
Shows information about the package to which this process belongs, including
|
Shows information about the package to which this process belongs, including
|
||||||
version information.
|
version informations.
|
||||||
|
|
||||||
Threads
|
Threads
|
||||||
Shows stack traces and thread ids for each of the threads within this
|
Shows stack traces and thread ids for each of the threads within this
|
||||||
|
|||||||
@@ -285,7 +285,7 @@ Audit and interval (in case of CONTINUOUS type). There is three types of Audit:
|
|||||||
ONESHOT, CONTINUOUS and EVENT. ONESHOT Audit is launched once and if it
|
ONESHOT, CONTINUOUS and EVENT. ONESHOT Audit is launched once and if it
|
||||||
succeeded executed new action plan list will be provided; CONTINUOUS Audit
|
succeeded executed new action plan list will be provided; CONTINUOUS Audit
|
||||||
creates action plans with specified interval (in seconds or cron format, cron
|
creates action plans with specified interval (in seconds or cron format, cron
|
||||||
interval can be used like: ``*/5 * * * *``), if action plan
|
inteval can be used like: `*/5 * * * *`), if action plan
|
||||||
has been created, all previous action plans get CANCELLED state;
|
has been created, all previous action plans get CANCELLED state;
|
||||||
EVENT audit is launched when receiving webhooks API.
|
EVENT audit is launched when receiving webhooks API.
|
||||||
|
|
||||||
|
|||||||
2
doc/source/conf.py
Normal file → Executable file
2
doc/source/conf.py
Normal file → Executable file
@@ -115,7 +115,7 @@ html_theme = 'openstackdocs'
|
|||||||
htmlhelp_basename = '%sdoc' % project
|
htmlhelp_basename = '%sdoc' % project
|
||||||
|
|
||||||
|
|
||||||
# openstackdocstheme options
|
#openstackdocstheme options
|
||||||
openstackdocs_repo_name = 'openstack/watcher'
|
openstackdocs_repo_name = 'openstack/watcher'
|
||||||
openstackdocs_pdf_link = True
|
openstackdocs_pdf_link = True
|
||||||
openstackdocs_auto_name = False
|
openstackdocs_auto_name = False
|
||||||
|
|||||||
@@ -194,14 +194,11 @@ The configuration file is organized into the following sections:
|
|||||||
* ``[watcher_applier]`` - Watcher Applier module configuration
|
* ``[watcher_applier]`` - Watcher Applier module configuration
|
||||||
* ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration
|
* ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration
|
||||||
* ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration
|
* ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration
|
||||||
|
* ``[ceilometer_client]`` - Ceilometer client configuration
|
||||||
* ``[cinder_client]`` - Cinder client configuration
|
* ``[cinder_client]`` - Cinder client configuration
|
||||||
* ``[glance_client]`` - Glance client configuration
|
* ``[glance_client]`` - Glance client configuration
|
||||||
* ``[gnocchi_client]`` - Gnocchi client configuration
|
|
||||||
* ``[ironic_client]`` - Ironic client configuration
|
|
||||||
* ``[keystone_client]`` - Keystone client configuration
|
|
||||||
* ``[nova_client]`` - Nova client configuration
|
* ``[nova_client]`` - Nova client configuration
|
||||||
* ``[neutron_client]`` - Neutron client configuration
|
* ``[neutron_client]`` - Neutron client configuration
|
||||||
* ``[placement_client]`` - Placement client configuration
|
|
||||||
|
|
||||||
The Watcher configuration file is expected to be named
|
The Watcher configuration file is expected to be named
|
||||||
``watcher.conf``. When starting Watcher, you can specify a different
|
``watcher.conf``. When starting Watcher, you can specify a different
|
||||||
|
|||||||
@@ -221,7 +221,7 @@ workflow engine can halt or take other actions while the action plan is being
|
|||||||
executed based on the success or failure of individual actions. However, the
|
executed based on the success or failure of individual actions. However, the
|
||||||
base workflow engine simply uses these notifies to store the result of
|
base workflow engine simply uses these notifies to store the result of
|
||||||
individual actions in the database. Additionally, since taskflow uses a graph
|
individual actions in the database. Additionally, since taskflow uses a graph
|
||||||
flow if any of the tasks would fail all children of this tasks not be executed
|
flow if any of the tasks would fail all childs of this tasks not be executed
|
||||||
while ``do_revert`` will be triggered for all parents.
|
while ``do_revert`` will be triggered for all parents.
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ multinode environment to use.
|
|||||||
You can set up the Watcher services quickly and easily using a Watcher
|
You can set up the Watcher services quickly and easily using a Watcher
|
||||||
DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin
|
DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin
|
||||||
model. To enable the Watcher plugin with DevStack, add the following to the
|
model. To enable the Watcher plugin with DevStack, add the following to the
|
||||||
``[[local|localrc]]`` section of your controller's ``local.conf`` to enable the
|
`[[local|localrc]]` section of your controller's `local.conf` to enable the
|
||||||
Watcher plugin::
|
Watcher plugin::
|
||||||
|
|
||||||
enable_plugin watcher https://opendev.org/openstack/watcher
|
enable_plugin watcher https://opendev.org/openstack/watcher
|
||||||
@@ -32,7 +32,7 @@ Quick Devstack Instructions with Datasources
|
|||||||
|
|
||||||
Watcher requires a datasource to collect metrics from compute nodes and
|
Watcher requires a datasource to collect metrics from compute nodes and
|
||||||
instances in order to execute most strategies. To enable this a
|
instances in order to execute most strategies. To enable this a
|
||||||
``[[local|localrc]]`` to setup DevStack for some of the supported datasources
|
`[[local|localrc]]` to setup DevStack for some of the supported datasources
|
||||||
is provided. These examples specify the minimal configuration parameters to
|
is provided. These examples specify the minimal configuration parameters to
|
||||||
get both Watcher and the datasource working but can be expanded is desired.
|
get both Watcher and the datasource working but can be expanded is desired.
|
||||||
|
|
||||||
@@ -41,60 +41,54 @@ Gnocchi
|
|||||||
|
|
||||||
With the Gnocchi datasource most of the metrics for compute nodes and
|
With the Gnocchi datasource most of the metrics for compute nodes and
|
||||||
instances will work with the provided configuration but metrics that
|
instances will work with the provided configuration but metrics that
|
||||||
require Ironic such as ``host_airflow and`` ``host_power`` will still be
|
require Ironic such as `host_airflow and` `host_power` will still be
|
||||||
unavailable as well as ``instance_l3_cpu_cache``
|
unavailable as well as `instance_l3_cpu_cache`::
|
||||||
|
|
||||||
.. code-block:: ini
|
[[local|localrc]]
|
||||||
|
enable_plugin watcher https://opendev.org/openstack/watcher
|
||||||
|
|
||||||
[[local|localrc]]
|
enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard
|
||||||
|
|
||||||
enable_plugin watcher https://opendev.org/openstack/watcher
|
enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git
|
||||||
enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard
|
CEILOMETER_BACKEND=gnocchi
|
||||||
enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git
|
|
||||||
enable_plugin aodh https://opendev.org/openstack/aodh
|
|
||||||
enable_plugin panko https://opendev.org/openstack/panko
|
|
||||||
|
|
||||||
CEILOMETER_BACKEND=gnocchi
|
enable_plugin aodh https://opendev.org/openstack/aodh
|
||||||
[[post-config|$NOVA_CONF]]
|
enable_plugin panko https://opendev.org/openstack/panko
|
||||||
[DEFAULT]
|
|
||||||
compute_monitors=cpu.virt_driver
|
[[post-config|$NOVA_CONF]]
|
||||||
|
[DEFAULT]
|
||||||
|
compute_monitors=cpu.virt_driver
|
||||||
|
|
||||||
Detailed DevStack Instructions
|
Detailed DevStack Instructions
|
||||||
==============================
|
==============================
|
||||||
|
|
||||||
#. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack).
|
#. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack).
|
||||||
One of these servers will be the controller node while the others will be
|
One of these servers will be the controller node while the others will be
|
||||||
compute nodes. N is preferably >= 3 so that you have at least 2 compute
|
compute nodes. N is preferably >= 3 so that you have at least 2 compute
|
||||||
nodes, but in order to stand up the Watcher services only 1 server is
|
nodes, but in order to stand up the Watcher services only 1 server is
|
||||||
needed (i.e., no computes are needed if you want to just experiment with
|
needed (i.e., no computes are needed if you want to just experiment with
|
||||||
the Watcher services). These servers can be VMs running on your local
|
the Watcher services). These servers can be VMs running on your local
|
||||||
machine via VirtualBox if you prefer. DevStack currently recommends that
|
machine via VirtualBox if you prefer. DevStack currently recommends that
|
||||||
you use Ubuntu 16.04 LTS. The servers should also have connections to the
|
you use Ubuntu 16.04 LTS. The servers should also have connections to the
|
||||||
same network such that they are all able to communicate with one another.
|
same network such that they are all able to communicate with one another.
|
||||||
|
|
||||||
#. For each server, clone the DevStack repository and create the stack user
|
#. For each server, clone the DevStack repository and create the stack user::
|
||||||
|
|
||||||
.. code-block:: bash
|
sudo apt-get update
|
||||||
|
sudo apt-get install git
|
||||||
sudo apt-get update
|
git clone https://opendev.org/openstack/devstack.git
|
||||||
sudo apt-get install git
|
sudo ./devstack/tools/create-stack-user.sh
|
||||||
git clone https://opendev.org/openstack/devstack.git
|
|
||||||
sudo ./devstack/tools/create-stack-user.sh
|
|
||||||
|
|
||||||
Now you have a stack user that is used to run the DevStack processes. You
|
Now you have a stack user that is used to run the DevStack processes. You
|
||||||
may want to give your stack user a password to allow SSH via a password
|
may want to give your stack user a password to allow SSH via a password::
|
||||||
|
|
||||||
.. code-block:: bash
|
sudo passwd stack
|
||||||
|
|
||||||
sudo passwd stack
|
#. Switch to the stack user and clone the DevStack repo again::
|
||||||
|
|
||||||
#. Switch to the stack user and clone the DevStack repo again
|
sudo su stack
|
||||||
|
cd ~
|
||||||
.. code-block:: bash
|
git clone https://opendev.org/openstack/devstack.git
|
||||||
|
|
||||||
sudo su stack
|
|
||||||
cd ~
|
|
||||||
git clone https://opendev.org/openstack/devstack.git
|
|
||||||
|
|
||||||
#. For each compute node, copy the provided `local.conf.compute`_ example file
|
#. For each compute node, copy the provided `local.conf.compute`_ example file
|
||||||
to the compute node's system at ~/devstack/local.conf. Make sure the
|
to the compute node's system at ~/devstack/local.conf. Make sure the
|
||||||
@@ -117,30 +111,24 @@ Detailed DevStack Instructions
|
|||||||
the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP
|
the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP
|
||||||
address of the controller node.
|
address of the controller node.
|
||||||
|
|
||||||
.. NOTE::
|
Note: if you want to use another Watcher git repository (such as a local
|
||||||
if you want to use another Watcher git repository (such as a local
|
one), then change the enable plugin line::
|
||||||
one), then change the enable plugin line
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
enable_plugin watcher <your_local_git_repo> [optional_branch]
|
|
||||||
|
|
||||||
|
enable_plugin watcher <your_local_git_repo> [optional_branch]
|
||||||
|
|
||||||
If you do this, then the Watcher DevStack plugin will try to pull the
|
If you do this, then the Watcher DevStack plugin will try to pull the
|
||||||
python-watcherclient repo from ``<your_local_git_repo>/../``, so either make
|
python-watcherclient repo from <your_local_git_repo>/../, so either make
|
||||||
sure that is also available or specify WATCHERCLIENT_REPO in the ``local.conf``
|
sure that is also available or specify WATCHERCLIENT_REPO in the local.conf
|
||||||
file.
|
file.
|
||||||
|
|
||||||
.. NOTE::
|
Note: if you want to use a specific branch, specify WATCHER_BRANCH in the
|
||||||
if you want to use a specific branch, specify WATCHER_BRANCH in the
|
local.conf file. By default it will use the master branch.
|
||||||
local.conf file. By default it will use the master branch.
|
|
||||||
|
|
||||||
.. Note::
|
Note: watcher-api will default run under apache/httpd, set the variable
|
||||||
watcher-api will default run under apache/httpd, set the variable
|
WATCHER_USE_MOD_WSGI=FALSE if you do not wish to run under apache/httpd.
|
||||||
WATCHER_USE_MOD_WSGI=FALSE if you do not wish to run under apache/httpd.
|
For development environment it is suggested to set WATCHER_USE_MOD_WSGI
|
||||||
For development environment it is suggested to set WATCHER_USE_MOD_WSGI
|
to FALSE. For Production environment it is suggested to keep it at the
|
||||||
to FALSE. For Production environment it is suggested to keep it at the
|
default TRUE value.
|
||||||
default TRUE value.
|
|
||||||
|
|
||||||
#. Start stacking from the controller node::
|
#. Start stacking from the controller node::
|
||||||
|
|
||||||
@@ -148,9 +136,8 @@ Detailed DevStack Instructions
|
|||||||
|
|
||||||
#. Start stacking on each of the compute nodes using the same command.
|
#. Start stacking on each of the compute nodes using the same command.
|
||||||
|
|
||||||
.. seealso::
|
#. Configure the environment for live migration via NFS. See the
|
||||||
Configure the environment for live migration via NFS. See the
|
`Multi-Node DevStack Environment`_ section for more details.
|
||||||
`Multi-Node DevStack Environment`_ section for more details.
|
|
||||||
|
|
||||||
.. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller
|
.. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller
|
||||||
.. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute
|
.. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute
|
||||||
@@ -162,19 +149,60 @@ Since deploying Watcher with only a single compute node is not very useful, a
|
|||||||
few tips are given here for enabling a multi-node environment with live
|
few tips are given here for enabling a multi-node environment with live
|
||||||
migration.
|
migration.
|
||||||
|
|
||||||
.. NOTE::
|
Configuring NFS Server
|
||||||
|
----------------------
|
||||||
|
|
||||||
Nova supports live migration with local block storage so by default NFS
|
If you would like to use live migration for shared storage, then the controller
|
||||||
is not required and is considered an advance configuration.
|
can serve as the NFS server if needed::
|
||||||
The minimum requirements for live migration are:
|
|
||||||
|
|
||||||
- all hostnames are resolvable on each host
|
sudo apt-get install nfs-kernel-server
|
||||||
- all hosts have a passwordless ssh key that is trusted by the other hosts
|
sudo mkdir -p /nfs/instances
|
||||||
- all hosts have a known_hosts file that lists each host
|
sudo chown stack:stack /nfs/instances
|
||||||
|
|
||||||
If these requirements are met live migration will be possible.
|
Add an entry to `/etc/exports` with the appropriate gateway and netmask
|
||||||
Shared storage such as ceph, booting from cinder volume or nfs are recommended
|
information::
|
||||||
when testing evacuate if you want to preserve vm data.
|
|
||||||
|
/nfs/instances <gateway>/<netmask>(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)
|
||||||
|
|
||||||
|
Export the NFS directories::
|
||||||
|
|
||||||
|
sudo exportfs -ra
|
||||||
|
|
||||||
|
Make sure the NFS server is running::
|
||||||
|
|
||||||
|
sudo service nfs-kernel-server status
|
||||||
|
|
||||||
|
If the server is not running, then start it::
|
||||||
|
|
||||||
|
sudo service nfs-kernel-server start
|
||||||
|
|
||||||
|
Configuring NFS on Compute Node
|
||||||
|
-------------------------------
|
||||||
|
|
||||||
|
Each compute node needs to use the NFS server to hold the instance data::
|
||||||
|
|
||||||
|
sudo apt-get install rpcbind nfs-common
|
||||||
|
mkdir -p /opt/stack/data/instances
|
||||||
|
sudo mount <nfs-server-ip>:/nfs/instances /opt/stack/data/instances
|
||||||
|
|
||||||
|
If you would like to have the NFS directory automatically mounted on reboot,
|
||||||
|
then add the following to `/etc/fstab`::
|
||||||
|
|
||||||
|
<nfs-server-ip>:/nfs/instances /opt/stack/data/instances nfs auto 0 0
|
||||||
|
|
||||||
|
Edit `/etc/libvirt/libvirtd.conf` to make sure the following values are set::
|
||||||
|
|
||||||
|
listen_tls = 0
|
||||||
|
listen_tcp = 1
|
||||||
|
auth_tcp = "none"
|
||||||
|
|
||||||
|
Edit `/etc/default/libvirt-bin`::
|
||||||
|
|
||||||
|
libvirtd_opts="-d -l"
|
||||||
|
|
||||||
|
Restart the libvirt service::
|
||||||
|
|
||||||
|
sudo service libvirt-bin restart
|
||||||
|
|
||||||
Setting up SSH keys between compute nodes to enable live migration
|
Setting up SSH keys between compute nodes to enable live migration
|
||||||
------------------------------------------------------------------
|
------------------------------------------------------------------
|
||||||
@@ -203,91 +231,22 @@ must exist in every other compute node's stack user's authorized_keys file and
|
|||||||
every compute node's public ECDSA key needs to be in every other compute
|
every compute node's public ECDSA key needs to be in every other compute
|
||||||
node's root user's known_hosts file.
|
node's root user's known_hosts file.
|
||||||
|
|
||||||
Configuring NFS Server (ADVANCED)
|
Disable serial console
|
||||||
---------------------------------
|
----------------------
|
||||||
|
|
||||||
If you would like to use live migration for shared storage, then the controller
|
Serial console needs to be disabled for live migration to work.
|
||||||
can serve as the NFS server if needed
|
|
||||||
|
|
||||||
.. code-block:: bash
|
On both the controller and compute node, in /etc/nova/nova.conf
|
||||||
|
|
||||||
sudo apt-get install nfs-kernel-server
|
[serial_console]
|
||||||
sudo mkdir -p /nfs/instances
|
enabled = False
|
||||||
sudo chown stack:stack /nfs/instances
|
|
||||||
|
|
||||||
Add an entry to ``/etc/exports`` with the appropriate gateway and netmask
|
Alternatively, in devstack's local.conf:
|
||||||
information
|
|
||||||
|
|
||||||
|
[[post-config|$NOVA_CONF]]
|
||||||
|
[serial_console]
|
||||||
|
#enabled=false
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
/nfs/instances <gateway>/<netmask>(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)
|
|
||||||
|
|
||||||
Export the NFS directories
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo exportfs -ra
|
|
||||||
|
|
||||||
Make sure the NFS server is running
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo service nfs-kernel-server status
|
|
||||||
|
|
||||||
If the server is not running, then start it
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo service nfs-kernel-server start
|
|
||||||
|
|
||||||
Configuring NFS on Compute Node (ADVANCED)
|
|
||||||
------------------------------------------
|
|
||||||
|
|
||||||
Each compute node needs to use the NFS server to hold the instance data
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo apt-get install rpcbind nfs-common
|
|
||||||
mkdir -p /opt/stack/data/instances
|
|
||||||
sudo mount <nfs-server-ip>:/nfs/instances /opt/stack/data/instances
|
|
||||||
|
|
||||||
If you would like to have the NFS directory automatically mounted on reboot,
|
|
||||||
then add the following to ``/etc/fstab``
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
<nfs-server-ip>:/nfs/instances /opt/stack/data/instances nfs auto 0 0
|
|
||||||
|
|
||||||
Configuring libvirt to listen on tcp (ADVANCED)
|
|
||||||
-----------------------------------------------
|
|
||||||
|
|
||||||
.. NOTE::
|
|
||||||
|
|
||||||
By default nova will use ssh as a transport for live migration
|
|
||||||
if you have a low bandwidth connection you can use tcp instead
|
|
||||||
however this is generally not recommended.
|
|
||||||
|
|
||||||
|
|
||||||
Edit ``/etc/libvirt/libvirtd.conf`` to make sure the following values are set
|
|
||||||
|
|
||||||
.. code-block:: ini
|
|
||||||
|
|
||||||
listen_tls = 0
|
|
||||||
listen_tcp = 1
|
|
||||||
auth_tcp = "none"
|
|
||||||
|
|
||||||
Edit ``/etc/default/libvirt-bin``
|
|
||||||
|
|
||||||
.. code-block:: ini
|
|
||||||
|
|
||||||
libvirtd_opts="-d -l"
|
|
||||||
|
|
||||||
Restart the libvirt service
|
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
sudo service libvirt-bin restart
|
|
||||||
|
|
||||||
VNC server configuration
|
VNC server configuration
|
||||||
------------------------
|
------------------------
|
||||||
@@ -295,18 +254,13 @@ VNC server configuration
|
|||||||
The VNC server listening parameter needs to be set to any address so
|
The VNC server listening parameter needs to be set to any address so
|
||||||
that the server can accept connections from all of the compute nodes.
|
that the server can accept connections from all of the compute nodes.
|
||||||
|
|
||||||
On both the controller and compute node, in ``/etc/nova/nova.conf``
|
On both the controller and compute node, in /etc/nova/nova.conf
|
||||||
|
|
||||||
.. code-block:: ini
|
vncserver_listen = 0.0.0.0
|
||||||
|
|
||||||
[vnc]
|
Alternatively, in devstack's local.conf:
|
||||||
server_listen = "0.0.0.0"
|
|
||||||
|
|
||||||
Alternatively, in devstack's ``local.conf``:
|
VNCSERVER_LISTEN=0.0.0.0
|
||||||
|
|
||||||
.. code-block:: bash
|
|
||||||
|
|
||||||
VNCSERVER_LISTEN="0.0.0.0"
|
|
||||||
|
|
||||||
|
|
||||||
Environment final checkup
|
Environment final checkup
|
||||||
|
|||||||
@@ -43,7 +43,7 @@ different version of the above, please document your configuration here!
|
|||||||
Getting the latest code
|
Getting the latest code
|
||||||
=======================
|
=======================
|
||||||
|
|
||||||
Make a clone of the code from our ``Git repository``:
|
Make a clone of the code from our `Git repository`:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
@@ -72,9 +72,9 @@ These dependencies can be installed from PyPi_ using the Python tool pip_.
|
|||||||
.. _PyPi: https://pypi.org/
|
.. _PyPi: https://pypi.org/
|
||||||
.. _pip: https://pypi.org/project/pip
|
.. _pip: https://pypi.org/project/pip
|
||||||
|
|
||||||
However, your system *may* need additional dependencies that ``pip`` (and by
|
However, your system *may* need additional dependencies that `pip` (and by
|
||||||
extension, PyPi) cannot satisfy. These dependencies should be installed
|
extension, PyPi) cannot satisfy. These dependencies should be installed
|
||||||
prior to using ``pip``, and the installation method may vary depending on
|
prior to using `pip`, and the installation method may vary depending on
|
||||||
your platform.
|
your platform.
|
||||||
|
|
||||||
* Ubuntu 16.04::
|
* Ubuntu 16.04::
|
||||||
@@ -141,7 +141,7 @@ forget to activate it:
|
|||||||
|
|
||||||
$ workon watcher
|
$ workon watcher
|
||||||
|
|
||||||
You should then be able to ``import watcher`` using Python without issue:
|
You should then be able to `import watcher` using Python without issue:
|
||||||
|
|
||||||
.. code-block:: bash
|
.. code-block:: bash
|
||||||
|
|
||||||
|
|||||||
@@ -90,15 +90,15 @@ parameter will need to specify the type of http protocol and the use of
|
|||||||
plain text http is strongly discouraged due to the transmission of the access
|
plain text http is strongly discouraged due to the transmission of the access
|
||||||
token. Additionally the path to the proxy interface needs to be supplied as
|
token. Additionally the path to the proxy interface needs to be supplied as
|
||||||
well in case Grafana is placed in a sub directory of the web server. An example
|
well in case Grafana is placed in a sub directory of the web server. An example
|
||||||
would be: ``https://mygrafana.org/api/datasource/proxy/`` where
|
would be: `https://mygrafana.org/api/datasource/proxy/` where
|
||||||
``/api/datasource/proxy`` is the default path without any subdirectories.
|
`/api/datasource/proxy` is the default path without any subdirectories.
|
||||||
Likewise, this parameter can not be placed in the yaml.
|
Likewise, this parameter can not be placed in the yaml.
|
||||||
|
|
||||||
To prevent many errors from occurring and potentially filling the log files it
|
To prevent many errors from occurring and potentially filling the log files it
|
||||||
is advised to specify the desired datasource in the configuration as it would
|
is advised to specify the desired datasource in the configuration as it would
|
||||||
prevent the datasource manager from having to iterate and try possible
|
prevent the datasource manager from having to iterate and try possible
|
||||||
datasources with the launch of each audit. To do this specify
|
datasources with the launch of each audit. To do this specify `datasources` in
|
||||||
``datasources`` in the ``[watcher_datasources]`` group.
|
the `[watcher_datasources]` group.
|
||||||
|
|
||||||
The current configuration that is required to be placed in the traditional
|
The current configuration that is required to be placed in the traditional
|
||||||
configuration file would look like the following:
|
configuration file would look like the following:
|
||||||
@@ -120,7 +120,7 @@ traditional configuration file or in the yaml, however, it is not advised to
|
|||||||
mix and match but in the case it does occur the yaml would override the
|
mix and match but in the case it does occur the yaml would override the
|
||||||
settings from the traditional configuration file. All five of these parameters
|
settings from the traditional configuration file. All five of these parameters
|
||||||
are dictionaries mapping specific metrics to a configuration parameter. For
|
are dictionaries mapping specific metrics to a configuration parameter. For
|
||||||
instance the ``project_id_map`` will specify the specific project id in Grafana
|
instance the `project_id_map` will specify the specific project id in Grafana
|
||||||
to be used. The parameters are named as follows:
|
to be used. The parameters are named as follows:
|
||||||
|
|
||||||
* project_id_map
|
* project_id_map
|
||||||
@@ -149,10 +149,10 @@ project_id
|
|||||||
|
|
||||||
The project id's can only be determined by someone with the admin role in
|
The project id's can only be determined by someone with the admin role in
|
||||||
Grafana as that role is required to open the list of projects. The list of
|
Grafana as that role is required to open the list of projects. The list of
|
||||||
projects can be found on ``/datasources`` in the web interface but
|
projects can be found on `/datasources` in the web interface but
|
||||||
unfortunately it does not immediately display the project id. To display
|
unfortunately it does not immediately display the project id. To display
|
||||||
the id one can best hover the mouse over the projects and the url will show the
|
the id one can best hover the mouse over the projects and the url will show the
|
||||||
project id's for example ``/datasources/edit/7563``. Alternatively the entire
|
project id's for example `/datasources/edit/7563`. Alternatively the entire
|
||||||
list of projects can be retrieved using the `REST api`_. To easily make
|
list of projects can be retrieved using the `REST api`_. To easily make
|
||||||
requests to the REST api a tool such as Postman can be used.
|
requests to the REST api a tool such as Postman can be used.
|
||||||
|
|
||||||
@@ -239,24 +239,18 @@ conversion from bytes to megabytes.
|
|||||||
|
|
||||||
SELECT value/1000000 FROM memory...
|
SELECT value/1000000 FROM memory...
|
||||||
|
|
||||||
Queries will be formatted using the .format string method within Python.
|
Queries will be formatted using the .format string method within Python. This
|
||||||
This format will currently have five attributes exposed to it labeled
|
format will currently have five attributes exposed to it labeled `{0}` to
|
||||||
``{0}`` through ``{4}``.
|
`{4}`. Every occurrence of these characters within the string will be replaced
|
||||||
Every occurrence of these characters within the string will be replaced
|
|
||||||
with the specific attribute.
|
with the specific attribute.
|
||||||
|
|
||||||
{0}
|
- {0} is the aggregate typically `mean`, `min`, `max` but `count` is also
|
||||||
is the aggregate typically ``mean``, ``min``, ``max`` but ``count``
|
supported.
|
||||||
is also supported.
|
- {1} is the attribute as specified in the attribute parameter.
|
||||||
{1}
|
- {2} is the period of time to aggregate data over in seconds.
|
||||||
is the attribute as specified in the attribute parameter.
|
- {3} is the granularity or the interval between data points in seconds.
|
||||||
{2}
|
- {4} is translator specific and in the case of InfluxDB it will be used for
|
||||||
is the period of time to aggregate data over in seconds.
|
retention_periods.
|
||||||
{3}
|
|
||||||
is the granularity or the interval between data points in seconds.
|
|
||||||
{4}
|
|
||||||
is translator specific and in the case of InfluxDB it will be used for
|
|
||||||
retention_periods.
|
|
||||||
|
|
||||||
**InfluxDB**
|
**InfluxDB**
|
||||||
|
|
||||||
|
|||||||
@@ -1,140 +0,0 @@
|
|||||||
=====================
|
|
||||||
Prometheus datasource
|
|
||||||
=====================
|
|
||||||
|
|
||||||
Synopsis
|
|
||||||
--------
|
|
||||||
The Prometheus datasource allows Watcher to use a Prometheus server as the
|
|
||||||
source for collected metrics used by the Watcher decision engine. At minimum
|
|
||||||
deployers must configure the ``host`` and ``port`` at which the Prometheus
|
|
||||||
server is listening.
|
|
||||||
|
|
||||||
Requirements
|
|
||||||
-------------
|
|
||||||
It is required that Prometheus metrics contain a label to identify the hostname
|
|
||||||
of the exporter from which the metric was collected. This is used to match
|
|
||||||
against the Watcher cluster model ``ComputeNode.hostname``. The default for
|
|
||||||
this label is ``fqdn`` and in the prometheus scrape configs would look like:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
scrape_configs:
|
|
||||||
- job_name: node
|
|
||||||
static_configs:
|
|
||||||
- targets: ['10.1.2.3:9100']
|
|
||||||
labels:
|
|
||||||
fqdn: "testbox.controlplane.domain"
|
|
||||||
|
|
||||||
This default can be overridden when a deployer uses a different label to
|
|
||||||
identify the exporter host (for example ``hostname`` or ``host``, or any other
|
|
||||||
label, as long as it identifies the host).
|
|
||||||
|
|
||||||
Internally this label is used in creating a ``fqdn_instance_map``, mapping
|
|
||||||
the fqdn with the Prometheus instance label associated with each exporter.
|
|
||||||
The keys of the resulting fqdn_instance_map are expected to match the
|
|
||||||
``ComputeNode.hostname`` used in the Watcher decision engine cluster model.
|
|
||||||
An example ``fqdn_instance_map`` is the following:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
{
|
|
||||||
'ena.controlplane.domain': '10.1.2.1:9100',
|
|
||||||
'dio.controlplane.domain': '10.1.2.2:9100',
|
|
||||||
'tria.controlplane.domain': '10.1.2.3:9100'
|
|
||||||
}
|
|
||||||
|
|
||||||
For instance metrics, it is required that Prometheus contains a label
|
|
||||||
with the uuid of the OpenStack instance in each relevant metric. By default,
|
|
||||||
the datasource will look for the label ``resource``. The
|
|
||||||
``instance_uuid_label`` config option in watcher.conf allows deployers to
|
|
||||||
override this default to any other label name that stores the ``uuid``.
|
|
||||||
|
|
||||||
Limitations
|
|
||||||
-----------
|
|
||||||
The current implementation doesn't support the ``statistic_series`` function of
|
|
||||||
the Watcher ``class DataSourceBase``. It is expected that the
|
|
||||||
``statistic_aggregation`` function (which is implemented) is sufficient in
|
|
||||||
providing the **current** state of the managed resources in the cluster.
|
|
||||||
The ``statistic_aggregation`` function defaults to querying back 300 seconds,
|
|
||||||
starting from the present time (the time period is a function parameter and
|
|
||||||
can be set to a value as required). Implementing the ``statistic_series`` can
|
|
||||||
always be re-visited if the requisite interest and work cycles are volunteered
|
|
||||||
by the interested parties.
|
|
||||||
|
|
||||||
One further note about a limitation in the implemented
|
|
||||||
``statistic_aggregation`` function. This function is defined with a
|
|
||||||
``granularity`` parameter, to be used when querying whichever of the Watcher
|
|
||||||
``DataSourceBase`` metrics providers. In the case of Prometheus, we do not
|
|
||||||
fetch and then process individual metrics across the specified time period.
|
|
||||||
Instead we use the PromQL querying operators and functions, so that the
|
|
||||||
server itself will process the request across the specified parameters and
|
|
||||||
then return the result. So ``granularity`` parameter is redundant and remains
|
|
||||||
unused for the Prometheus implementation of ``statistic_aggregation``. The
|
|
||||||
granularity of the data fetched by Prometheus server is specified in
|
|
||||||
configuration as the server ``scrape_interval`` (current default 15 seconds).
|
|
||||||
|
|
||||||
Configuration
|
|
||||||
-------------
|
|
||||||
A deployer must set the ``datasources`` parameter to include ``prometheus``
|
|
||||||
under the watcher_datasources section of watcher.conf (or add ``prometheus`` in
|
|
||||||
datasources for a specific strategy if preferred eg. under the
|
|
||||||
``[watcher_strategies.workload_stabilization]`` section).
|
|
||||||
|
|
||||||
The watcher.conf configuration file is also used to set the parameter values
|
|
||||||
required by the Watcher Prometheus data source. The configuration can be
|
|
||||||
added under the ``[prometheus_client]`` section and the available options are
|
|
||||||
duplicated below from the code as they are self documenting:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
cfg.StrOpt('host',
|
|
||||||
help="The hostname or IP address for the prometheus server."),
|
|
||||||
cfg.StrOpt('port',
|
|
||||||
help="The port number used by the prometheus server."),
|
|
||||||
cfg.StrOpt('fqdn_label',
|
|
||||||
default="fqdn",
|
|
||||||
help="The label that Prometheus uses to store the fqdn of "
|
|
||||||
"exporters. Defaults to 'fqdn'."),
|
|
||||||
cfg.StrOpt('instance_uuid_label',
|
|
||||||
default="resource",
|
|
||||||
help="The label that Prometheus uses to store the uuid of "
|
|
||||||
"OpenStack instances. Defaults to 'resource'."),
|
|
||||||
cfg.StrOpt('username',
|
|
||||||
help="The basic_auth username to use to authenticate with the "
|
|
||||||
"Prometheus server."),
|
|
||||||
cfg.StrOpt('password',
|
|
||||||
secret=True,
|
|
||||||
help="The basic_auth password to use to authenticate with the "
|
|
||||||
"Prometheus server."),
|
|
||||||
cfg.StrOpt('cafile',
|
|
||||||
help="Path to the CA certificate for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
cfg.StrOpt('certfile',
|
|
||||||
help="Path to the client certificate for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
cfg.StrOpt('keyfile',
|
|
||||||
help="Path to the client key for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
|
|
||||||
The ``host`` and ``port`` are **required** configuration options which have
|
|
||||||
no set default. These specify the hostname (or IP) and port at which
|
|
||||||
the Prometheus server is listening. The ``fqdn_label`` allows deployers to
|
|
||||||
override the required metric label used to match Prometheus node exporters
|
|
||||||
against the Watcher ComputeNodes in the Watcher decision engine cluster data
|
|
||||||
model. The default is ``fqdn`` and deployers can specify any other value
|
|
||||||
(e.g. if they have an equivalent but different label such as ``host``).
|
|
||||||
|
|
||||||
So a sample watcher.conf configured to use the Prometheus server at
|
|
||||||
``10.2.3.4:9090`` would look like the following:
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
[watcher_datasources]
|
|
||||||
|
|
||||||
datasources = prometheus
|
|
||||||
|
|
||||||
[prometheus_client]
|
|
||||||
|
|
||||||
host = 10.2.3.4
|
|
||||||
port = 9090
|
|
||||||
fqdn_label = fqdn
|
|
||||||
@@ -9,7 +9,7 @@
|
|||||||
...
|
...
|
||||||
connection = mysql+pymysql://watcher:WATCHER_DBPASS@controller/watcher?charset=utf8
|
connection = mysql+pymysql://watcher:WATCHER_DBPASS@controller/watcher?charset=utf8
|
||||||
|
|
||||||
* In the ``[DEFAULT]`` section, configure the transport url for RabbitMQ message broker.
|
* In the `[DEFAULT]` section, configure the transport url for RabbitMQ message broker.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
|
|
||||||
@@ -20,7 +20,7 @@
|
|||||||
|
|
||||||
Replace the RABBIT_PASS with the password you chose for OpenStack user in RabbitMQ.
|
Replace the RABBIT_PASS with the password you chose for OpenStack user in RabbitMQ.
|
||||||
|
|
||||||
* In the ``[keystone_authtoken]`` section, configure Identity service access.
|
* In the `[keystone_authtoken]` section, configure Identity service access.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
|
|
||||||
@@ -39,7 +39,7 @@
|
|||||||
Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service.
|
Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service.
|
||||||
|
|
||||||
* Watcher interacts with other OpenStack projects via project clients, in order to instantiate these
|
* Watcher interacts with other OpenStack projects via project clients, in order to instantiate these
|
||||||
clients, Watcher requests new session from Identity service. In the ``[watcher_clients_auth]`` section,
|
clients, Watcher requests new session from Identity service. In the `[watcher_clients_auth]` section,
|
||||||
configure the identity service access to interact with other OpenStack project clients.
|
configure the identity service access to interact with other OpenStack project clients.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
@@ -56,7 +56,7 @@
|
|||||||
|
|
||||||
Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service.
|
Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service.
|
||||||
|
|
||||||
* In the ``[api]`` section, configure host option.
|
* In the `[api]` section, configure host option.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
|
|
||||||
@@ -66,7 +66,7 @@
|
|||||||
|
|
||||||
Replace controller with the IP address of the management network interface on your controller node, typically 10.0.0.11 for the first node in the example architecture.
|
Replace controller with the IP address of the management network interface on your controller node, typically 10.0.0.11 for the first node in the example architecture.
|
||||||
|
|
||||||
* In the ``[oslo_messaging_notifications]`` section, configure the messaging driver.
|
* In the `[oslo_messaging_notifications]` section, configure the messaging driver.
|
||||||
|
|
||||||
.. code-block:: ini
|
.. code-block:: ini
|
||||||
|
|
||||||
|
|||||||
@@ -48,7 +48,7 @@
|
|||||||
logging configuration to any other existing logging
|
logging configuration to any other existing logging
|
||||||
options. Please see the Python logging module documentation
|
options. Please see the Python logging module documentation
|
||||||
for details on logging configuration files. The log-config
|
for details on logging configuration files. The log-config
|
||||||
name for this option is deprecated.
|
name for this option is deprecated.
|
||||||
|
|
||||||
**--log-format FORMAT**
|
**--log-format FORMAT**
|
||||||
A logging.Formatter log message format string which may use any
|
A logging.Formatter log message format string which may use any
|
||||||
|
|||||||
@@ -132,8 +132,8 @@ audit) that you want to use.
|
|||||||
$ openstack optimize audit create -a <your_audit_template>
|
$ openstack optimize audit create -a <your_audit_template>
|
||||||
|
|
||||||
If your_audit_template was created by --strategy <your_strategy>, and it
|
If your_audit_template was created by --strategy <your_strategy>, and it
|
||||||
defines some parameters (command ``watcher strategy show`` to check parameters
|
defines some parameters (command `watcher strategy show` to check parameters
|
||||||
format), you can append ``-p`` to input required parameters:
|
format), you can append `-p` to input required parameters:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
|
|||||||
@@ -1,9 +0,0 @@
|
|||||||
---
|
|
||||||
- hosts: all
|
|
||||||
tasks:
|
|
||||||
- name: Generate prometheus.yml config file
|
|
||||||
delegate_to: controller
|
|
||||||
template:
|
|
||||||
src: "templates/prometheus.yml.j2"
|
|
||||||
dest: "/home/zuul/prometheus.yml"
|
|
||||||
mode: "0644"
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
global:
|
|
||||||
scrape_interval: 10s
|
|
||||||
scrape_configs:
|
|
||||||
- job_name: "node"
|
|
||||||
static_configs:
|
|
||||||
- targets: ["localhost:3000"]
|
|
||||||
{% if 'compute' in groups %}
|
|
||||||
{% for host in groups['compute'] %}
|
|
||||||
- targets: ["{{ hostvars[host]['ansible_fqdn'] }}:9100"]
|
|
||||||
labels:
|
|
||||||
fqdn: "{{ hostvars[host]['ansible_fqdn'] }}"
|
|
||||||
{% endfor %}
|
|
||||||
{% endif %}
|
|
||||||
@@ -1,8 +1,7 @@
|
|||||||
Rally job
|
Rally job
|
||||||
=========
|
=========
|
||||||
|
|
||||||
We provide, with Watcher, a Rally plugin you can use to benchmark
|
We provide, with Watcher, a Rally plugin you can use to benchmark the optimization service.
|
||||||
the optimization service.
|
|
||||||
|
|
||||||
To launch this task with configured Rally you just need to run:
|
To launch this task with configured Rally you just need to run:
|
||||||
|
|
||||||
|
|||||||
@@ -1,33 +0,0 @@
|
|||||||
---
|
|
||||||
prelude: |
|
|
||||||
The ``Openstack 2025.1`` (``Watcher 14.0.0``) includes several new features,
|
|
||||||
deprecations, and removals. After a period of inactivity, the Watcher
|
|
||||||
project moved to the Distributed leadership model in ``2025.1`` with
|
|
||||||
several new contributors working to modernize the code base.
|
|
||||||
Activity this cycle was mainly focused on paying down technical debt
|
|
||||||
related to supporting newer testing runtimes. With this release,
|
|
||||||
``ubuntu 24.04`` is now officially tested and supported.
|
|
||||||
|
|
||||||
``Ubuntu 24.04`` brings a new default Python runtime ``3.12`` and with it
|
|
||||||
improvements to eventlet and SQLAlchemy 2.0 compatibility where required.
|
|
||||||
``2025.1`` is the last release to officially support and test with ``Ubuntu 22.04``.
|
|
||||||
|
|
||||||
``2025.1`` is the second official skip-level upgrade release supporting
|
|
||||||
upgrades from either ``2024.1`` or ``2024.2``
|
|
||||||
|
|
||||||
Another area of focus in this cycle was the data sources supported by Watcher.
|
|
||||||
The long obsolete `Ceilometer` API data source has been removed, and the untested
|
|
||||||
`Monasca` data source has been deprecated and a new `Prometheus` data source
|
|
||||||
has been added.
|
|
||||||
https://specs.openstack.org/openstack/watcher-specs/specs/2025.1/approved/prometheus-datasource.html
|
|
||||||
fixes:
|
|
||||||
- https://bugs.launchpad.net/watcher/+bug/2086710 watcher compatibility between
|
|
||||||
eventlet, apscheduler, and python 3.12
|
|
||||||
- https://bugs.launchpad.net/watcher/+bug/2067815 refactoring of the SQLAlchemy
|
|
||||||
database layer to improve compatibility with eventlet on newer Pythons
|
|
||||||
- A number of linting issues were addressed with the introduction
|
|
||||||
of pre-commit. The issues include but are not limited to, spelling and grammar
|
|
||||||
fixes across all documentation and code, numerous sphinx documentation build warnings
|
|
||||||
, and incorrect file permission such as files having the execute bit set when not required.
|
|
||||||
While none of these changes should affect the runtime behavior of Watcher, they
|
|
||||||
generally improve the maintainability and quality of the codebase.
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
features:
|
|
||||||
- |
|
|
||||||
Support for instance metrics has been added to the prometheus data source.
|
|
||||||
The included metrics are `instance_cpu_usage`, `instance_ram_usage`,
|
|
||||||
`instance_ram_allocated` and `instance_root_disk_size`.
|
|
||||||
@@ -4,4 +4,4 @@ features:
|
|||||||
will standardize interactions with scoring engines
|
will standardize interactions with scoring engines
|
||||||
through the common API. It is possible to use the
|
through the common API. It is possible to use the
|
||||||
scoring engine by different Strategies, which
|
scoring engine by different Strategies, which
|
||||||
improve the code and data model reuse.
|
improve the code and data model re-use.
|
||||||
|
|||||||
@@ -5,5 +5,5 @@ features:
|
|||||||
failure. The amount of failures allowed before giving up and the time before
|
failure. The amount of failures allowed before giving up and the time before
|
||||||
reattempting are configurable. The `api_call_retries` and
|
reattempting are configurable. The `api_call_retries` and
|
||||||
`api_query_timeout` parameters in the `[collector]` group can be used to
|
`api_query_timeout` parameters in the `[collector]` group can be used to
|
||||||
adjust these parameters. 10 retries with a 1 second time in between
|
adjust these paremeters. 10 retries with a 1 second time in between
|
||||||
reattempts is the default.
|
reattempts is the default.
|
||||||
|
|||||||
@@ -3,6 +3,6 @@ features:
|
|||||||
Watcher starts to support API microversions since Stein cycle. From now
|
Watcher starts to support API microversions since Stein cycle. From now
|
||||||
onwards all API changes should be made with saving backward compatibility.
|
onwards all API changes should be made with saving backward compatibility.
|
||||||
To specify API version operator should use OpenStack-API-Version
|
To specify API version operator should use OpenStack-API-Version
|
||||||
HTTP header. If operator wants to know the minimum and maximum supported
|
HTTP header. If operator wants to know the mininum and maximum supported
|
||||||
versions by API, he/she can access /v1 resource and Watcher API will
|
versions by API, he/she can access /v1 resource and Watcher API will
|
||||||
return appropriate headers in response.
|
return appropriate headers in response.
|
||||||
|
|||||||
47
releasenotes/notes/bug-2112187-763bae283e0b736d.yaml
Normal file
47
releasenotes/notes/bug-2112187-763bae283e0b736d.yaml
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
---
|
||||||
|
security:
|
||||||
|
- |
|
||||||
|
Watchers no longer forges requests on behalf of a tenant when
|
||||||
|
swapping volumes. Prior to this release watcher had 2 implementations
|
||||||
|
of moving a volume, it could use cinders volume migrate api or its own
|
||||||
|
internal implementation that directly calls nova volume attachment update
|
||||||
|
api. The former is safe and the recommend way to move volumes between
|
||||||
|
cinder storage backend the internal implementation was insecure, fragile
|
||||||
|
due to a lack of error handling and capable of deleting user data.
|
||||||
|
|
||||||
|
Insecure: the internal volume migration operation created a new keystone
|
||||||
|
user with a weak name and password and added it to the tenants project
|
||||||
|
with the admin role. It then used that user to forge request on behalf
|
||||||
|
of the tenant with admin right to swap the volume. if the applier was
|
||||||
|
restarted during the execution of this operation it would never be cleaned
|
||||||
|
up.
|
||||||
|
|
||||||
|
Fragile: the error handling was minimal, the swap volume api is async
|
||||||
|
so watcher has to poll for completion, there was no support to resume
|
||||||
|
that if interrupted of the time out was exceeded.
|
||||||
|
|
||||||
|
Data-loss: while the internal polling logic returned success or failure
|
||||||
|
watcher did not check the result, once the function returned it
|
||||||
|
unconditionally deleted the source volume. For larger volumes this
|
||||||
|
could result in irretrievable data loss.
|
||||||
|
|
||||||
|
Finally if a volume was swapped using the internal workflow it put
|
||||||
|
the nova instance in an out of sync state. If the VM was live migrated
|
||||||
|
after the swap volume completed successfully prior to a hard reboot
|
||||||
|
then the migration would fail or succeed and break tenant isolation.
|
||||||
|
|
||||||
|
see: https://bugs.launchpad.net/nova/+bug/2112187 for details.
|
||||||
|
fixes:
|
||||||
|
- |
|
||||||
|
All code related to creating keystone user and granting roles has been
|
||||||
|
removed. The internal swap volume implementation has been removed and
|
||||||
|
replaced by cinders volume migrate api. Note as part of this change
|
||||||
|
Watcher will no longer attempt volume migrations or retypes if the
|
||||||
|
instance is in the `Verify Resize` task state. This resolves several
|
||||||
|
issues related to volume migration in the zone migration and
|
||||||
|
Storage capacity balance strategies. While efforts have been made
|
||||||
|
to maintain backward compatibility these changes are required to
|
||||||
|
address a security weakness in watcher's prior approach.
|
||||||
|
|
||||||
|
see: https://bugs.launchpad.net/nova/+bug/2112187 for more context.
|
||||||
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
deprecations:
|
|
||||||
- |
|
|
||||||
Monasca Data Source is deprecated and will be removed in the future, due
|
|
||||||
to inactivity of Monasca project.
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
upgrade:
|
|
||||||
- |
|
|
||||||
Python 3.8 support has been dropped. Last release of watcher
|
|
||||||
supporting python 3.8 is 13.0.0.
|
|
||||||
The minimum version of Python now supported is Python 3.9.
|
|
||||||
@@ -7,7 +7,7 @@ prelude: >
|
|||||||
features:
|
features:
|
||||||
- |
|
- |
|
||||||
A new threadpool for the decision engine that contributors can use to
|
A new threadpool for the decision engine that contributors can use to
|
||||||
improve the performance of many operations, primarily I/O bound ones.
|
improve the performance of many operations, primarily I/O bound onces.
|
||||||
The amount of workers used by the decision engine threadpool can be
|
The amount of workers used by the decision engine threadpool can be
|
||||||
configured to scale according to the available infrastructure using
|
configured to scale according to the available infrastructure using
|
||||||
the `watcher_decision_engine.max_general_workers` config option.
|
the `watcher_decision_engine.max_general_workers` config option.
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ features:
|
|||||||
* disk_gb_reserved: The amount of disk a node has reserved for its own use.
|
* disk_gb_reserved: The amount of disk a node has reserved for its own use.
|
||||||
* disk_ratio: Disk allocation ratio.
|
* disk_ratio: Disk allocation ratio.
|
||||||
|
|
||||||
We also add some new properties:
|
We also add some new propeties:
|
||||||
|
|
||||||
* vcpu_capacity: The amount of vcpu, take allocation ratio into account,
|
* vcpu_capacity: The amount of vcpu, take allocation ratio into account,
|
||||||
but do not include reserved.
|
but do not include reserved.
|
||||||
|
|||||||
@@ -1,8 +0,0 @@
|
|||||||
---
|
|
||||||
features:
|
|
||||||
- |
|
|
||||||
A new Prometheus data source is added. This allows the watcher decision
|
|
||||||
engine to collect metrics from Prometheus server. For more information
|
|
||||||
about the Prometheus data source, including limitations and configuration
|
|
||||||
options see
|
|
||||||
https://docs.openstack.org/watcher/latest/datasources/prometheus.html
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
upgrade:
|
|
||||||
- |
|
|
||||||
Ceilometer datasource has been completely removed. The datasource requires
|
|
||||||
ceilometer API which was already removed from Ceilometer. Use the other
|
|
||||||
datasources such as Gnocchi.
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
---
|
---
|
||||||
features:
|
features:
|
||||||
- Check the creation time of the action plan,
|
- Check the creation time of the action plan,
|
||||||
and set its state to SUPERSEDED if it has expired.
|
and set its state to SUPERSEDED if it has expired.
|
||||||
|
|||||||
@@ -4,5 +4,5 @@ features:
|
|||||||
Whenever a Watcher object is created, updated or deleted, a versioned
|
Whenever a Watcher object is created, updated or deleted, a versioned
|
||||||
notification will, if it's relevant, be automatically sent to notify in order
|
notification will, if it's relevant, be automatically sent to notify in order
|
||||||
to allow an event-driven style of architecture within Watcher. Moreover, it
|
to allow an event-driven style of architecture within Watcher. Moreover, it
|
||||||
will also give other services and/or 3rd party software (e.g. monitoring
|
will also give other services and/or 3rd party softwares (e.g. monitoring
|
||||||
solutions or rules engines) the ability to react to such events.
|
solutions or rules engines) the ability to react to such events.
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
---
|
---
|
||||||
features:
|
features:
|
||||||
- Add a service supervisor to watch Watcher daemons.
|
- Add a service supervisor to watch Watcher deamons.
|
||||||
|
|||||||
@@ -3,4 +3,4 @@
|
|||||||
===========================
|
===========================
|
||||||
|
|
||||||
.. release-notes::
|
.. release-notes::
|
||||||
:branch: unmaintained/2023.1
|
:branch: stable/2023.1
|
||||||
|
|||||||
@@ -1,6 +0,0 @@
|
|||||||
===========================
|
|
||||||
2024.1 Series Release Notes
|
|
||||||
===========================
|
|
||||||
|
|
||||||
.. release-notes::
|
|
||||||
:branch: stable/2024.1
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
===========================
|
|
||||||
2024.2 Series Release Notes
|
|
||||||
===========================
|
|
||||||
|
|
||||||
.. release-notes::
|
|
||||||
:branch: stable/2024.2
|
|
||||||
@@ -28,12 +28,12 @@ import sys
|
|||||||
# If extensions (or modules to document with autodoc) are in another directory,
|
# If extensions (or modules to document with autodoc) are in another directory,
|
||||||
# add these directories to sys.path here. If the directory is relative to the
|
# add these directories to sys.path here. If the directory is relative to the
|
||||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||||
# sys.path.insert(0, os.path.abspath('.'))
|
#sys.path.insert(0, os.path.abspath('.'))
|
||||||
|
|
||||||
# -- General configuration ----------------------------------------------------
|
# -- General configuration ----------------------------------------------------
|
||||||
|
|
||||||
# If your documentation needs a minimal Sphinx version, state it here.
|
# If your documentation needs a minimal Sphinx version, state it here.
|
||||||
# needs_sphinx = '1.0'
|
#needs_sphinx = '1.0'
|
||||||
|
|
||||||
# Add any Sphinx extension module names here, as strings. They can be
|
# Add any Sphinx extension module names here, as strings. They can be
|
||||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||||
@@ -47,7 +47,7 @@ templates_path = ['_templates']
|
|||||||
source_suffix = '.rst'
|
source_suffix = '.rst'
|
||||||
|
|
||||||
# The encoding of source files.
|
# The encoding of source files.
|
||||||
# source_encoding = 'utf-8-sig'
|
#source_encoding = 'utf-8-sig'
|
||||||
|
|
||||||
# The master toctree document.
|
# The master toctree document.
|
||||||
master_doc = 'index'
|
master_doc = 'index'
|
||||||
@@ -63,37 +63,37 @@ release = ''
|
|||||||
|
|
||||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||||
# for a list of supported languages.
|
# for a list of supported languages.
|
||||||
# language = None
|
#language = None
|
||||||
|
|
||||||
# There are two options for replacing |today|: either, you set today to some
|
# There are two options for replacing |today|: either, you set today to some
|
||||||
# non-false value, then it is used:
|
# non-false value, then it is used:
|
||||||
# today = ''
|
#today = ''
|
||||||
# Else, today_fmt is used as the format for a strftime call.
|
# Else, today_fmt is used as the format for a strftime call.
|
||||||
# today_fmt = '%B %d, %Y'
|
#today_fmt = '%B %d, %Y'
|
||||||
|
|
||||||
# List of patterns, relative to source directory, that match files and
|
# List of patterns, relative to source directory, that match files and
|
||||||
# directories to ignore when looking for source files.
|
# directories to ignore when looking for source files.
|
||||||
exclude_patterns = ['_build']
|
exclude_patterns = ['_build']
|
||||||
|
|
||||||
# The reST default role (used for this markup: `text`) to use for all documents
|
# The reST default role (used for this markup: `text`) to use for all documents
|
||||||
# default_role = None
|
#default_role = None
|
||||||
|
|
||||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||||
# add_function_parentheses = True
|
#add_function_parentheses = True
|
||||||
|
|
||||||
# If true, the current module name will be prepended to all description
|
# If true, the current module name will be prepended to all description
|
||||||
# unit titles (such as .. function::).
|
# unit titles (such as .. function::).
|
||||||
# add_module_names = True
|
#add_module_names = True
|
||||||
|
|
||||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||||
# output. They are ignored by default.
|
# output. They are ignored by default.
|
||||||
# show_authors = False
|
#show_authors = False
|
||||||
|
|
||||||
# The name of the Pygments (syntax highlighting) style to use.
|
# The name of the Pygments (syntax highlighting) style to use.
|
||||||
pygments_style = 'native'
|
pygments_style = 'native'
|
||||||
|
|
||||||
# A list of ignored prefixes for module index sorting.
|
# A list of ignored prefixes for module index sorting.
|
||||||
# modindex_common_prefix = []
|
#modindex_common_prefix = []
|
||||||
|
|
||||||
# openstackdocstheme options
|
# openstackdocstheme options
|
||||||
openstackdocs_repo_name = 'openstack/watcher'
|
openstackdocs_repo_name = 'openstack/watcher'
|
||||||
@@ -109,26 +109,26 @@ html_theme = 'openstackdocs'
|
|||||||
# Theme options are theme-specific and customize the look and feel of a theme
|
# Theme options are theme-specific and customize the look and feel of a theme
|
||||||
# further. For a list of options available for each theme, see the
|
# further. For a list of options available for each theme, see the
|
||||||
# documentation.
|
# documentation.
|
||||||
# html_theme_options = {}
|
#html_theme_options = {}
|
||||||
|
|
||||||
# Add any paths that contain custom themes here, relative to this directory.
|
# Add any paths that contain custom themes here, relative to this directory.
|
||||||
# html_theme_path = []
|
#html_theme_path = []
|
||||||
|
|
||||||
# The name for this set of Sphinx documents. If None, it defaults to
|
# The name for this set of Sphinx documents. If None, it defaults to
|
||||||
# "<project> v<release> documentation".
|
# "<project> v<release> documentation".
|
||||||
# html_title = None
|
#html_title = None
|
||||||
|
|
||||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||||
# html_short_title = None
|
#html_short_title = None
|
||||||
|
|
||||||
# The name of an image file (relative to this directory) to place at the top
|
# The name of an image file (relative to this directory) to place at the top
|
||||||
# of the sidebar.
|
# of the sidebar.
|
||||||
# html_logo = None
|
#html_logo = None
|
||||||
|
|
||||||
# The name of an image file (within the static path) to use as favicon of the
|
# The name of an image file (within the static path) to use as favicon of the
|
||||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||||
# pixels large.
|
# pixels large.
|
||||||
# html_favicon = None
|
#html_favicon = None
|
||||||
|
|
||||||
# Add any paths that contain custom static files (such as style sheets) here,
|
# Add any paths that contain custom static files (such as style sheets) here,
|
||||||
# relative to this directory. They are copied after the builtin static files,
|
# relative to this directory. They are copied after the builtin static files,
|
||||||
@@ -137,44 +137,44 @@ html_static_path = ['_static']
|
|||||||
|
|
||||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||||
# using the given strftime format.
|
# using the given strftime format.
|
||||||
# html_last_updated_fmt = '%b %d, %Y'
|
#html_last_updated_fmt = '%b %d, %Y'
|
||||||
|
|
||||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||||
# typographically correct entities.
|
# typographically correct entities.
|
||||||
# html_use_smartypants = True
|
#html_use_smartypants = True
|
||||||
|
|
||||||
# Custom sidebar templates, maps document names to template names.
|
# Custom sidebar templates, maps document names to template names.
|
||||||
# html_sidebars = {}
|
#html_sidebars = {}
|
||||||
|
|
||||||
# Additional templates that should be rendered to pages, maps page names to
|
# Additional templates that should be rendered to pages, maps page names to
|
||||||
# template names.
|
# template names.
|
||||||
# html_additional_pages = {}
|
#html_additional_pages = {}
|
||||||
|
|
||||||
# If false, no module index is generated.
|
# If false, no module index is generated.
|
||||||
# html_domain_indices = True
|
#html_domain_indices = True
|
||||||
|
|
||||||
# If false, no index is generated.
|
# If false, no index is generated.
|
||||||
# html_use_index = True
|
#html_use_index = True
|
||||||
|
|
||||||
# If true, the index is split into individual pages for each letter.
|
# If true, the index is split into individual pages for each letter.
|
||||||
# html_split_index = False
|
#html_split_index = False
|
||||||
|
|
||||||
# If true, links to the reST sources are added to the pages.
|
# If true, links to the reST sources are added to the pages.
|
||||||
# html_show_sourcelink = True
|
#html_show_sourcelink = True
|
||||||
|
|
||||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||||
# html_show_sphinx = True
|
#html_show_sphinx = True
|
||||||
|
|
||||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||||
# html_show_copyright = True
|
#html_show_copyright = True
|
||||||
|
|
||||||
# If true, an OpenSearch description file will be output, and all pages will
|
# If true, an OpenSearch description file will be output, and all pages will
|
||||||
# contain a <link> tag referring to it. The value of this option must be the
|
# contain a <link> tag referring to it. The value of this option must be the
|
||||||
# base URL from which the finished HTML is served.
|
# base URL from which the finished HTML is served.
|
||||||
# html_use_opensearch = ''
|
#html_use_opensearch = ''
|
||||||
|
|
||||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||||
# html_file_suffix = None
|
#html_file_suffix = None
|
||||||
|
|
||||||
# Output file base name for HTML help builder.
|
# Output file base name for HTML help builder.
|
||||||
htmlhelp_basename = 'watcherdoc'
|
htmlhelp_basename = 'watcherdoc'
|
||||||
@@ -183,42 +183,42 @@ htmlhelp_basename = 'watcherdoc'
|
|||||||
# -- Options for LaTeX output -------------------------------------------------
|
# -- Options for LaTeX output -------------------------------------------------
|
||||||
|
|
||||||
latex_elements = {
|
latex_elements = {
|
||||||
# The paper size ('letterpaper' or 'a4paper').
|
# The paper size ('letterpaper' or 'a4paper').
|
||||||
# 'papersize': 'letterpaper',
|
#'papersize': 'letterpaper',
|
||||||
|
|
||||||
# The font size ('10pt', '11pt' or '12pt').
|
# The font size ('10pt', '11pt' or '12pt').
|
||||||
# 'pointsize': '10pt',
|
#'pointsize': '10pt',
|
||||||
|
|
||||||
# Additional stuff for the LaTeX preamble.
|
# Additional stuff for the LaTeX preamble.
|
||||||
# 'preamble': '',
|
#'preamble': '',
|
||||||
}
|
}
|
||||||
|
|
||||||
# Grouping the document tree into LaTeX files. List of tuples
|
# Grouping the document tree into LaTeX files. List of tuples
|
||||||
# (source start file, target name, title, author, documentclass [howto/manual])
|
# (source start file, target name, title, author, documentclass [howto/manual])
|
||||||
latex_documents = [
|
latex_documents = [
|
||||||
('index', 'watcher.tex', 'Watcher Documentation',
|
('index', 'watcher.tex', 'Watcher Documentation',
|
||||||
'Watcher developers', 'manual'),
|
'Watcher developers', 'manual'),
|
||||||
]
|
]
|
||||||
|
|
||||||
# The name of an image file (relative to this directory) to place at the top of
|
# The name of an image file (relative to this directory) to place at the top of
|
||||||
# the title page.
|
# the title page.
|
||||||
# latex_logo = None
|
#latex_logo = None
|
||||||
|
|
||||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||||
# not chapters.
|
# not chapters.
|
||||||
# latex_use_parts = False
|
#latex_use_parts = False
|
||||||
|
|
||||||
# If true, show page references after internal links.
|
# If true, show page references after internal links.
|
||||||
# latex_show_pagerefs = False
|
#latex_show_pagerefs = False
|
||||||
|
|
||||||
# If true, show URL addresses after external links.
|
# If true, show URL addresses after external links.
|
||||||
# latex_show_urls = False
|
#latex_show_urls = False
|
||||||
|
|
||||||
# Documents to append as an appendix to all manuals.
|
# Documents to append as an appendix to all manuals.
|
||||||
# latex_appendices = []
|
#latex_appendices = []
|
||||||
|
|
||||||
# If false, no module index is generated.
|
# If false, no module index is generated.
|
||||||
# latex_domain_indices = True
|
#latex_domain_indices = True
|
||||||
|
|
||||||
|
|
||||||
# -- Options for manual page output -------------------------------------------
|
# -- Options for manual page output -------------------------------------------
|
||||||
@@ -231,7 +231,7 @@ man_pages = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
# If true, show URL addresses after external links.
|
# If true, show URL addresses after external links.
|
||||||
# man_show_urls = False
|
#man_show_urls = False
|
||||||
|
|
||||||
|
|
||||||
# -- Options for Texinfo output -----------------------------------------------
|
# -- Options for Texinfo output -----------------------------------------------
|
||||||
@@ -240,19 +240,19 @@ man_pages = [
|
|||||||
# (source start file, target name, title, author,
|
# (source start file, target name, title, author,
|
||||||
# dir menu entry, description, category)
|
# dir menu entry, description, category)
|
||||||
texinfo_documents = [
|
texinfo_documents = [
|
||||||
('index', 'watcher', 'Watcher Documentation',
|
('index', 'watcher', 'Watcher Documentation',
|
||||||
'Watcher developers', 'watcher', 'One line description of project.',
|
'Watcher developers', 'watcher', 'One line description of project.',
|
||||||
'Miscellaneous'),
|
'Miscellaneous'),
|
||||||
]
|
]
|
||||||
|
|
||||||
# Documents to append as an appendix to all manuals.
|
# Documents to append as an appendix to all manuals.
|
||||||
# texinfo_appendices = []
|
#texinfo_appendices = []
|
||||||
|
|
||||||
# If false, no module index is generated.
|
# If false, no module index is generated.
|
||||||
# texinfo_domain_indices = True
|
#texinfo_domain_indices = True
|
||||||
|
|
||||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||||
# texinfo_show_urls = 'footnote'
|
#texinfo_show_urls = 'footnote'
|
||||||
|
|
||||||
# -- Options for Internationalization output ------------------------------
|
# -- Options for Internationalization output ------------------------------
|
||||||
locale_dirs = ['locale/']
|
locale_dirs = ['locale/']
|
||||||
|
|||||||
@@ -21,8 +21,6 @@ Contents:
|
|||||||
:maxdepth: 1
|
:maxdepth: 1
|
||||||
|
|
||||||
unreleased
|
unreleased
|
||||||
2024.2
|
|
||||||
2024.1
|
|
||||||
2023.2
|
2023.2
|
||||||
2023.1
|
2023.1
|
||||||
zed
|
zed
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ msgid ""
|
|||||||
msgstr ""
|
msgstr ""
|
||||||
"Project-Id-Version: python-watcher\n"
|
"Project-Id-Version: python-watcher\n"
|
||||||
"Report-Msgid-Bugs-To: \n"
|
"Report-Msgid-Bugs-To: \n"
|
||||||
"POT-Creation-Date: 2025-01-10 00:32+0000\n"
|
"POT-Creation-Date: 2024-05-31 14:40+0000\n"
|
||||||
"MIME-Version: 1.0\n"
|
"MIME-Version: 1.0\n"
|
||||||
"Content-Type: text/plain; charset=UTF-8\n"
|
"Content-Type: text/plain; charset=UTF-8\n"
|
||||||
"Content-Transfer-Encoding: 8bit\n"
|
"Content-Transfer-Encoding: 8bit\n"
|
||||||
@@ -70,15 +70,9 @@ msgstr "2023.2 Series Release Notes"
|
|||||||
msgid "3.0.0"
|
msgid "3.0.0"
|
||||||
msgstr "3.0.0"
|
msgstr "3.0.0"
|
||||||
|
|
||||||
msgid "3.0.0.0rc1"
|
|
||||||
msgstr "3.0.0.0rc1"
|
|
||||||
|
|
||||||
msgid "4.0.0"
|
msgid "4.0.0"
|
||||||
msgstr "4.0.0"
|
msgstr "4.0.0"
|
||||||
|
|
||||||
msgid "4.0.0.0rc1"
|
|
||||||
msgstr "4.0.0.0rc1"
|
|
||||||
|
|
||||||
msgid "6.0.0"
|
msgid "6.0.0"
|
||||||
msgstr "6.0.0"
|
msgstr "6.0.0"
|
||||||
|
|
||||||
@@ -220,17 +214,6 @@ msgstr ""
|
|||||||
msgid "Added SUSPENDED audit state"
|
msgid "Added SUSPENDED audit state"
|
||||||
msgstr "Added SUSPENDED audit state"
|
msgstr "Added SUSPENDED audit state"
|
||||||
|
|
||||||
msgid ""
|
|
||||||
"Added a generic scoring engine module, which will standardize interactions "
|
|
||||||
"with scoring engines through the common API. It is possible to use the "
|
|
||||||
"scoring engine by different Strategies, which improve the code and data "
|
|
||||||
"model re-use."
|
|
||||||
msgstr ""
|
|
||||||
"Added a generic scoring engine module, which will standardize interactions "
|
|
||||||
"with scoring engines through the common API. It is possible to use the "
|
|
||||||
"scoring engine by different Strategies, which improve the code and data "
|
|
||||||
"model re-use."
|
|
||||||
|
|
||||||
msgid ""
|
msgid ""
|
||||||
"Added a generic scoring engine module, which will standarize interactions "
|
"Added a generic scoring engine module, which will standarize interactions "
|
||||||
"with scoring engines through the common API. It is possible to use the "
|
"with scoring engines through the common API. It is possible to use the "
|
||||||
@@ -350,13 +333,6 @@ msgstr ""
|
|||||||
"Added a way to create periodic audit to be able to continuously optimise the "
|
"Added a way to create periodic audit to be able to continuously optimise the "
|
||||||
"cloud infrastructure."
|
"cloud infrastructure."
|
||||||
|
|
||||||
msgid ""
|
|
||||||
"Added a way to return the of available goals depending on which strategies "
|
|
||||||
"have been deployed on the node where the decision engine is running."
|
|
||||||
msgstr ""
|
|
||||||
"Added a way to return the of available goals depending on which strategies "
|
|
||||||
"have been deployed on the node where the decision engine is running."
|
|
||||||
|
|
||||||
msgid ""
|
msgid ""
|
||||||
"Added a way to return the of available goals depending on which strategies "
|
"Added a way to return the of available goals depending on which strategies "
|
||||||
"have been deployed on the node where the decison engine is running."
|
"have been deployed on the node where the decison engine is running."
|
||||||
@@ -433,15 +409,6 @@ msgstr ""
|
|||||||
"that negatively affects performance of a high priority VM by over utilising "
|
"that negatively affects performance of a high priority VM by over utilising "
|
||||||
"Last Level Cache."
|
"Last Level Cache."
|
||||||
|
|
||||||
msgid ""
|
|
||||||
"Added strategy to identify and migrate a Noisy Neighbor - a low priority VM "
|
|
||||||
"that negatively affects performance of a high priority VM by over utilizing "
|
|
||||||
"Last Level Cache."
|
|
||||||
msgstr ""
|
|
||||||
"Added strategy to identify and migrate a Noisy Neighbour - a low-priority VM "
|
|
||||||
"that negatively affects the performance of a high-priority VM by over "
|
|
||||||
"utilising Last Level Cache."
|
|
||||||
|
|
||||||
msgid ""
|
msgid ""
|
||||||
"Added the functionality to filter out instances which have metadata field "
|
"Added the functionality to filter out instances which have metadata field "
|
||||||
"'optimize' set to False. For now, this is only available for the "
|
"'optimize' set to False. For now, this is only available for the "
|
||||||
@@ -548,13 +515,6 @@ msgstr ""
|
|||||||
msgid "Centralize all configuration options for Watcher."
|
msgid "Centralize all configuration options for Watcher."
|
||||||
msgstr "Centralise all configuration options for Watcher."
|
msgstr "Centralise all configuration options for Watcher."
|
||||||
|
|
||||||
msgid ""
|
|
||||||
"Check the creation time of the action plan, and set its state to SUPERSEDED "
|
|
||||||
"if it has expired."
|
|
||||||
msgstr ""
|
|
||||||
"Check the creation time of the action plan, and set its state to SUPERSEDED "
|
|
||||||
"if it has expired."
|
|
||||||
|
|
||||||
msgid "Contents:"
|
msgid "Contents:"
|
||||||
msgstr "Contents:"
|
msgstr "Contents:"
|
||||||
|
|
||||||
|
|||||||
33
releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
Normal file
33
releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
# Gérald LONLAS <g.lonlas@gmail.com>, 2016. #zanata
|
||||||
|
msgid ""
|
||||||
|
msgstr ""
|
||||||
|
"Project-Id-Version: python-watcher\n"
|
||||||
|
"Report-Msgid-Bugs-To: \n"
|
||||||
|
"POT-Creation-Date: 2019-03-22 02:21+0000\n"
|
||||||
|
"MIME-Version: 1.0\n"
|
||||||
|
"Content-Type: text/plain; charset=UTF-8\n"
|
||||||
|
"Content-Transfer-Encoding: 8bit\n"
|
||||||
|
"PO-Revision-Date: 2016-10-22 06:44+0000\n"
|
||||||
|
"Last-Translator: Gérald LONLAS <g.lonlas@gmail.com>\n"
|
||||||
|
"Language-Team: French\n"
|
||||||
|
"Language: fr\n"
|
||||||
|
"X-Generator: Zanata 4.3.3\n"
|
||||||
|
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
|
||||||
|
|
||||||
|
msgid "0.29.0"
|
||||||
|
msgstr "0.29.0"
|
||||||
|
|
||||||
|
msgid "Contents:"
|
||||||
|
msgstr "Contenu :"
|
||||||
|
|
||||||
|
msgid "Current Series Release Notes"
|
||||||
|
msgstr "Note de la release actuelle"
|
||||||
|
|
||||||
|
msgid "New Features"
|
||||||
|
msgstr "Nouvelles fonctionnalités"
|
||||||
|
|
||||||
|
msgid "Newton Series Release Notes"
|
||||||
|
msgstr "Note de release pour Newton"
|
||||||
|
|
||||||
|
msgid "Welcome to watcher's Release Notes documentation!"
|
||||||
|
msgstr "Bienvenue dans la documentation de la note de Release de Watcher"
|
||||||
@@ -3,4 +3,4 @@ Victoria Series Release Notes
|
|||||||
=============================
|
=============================
|
||||||
|
|
||||||
.. release-notes::
|
.. release-notes::
|
||||||
:branch: unmaintained/victoria
|
:branch: stable/victoria
|
||||||
|
|||||||
@@ -3,4 +3,4 @@ Wallaby Series Release Notes
|
|||||||
============================
|
============================
|
||||||
|
|
||||||
.. release-notes::
|
.. release-notes::
|
||||||
:branch: unmaintained/wallaby
|
:branch: stable/wallaby
|
||||||
|
|||||||
@@ -3,4 +3,4 @@ Xena Series Release Notes
|
|||||||
=========================
|
=========================
|
||||||
|
|
||||||
.. release-notes::
|
.. release-notes::
|
||||||
:branch: unmaintained/xena
|
:branch: stable/xena
|
||||||
|
|||||||
@@ -3,4 +3,4 @@ Yoga Series Release Notes
|
|||||||
=========================
|
=========================
|
||||||
|
|
||||||
.. release-notes::
|
.. release-notes::
|
||||||
:branch: unmaintained/yoga
|
:branch: stable/yoga
|
||||||
|
|||||||
@@ -3,4 +3,4 @@ Zed Series Release Notes
|
|||||||
========================
|
========================
|
||||||
|
|
||||||
.. release-notes::
|
.. release-notes::
|
||||||
:branch: unmaintained/zed
|
:branch: stable/zed
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
# Requirements lower bounds listed here are our best effort to keep them up to
|
# The order of packages is significant, because pip processes them in the order
|
||||||
# date but we do not test them so no guarantee of having them all correct. If
|
# of appearance. Changing the order has an impact on the overall integration
|
||||||
# you find any incorrect lower bounds, let us know or propose a fix.
|
# process, which may cause wedges in the gate later.
|
||||||
|
|
||||||
apscheduler>=3.5.1 # MIT License
|
apscheduler>=3.5.1 # MIT License
|
||||||
eventlet>=0.27.0 # MIT
|
|
||||||
jsonpatch>=1.21 # BSD
|
jsonpatch>=1.21 # BSD
|
||||||
keystoneauth1>=3.4.0 # Apache-2.0
|
keystoneauth1>=3.4.0 # Apache-2.0
|
||||||
jsonschema>=3.2.0 # MIT
|
jsonschema>=3.2.0 # MIT
|
||||||
@@ -18,12 +18,12 @@ oslo.db>=4.44.0 # Apache-2.0
|
|||||||
oslo.i18n>=3.20.0 # Apache-2.0
|
oslo.i18n>=3.20.0 # Apache-2.0
|
||||||
oslo.log>=3.37.0 # Apache-2.0
|
oslo.log>=3.37.0 # Apache-2.0
|
||||||
oslo.messaging>=14.1.0 # Apache-2.0
|
oslo.messaging>=14.1.0 # Apache-2.0
|
||||||
oslo.policy>=4.5.0 # Apache-2.0
|
oslo.policy>=3.6.0 # Apache-2.0
|
||||||
oslo.reports>=1.27.0 # Apache-2.0
|
oslo.reports>=1.27.0 # Apache-2.0
|
||||||
oslo.serialization>=2.25.0 # Apache-2.0
|
oslo.serialization>=2.25.0 # Apache-2.0
|
||||||
oslo.service>=1.30.0 # Apache-2.0
|
oslo.service>=1.30.0 # Apache-2.0
|
||||||
oslo.upgradecheck>=1.3.0 # Apache-2.0
|
oslo.upgradecheck>=1.3.0 # Apache-2.0
|
||||||
oslo.utils>=7.0.0 # Apache-2.0
|
oslo.utils>=3.36.0 # Apache-2.0
|
||||||
oslo.versionedobjects>=1.32.0 # Apache-2.0
|
oslo.versionedobjects>=1.32.0 # Apache-2.0
|
||||||
PasteDeploy>=1.5.2 # MIT
|
PasteDeploy>=1.5.2 # MIT
|
||||||
pbr>=3.1.1 # Apache-2.0
|
pbr>=3.1.1 # Apache-2.0
|
||||||
@@ -36,7 +36,6 @@ python-keystoneclient>=3.15.0 # Apache-2.0
|
|||||||
python-monascaclient>=1.12.0 # Apache-2.0
|
python-monascaclient>=1.12.0 # Apache-2.0
|
||||||
python-neutronclient>=6.7.0 # Apache-2.0
|
python-neutronclient>=6.7.0 # Apache-2.0
|
||||||
python-novaclient>=14.1.0 # Apache-2.0
|
python-novaclient>=14.1.0 # Apache-2.0
|
||||||
python-observabilityclient>=0.3.0 # Apache-2.0
|
|
||||||
python-openstackclient>=3.14.0 # Apache-2.0
|
python-openstackclient>=3.14.0 # Apache-2.0
|
||||||
python-ironicclient>=2.5.0 # Apache-2.0
|
python-ironicclient>=2.5.0 # Apache-2.0
|
||||||
SQLAlchemy>=1.2.5 # MIT
|
SQLAlchemy>=1.2.5 # MIT
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ description_file =
|
|||||||
author = OpenStack
|
author = OpenStack
|
||||||
author_email = openstack-discuss@lists.openstack.org
|
author_email = openstack-discuss@lists.openstack.org
|
||||||
home_page = https://docs.openstack.org/watcher/latest/
|
home_page = https://docs.openstack.org/watcher/latest/
|
||||||
python_requires = >=3.9
|
python_requires = >=3.8
|
||||||
classifier =
|
classifier =
|
||||||
Environment :: OpenStack
|
Environment :: OpenStack
|
||||||
Intended Audience :: Information Technology
|
Intended Audience :: Information Technology
|
||||||
@@ -17,10 +17,10 @@ classifier =
|
|||||||
Programming Language :: Python :: Implementation :: CPython
|
Programming Language :: Python :: Implementation :: CPython
|
||||||
Programming Language :: Python :: 3 :: Only
|
Programming Language :: Python :: 3 :: Only
|
||||||
Programming Language :: Python :: 3
|
Programming Language :: Python :: 3
|
||||||
|
Programming Language :: Python :: 3.8
|
||||||
Programming Language :: Python :: 3.9
|
Programming Language :: Python :: 3.9
|
||||||
Programming Language :: Python :: 3.10
|
Programming Language :: Python :: 3.10
|
||||||
Programming Language :: Python :: 3.11
|
Programming Language :: Python :: 3.11
|
||||||
Programming Language :: Python :: 3.12
|
|
||||||
|
|
||||||
[files]
|
[files]
|
||||||
packages =
|
packages =
|
||||||
@@ -109,8 +109,3 @@ watcher_cluster_data_model_collectors =
|
|||||||
compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector
|
compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector
|
||||||
storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector
|
storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector
|
||||||
baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector
|
baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector
|
||||||
|
|
||||||
[codespell]
|
|
||||||
skip = *.po,*.js,*.css,*.html,*.svg,HACKING.py,*hacking*,*build*,*_static*,doc/dictionary.txt,*.pyc,*.inv,*.gz,*.jpg,*.png,*.vsd,*.graffle,*.json
|
|
||||||
count =
|
|
||||||
quiet-level = 4
|
|
||||||
@@ -1,7 +1,15 @@
|
|||||||
|
# The order of packages is significant, because pip processes them in the order
|
||||||
|
# of appearance. Changing the order has an impact on the overall integration
|
||||||
|
# process, which may cause wedges in the gate later.
|
||||||
|
|
||||||
coverage>=4.5.1 # Apache-2.0
|
coverage>=4.5.1 # Apache-2.0
|
||||||
|
doc8>=0.8.0 # Apache-2.0
|
||||||
freezegun>=0.3.10 # Apache-2.0
|
freezegun>=0.3.10 # Apache-2.0
|
||||||
|
hacking>=3.0.1,<3.1.0 # Apache-2.0
|
||||||
oslotest>=3.3.0 # Apache-2.0
|
oslotest>=3.3.0 # Apache-2.0
|
||||||
testscenarios>=0.5.0 # Apache-2.0/BSD
|
testscenarios>=0.5.0 # Apache-2.0/BSD
|
||||||
testtools>=2.3.0 # MIT
|
testtools>=2.3.0 # MIT
|
||||||
stestr>=2.0.0 # Apache-2.0
|
stestr>=2.0.0 # Apache-2.0
|
||||||
WebTest>=2.0.27 # MIT
|
os-api-ref>=1.4.0 # Apache-2.0
|
||||||
|
bandit>=1.6.0 # Apache-2.0
|
||||||
|
WebTest>=2.0.27 # MIT
|
||||||
31
tox.ini
31
tox.ini
@@ -8,13 +8,9 @@ basepython = python3
|
|||||||
usedevelop = True
|
usedevelop = True
|
||||||
allowlist_externals = find
|
allowlist_externals = find
|
||||||
rm
|
rm
|
||||||
install_command = pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
|
install_command = pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2024.2} {opts} {packages}
|
||||||
setenv =
|
setenv =
|
||||||
VIRTUAL_ENV={envdir}
|
VIRTUAL_ENV={envdir}
|
||||||
OS_STDOUT_CAPTURE=1
|
|
||||||
OS_STDERR_CAPTURE=1
|
|
||||||
OS_TEST_TIMEOUT=30
|
|
||||||
PYTHONDONTWRITEBYTECODE=1
|
|
||||||
deps =
|
deps =
|
||||||
-r{toxinidir}/test-requirements.txt
|
-r{toxinidir}/test-requirements.txt
|
||||||
-r{toxinidir}/requirements.txt
|
-r{toxinidir}/requirements.txt
|
||||||
@@ -30,22 +26,12 @@ passenv =
|
|||||||
HTTPS_PROXY
|
HTTPS_PROXY
|
||||||
no_proxy
|
no_proxy
|
||||||
NO_PROXY
|
NO_PROXY
|
||||||
OS_DEBUG
|
|
||||||
# NOTE(sean-k-mooney) optimization is enabled by default and when enabled
|
|
||||||
# asserts are complied out. Disable optimization to allow asserts in
|
|
||||||
# nova to fire in unit and functional tests. This can be useful for
|
|
||||||
# debugging issue with fixtures and mocks.
|
|
||||||
PYTHONOPTIMIZE
|
|
||||||
|
|
||||||
[testenv:pep8]
|
[testenv:pep8]
|
||||||
description =
|
|
||||||
Run style checks.
|
|
||||||
skip_install = true
|
|
||||||
deps =
|
|
||||||
pre-commit
|
|
||||||
commands =
|
commands =
|
||||||
pre-commit run --all-files --show-diff-on-failure
|
doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst
|
||||||
|
flake8
|
||||||
|
#bandit -r watcher -x watcher/tests/* -n5 -ll -s B320
|
||||||
|
|
||||||
[testenv:venv]
|
[testenv:venv]
|
||||||
setenv = PYTHONHASHSEED=0
|
setenv = PYTHONHASHSEED=0
|
||||||
@@ -96,6 +82,7 @@ commands =
|
|||||||
commands = python setup.py bdist_wheel
|
commands = python setup.py bdist_wheel
|
||||||
|
|
||||||
[testenv:pdf-docs]
|
[testenv:pdf-docs]
|
||||||
|
envdir = {toxworkdir}/docs
|
||||||
deps = {[testenv:docs]deps}
|
deps = {[testenv:docs]deps}
|
||||||
allowlist_externals =
|
allowlist_externals =
|
||||||
rm
|
rm
|
||||||
@@ -110,10 +97,8 @@ deps = -r{toxinidir}/doc/requirements.txt
|
|||||||
commands = sphinx-build -a -W -E -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
|
commands = sphinx-build -a -W -E -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
|
||||||
|
|
||||||
[testenv:bandit]
|
[testenv:bandit]
|
||||||
skip_install = true
|
deps = -r{toxinidir}/test-requirements.txt
|
||||||
deps = {[testenv:pep8]deps}
|
commands = bandit -r watcher -x watcher/tests/* -n5 -ll -s B320
|
||||||
commands =
|
|
||||||
pre-commit run --all-files --show-diff-on-failure bandit
|
|
||||||
|
|
||||||
[flake8]
|
[flake8]
|
||||||
filename = *.py,app.wsgi
|
filename = *.py,app.wsgi
|
||||||
|
|||||||
@@ -55,8 +55,9 @@ possible to :ref:`develop new implementations <implement_action_plugin>` which
|
|||||||
are dynamically loaded by Watcher at launch time.
|
are dynamically loaded by Watcher at launch time.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
from http import HTTPStatus
|
from http import HTTPStatus
|
||||||
from oslo_utils import timeutils
|
|
||||||
import pecan
|
import pecan
|
||||||
from pecan import rest
|
from pecan import rest
|
||||||
import wsme
|
import wsme
|
||||||
@@ -193,9 +194,9 @@ class Action(base.APIBase):
|
|||||||
sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
|
sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
|
||||||
description='action description',
|
description='action description',
|
||||||
state='PENDING',
|
state='PENDING',
|
||||||
created_at=timeutils.utcnow(),
|
created_at=datetime.datetime.utcnow(),
|
||||||
deleted_at=None,
|
deleted_at=None,
|
||||||
updated_at=timeutils.utcnow(),
|
updated_at=datetime.datetime.utcnow(),
|
||||||
parents=[])
|
parents=[])
|
||||||
sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
|
sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
|
||||||
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
|
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
|
||||||
@@ -229,7 +230,6 @@ class ActionCollection(collection.Collection):
|
|||||||
|
|
||||||
class ActionsController(rest.RestController):
|
class ActionsController(rest.RestController):
|
||||||
"""REST controller for Actions."""
|
"""REST controller for Actions."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(ActionsController, self).__init__()
|
super(ActionsController, self).__init__()
|
||||||
|
|
||||||
@@ -334,7 +334,7 @@ class ActionsController(rest.RestController):
|
|||||||
policy.enforce(context, 'action:detail',
|
policy.enforce(context, 'action:detail',
|
||||||
action='action:detail')
|
action='action:detail')
|
||||||
|
|
||||||
# NOTE(lucasagomes): /detail should only work against collections
|
# NOTE(lucasagomes): /detail should only work agaist collections
|
||||||
parent = pecan.request.path.split('/')[:-1][-1]
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
if parent != "actions":
|
if parent != "actions":
|
||||||
raise exception.HTTPNotFound
|
raise exception.HTTPNotFound
|
||||||
|
|||||||
@@ -54,9 +54,10 @@ To see the life-cycle and description of
|
|||||||
state machine <action_plan_state_machine>`.
|
state machine <action_plan_state_machine>`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
from http import HTTPStatus
|
from http import HTTPStatus
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
from oslo_utils import timeutils
|
|
||||||
import pecan
|
import pecan
|
||||||
from pecan import rest
|
from pecan import rest
|
||||||
import wsme
|
import wsme
|
||||||
@@ -292,9 +293,9 @@ class ActionPlan(base.APIBase):
|
|||||||
def sample(cls, expand=True):
|
def sample(cls, expand=True):
|
||||||
sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af',
|
sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af',
|
||||||
state='ONGOING',
|
state='ONGOING',
|
||||||
created_at=timeutils.utcnow(),
|
created_at=datetime.datetime.utcnow(),
|
||||||
deleted_at=None,
|
deleted_at=None,
|
||||||
updated_at=timeutils.utcnow())
|
updated_at=datetime.datetime.utcnow())
|
||||||
sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6'
|
sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6'
|
||||||
sample._efficacy_indicators = [{'description': 'Test indicator',
|
sample._efficacy_indicators = [{'description': 'Test indicator',
|
||||||
'name': 'test_indicator',
|
'name': 'test_indicator',
|
||||||
@@ -433,7 +434,7 @@ class ActionPlansController(rest.RestController):
|
|||||||
policy.enforce(context, 'action_plan:detail',
|
policy.enforce(context, 'action_plan:detail',
|
||||||
action='action_plan:detail')
|
action='action_plan:detail')
|
||||||
|
|
||||||
# NOTE(lucasagomes): /detail should only work against collections
|
# NOTE(lucasagomes): /detail should only work agaist collections
|
||||||
parent = pecan.request.path.split('/')[:-1][-1]
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
if parent != "action_plans":
|
if parent != "action_plans":
|
||||||
raise exception.HTTPNotFound
|
raise exception.HTTPNotFound
|
||||||
|
|||||||
@@ -33,8 +33,6 @@ import datetime
|
|||||||
from dateutil import tz
|
from dateutil import tz
|
||||||
|
|
||||||
from http import HTTPStatus
|
from http import HTTPStatus
|
||||||
from oslo_log import log
|
|
||||||
from oslo_utils import timeutils
|
|
||||||
import pecan
|
import pecan
|
||||||
from pecan import rest
|
from pecan import rest
|
||||||
import wsme
|
import wsme
|
||||||
@@ -42,6 +40,8 @@ from wsme import types as wtypes
|
|||||||
from wsme import utils as wutils
|
from wsme import utils as wutils
|
||||||
import wsmeext.pecan as wsme_pecan
|
import wsmeext.pecan as wsme_pecan
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher._i18n import _
|
from watcher._i18n import _
|
||||||
from watcher.api.controllers import base
|
from watcher.api.controllers import base
|
||||||
from watcher.api.controllers import link
|
from watcher.api.controllers import link
|
||||||
@@ -171,16 +171,16 @@ class AuditPostType(wtypes.Base):
|
|||||||
strategy = _get_object_by_value(context, objects.Strategy,
|
strategy = _get_object_by_value(context, objects.Strategy,
|
||||||
self.strategy)
|
self.strategy)
|
||||||
self.name = "%s-%s" % (strategy.name,
|
self.name = "%s-%s" % (strategy.name,
|
||||||
timeutils.utcnow().isoformat())
|
datetime.datetime.utcnow().isoformat())
|
||||||
elif self.audit_template_uuid:
|
elif self.audit_template_uuid:
|
||||||
audit_template = objects.AuditTemplate.get(
|
audit_template = objects.AuditTemplate.get(
|
||||||
context, self.audit_template_uuid)
|
context, self.audit_template_uuid)
|
||||||
self.name = "%s-%s" % (audit_template.name,
|
self.name = "%s-%s" % (audit_template.name,
|
||||||
timeutils.utcnow().isoformat())
|
datetime.datetime.utcnow().isoformat())
|
||||||
else:
|
else:
|
||||||
goal = _get_object_by_value(context, objects.Goal, self.goal)
|
goal = _get_object_by_value(context, objects.Goal, self.goal)
|
||||||
self.name = "%s-%s" % (goal.name,
|
self.name = "%s-%s" % (goal.name,
|
||||||
timeutils.utcnow().isoformat())
|
datetime.datetime.utcnow().isoformat())
|
||||||
# No more than 63 characters
|
# No more than 63 characters
|
||||||
if len(self.name) > 63:
|
if len(self.name) > 63:
|
||||||
LOG.warning("Audit: %s length exceeds 63 characters",
|
LOG.warning("Audit: %s length exceeds 63 characters",
|
||||||
@@ -424,15 +424,15 @@ class Audit(base.APIBase):
|
|||||||
name='My Audit',
|
name='My Audit',
|
||||||
audit_type='ONESHOT',
|
audit_type='ONESHOT',
|
||||||
state='PENDING',
|
state='PENDING',
|
||||||
created_at=timeutils.utcnow(),
|
created_at=datetime.datetime.utcnow(),
|
||||||
deleted_at=None,
|
deleted_at=None,
|
||||||
updated_at=timeutils.utcnow(),
|
updated_at=datetime.datetime.utcnow(),
|
||||||
interval='7200',
|
interval='7200',
|
||||||
scope=[],
|
scope=[],
|
||||||
auto_trigger=False,
|
auto_trigger=False,
|
||||||
next_run_time=timeutils.utcnow(),
|
next_run_time=datetime.datetime.utcnow(),
|
||||||
start_time=timeutils.utcnow(),
|
start_time=datetime.datetime.utcnow(),
|
||||||
end_time=timeutils.utcnow())
|
end_time=datetime.datetime.utcnow())
|
||||||
|
|
||||||
sample.goal_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
|
sample.goal_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
|
||||||
sample.strategy_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ff'
|
sample.strategy_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ff'
|
||||||
@@ -468,7 +468,6 @@ class AuditCollection(collection.Collection):
|
|||||||
|
|
||||||
class AuditsController(rest.RestController):
|
class AuditsController(rest.RestController):
|
||||||
"""REST controller for Audits."""
|
"""REST controller for Audits."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(AuditsController, self).__init__()
|
super(AuditsController, self).__init__()
|
||||||
self.dc_client = rpcapi.DecisionEngineAPI()
|
self.dc_client = rpcapi.DecisionEngineAPI()
|
||||||
@@ -570,7 +569,7 @@ class AuditsController(rest.RestController):
|
|||||||
context = pecan.request.context
|
context = pecan.request.context
|
||||||
policy.enforce(context, 'audit:detail',
|
policy.enforce(context, 'audit:detail',
|
||||||
action='audit:detail')
|
action='audit:detail')
|
||||||
# NOTE(lucasagomes): /detail should only work against collections
|
# NOTE(lucasagomes): /detail should only work agaist collections
|
||||||
parent = pecan.request.path.split('/')[:-1][-1]
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
if parent != "audits":
|
if parent != "audits":
|
||||||
raise exception.HTTPNotFound
|
raise exception.HTTPNotFound
|
||||||
|
|||||||
@@ -43,8 +43,9 @@ will be launched automatically or will need a manual confirmation from the
|
|||||||
:ref:`Administrator <administrator_definition>`.
|
:ref:`Administrator <administrator_definition>`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
from http import HTTPStatus
|
from http import HTTPStatus
|
||||||
from oslo_utils import timeutils
|
|
||||||
import pecan
|
import pecan
|
||||||
from pecan import rest
|
from pecan import rest
|
||||||
import wsme
|
import wsme
|
||||||
@@ -439,9 +440,9 @@ class AuditTemplate(base.APIBase):
|
|||||||
description='Description of my audit template',
|
description='Description of my audit template',
|
||||||
goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6',
|
goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6',
|
||||||
strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986',
|
strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986',
|
||||||
created_at=timeutils.utcnow(),
|
created_at=datetime.datetime.utcnow(),
|
||||||
deleted_at=None,
|
deleted_at=None,
|
||||||
updated_at=timeutils.utcnow(),
|
updated_at=datetime.datetime.utcnow(),
|
||||||
scope=[],)
|
scope=[],)
|
||||||
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
|
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
|
||||||
|
|
||||||
@@ -475,7 +476,6 @@ class AuditTemplateCollection(collection.Collection):
|
|||||||
|
|
||||||
class AuditTemplatesController(rest.RestController):
|
class AuditTemplatesController(rest.RestController):
|
||||||
"""REST controller for AuditTemplates."""
|
"""REST controller for AuditTemplates."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(AuditTemplatesController, self).__init__()
|
super(AuditTemplatesController, self).__init__()
|
||||||
|
|
||||||
@@ -576,7 +576,7 @@ class AuditTemplatesController(rest.RestController):
|
|||||||
policy.enforce(context, 'audit_template:detail',
|
policy.enforce(context, 'audit_template:detail',
|
||||||
action='audit_template:detail')
|
action='audit_template:detail')
|
||||||
|
|
||||||
# NOTE(lucasagomes): /detail should only work against collections
|
# NOTE(lucasagomes): /detail should only work agaist collections
|
||||||
parent = pecan.request.path.split('/')[:-1][-1]
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
if parent != "audit_templates":
|
if parent != "audit_templates":
|
||||||
raise exception.HTTPNotFound
|
raise exception.HTTPNotFound
|
||||||
|
|||||||
@@ -32,7 +32,6 @@ from watcher.decision_engine import rpcapi
|
|||||||
|
|
||||||
class DataModelController(rest.RestController):
|
class DataModelController(rest.RestController):
|
||||||
"""REST controller for data model"""
|
"""REST controller for data model"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(DataModelController, self).__init__()
|
super(DataModelController, self).__init__()
|
||||||
|
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ Here are some examples of :ref:`Goals <goal_definition>`:
|
|||||||
- minimize the energy consumption
|
- minimize the energy consumption
|
||||||
- minimize the number of compute nodes (consolidation)
|
- minimize the number of compute nodes (consolidation)
|
||||||
- balance the workload among compute nodes
|
- balance the workload among compute nodes
|
||||||
- minimize the license cost (some software have a licensing model which is
|
- minimize the license cost (some softwares have a licensing model which is
|
||||||
based on the number of sockets or cores where the software is deployed)
|
based on the number of sockets or cores where the software is deployed)
|
||||||
- find the most appropriate moment for a planned maintenance on a
|
- find the most appropriate moment for a planned maintenance on a
|
||||||
given group of host (which may be an entire availability zone):
|
given group of host (which may be an entire availability zone):
|
||||||
@@ -153,7 +153,6 @@ class GoalCollection(collection.Collection):
|
|||||||
|
|
||||||
class GoalsController(rest.RestController):
|
class GoalsController(rest.RestController):
|
||||||
"""REST controller for Goals."""
|
"""REST controller for Goals."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(GoalsController, self).__init__()
|
super(GoalsController, self).__init__()
|
||||||
|
|
||||||
@@ -217,7 +216,7 @@ class GoalsController(rest.RestController):
|
|||||||
context = pecan.request.context
|
context = pecan.request.context
|
||||||
policy.enforce(context, 'goal:detail',
|
policy.enforce(context, 'goal:detail',
|
||||||
action='goal:detail')
|
action='goal:detail')
|
||||||
# NOTE(lucasagomes): /detail should only work against collections
|
# NOTE(lucasagomes): /detail should only work agaist collections
|
||||||
parent = pecan.request.path.split('/')[:-1][-1]
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
if parent != "goals":
|
if parent != "goals":
|
||||||
raise exception.HTTPNotFound
|
raise exception.HTTPNotFound
|
||||||
|
|||||||
@@ -145,7 +145,6 @@ class ScoringEngineCollection(collection.Collection):
|
|||||||
|
|
||||||
class ScoringEngineController(rest.RestController):
|
class ScoringEngineController(rest.RestController):
|
||||||
"""REST controller for Scoring Engines."""
|
"""REST controller for Scoring Engines."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(ScoringEngineController, self).__init__()
|
super(ScoringEngineController, self).__init__()
|
||||||
|
|
||||||
|
|||||||
@@ -175,7 +175,6 @@ class ServiceCollection(collection.Collection):
|
|||||||
|
|
||||||
class ServicesController(rest.RestController):
|
class ServicesController(rest.RestController):
|
||||||
"""REST controller for Services."""
|
"""REST controller for Services."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(ServicesController, self).__init__()
|
super(ServicesController, self).__init__()
|
||||||
|
|
||||||
@@ -237,7 +236,7 @@ class ServicesController(rest.RestController):
|
|||||||
context = pecan.request.context
|
context = pecan.request.context
|
||||||
policy.enforce(context, 'service:detail',
|
policy.enforce(context, 'service:detail',
|
||||||
action='service:detail')
|
action='service:detail')
|
||||||
# NOTE(lucasagomes): /detail should only work against collections
|
# NOTE(lucasagomes): /detail should only work agaist collections
|
||||||
parent = pecan.request.path.split('/')[:-1][-1]
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
if parent != "services":
|
if parent != "services":
|
||||||
raise exception.HTTPNotFound
|
raise exception.HTTPNotFound
|
||||||
|
|||||||
@@ -196,7 +196,6 @@ class StrategyCollection(collection.Collection):
|
|||||||
|
|
||||||
class StrategiesController(rest.RestController):
|
class StrategiesController(rest.RestController):
|
||||||
"""REST controller for Strategies."""
|
"""REST controller for Strategies."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(StrategiesController, self).__init__()
|
super(StrategiesController, self).__init__()
|
||||||
|
|
||||||
@@ -284,7 +283,7 @@ class StrategiesController(rest.RestController):
|
|||||||
context = pecan.request.context
|
context = pecan.request.context
|
||||||
policy.enforce(context, 'strategy:detail',
|
policy.enforce(context, 'strategy:detail',
|
||||||
action='strategy:detail')
|
action='strategy:detail')
|
||||||
# NOTE(lucasagomes): /detail should only work against collections
|
# NOTE(lucasagomes): /detail should only work agaist collections
|
||||||
parent = pecan.request.path.split('/')[:-1][-1]
|
parent = pecan.request.path.split('/')[:-1][-1]
|
||||||
if parent != "strategies":
|
if parent != "strategies":
|
||||||
raise exception.HTTPNotFound
|
raise exception.HTTPNotFound
|
||||||
|
|||||||
@@ -161,7 +161,6 @@ class MultiType(wtypes.UserType):
|
|||||||
:param types: Variable-length list of types.
|
:param types: Variable-length list of types.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, *types):
|
def __init__(self, *types):
|
||||||
self.types = types
|
self.types = types
|
||||||
|
|
||||||
|
|||||||
@@ -32,7 +32,6 @@ LOG = log.getLogger(__name__)
|
|||||||
|
|
||||||
class WebhookController(rest.RestController):
|
class WebhookController(rest.RestController):
|
||||||
"""REST controller for webhooks resource."""
|
"""REST controller for webhooks resource."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(WebhookController, self).__init__()
|
super(WebhookController, self).__init__()
|
||||||
self.dc_client = rpcapi.DecisionEngineAPI()
|
self.dc_client = rpcapi.DecisionEngineAPI()
|
||||||
|
|||||||
@@ -83,7 +83,6 @@ class NoExceptionTracebackHook(hooks.PecanHook):
|
|||||||
# 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator
|
# 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator
|
||||||
# catches and handles all the errors, so 'on_error' dedicated for unhandled
|
# catches and handles all the errors, so 'on_error' dedicated for unhandled
|
||||||
# exceptions never fired.
|
# exceptions never fired.
|
||||||
|
|
||||||
def after(self, state):
|
def after(self, state):
|
||||||
# Omit empty body. Some errors may not have body at this level yet.
|
# Omit empty body. Some errors may not have body at this level yet.
|
||||||
if not state.response.body:
|
if not state.response.body:
|
||||||
|
|||||||
@@ -33,7 +33,6 @@ class AuthTokenMiddleware(auth_token.AuthProtocol):
|
|||||||
for public routes in the API.
|
for public routes in the API.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, app, conf, public_api_routes=()):
|
def __init__(self, app, conf, public_api_routes=()):
|
||||||
route_pattern_tpl = r'%s(\.json|\.xml)?$'
|
route_pattern_tpl = r'%s(\.json|\.xml)?$'
|
||||||
|
|
||||||
|
|||||||
@@ -118,8 +118,7 @@ class BaseAction(loadable.Loadable, metaclass=abc.ABCMeta):
|
|||||||
"""
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@property
|
@abc.abstractproperty
|
||||||
@abc.abstractmethod
|
|
||||||
def schema(self):
|
def schema(self):
|
||||||
"""Defines a Schema that the input parameters shall comply to
|
"""Defines a Schema that the input parameters shall comply to
|
||||||
|
|
||||||
|
|||||||
@@ -17,14 +17,11 @@ import jsonschema
|
|||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from cinderclient import client as cinder_client
|
|
||||||
from watcher._i18n import _
|
from watcher._i18n import _
|
||||||
from watcher.applier.actions import base
|
from watcher.applier.actions import base
|
||||||
from watcher.common import cinder_helper
|
from watcher.common import cinder_helper
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import keystone_helper
|
|
||||||
from watcher.common import nova_helper
|
from watcher.common import nova_helper
|
||||||
from watcher.common import utils
|
|
||||||
from watcher import conf
|
from watcher import conf
|
||||||
|
|
||||||
CONF = conf.CONF
|
CONF = conf.CONF
|
||||||
@@ -70,8 +67,6 @@ class VolumeMigrate(base.BaseAction):
|
|||||||
|
|
||||||
def __init__(self, config, osc=None):
|
def __init__(self, config, osc=None):
|
||||||
super(VolumeMigrate, self).__init__(config)
|
super(VolumeMigrate, self).__init__(config)
|
||||||
self.temp_username = utils.random_string(10)
|
|
||||||
self.temp_password = utils.random_string(10)
|
|
||||||
self.cinder_util = cinder_helper.CinderHelper(osc=self.osc)
|
self.cinder_util = cinder_helper.CinderHelper(osc=self.osc)
|
||||||
self.nova_util = nova_helper.NovaHelper(osc=self.osc)
|
self.nova_util = nova_helper.NovaHelper(osc=self.osc)
|
||||||
|
|
||||||
@@ -134,83 +129,42 @@ class VolumeMigrate(base.BaseAction):
|
|||||||
|
|
||||||
def _can_swap(self, volume):
|
def _can_swap(self, volume):
|
||||||
"""Judge volume can be swapped"""
|
"""Judge volume can be swapped"""
|
||||||
|
# TODO(sean-k-mooney): rename this to _can_migrate and update
|
||||||
|
# tests to reflect that.
|
||||||
|
|
||||||
|
# cinder volume migration can migrate volumes that are not
|
||||||
|
# attached to instances or nova can migrate the data for cinder
|
||||||
|
# if the volume is in-use. If the volume has no attachments
|
||||||
|
# allow cinder to decided if it can be migrated.
|
||||||
if not volume.attachments:
|
if not volume.attachments:
|
||||||
return False
|
LOG.debug(f"volume: {volume.id} has no attachments")
|
||||||
instance_id = volume.attachments[0]['server_id']
|
|
||||||
instance_status = self.nova_util.find_instance(instance_id).status
|
|
||||||
|
|
||||||
if (volume.status == 'in-use' and
|
|
||||||
instance_status in ('ACTIVE', 'PAUSED', 'RESIZED')):
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
return False
|
# since it has attachments we need to validate nova's constraints
|
||||||
|
instance_id = volume.attachments[0]['server_id']
|
||||||
def _create_user(self, volume, user):
|
instance_status = self.nova_util.find_instance(instance_id).status
|
||||||
"""Create user with volume attribute and user information"""
|
LOG.debug(
|
||||||
keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
|
f"volume: {volume.id} is attached to instance: {instance_id} "
|
||||||
project_id = getattr(volume, 'os-vol-tenant-attr:tenant_id')
|
f"in instance status: {instance_status}")
|
||||||
user['project'] = project_id
|
# NOTE(sean-k-mooney): This used to allow RESIZED which
|
||||||
user['domain'] = keystone_util.get_project(project_id).domain_id
|
# is the resize_verify task state, that is not an acceptable time
|
||||||
user['roles'] = ['admin']
|
# to migrate volumes, if nova does not block this in the API
|
||||||
return keystone_util.create_user(user)
|
# today that is probably a bug. PAUSED is also questionable but
|
||||||
|
# it should generally be safe.
|
||||||
def _get_cinder_client(self, session):
|
return (volume.status == 'in-use' and
|
||||||
"""Get cinder client by session"""
|
instance_status in ('ACTIVE', 'PAUSED'))
|
||||||
return cinder_client.Client(
|
|
||||||
CONF.cinder_client.api_version,
|
|
||||||
session=session,
|
|
||||||
endpoint_type=CONF.cinder_client.endpoint_type)
|
|
||||||
|
|
||||||
def _swap_volume(self, volume, dest_type):
|
|
||||||
"""Swap volume to dest_type
|
|
||||||
|
|
||||||
Limitation note: only for compute libvirt driver
|
|
||||||
"""
|
|
||||||
if not dest_type:
|
|
||||||
raise exception.Invalid(
|
|
||||||
message=(_("destination type is required when "
|
|
||||||
"migration type is swap")))
|
|
||||||
|
|
||||||
if not self._can_swap(volume):
|
|
||||||
raise exception.Invalid(
|
|
||||||
message=(_("Invalid state for swapping volume")))
|
|
||||||
|
|
||||||
user_info = {
|
|
||||||
'name': self.temp_username,
|
|
||||||
'password': self.temp_password}
|
|
||||||
user = self._create_user(volume, user_info)
|
|
||||||
keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
|
|
||||||
try:
|
|
||||||
session = keystone_util.create_session(
|
|
||||||
user.id, self.temp_password)
|
|
||||||
temp_cinder = self._get_cinder_client(session)
|
|
||||||
|
|
||||||
# swap volume
|
|
||||||
new_volume = self.cinder_util.create_volume(
|
|
||||||
temp_cinder, volume, dest_type)
|
|
||||||
self.nova_util.swap_volume(volume, new_volume)
|
|
||||||
|
|
||||||
# delete old volume
|
|
||||||
self.cinder_util.delete_volume(volume)
|
|
||||||
|
|
||||||
finally:
|
|
||||||
keystone_util.delete_user(user)
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def _migrate(self, volume_id, dest_node, dest_type):
|
def _migrate(self, volume_id, dest_node, dest_type):
|
||||||
|
|
||||||
try:
|
try:
|
||||||
volume = self.cinder_util.get_volume(volume_id)
|
volume = self.cinder_util.get_volume(volume_id)
|
||||||
if self.migration_type == self.SWAP:
|
# for backward compatibility map swap to migrate.
|
||||||
if dest_node:
|
if self.migration_type in (self.SWAP, self.MIGRATE):
|
||||||
LOG.warning("dest_node is ignored")
|
if not self._can_swap(volume):
|
||||||
return self._swap_volume(volume, dest_type)
|
raise exception.Invalid(
|
||||||
|
message=(_("Invalid state for swapping volume")))
|
||||||
|
return self.cinder_util.migrate(volume, dest_node)
|
||||||
elif self.migration_type == self.RETYPE:
|
elif self.migration_type == self.RETYPE:
|
||||||
return self.cinder_util.retype(volume, dest_type)
|
return self.cinder_util.retype(volume, dest_type)
|
||||||
elif self.migration_type == self.MIGRATE:
|
|
||||||
return self.cinder_util.migrate(volume, dest_node)
|
|
||||||
else:
|
else:
|
||||||
raise exception.Invalid(
|
raise exception.Invalid(
|
||||||
message=(_("Migration of type '%(migration_type)s' is not "
|
message=(_("Migration of type '%(migration_type)s' is not "
|
||||||
|
|||||||
0
watcher/applier/default.py
Normal file → Executable file
0
watcher/applier/default.py
Normal file → Executable file
@@ -199,6 +199,5 @@ class TaskFlowNop(flow_task.Task):
|
|||||||
|
|
||||||
We need at least two atoms to create a link.
|
We need at least two atoms to create a link.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def execute(self):
|
def execute(self):
|
||||||
pass
|
pass
|
||||||
|
|||||||
@@ -15,15 +15,13 @@
|
|||||||
# common/service.py. This allows the API service to run without monkey
|
# common/service.py. This allows the API service to run without monkey
|
||||||
# patching under Apache (which uses its own concurrency model). Mixing
|
# patching under Apache (which uses its own concurrency model). Mixing
|
||||||
# concurrency models can cause undefined behavior and potentially API timeouts.
|
# concurrency models can cause undefined behavior and potentially API timeouts.
|
||||||
# NOTE(sean-k-mooney) while ^ is true, since that was written asyncio was added
|
import eventlet
|
||||||
# to the code base in addition to apscheduler which provides native threads.
|
|
||||||
# As such we have a lot of technical debt to fix with regards to watchers
|
eventlet.monkey_patch()
|
||||||
# concurrency model as we are mixing up to 3 models the same process.
|
|
||||||
# apscheduler does not technically support eventlet but it has mostly worked
|
# Monkey patch the original current_thread to use the up-to-date _active
|
||||||
# until now, apscheduler is used to provide a job schedulers which mixes
|
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
|
||||||
# monkey patched and non monkey patched code in the same process.
|
# https://github.com/eventlet/eventlet/issues/592
|
||||||
# That is problematic and can lead to errors on python 3.12+.
|
import __original_module_threading as orig_threading # noqa
|
||||||
# The maas support added asyncio to the codebase which is unsafe to mix
|
import threading # noqa
|
||||||
# with eventlets by default.
|
orig_threading.current_thread.__globals__['_active'] = threading._active
|
||||||
from watcher import eventlet
|
|
||||||
eventlet.patch()
|
|
||||||
|
|||||||
@@ -153,7 +153,7 @@ class CinderHelper(object):
|
|||||||
final_status = ('success', 'error')
|
final_status = ('success', 'error')
|
||||||
while getattr(volume, 'migration_status') not in final_status:
|
while getattr(volume, 'migration_status') not in final_status:
|
||||||
volume = self.get_volume(volume.id)
|
volume = self.get_volume(volume.id)
|
||||||
LOG.debug('Waiting the migration of %s', volume)
|
LOG.debug('Waiting the migration of {0}'.format(volume))
|
||||||
time.sleep(retry_interval)
|
time.sleep(retry_interval)
|
||||||
if getattr(volume, 'migration_status') == 'error':
|
if getattr(volume, 'migration_status') == 'error':
|
||||||
host_name = getattr(volume, 'os-vol-host-attr:host')
|
host_name = getattr(volume, 'os-vol-host-attr:host')
|
||||||
@@ -230,7 +230,7 @@ class CinderHelper(object):
|
|||||||
availability_zone=getattr(volume, 'availability_zone'))
|
availability_zone=getattr(volume, 'availability_zone'))
|
||||||
while getattr(new_volume, 'status') != 'available' and retry:
|
while getattr(new_volume, 'status') != 'available' and retry:
|
||||||
new_volume = cinder.volumes.get(new_volume.id)
|
new_volume = cinder.volumes.get(new_volume.id)
|
||||||
LOG.debug('Waiting volume creation of %s', new_volume)
|
LOG.debug('Waiting volume creation of {0}'.format(new_volume))
|
||||||
time.sleep(retry_interval)
|
time.sleep(retry_interval)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
LOG.debug("retry count: %s", retry)
|
LOG.debug("retry count: %s", retry)
|
||||||
|
|||||||
25
watcher/common/clients.py
Normal file → Executable file
25
watcher/common/clients.py
Normal file → Executable file
@@ -27,6 +27,12 @@ from novaclient import client as nvclient
|
|||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.common import utils
|
from watcher.common import utils
|
||||||
|
|
||||||
|
try:
|
||||||
|
from ceilometerclient import client as ceclient
|
||||||
|
HAS_CEILCLIENT = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_CEILCLIENT = False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from maas import client as maas_client
|
from maas import client as maas_client
|
||||||
except ImportError:
|
except ImportError:
|
||||||
@@ -71,6 +77,7 @@ class OpenStackClients(object):
|
|||||||
self._glance = None
|
self._glance = None
|
||||||
self._gnocchi = None
|
self._gnocchi = None
|
||||||
self._cinder = None
|
self._cinder = None
|
||||||
|
self._ceilometer = None
|
||||||
self._monasca = None
|
self._monasca = None
|
||||||
self._neutron = None
|
self._neutron = None
|
||||||
self._ironic = None
|
self._ironic = None
|
||||||
@@ -181,6 +188,24 @@ class OpenStackClients(object):
|
|||||||
session=self.session)
|
session=self.session)
|
||||||
return self._cinder
|
return self._cinder
|
||||||
|
|
||||||
|
@exception.wrap_keystone_exception
|
||||||
|
def ceilometer(self):
|
||||||
|
if self._ceilometer:
|
||||||
|
return self._ceilometer
|
||||||
|
|
||||||
|
ceilometerclient_version = self._get_client_option('ceilometer',
|
||||||
|
'api_version')
|
||||||
|
ceilometer_endpoint_type = self._get_client_option('ceilometer',
|
||||||
|
'endpoint_type')
|
||||||
|
ceilometer_region_name = self._get_client_option('ceilometer',
|
||||||
|
'region_name')
|
||||||
|
self._ceilometer = ceclient.get_client(
|
||||||
|
ceilometerclient_version,
|
||||||
|
endpoint_type=ceilometer_endpoint_type,
|
||||||
|
region_name=ceilometer_region_name,
|
||||||
|
session=self.session)
|
||||||
|
return self._ceilometer
|
||||||
|
|
||||||
@exception.wrap_keystone_exception
|
@exception.wrap_keystone_exception
|
||||||
def monasca(self):
|
def monasca(self):
|
||||||
if self._monasca:
|
if self._monasca:
|
||||||
|
|||||||
@@ -154,10 +154,6 @@ class InvalidParameter(Invalid):
|
|||||||
msg_fmt = _("%(parameter)s has to be of type %(parameter_type)s")
|
msg_fmt = _("%(parameter)s has to be of type %(parameter_type)s")
|
||||||
|
|
||||||
|
|
||||||
class MissingParameter(Invalid):
|
|
||||||
msg_fmt = _("%(parameter)s is required but missing. Check watcher.conf")
|
|
||||||
|
|
||||||
|
|
||||||
class InvalidIdentity(Invalid):
|
class InvalidIdentity(Invalid):
|
||||||
msg_fmt = _("Expected a uuid or int but received %(identity)s")
|
msg_fmt = _("Expected a uuid or int but received %(identity)s")
|
||||||
|
|
||||||
|
|||||||
@@ -15,8 +15,6 @@
|
|||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from keystoneauth1.exceptions import http as ks_exceptions
|
from keystoneauth1.exceptions import http as ks_exceptions
|
||||||
from keystoneauth1 import loading
|
|
||||||
from keystoneauth1 import session
|
|
||||||
from watcher._i18n import _
|
from watcher._i18n import _
|
||||||
from watcher.common import clients
|
from watcher.common import clients
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
@@ -72,8 +70,8 @@ class KeystoneHelper(object):
|
|||||||
message=(_("Project not Found: %s") % name_or_id))
|
message=(_("Project not Found: %s") % name_or_id))
|
||||||
if len(projects) > 1:
|
if len(projects) > 1:
|
||||||
raise exception.Invalid(
|
raise exception.Invalid(
|
||||||
message=(_("Project name seems ambiguous: %s") %
|
messsage=(_("Project name seems ambiguous: %s") %
|
||||||
name_or_id))
|
name_or_id))
|
||||||
return projects[0]
|
return projects[0]
|
||||||
|
|
||||||
def get_domain(self, name_or_id):
|
def get_domain(self, name_or_id):
|
||||||
@@ -90,35 +88,3 @@ class KeystoneHelper(object):
|
|||||||
message=(_("Domain name seems ambiguous: %s") %
|
message=(_("Domain name seems ambiguous: %s") %
|
||||||
name_or_id))
|
name_or_id))
|
||||||
return domains[0]
|
return domains[0]
|
||||||
|
|
||||||
def create_session(self, user_id, password):
|
|
||||||
user = self.get_user(user_id)
|
|
||||||
loader = loading.get_plugin_loader('password')
|
|
||||||
auth = loader.load_from_options(
|
|
||||||
auth_url=CONF.watcher_clients_auth.auth_url,
|
|
||||||
password=password,
|
|
||||||
user_id=user_id,
|
|
||||||
project_id=user.default_project_id)
|
|
||||||
return session.Session(auth=auth)
|
|
||||||
|
|
||||||
def create_user(self, user):
|
|
||||||
project = self.get_project(user['project'])
|
|
||||||
domain = self.get_domain(user['domain'])
|
|
||||||
_user = self.keystone.users.create(
|
|
||||||
user['name'],
|
|
||||||
password=user['password'],
|
|
||||||
domain=domain,
|
|
||||||
project=project,
|
|
||||||
)
|
|
||||||
for role in user['roles']:
|
|
||||||
role = self.get_role(role)
|
|
||||||
self.keystone.roles.grant(
|
|
||||||
role.id, user=_user.id, project=project.id)
|
|
||||||
return _user
|
|
||||||
|
|
||||||
def delete_user(self, user):
|
|
||||||
try:
|
|
||||||
user = self.get_user(user)
|
|
||||||
self.keystone.users.delete(user)
|
|
||||||
except exception.Invalid:
|
|
||||||
pass
|
|
||||||
|
|||||||
@@ -292,7 +292,9 @@ class NovaHelper(object):
|
|||||||
'OS-EXT-STS:vm_state') != 'resized' \
|
'OS-EXT-STS:vm_state') != 'resized' \
|
||||||
and retry:
|
and retry:
|
||||||
instance = self.nova.servers.get(instance.id)
|
instance = self.nova.servers.get(instance.id)
|
||||||
LOG.debug('Waiting the resize of %s to %s', instance, flavor_id)
|
LOG.debug(
|
||||||
|
'Waiting the resize of {0} to {1}'.format(
|
||||||
|
instance, flavor_id))
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
|
|
||||||
@@ -347,7 +349,8 @@ class NovaHelper(object):
|
|||||||
if dest_hostname is None:
|
if dest_hostname is None:
|
||||||
while (instance.status not in ['ACTIVE', 'ERROR'] and retry):
|
while (instance.status not in ['ACTIVE', 'ERROR'] and retry):
|
||||||
instance = self.nova.servers.get(instance.id)
|
instance = self.nova.servers.get(instance.id)
|
||||||
LOG.debug('Waiting the migration of %s', instance.id)
|
LOG.debug(
|
||||||
|
'Waiting the migration of {0}'.format(instance.id))
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
|
||||||
@@ -368,9 +371,11 @@ class NovaHelper(object):
|
|||||||
if not getattr(instance, 'OS-EXT-STS:task_state'):
|
if not getattr(instance, 'OS-EXT-STS:task_state'):
|
||||||
LOG.debug("Instance task state: %s is null", instance_id)
|
LOG.debug("Instance task state: %s is null", instance_id)
|
||||||
break
|
break
|
||||||
LOG.debug('Waiting the migration of %s to %s',
|
LOG.debug(
|
||||||
instance,
|
'Waiting the migration of {0} to {1}'.format(
|
||||||
getattr(instance, 'OS-EXT-SRV-ATTR:host'))
|
instance,
|
||||||
|
getattr(instance,
|
||||||
|
'OS-EXT-SRV-ATTR:host')))
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
|
|
||||||
@@ -720,7 +725,7 @@ class NovaHelper(object):
|
|||||||
instance_id, old_volume.id, new_volume.id)
|
instance_id, old_volume.id, new_volume.id)
|
||||||
while getattr(new_volume, 'status') != 'in-use' and retry:
|
while getattr(new_volume, 'status') != 'in-use' and retry:
|
||||||
new_volume = self.cinder.volumes.get(new_volume.id)
|
new_volume = self.cinder.volumes.get(new_volume.id)
|
||||||
LOG.debug('Waiting volume update to %s', new_volume)
|
LOG.debug('Waiting volume update to {0}'.format(new_volume))
|
||||||
time.sleep(retry_interval)
|
time.sleep(retry_interval)
|
||||||
retry -= 1
|
retry -= 1
|
||||||
LOG.debug("retry count: %s", retry)
|
LOG.debug("retry count: %s", retry)
|
||||||
|
|||||||
@@ -18,6 +18,7 @@
|
|||||||
import sys
|
import sys
|
||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
|
from oslo_policy import opts
|
||||||
from oslo_policy import policy
|
from oslo_policy import policy
|
||||||
|
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
@@ -26,6 +27,12 @@ from watcher.common import policies
|
|||||||
_ENFORCER = None
|
_ENFORCER = None
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
# TODO(gmann): Remove setting the default value of config policy_file
|
||||||
|
# once oslo_policy change the default value to 'policy.yaml'.
|
||||||
|
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
|
||||||
|
DEFAULT_POLICY_FILE = 'policy.yaml'
|
||||||
|
opts.set_defaults(CONF, DEFAULT_POLICY_FILE)
|
||||||
|
|
||||||
|
|
||||||
# we can get a policy enforcer by this init.
|
# we can get a policy enforcer by this init.
|
||||||
# oslo policy support change policy rule dynamically.
|
# oslo policy support change policy rule dynamically.
|
||||||
@@ -64,6 +71,7 @@ def init(policy_file=None, rules=None,
|
|||||||
|
|
||||||
def enforce(context, rule=None, target=None,
|
def enforce(context, rule=None, target=None,
|
||||||
do_raise=True, exc=None, *args, **kwargs):
|
do_raise=True, exc=None, *args, **kwargs):
|
||||||
|
|
||||||
"""Checks authorization of a rule against the target and credentials.
|
"""Checks authorization of a rule against the target and credentials.
|
||||||
|
|
||||||
:param dict context: As much information about the user performing the
|
:param dict context: As much information about the user performing the
|
||||||
|
|||||||
@@ -16,22 +16,16 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import eventlet
|
|
||||||
|
|
||||||
from apscheduler import events
|
from apscheduler import events
|
||||||
from apscheduler.executors import pool as pool_executor
|
from apscheduler.executors.pool import BasePoolExecutor
|
||||||
from apscheduler.schedulers import background
|
from apscheduler.schedulers import background
|
||||||
|
|
||||||
import futurist
|
import futurist
|
||||||
|
|
||||||
from oslo_service import service
|
from oslo_service import service
|
||||||
|
|
||||||
from watcher import eventlet as eventlet_helper
|
|
||||||
|
|
||||||
job_events = events
|
job_events = events
|
||||||
|
|
||||||
|
|
||||||
class GreenThreadPoolExecutor(pool_executor.BasePoolExecutor):
|
class GreenThreadPoolExecutor(BasePoolExecutor):
|
||||||
"""Green thread pool
|
"""Green thread pool
|
||||||
|
|
||||||
An executor that runs jobs in a green thread pool.
|
An executor that runs jobs in a green thread pool.
|
||||||
@@ -49,25 +43,16 @@ executors = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class BackgroundSchedulerService(
|
class BackgroundSchedulerService(service.ServiceBase,
|
||||||
service.ServiceBase, background.BackgroundScheduler):
|
background.BackgroundScheduler):
|
||||||
def __init__(self, gconfig=None, **options):
|
def __init__(self, gconfig={}, **options):
|
||||||
self.should_patch = eventlet_helper.is_patched()
|
|
||||||
if options is None:
|
if options is None:
|
||||||
options = {'executors': executors}
|
options = {'executors': executors}
|
||||||
else:
|
else:
|
||||||
if 'executors' not in options.keys():
|
if 'executors' not in options.keys():
|
||||||
options['executors'] = executors
|
options['executors'] = executors
|
||||||
super().__init__(gconfig or {}, **options)
|
super(BackgroundSchedulerService, self).__init__(
|
||||||
|
gconfig, **options)
|
||||||
def _main_loop(self):
|
|
||||||
if self.should_patch:
|
|
||||||
# NOTE(sean-k-mooney): is_patched and monkey_patch form
|
|
||||||
# watcher.eventlet check a non thread local variable to early out
|
|
||||||
# as we do not use eventlet_helper.patch() here to ensure
|
|
||||||
# eventlet.monkey_patch() is actually called.
|
|
||||||
eventlet.monkey_patch()
|
|
||||||
super()._main_loop()
|
|
||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
"""Start service."""
|
"""Start service."""
|
||||||
|
|||||||
@@ -26,7 +26,6 @@ from oslo_reports import guru_meditation_report as gmr
|
|||||||
from oslo_reports import opts as gmr_opts
|
from oslo_reports import opts as gmr_opts
|
||||||
from oslo_service import service
|
from oslo_service import service
|
||||||
from oslo_service import wsgi
|
from oslo_service import wsgi
|
||||||
from oslo_utils import timeutils
|
|
||||||
|
|
||||||
from watcher._i18n import _
|
from watcher._i18n import _
|
||||||
from watcher.api import app
|
from watcher.api import app
|
||||||
@@ -121,7 +120,7 @@ class ServiceHeartbeat(scheduling.BackgroundSchedulerService):
|
|||||||
'host': host})
|
'host': host})
|
||||||
if watcher_list:
|
if watcher_list:
|
||||||
watcher_service = watcher_list[0]
|
watcher_service = watcher_list[0]
|
||||||
watcher_service.last_seen_up = timeutils.utcnow()
|
watcher_service.last_seen_up = datetime.datetime.utcnow()
|
||||||
watcher_service.save()
|
watcher_service.save()
|
||||||
else:
|
else:
|
||||||
watcher_service = objects.Service(self.context)
|
watcher_service = objects.Service(self.context)
|
||||||
|
|||||||
@@ -19,37 +19,30 @@ import abc
|
|||||||
|
|
||||||
class ServiceManager(object, metaclass=abc.ABCMeta):
|
class ServiceManager(object, metaclass=abc.ABCMeta):
|
||||||
|
|
||||||
@property
|
@abc.abstractproperty
|
||||||
@abc.abstractmethod
|
|
||||||
def service_name(self):
|
def service_name(self):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@property
|
@abc.abstractproperty
|
||||||
@abc.abstractmethod
|
|
||||||
def api_version(self):
|
def api_version(self):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@property
|
@abc.abstractproperty
|
||||||
@abc.abstractmethod
|
|
||||||
def publisher_id(self):
|
def publisher_id(self):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@property
|
@abc.abstractproperty
|
||||||
@abc.abstractmethod
|
|
||||||
def conductor_topic(self):
|
def conductor_topic(self):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@property
|
@abc.abstractproperty
|
||||||
@abc.abstractmethod
|
|
||||||
def notification_topics(self):
|
def notification_topics(self):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@property
|
@abc.abstractproperty
|
||||||
@abc.abstractmethod
|
|
||||||
def conductor_endpoints(self):
|
def conductor_endpoints(self):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@property
|
@abc.abstractproperty
|
||||||
@abc.abstractmethod
|
|
||||||
def notification_endpoints(self):
|
def notification_endpoints(self):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|||||||
@@ -19,9 +19,7 @@
|
|||||||
import asyncio
|
import asyncio
|
||||||
import datetime
|
import datetime
|
||||||
import inspect
|
import inspect
|
||||||
import random
|
|
||||||
import re
|
import re
|
||||||
import string
|
|
||||||
|
|
||||||
from croniter import croniter
|
from croniter import croniter
|
||||||
import eventlet
|
import eventlet
|
||||||
@@ -160,14 +158,10 @@ def extend_with_strict_schema(validator_class):
|
|||||||
StrictDefaultValidatingDraft4Validator = extend_with_default(
|
StrictDefaultValidatingDraft4Validator = extend_with_default(
|
||||||
extend_with_strict_schema(validators.Draft4Validator))
|
extend_with_strict_schema(validators.Draft4Validator))
|
||||||
|
|
||||||
|
|
||||||
Draft4Validator = validators.Draft4Validator
|
Draft4Validator = validators.Draft4Validator
|
||||||
|
|
||||||
|
|
||||||
def random_string(n):
|
|
||||||
return ''.join([random.choice(
|
|
||||||
string.ascii_letters + string.digits) for i in range(n)])
|
|
||||||
|
|
||||||
|
|
||||||
# Some clients (e.g. MAAS) use asyncio, which isn't compatible with Eventlet.
|
# Some clients (e.g. MAAS) use asyncio, which isn't compatible with Eventlet.
|
||||||
# As a workaround, we're delegating such calls to a native thread.
|
# As a workaround, we're delegating such calls to a native thread.
|
||||||
def async_compat_call(f, *args, **kwargs):
|
def async_compat_call(f, *args, **kwargs):
|
||||||
|
|||||||
4
watcher/conf/__init__.py
Normal file → Executable file
4
watcher/conf/__init__.py
Normal file → Executable file
@@ -21,6 +21,7 @@ from oslo_config import cfg
|
|||||||
|
|
||||||
from watcher.conf import api
|
from watcher.conf import api
|
||||||
from watcher.conf import applier
|
from watcher.conf import applier
|
||||||
|
from watcher.conf import ceilometer_client
|
||||||
from watcher.conf import cinder_client
|
from watcher.conf import cinder_client
|
||||||
from watcher.conf import clients_auth
|
from watcher.conf import clients_auth
|
||||||
from watcher.conf import collector
|
from watcher.conf import collector
|
||||||
@@ -41,7 +42,6 @@ from watcher.conf import nova_client
|
|||||||
from watcher.conf import paths
|
from watcher.conf import paths
|
||||||
from watcher.conf import placement_client
|
from watcher.conf import placement_client
|
||||||
from watcher.conf import planner
|
from watcher.conf import planner
|
||||||
from watcher.conf import prometheus_client
|
|
||||||
from watcher.conf import service
|
from watcher.conf import service
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
@@ -64,9 +64,9 @@ keystone_client.register_opts(CONF)
|
|||||||
grafana_client.register_opts(CONF)
|
grafana_client.register_opts(CONF)
|
||||||
grafana_translators.register_opts(CONF)
|
grafana_translators.register_opts(CONF)
|
||||||
cinder_client.register_opts(CONF)
|
cinder_client.register_opts(CONF)
|
||||||
|
ceilometer_client.register_opts(CONF)
|
||||||
neutron_client.register_opts(CONF)
|
neutron_client.register_opts(CONF)
|
||||||
clients_auth.register_opts(CONF)
|
clients_auth.register_opts(CONF)
|
||||||
ironic_client.register_opts(CONF)
|
ironic_client.register_opts(CONF)
|
||||||
collector.register_opts(CONF)
|
collector.register_opts(CONF)
|
||||||
placement_client.register_opts(CONF)
|
placement_client.register_opts(CONF)
|
||||||
prometheus_client.register_opts(CONF)
|
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ APPLIER_OPTS = [
|
|||||||
cfg.BoolOpt('rollback_when_actionplan_failed',
|
cfg.BoolOpt('rollback_when_actionplan_failed',
|
||||||
default=False,
|
default=False,
|
||||||
help='If set True, the failed actionplan will rollback '
|
help='If set True, the failed actionplan will rollback '
|
||||||
'when executing. Default value is False.'),
|
'when executing. Defaule value is False.'),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
63
watcher/conf/ceilometer_client.py
Normal file
63
watcher/conf/ceilometer_client.py
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
# -*- encoding: utf-8 -*-
|
||||||
|
# Copyright (c) 2016 Intel Corp
|
||||||
|
#
|
||||||
|
# Authors: Prudhvi Rao Shedimbi <prudhvi.rao.shedimbi@intel.com>
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
|
||||||
|
ceilometer_client = cfg.OptGroup(name='ceilometer_client',
|
||||||
|
title='Configuration Options for Ceilometer')
|
||||||
|
|
||||||
|
CEILOMETER_CLIENT_OPTS = [
|
||||||
|
cfg.StrOpt('api_version',
|
||||||
|
default='2',
|
||||||
|
deprecated_for_removal=True,
|
||||||
|
deprecated_since="1.13.0",
|
||||||
|
deprecated_reason="""
|
||||||
|
Ceilometer API is deprecated since Ocata release.
|
||||||
|
Any related configuration options are deprecated too.
|
||||||
|
""",
|
||||||
|
help='Version of Ceilometer API to use in '
|
||||||
|
'ceilometerclient.'),
|
||||||
|
cfg.StrOpt('endpoint_type',
|
||||||
|
default='internalURL',
|
||||||
|
deprecated_for_removal=True,
|
||||||
|
deprecated_since="1.13.0",
|
||||||
|
deprecated_reason="""
|
||||||
|
Ceilometer API is deprecated since Ocata release.
|
||||||
|
Any related configuration options are deprecated too.
|
||||||
|
""",
|
||||||
|
help='Type of endpoint to use in ceilometerclient. '
|
||||||
|
'Supported values: internalURL, publicURL, adminURL. '
|
||||||
|
'The default is internalURL.'),
|
||||||
|
cfg.StrOpt('region_name',
|
||||||
|
deprecated_for_removal=True,
|
||||||
|
deprecated_since="1.13.0",
|
||||||
|
deprecated_reason="""
|
||||||
|
Ceilometer API is deprecated since Ocata release.
|
||||||
|
Any related configuration options are deprecated too.
|
||||||
|
""",
|
||||||
|
help='Region in Identity service catalog to use for '
|
||||||
|
'communication with the OpenStack service.')]
|
||||||
|
|
||||||
|
|
||||||
|
def register_opts(conf):
|
||||||
|
conf.register_group(ceilometer_client)
|
||||||
|
conf.register_opts(CEILOMETER_CLIENT_OPTS, group=ceilometer_client)
|
||||||
|
|
||||||
|
|
||||||
|
def list_opts():
|
||||||
|
return [(ceilometer_client, CEILOMETER_CLIENT_OPTS)]
|
||||||
0
watcher/conf/ironic_client.py
Normal file → Executable file
0
watcher/conf/ironic_client.py
Normal file → Executable file
0
watcher/conf/nova_client.py
Normal file → Executable file
0
watcher/conf/nova_client.py
Normal file → Executable file
@@ -1,62 +0,0 @@
|
|||||||
# Copyright 2024 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
|
|
||||||
prometheus_client = cfg.OptGroup(name='prometheus_client',
|
|
||||||
title='Configuration Options for Prometheus',
|
|
||||||
help="See https://docs.openstack.org/watcher/"
|
|
||||||
"latest/datasources/prometheus.html for "
|
|
||||||
"details on how these options are used.")
|
|
||||||
|
|
||||||
PROMETHEUS_CLIENT_OPTS = [
|
|
||||||
cfg.StrOpt('host',
|
|
||||||
help="The hostname or IP address for the prometheus server."),
|
|
||||||
cfg.StrOpt('port',
|
|
||||||
help="The port number used by the prometheus server."),
|
|
||||||
cfg.StrOpt('fqdn_label',
|
|
||||||
default="fqdn",
|
|
||||||
help="The label that Prometheus uses to store the fqdn of "
|
|
||||||
"exporters. Defaults to 'fqdn'."),
|
|
||||||
cfg.StrOpt('instance_uuid_label',
|
|
||||||
default="resource",
|
|
||||||
help="The label that Prometheus uses to store the uuid of "
|
|
||||||
"OpenStack instances. Defaults to 'resource'."),
|
|
||||||
cfg.StrOpt('username',
|
|
||||||
help="The basic_auth username to use to authenticate with the "
|
|
||||||
"Prometheus server."),
|
|
||||||
cfg.StrOpt('password',
|
|
||||||
secret=True,
|
|
||||||
help="The basic_auth password to use to authenticate with the "
|
|
||||||
"Prometheus server."),
|
|
||||||
cfg.StrOpt('cafile',
|
|
||||||
help="Path to the CA certificate for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
cfg.StrOpt('certfile',
|
|
||||||
help="Path to the client certificate for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
cfg.StrOpt('keyfile',
|
|
||||||
help="Path to the client key for establishing a TLS "
|
|
||||||
"connection with the Prometheus server."),
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def register_opts(conf):
|
|
||||||
conf.register_group(prometheus_client)
|
|
||||||
conf.register_opts(PROMETHEUS_CLIENT_OPTS, group=prometheus_client)
|
|
||||||
|
|
||||||
|
|
||||||
def list_opts():
|
|
||||||
return [(prometheus_client, PROMETHEUS_CLIENT_OPTS)]
|
|
||||||
@@ -16,19 +16,23 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import pickle # nosec: B403
|
from oslo_serialization import jsonutils
|
||||||
|
|
||||||
from apscheduler.jobstores.base import ConflictingIdError
|
from apscheduler.jobstores.base import ConflictingIdError
|
||||||
from apscheduler.jobstores import sqlalchemy
|
from apscheduler.jobstores import sqlalchemy
|
||||||
from apscheduler.util import datetime_to_utc_timestamp
|
from apscheduler.util import datetime_to_utc_timestamp
|
||||||
from apscheduler.util import maybe_ref
|
from apscheduler.util import maybe_ref
|
||||||
from apscheduler.util import utc_timestamp_to_datetime
|
from apscheduler.util import utc_timestamp_to_datetime
|
||||||
from oslo_serialization import jsonutils
|
|
||||||
|
|
||||||
from watcher.common import context
|
from watcher.common import context
|
||||||
from watcher.common import service
|
from watcher.common import service
|
||||||
from watcher import objects
|
from watcher import objects
|
||||||
|
|
||||||
|
try:
|
||||||
|
import cPickle as pickle
|
||||||
|
except ImportError: # pragma: nocover
|
||||||
|
import pickle
|
||||||
|
|
||||||
from sqlalchemy import Table, MetaData, select, and_, null
|
from sqlalchemy import Table, MetaData, select, and_, null
|
||||||
from sqlalchemy.exc import IntegrityError
|
from sqlalchemy.exc import IntegrityError
|
||||||
|
|
||||||
|
|||||||
@@ -19,10 +19,9 @@
|
|||||||
|
|
||||||
|
|
||||||
import datetime
|
import datetime
|
||||||
|
from dateutil import tz
|
||||||
|
|
||||||
from croniter import croniter
|
from croniter import croniter
|
||||||
from dateutil import tz
|
|
||||||
from oslo_utils import timeutils
|
|
||||||
|
|
||||||
from watcher.common import context
|
from watcher.common import context
|
||||||
from watcher.common import scheduling
|
from watcher.common import scheduling
|
||||||
@@ -98,7 +97,7 @@ class ContinuousAuditHandler(base.AuditHandler):
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def _next_cron_time(audit):
|
def _next_cron_time(audit):
|
||||||
if utils.is_cron_like(audit.interval):
|
if utils.is_cron_like(audit.interval):
|
||||||
return croniter(audit.interval, timeutils.utcnow()
|
return croniter(audit.interval, datetime.datetime.utcnow()
|
||||||
).get_next(datetime.datetime)
|
).get_next(datetime.datetime)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@@ -112,7 +111,7 @@ class ContinuousAuditHandler(base.AuditHandler):
|
|||||||
finally:
|
finally:
|
||||||
if utils.is_int_like(audit.interval):
|
if utils.is_int_like(audit.interval):
|
||||||
audit.next_run_time = (
|
audit.next_run_time = (
|
||||||
timeutils.utcnow() +
|
datetime.datetime.utcnow() +
|
||||||
datetime.timedelta(seconds=int(audit.interval)))
|
datetime.timedelta(seconds=int(audit.interval)))
|
||||||
else:
|
else:
|
||||||
audit.next_run_time = self._next_cron_time(audit)
|
audit.next_run_time = self._next_cron_time(audit)
|
||||||
@@ -130,7 +129,7 @@ class ContinuousAuditHandler(base.AuditHandler):
|
|||||||
**trigger_args)
|
**trigger_args)
|
||||||
|
|
||||||
def check_audit_expired(self, audit):
|
def check_audit_expired(self, audit):
|
||||||
current = timeutils.utcnow()
|
current = datetime.datetime.utcnow()
|
||||||
# Note: if audit still didn't get into the timeframe,
|
# Note: if audit still didn't get into the timeframe,
|
||||||
# skip it
|
# skip it
|
||||||
if audit.start_time and audit.start_time > current:
|
if audit.start_time and audit.start_time > current:
|
||||||
@@ -197,7 +196,7 @@ class ContinuousAuditHandler(base.AuditHandler):
|
|||||||
# to restore it after shutdown
|
# to restore it after shutdown
|
||||||
if audit.next_run_time is not None:
|
if audit.next_run_time is not None:
|
||||||
old_run_time = audit.next_run_time
|
old_run_time = audit.next_run_time
|
||||||
current = timeutils.utcnow()
|
current = datetime.datetime.utcnow()
|
||||||
if old_run_time < current:
|
if old_run_time < current:
|
||||||
delta = datetime.timedelta(
|
delta = datetime.timedelta(
|
||||||
seconds=(int(audit.interval) - (
|
seconds=(int(audit.interval) - (
|
||||||
@@ -207,7 +206,7 @@ class ContinuousAuditHandler(base.AuditHandler):
|
|||||||
next_run_time = audit.next_run_time
|
next_run_time = audit.next_run_time
|
||||||
# if audit is new one
|
# if audit is new one
|
||||||
else:
|
else:
|
||||||
next_run_time = timeutils.utcnow()
|
next_run_time = datetime.datetime.utcnow()
|
||||||
self._add_job('interval', audit, audit_context,
|
self._add_job('interval', audit, audit_context,
|
||||||
seconds=int(audit.interval),
|
seconds=int(audit.interval),
|
||||||
next_run_time=next_run_time)
|
next_run_time=next_run_time)
|
||||||
|
|||||||
@@ -91,8 +91,8 @@ class DataSourceBase(object):
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception(e)
|
LOG.exception(e)
|
||||||
self.query_retry_reset(e)
|
self.query_retry_reset(e)
|
||||||
LOG.warning("Retry %d of %d while retrieving metrics retry "
|
LOG.warning("Retry {0} of {1} while retrieving metrics retry "
|
||||||
"in %d seconds", i+1, num_retries, timeout)
|
"in {2} seconds".format(i+1, num_retries, timeout))
|
||||||
time.sleep(timeout)
|
time.sleep(timeout)
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
|
|||||||
276
watcher/decision_engine/datasources/ceilometer.py
Normal file
276
watcher/decision_engine/datasources/ceilometer.py
Normal file
@@ -0,0 +1,276 @@
|
|||||||
|
# -*- encoding: utf-8 -*-
|
||||||
|
# Copyright (c) 2015 b<>com
|
||||||
|
#
|
||||||
|
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
from oslo_utils import timeutils
|
||||||
|
|
||||||
|
from watcher._i18n import _
|
||||||
|
from watcher.common import clients
|
||||||
|
from watcher.common import exception
|
||||||
|
from watcher.decision_engine.datasources import base
|
||||||
|
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from ceilometerclient import exc
|
||||||
|
HAS_CEILCLIENT = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_CEILCLIENT = False
|
||||||
|
|
||||||
|
|
||||||
|
class CeilometerHelper(base.DataSourceBase):
|
||||||
|
|
||||||
|
NAME = 'ceilometer'
|
||||||
|
METRIC_MAP = dict(host_cpu_usage='compute.node.cpu.percent',
|
||||||
|
host_ram_usage='hardware.memory.used',
|
||||||
|
host_outlet_temp='hardware.ipmi.node.outlet_temperature',
|
||||||
|
host_inlet_temp='hardware.ipmi.node.temperature',
|
||||||
|
host_airflow='hardware.ipmi.node.airflow',
|
||||||
|
host_power='hardware.ipmi.node.power',
|
||||||
|
instance_cpu_usage='cpu_util',
|
||||||
|
instance_ram_usage='memory.resident',
|
||||||
|
instance_ram_allocated='memory',
|
||||||
|
instance_l3_cache_usage='cpu_l3_cache',
|
||||||
|
instance_root_disk_size='disk.root.size',
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(self, osc=None):
|
||||||
|
""":param osc: an OpenStackClients instance"""
|
||||||
|
self.osc = osc if osc else clients.OpenStackClients()
|
||||||
|
self.ceilometer = self.osc.ceilometer()
|
||||||
|
LOG.warning("Ceilometer API is deprecated and Ceilometer Datasource "
|
||||||
|
"module is no longer maintained. We recommend to use "
|
||||||
|
"Gnocchi instead.")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def format_query(user_id, tenant_id, resource_id,
|
||||||
|
user_ids, tenant_ids, resource_ids):
|
||||||
|
query = []
|
||||||
|
|
||||||
|
def query_append(query, _id, _ids, field):
|
||||||
|
if _id:
|
||||||
|
_ids = [_id]
|
||||||
|
for x_id in _ids:
|
||||||
|
query.append({"field": field, "op": "eq", "value": x_id})
|
||||||
|
|
||||||
|
query_append(query, user_id, (user_ids or []), "user_id")
|
||||||
|
query_append(query, tenant_id, (tenant_ids or []), "project_id")
|
||||||
|
query_append(query, resource_id, (resource_ids or []), "resource_id")
|
||||||
|
|
||||||
|
return query
|
||||||
|
|
||||||
|
def _timestamps(self, start_time, end_time):
|
||||||
|
|
||||||
|
def _format_timestamp(_time):
|
||||||
|
if _time:
|
||||||
|
if isinstance(_time, datetime.datetime):
|
||||||
|
return _time.isoformat()
|
||||||
|
return _time
|
||||||
|
return None
|
||||||
|
|
||||||
|
start_timestamp = _format_timestamp(start_time)
|
||||||
|
end_timestamp = _format_timestamp(end_time)
|
||||||
|
|
||||||
|
if ((start_timestamp is not None) and (end_timestamp is not None) and
|
||||||
|
(timeutils.parse_isotime(start_timestamp) >
|
||||||
|
timeutils.parse_isotime(end_timestamp))):
|
||||||
|
raise exception.Invalid(
|
||||||
|
_("Invalid query: %(start_time)s > %(end_time)s") % dict(
|
||||||
|
start_time=start_timestamp, end_time=end_timestamp))
|
||||||
|
return start_timestamp, end_timestamp
|
||||||
|
|
||||||
|
def build_query(self, user_id=None, tenant_id=None, resource_id=None,
|
||||||
|
user_ids=None, tenant_ids=None, resource_ids=None,
|
||||||
|
start_time=None, end_time=None):
|
||||||
|
"""Returns query built from given parameters.
|
||||||
|
|
||||||
|
This query can be then used for querying resources, meters and
|
||||||
|
statistics.
|
||||||
|
:param user_id: user_id, has a priority over list of ids
|
||||||
|
:param tenant_id: tenant_id, has a priority over list of ids
|
||||||
|
:param resource_id: resource_id, has a priority over list of ids
|
||||||
|
:param user_ids: list of user_ids
|
||||||
|
:param tenant_ids: list of tenant_ids
|
||||||
|
:param resource_ids: list of resource_ids
|
||||||
|
:param start_time: datetime from which measurements should be collected
|
||||||
|
:param end_time: datetime until which measurements should be collected
|
||||||
|
"""
|
||||||
|
|
||||||
|
query = self.format_query(user_id, tenant_id, resource_id,
|
||||||
|
user_ids, tenant_ids, resource_ids)
|
||||||
|
|
||||||
|
start_timestamp, end_timestamp = self._timestamps(start_time,
|
||||||
|
end_time)
|
||||||
|
|
||||||
|
if start_timestamp:
|
||||||
|
query.append({"field": "timestamp", "op": "ge",
|
||||||
|
"value": start_timestamp})
|
||||||
|
if end_timestamp:
|
||||||
|
query.append({"field": "timestamp", "op": "le",
|
||||||
|
"value": end_timestamp})
|
||||||
|
return query
|
||||||
|
|
||||||
|
def query_retry_reset(self, exception_instance):
|
||||||
|
if isinstance(exception_instance, exc.HTTPUnauthorized):
|
||||||
|
self.osc.reset_clients()
|
||||||
|
self.ceilometer = self.osc.ceilometer()
|
||||||
|
|
||||||
|
def list_metrics(self):
|
||||||
|
"""List the user's meters."""
|
||||||
|
meters = self.query_retry(f=self.ceilometer.meters.list)
|
||||||
|
if not meters:
|
||||||
|
return set()
|
||||||
|
else:
|
||||||
|
return meters
|
||||||
|
|
||||||
|
def check_availability(self):
|
||||||
|
status = self.query_retry(self.ceilometer.resources.list)
|
||||||
|
if status:
|
||||||
|
return 'available'
|
||||||
|
else:
|
||||||
|
return 'not available'
|
||||||
|
|
||||||
|
def query_sample(self, meter_name, query, limit=1):
|
||||||
|
return self.query_retry(f=self.ceilometer.samples.list,
|
||||||
|
meter_name=meter_name,
|
||||||
|
limit=limit,
|
||||||
|
q=query)
|
||||||
|
|
||||||
|
def statistic_aggregation(self, resource=None, resource_type=None,
|
||||||
|
meter_name=None, period=300, granularity=300,
|
||||||
|
aggregate='mean'):
|
||||||
|
end_time = datetime.datetime.utcnow()
|
||||||
|
start_time = end_time - datetime.timedelta(seconds=int(period))
|
||||||
|
|
||||||
|
meter = self._get_meter(meter_name)
|
||||||
|
|
||||||
|
if aggregate == 'mean':
|
||||||
|
aggregate = 'avg'
|
||||||
|
elif aggregate == 'count':
|
||||||
|
aggregate = 'avg'
|
||||||
|
LOG.warning('aggregate type count not supported by ceilometer,'
|
||||||
|
' replaced with mean.')
|
||||||
|
|
||||||
|
resource_id = resource.uuid
|
||||||
|
if resource_type == 'compute_node':
|
||||||
|
resource_id = "%s_%s" % (resource.hostname, resource.hostname)
|
||||||
|
|
||||||
|
query = self.build_query(
|
||||||
|
resource_id=resource_id, start_time=start_time, end_time=end_time)
|
||||||
|
statistic = self.query_retry(f=self.ceilometer.statistics.list,
|
||||||
|
meter_name=meter,
|
||||||
|
q=query,
|
||||||
|
period=period,
|
||||||
|
aggregates=[
|
||||||
|
{'func': aggregate}])
|
||||||
|
|
||||||
|
item_value = None
|
||||||
|
if statistic:
|
||||||
|
item_value = statistic[-1]._info.get('aggregate').get(aggregate)
|
||||||
|
if meter_name == 'host_airflow':
|
||||||
|
# Airflow from hardware.ipmi.node.airflow is reported as
|
||||||
|
# 1/10 th of actual CFM
|
||||||
|
item_value *= 10
|
||||||
|
return item_value
|
||||||
|
|
||||||
|
def statistic_series(self, resource=None, resource_type=None,
|
||||||
|
meter_name=None, start_time=None, end_time=None,
|
||||||
|
granularity=300):
|
||||||
|
raise NotImplementedError(
|
||||||
|
_('Ceilometer helper does not support statistic series method'))
|
||||||
|
|
||||||
|
def get_host_cpu_usage(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'compute_node', 'host_cpu_usage', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_host_ram_usage(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'compute_node', 'host_ram_usage', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_host_outlet_temp(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'compute_node', 'host_outlet_temp', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_host_inlet_temp(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'compute_node', 'host_inlet_temp', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_host_airflow(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'compute_node', 'host_airflow', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_host_power(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'compute_node', 'host_power', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_instance_cpu_usage(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'instance', 'instance_cpu_usage', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_instance_ram_usage(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'instance', 'instance_ram_usage', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_instance_ram_allocated(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'instance', 'instance_ram_allocated', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_instance_l3_cache_usage(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'instance', 'instance_l3_cache_usage', period,
|
||||||
|
aggregate, granularity)
|
||||||
|
|
||||||
|
def get_instance_root_disk_size(self, resource, period,
|
||||||
|
aggregate, granularity=None):
|
||||||
|
|
||||||
|
return self.statistic_aggregation(
|
||||||
|
resource, 'instance', 'instance_root_disk_size', period,
|
||||||
|
aggregate, granularity)
|
||||||
@@ -16,12 +16,12 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
from datetime import datetime
|
||||||
from datetime import timedelta
|
from datetime import timedelta
|
||||||
|
|
||||||
from gnocchiclient import exceptions as gnc_exc
|
from gnocchiclient import exceptions as gnc_exc
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
from oslo_utils import timeutils
|
|
||||||
|
|
||||||
from watcher.common import clients
|
from watcher.common import clients
|
||||||
from watcher.decision_engine.datasources import base
|
from watcher.decision_engine.datasources import base
|
||||||
@@ -69,7 +69,7 @@ class GnocchiHelper(base.DataSourceBase):
|
|||||||
def statistic_aggregation(self, resource=None, resource_type=None,
|
def statistic_aggregation(self, resource=None, resource_type=None,
|
||||||
meter_name=None, period=300, aggregate='mean',
|
meter_name=None, period=300, aggregate='mean',
|
||||||
granularity=300):
|
granularity=300):
|
||||||
stop_time = timeutils.utcnow()
|
stop_time = datetime.utcnow()
|
||||||
start_time = stop_time - timedelta(seconds=(int(period)))
|
start_time = stop_time - timedelta(seconds=(int(period)))
|
||||||
|
|
||||||
meter = self._get_meter(meter_name)
|
meter = self._get_meter(meter_name)
|
||||||
@@ -90,8 +90,8 @@ class GnocchiHelper(base.DataSourceBase):
|
|||||||
**kwargs)
|
**kwargs)
|
||||||
|
|
||||||
if not resources:
|
if not resources:
|
||||||
LOG.warning("The %s resource %s could not be found",
|
LOG.warning("The {0} resource {1} could not be "
|
||||||
self.NAME, resource_id)
|
"found".format(self.NAME, resource_id))
|
||||||
return
|
return
|
||||||
|
|
||||||
resource_id = resources[0]['id']
|
resource_id = resources[0]['id']
|
||||||
@@ -99,7 +99,7 @@ class GnocchiHelper(base.DataSourceBase):
|
|||||||
if meter_name == "instance_cpu_usage":
|
if meter_name == "instance_cpu_usage":
|
||||||
if resource_type != "instance":
|
if resource_type != "instance":
|
||||||
LOG.warning("Unsupported resource type for metric "
|
LOG.warning("Unsupported resource type for metric "
|
||||||
"'instance_cpu_usage': %s", resource_type)
|
"'instance_cpu_usage': ", resource_type)
|
||||||
return
|
return
|
||||||
|
|
||||||
# The "cpu_util" gauge (percentage) metric has been removed.
|
# The "cpu_util" gauge (percentage) metric has been removed.
|
||||||
@@ -172,8 +172,8 @@ class GnocchiHelper(base.DataSourceBase):
|
|||||||
**kwargs)
|
**kwargs)
|
||||||
|
|
||||||
if not resources:
|
if not resources:
|
||||||
LOG.warning("The %s resource %s could not be found",
|
LOG.warning("The {0} resource {1} could not be "
|
||||||
self.NAME, resource_id)
|
"found".format(self.NAME, resource_id))
|
||||||
return
|
return
|
||||||
|
|
||||||
resource_id = resources[0]['id']
|
resource_id = resources[0]['id']
|
||||||
|
|||||||
@@ -158,9 +158,8 @@ class GrafanaHelper(base.DataSourceBase):
|
|||||||
try:
|
try:
|
||||||
self.METRIC_MAP[meter_name]
|
self.METRIC_MAP[meter_name]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
LOG.error(
|
LOG.error("Metric: {0} does not appear in the current Grafana "
|
||||||
"Metric: %s does not appear in the current Grafana metric map",
|
"metric map".format(meter_name))
|
||||||
meter_name)
|
|
||||||
raise exception.MetricNotAvailable(metric=meter_name)
|
raise exception.MetricNotAvailable(metric=meter_name)
|
||||||
|
|
||||||
db = self.METRIC_MAP[meter_name]['db']
|
db = self.METRIC_MAP[meter_name]['db']
|
||||||
@@ -185,7 +184,7 @@ class GrafanaHelper(base.DataSourceBase):
|
|||||||
|
|
||||||
resp = self.query_retry(self._request, **kwargs)
|
resp = self.query_retry(self._request, **kwargs)
|
||||||
if not resp:
|
if not resp:
|
||||||
LOG.warning("Datasource %s is not available.", self.NAME)
|
LOG.warning("Datasource {0} is not available.".format(self.NAME))
|
||||||
return
|
return
|
||||||
|
|
||||||
result = translator.extract_result(resp.content)
|
result = translator.extract_result(resp.content)
|
||||||
|
|||||||
@@ -57,8 +57,8 @@ class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
|
|||||||
resource = self._extract_attribute(
|
resource = self._extract_attribute(
|
||||||
data['resource'], data['attribute'])
|
data['resource'], data['attribute'])
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
LOG.error("Resource: %s does not contain attribute %s",
|
LOG.error("Resource: {0} does not contain attribute {1}".format(
|
||||||
data['resource'], data['attribute'])
|
data['resource'], data['attribute']))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
# Granularity is optional if it is None the minimal value for InfluxDB
|
# Granularity is optional if it is None the minimal value for InfluxDB
|
||||||
@@ -82,7 +82,7 @@ class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
|
|||||||
index_aggregate = result['columns'].index(self._data['aggregate'])
|
index_aggregate = result['columns'].index(self._data['aggregate'])
|
||||||
return result['values'][0][index_aggregate]
|
return result['values'][0][index_aggregate]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
LOG.error("Could not extract %s for the resource: %s",
|
LOG.error("Could not extract {0} for the resource: {1}".format(
|
||||||
self._data['metric'], self._data['resource'])
|
self._data['metric'], self._data['resource']))
|
||||||
raise exception.NoSuchMetricForHost(
|
raise exception.NoSuchMetricForHost(
|
||||||
metric=self._data['metric'], host=self._data['resource'])
|
metric=self._data['metric'], host=self._data['resource'])
|
||||||
|
|||||||
@@ -21,10 +21,10 @@ from oslo_config import cfg
|
|||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
|
from watcher.decision_engine.datasources import ceilometer as ceil
|
||||||
from watcher.decision_engine.datasources import gnocchi as gnoc
|
from watcher.decision_engine.datasources import gnocchi as gnoc
|
||||||
from watcher.decision_engine.datasources import grafana as graf
|
from watcher.decision_engine.datasources import grafana as graf
|
||||||
from watcher.decision_engine.datasources import monasca as mon
|
from watcher.decision_engine.datasources import monasca as mon
|
||||||
from watcher.decision_engine.datasources import prometheus as prom
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
@@ -33,21 +33,21 @@ class DataSourceManager(object):
|
|||||||
|
|
||||||
metric_map = OrderedDict([
|
metric_map = OrderedDict([
|
||||||
(gnoc.GnocchiHelper.NAME, gnoc.GnocchiHelper.METRIC_MAP),
|
(gnoc.GnocchiHelper.NAME, gnoc.GnocchiHelper.METRIC_MAP),
|
||||||
|
(ceil.CeilometerHelper.NAME, ceil.CeilometerHelper.METRIC_MAP),
|
||||||
(mon.MonascaHelper.NAME, mon.MonascaHelper.METRIC_MAP),
|
(mon.MonascaHelper.NAME, mon.MonascaHelper.METRIC_MAP),
|
||||||
(graf.GrafanaHelper.NAME, graf.GrafanaHelper.METRIC_MAP),
|
(graf.GrafanaHelper.NAME, graf.GrafanaHelper.METRIC_MAP),
|
||||||
(prom.PrometheusHelper.NAME, prom.PrometheusHelper.METRIC_MAP),
|
|
||||||
])
|
])
|
||||||
"""Dictionary with all possible datasources, dictionary order is
|
"""Dictionary with all possible datasources, dictionary order is the default
|
||||||
the default order for attempting to use datasources
|
order for attempting to use datasources
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, config=None, osc=None):
|
def __init__(self, config=None, osc=None):
|
||||||
self.osc = osc
|
self.osc = osc
|
||||||
self.config = config
|
self.config = config
|
||||||
|
self._ceilometer = None
|
||||||
self._monasca = None
|
self._monasca = None
|
||||||
self._gnocchi = None
|
self._gnocchi = None
|
||||||
self._grafana = None
|
self._grafana = None
|
||||||
self._prometheus = None
|
|
||||||
|
|
||||||
# Dynamically update grafana metric map, only available at runtime
|
# Dynamically update grafana metric map, only available at runtime
|
||||||
# The metric map can still be overridden by a yaml config file
|
# The metric map can still be overridden by a yaml config file
|
||||||
@@ -63,9 +63,16 @@ class DataSourceManager(object):
|
|||||||
LOG.warning('Invalid Datasource: %s. Allowed: %s ', *msgargs)
|
LOG.warning('Invalid Datasource: %s. Allowed: %s ', *msgargs)
|
||||||
|
|
||||||
self.datasources = self.config.datasources
|
self.datasources = self.config.datasources
|
||||||
if self.datasources and mon.MonascaHelper.NAME in self.datasources:
|
|
||||||
LOG.warning('The monasca datasource is deprecated and will be '
|
@property
|
||||||
'removed in a future release.')
|
def ceilometer(self):
|
||||||
|
if self._ceilometer is None:
|
||||||
|
self.ceilometer = ceil.CeilometerHelper(osc=self.osc)
|
||||||
|
return self._ceilometer
|
||||||
|
|
||||||
|
@ceilometer.setter
|
||||||
|
def ceilometer(self, ceilometer):
|
||||||
|
self._ceilometer = ceilometer
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def monasca(self):
|
def monasca(self):
|
||||||
@@ -97,16 +104,6 @@ class DataSourceManager(object):
|
|||||||
def grafana(self, grafana):
|
def grafana(self, grafana):
|
||||||
self._grafana = grafana
|
self._grafana = grafana
|
||||||
|
|
||||||
@property
|
|
||||||
def prometheus(self):
|
|
||||||
if self._prometheus is None:
|
|
||||||
self._prometheus = prom.PrometheusHelper()
|
|
||||||
return self._prometheus
|
|
||||||
|
|
||||||
@prometheus.setter
|
|
||||||
def prometheus(self, prometheus):
|
|
||||||
self._prometheus = prometheus
|
|
||||||
|
|
||||||
def get_backend(self, metrics):
|
def get_backend(self, metrics):
|
||||||
"""Determine the datasource to use from the configuration
|
"""Determine the datasource to use from the configuration
|
||||||
|
|
||||||
@@ -130,9 +127,8 @@ class DataSourceManager(object):
|
|||||||
if (metric not in self.metric_map[datasource] or
|
if (metric not in self.metric_map[datasource] or
|
||||||
self.metric_map[datasource].get(metric) is None):
|
self.metric_map[datasource].get(metric) is None):
|
||||||
no_metric = True
|
no_metric = True
|
||||||
LOG.warning(
|
LOG.warning("Datasource: {0} could not be used due to "
|
||||||
"Datasource: %s could not be used due to metric: %s",
|
"metric: {1}".format(datasource, metric))
|
||||||
datasource, metric)
|
|
||||||
break
|
break
|
||||||
if not no_metric:
|
if not no_metric:
|
||||||
# Try to use a specific datasource but attempt additional
|
# Try to use a specific datasource but attempt additional
|
||||||
@@ -142,7 +138,7 @@ class DataSourceManager(object):
|
|||||||
ds.METRIC_MAP.update(self.metric_map[ds.NAME])
|
ds.METRIC_MAP.update(self.metric_map[ds.NAME])
|
||||||
return ds
|
return ds
|
||||||
except Exception:
|
except Exception:
|
||||||
pass # nosec: B110
|
pass
|
||||||
raise exception.MetricNotAvailable(metric=metric)
|
raise exception.MetricNotAvailable(metric=metric)
|
||||||
|
|
||||||
def load_metric_map(self, file_path):
|
def load_metric_map(self, file_path):
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user