Compare commits


3 Commits

Author SHA1 Message Date
OpenStack Proposal Bot
870e6d75e0 Imported Translations from Zanata
For more information about this automatic import see:
https://docs.openstack.org/i18n/latest/reviewing-translation-import.html

Change-Id: Ie708859051162cc7a68cfdf289398f6df1abe6c8
2020-04-26 09:05:18 +00:00
OpenStack Release Bot
3069f83731 Update TOX_CONSTRAINTS_FILE for stable/ussuri
Update the URL to the upper-constraints file to point to the redirect
rule on releases.openstack.org so that anyone working on this branch
will switch to the correct upper-constraints list automatically when
the requirements repository branches.

Until the requirements repository has a stable/ussuri branch, tests will
continue to use the upper-constraints list on master.

Change-Id: I80ec47827b91977dde874246fb94dbbeeeb7ef14
2020-04-23 09:48:50 +00:00
OpenStack Release Bot
d7d534f5d1 Update .gitreview for stable/ussuri
Change-Id: I72fffb815f040dd4f1b13a73455276f70bd10aaf
2020-04-23 09:48:48 +00:00
264 changed files with 1993 additions and 4119 deletions
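As a rough illustration of what the TOX_CONSTRAINTS_FILE change in the second commit does: tox (or anything else installing the project) is pointed at a redirect on releases.openstack.org, so the branch picks up the correct upper-constraints list automatically. A minimal sketch, using a plain pip command rather than the real tox invocation shown in the tox.ini hunk further down:

    # Illustrative only: the stable/ussuri constraints redirect that
    # TOX_CONSTRAINTS_FILE defaults to. Per the commit message, this serves
    # the master upper-constraints list until openstack/requirements gains
    # a stable/ussuri branch.
    pip install \
        -c https://releases.openstack.org/constraints/upper/ussuri \
        -r requirements.txt -r test-requirements.txt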


@@ -2,4 +2,4 @@
host=review.opendev.org host=review.opendev.org
port=29418 port=29418
project=openstack/watcher.git project=openstack/watcher.git
defaultbranch=stable/2024.1 defaultbranch=stable/ussuri


@@ -1,9 +1,9 @@
- project: - project:
queue: watcher
templates: templates:
- check-requirements - check-requirements
- openstack-cover-jobs - openstack-cover-jobs
- openstack-python3-jobs - openstack-lower-constraints-jobs
- openstack-python3-ussuri-jobs
- publish-openstack-docs-pti - publish-openstack-docs-pti
- release-notes-jobs-python3 - release-notes-jobs-python3
check: check:
@@ -13,8 +13,10 @@
- watcher-tempest-strategies - watcher-tempest-strategies
- watcher-tempest-actuator - watcher-tempest-actuator
- watcherclient-tempest-functional - watcherclient-tempest-functional
- watcher-tls-test
- watcher-tempest-functional-ipv6-only - watcher-tempest-functional-ipv6-only
gate: gate:
queue: watcher
jobs: jobs:
- watcher-tempest-functional - watcher-tempest-functional
- watcher-tempest-functional-ipv6-only - watcher-tempest-functional-ipv6-only
@@ -85,12 +87,22 @@
vars: vars:
tempest_concurrency: 1 tempest_concurrency: 1
tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies
tempest_exclude_regex: .*\[.*\breal_load\b.*\].*
- job:
name: watcher-tls-test
parent: watcher-tempest-multinode
group-vars:
subnode:
devstack_services:
tls-proxy: true
vars:
devstack_services:
tls-proxy: true
- job: - job:
name: watcher-tempest-multinode name: watcher-tempest-multinode
parent: watcher-tempest-functional parent: watcher-tempest-functional
nodeset: openstack-two-node-jammy nodeset: openstack-two-node-bionic
roles: roles:
- zuul: openstack/tempest - zuul: openstack/tempest
group-vars: group-vars:
@@ -108,7 +120,8 @@
watcher-api: false watcher-api: false
watcher-decision-engine: true watcher-decision-engine: true
watcher-applier: false watcher-applier: false
c-bak: false # We need to add TLS support for watcher plugin
tls-proxy: false
ceilometer: false ceilometer: false
ceilometer-acompute: false ceilometer-acompute: false
ceilometer-acentral: false ceilometer-acentral: false
@@ -147,6 +160,7 @@
timeout: 7200 timeout: 7200
required-projects: &base_required_projects required-projects: &base_required_projects
- openstack/ceilometer - openstack/ceilometer
- openstack/devstack-gate
- openstack/python-openstackclient - openstack/python-openstackclient
- openstack/python-watcherclient - openstack/python-watcherclient
- openstack/watcher - openstack/watcher
@@ -156,6 +170,7 @@
devstack_plugins: devstack_plugins:
watcher: https://opendev.org/openstack/watcher watcher: https://opendev.org/openstack/watcher
devstack_services: devstack_services:
tls-proxy: false
watcher-api: true watcher-api: true
watcher-decision-engine: true watcher-decision-engine: true
watcher-applier: true watcher-applier: true
@@ -164,10 +179,14 @@
s-container: false s-container: false
s-object: false s-object: false
s-proxy: false s-proxy: false
tempest_plugins: devstack_localrc:
- watcher-tempest-plugin TEMPEST_PLUGINS: /opt/stack/watcher-tempest-plugin
USE_PYTHON3: true
tempest_test_regex: watcher_tempest_plugin.tests.api tempest_test_regex: watcher_tempest_plugin.tests.api
tox_envlist: all tox_envlist: all
tox_environment:
# Do we really need to set this? It's cargo culted
PYTHONUNBUFFERED: 'true'
zuul_copy_output: zuul_copy_output:
/etc/hosts: logs /etc/hosts: logs
@@ -181,12 +200,10 @@
- job: - job:
name: watcher-grenade name: watcher-grenade
parent: grenade parent: legacy-dsvm-base
required-projects: timeout: 10800
- openstack/watcher run: playbooks/legacy/grenade-devstack-watcher/run.yaml
- openstack/python-watcherclient post-run: playbooks/legacy/grenade-devstack-watcher/post.yaml
- openstack/watcher-tempest-plugin
vars: *base_vars
irrelevant-files: irrelevant-files:
- ^(test-|)requirements.txt$ - ^(test-|)requirements.txt$
- ^.*\.rst$ - ^.*\.rst$
@@ -198,6 +215,12 @@
- ^setup.cfg$ - ^setup.cfg$
- ^tools/.*$ - ^tools/.*$
- ^tox.ini$ - ^tox.ini$
required-projects:
- openstack/grenade
- openstack/devstack-gate
- openstack/watcher
- openstack/python-watcherclient
- openstack/watcher-tempest-plugin
- job: - job:
# This job is used in python-watcherclient repo # This job is used in python-watcherclient repo


@@ -22,6 +22,9 @@
# All configuration values have a default; values that are commented out # All configuration values have a default; values that are commented out
# serve to show the default. # serve to show the default.
from watcher import version as watcher_version
extensions = [ extensions = [
'openstackdocstheme', 'openstackdocstheme',
'os_api_ref', 'os_api_ref',
@@ -43,13 +46,21 @@ project = u'Infrastructure Optimization API Reference'
copyright = u'2010-present, OpenStack Foundation' copyright = u'2010-present, OpenStack Foundation'
# openstackdocstheme options # openstackdocstheme options
openstackdocs_repo_name = 'openstack/watcher' repository_name = 'openstack/watcher'
openstackdocs_auto_name = False bug_project = 'watcher'
openstackdocs_bug_project = 'watcher' bug_tag = ''
openstackdocs_bug_tag = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = watcher_version.version_info.release_string()
# The short X.Y version.
version = watcher_version.version_string
# The name of the Pygments (syntax highlighting) style to use. # The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native' pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------- # -- Options for HTML output --------------------------------------------------
@@ -64,6 +75,10 @@ html_theme_options = {
"sidebar_mode": "toc", "sidebar_mode": "toc",
} }
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# -- Options for LaTeX output ------------------------------------------------- # -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples # Grouping the document tree into LaTeX files. List of tuples

babel.cfg (new file)

@@ -0,0 +1,2 @@
[python: **.py]


@@ -298,7 +298,7 @@ function start_watcher_api {
service_protocol="http" service_protocol="http"
fi fi
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
run_process "watcher-api" "$(which uwsgi) --procname-prefix watcher-api --ini $WATCHER_UWSGI_CONF" run_process "watcher-api" "$WATCHER_BIN_DIR/uwsgi --procname-prefix watcher-api --ini $WATCHER_UWSGI_CONF"
watcher_url=$service_protocol://$SERVICE_HOST/infra-optim watcher_url=$service_protocol://$SERVICE_HOST/infra-optim
else else
watcher_url=$service_protocol://$SERVICE_HOST:$service_port watcher_url=$service_protocol://$SERVICE_HOST:$service_port
@@ -338,19 +338,6 @@ function stop_watcher {
done done
} }
# configure_tempest_for_watcher() - Configure Tempest for watcher
function configure_tempest_for_watcher {
# Set default microversion for watcher-tempest-plugin
# Please make sure to update this when the microversion is updated, otherwise
# new tests may be skipped.
TEMPEST_WATCHER_MIN_MICROVERSION=${TEMPEST_WATCHER_MIN_MICROVERSION:-"1.0"}
TEMPEST_WATCHER_MAX_MICROVERSION=${TEMPEST_WATCHER_MAX_MICROVERSION:-"1.4"}
# Set microversion options in tempest.conf
iniset $TEMPEST_CONFIG optimize min_microversion $TEMPEST_WATCHER_MIN_MICROVERSION
iniset $TEMPEST_CONFIG optimize max_microversion $TEMPEST_WATCHER_MAX_MICROVERSION
}
# Restore xtrace # Restore xtrace
$_XTRACE_WATCHER $_XTRACE_WATCHER


@@ -38,9 +38,6 @@ if is_service_enabled watcher-api watcher-decision-engine watcher-applier; then
# Start the watcher components # Start the watcher components
echo_summary "Starting watcher" echo_summary "Starting watcher"
start_watcher start_watcher
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
echo_summary "Configuring tempest for watcher"
configure_tempest_for_watcher
fi fi
if [[ "$1" == "unstack" ]]; then if [[ "$1" == "unstack" ]]; then


@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import unicode_literals
import importlib import importlib
import inspect import inspect


@@ -1,10 +1,10 @@
# The order of packages is significant, because pip processes them in the order # The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration # of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
openstackdocstheme>=2.2.1 # Apache-2.0 openstackdocstheme>=1.20.0 # Apache-2.0
sphinx>=2.0.0,!=2.1.0 # BSD sphinx>=1.8.0,!=2.1.0,!=3.0.0 # BSD
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0 sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
reno>=3.1.0 # Apache-2.0 reno>=2.7.0 # Apache-2.0
sphinxcontrib-apidoc>=0.2.0 # BSD sphinxcontrib-apidoc>=0.2.0 # BSD
os-api-ref>=1.4.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0


@@ -17,14 +17,6 @@
Policies Policies
======== ========
.. warning::
JSON formatted policy file is deprecated since Watcher 6.0.0 (Wallaby).
This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing
JSON-formatted policy file to YAML in a backward-compatible way.
.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html
Watcher's public API calls may be restricted to certain sets of users using a Watcher's public API calls may be restricted to certain sets of users using a
policy configuration file. This document explains exactly how policies are policy configuration file. This document explains exactly how policies are
configured and what they apply to. configured and what they apply to.


@@ -14,6 +14,7 @@
import os import os
import sys import sys
from watcher import version as watcher_version
from watcher import objects from watcher import objects
objects.register_all() objects.register_all()
@@ -56,8 +57,18 @@ source_suffix = '.rst'
master_doc = 'index' master_doc = 'index'
# General information about the project. # General information about the project.
project = 'Watcher' project = u'Watcher'
copyright = 'OpenStack Foundation' copyright = u'OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
release = watcher_version.version_info.release_string()
# The short X.Y version.
version = watcher_version.version_string
# A list of ignored prefixes for module index sorting. # A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['watcher.'] modindex_common_prefix = ['watcher.']
@@ -83,7 +94,7 @@ add_module_names = True
suppress_warnings = ['app.add_directive'] suppress_warnings = ['app.add_directive']
# The name of the Pygments (syntax highlighting) style to use. # The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native' pygments_style = 'sphinx'
# -- Options for man page output -------------------------------------------- # -- Options for man page output --------------------------------------------
@@ -91,14 +102,14 @@ pygments_style = 'native'
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [ man_pages = [
('man/watcher-api', 'watcher-api', 'Watcher API Server', ('man/watcher-api', 'watcher-api', u'Watcher API Server',
['OpenStack'], 1), [u'OpenStack'], 1),
('man/watcher-applier', 'watcher-applier', 'Watcher Applier', ('man/watcher-applier', 'watcher-applier', u'Watcher Applier',
['OpenStack'], 1), [u'OpenStack'], 1),
('man/watcher-db-manage', 'watcher-db-manage', ('man/watcher-db-manage', 'watcher-db-manage',
'Watcher Db Management Utility', ['OpenStack'], 1), u'Watcher Db Management Utility', [u'OpenStack'], 1),
('man/watcher-decision-engine', 'watcher-decision-engine', ('man/watcher-decision-engine', 'watcher-decision-engine',
'Watcher Decision Engine', ['OpenStack'], 1), u'Watcher Decision Engine', [u'OpenStack'], 1),
] ]
# -- Options for HTML output -------------------------------------------------- # -- Options for HTML output --------------------------------------------------
@@ -114,13 +125,12 @@ html_theme = 'openstackdocs'
# Output file base name for HTML help builder. # Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project htmlhelp_basename = '%sdoc' % project
html_last_updated_fmt = '%Y-%m-%d %H:%M'
#openstackdocstheme options #openstackdocstheme options
openstackdocs_repo_name = 'openstack/watcher' repository_name = 'openstack/watcher'
openstackdocs_pdf_link = True bug_project = 'watcher'
openstackdocs_auto_name = False bug_tag = ''
openstackdocs_bug_project = 'watcher'
openstackdocs_bug_tag = ''
# Grouping the document tree into LaTeX files. List of tuples # Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass # (source start file, target name, title, author, documentclass
@@ -128,8 +138,8 @@ openstackdocs_bug_tag = ''
latex_documents = [ latex_documents = [
('index', ('index',
'doc-watcher.tex', 'doc-watcher.tex',
'Watcher Documentation', u'%s Documentation' % project,
'OpenStack Foundation', 'manual'), u'OpenStack Foundation', 'manual'),
] ]
# If false, no module index is generated. # If false, no module index is generated.


@@ -372,7 +372,7 @@ You can configure and install Ceilometer by following the documentation below :
#. https://docs.openstack.org/ceilometer/latest #. https://docs.openstack.org/ceilometer/latest
The built-in strategy 'basic_consolidation' provided by watcher requires The built-in strategy 'basic_consolidation' provided by watcher requires
"**compute.node.cpu.percent**" and "**cpu**" measurements to be collected "**compute.node.cpu.percent**" and "**cpu_util**" measurements to be collected
by Ceilometer. by Ceilometer.
The measurements available depend on the hypervisors that OpenStack manages on The measurements available depend on the hypervisors that OpenStack manages on
the specific implementation. the specific implementation.


@@ -47,8 +47,6 @@ unavailable as well as `instance_l3_cpu_cache`::
[[local|localrc]] [[local|localrc]]
enable_plugin watcher https://opendev.org/openstack/watcher enable_plugin watcher https://opendev.org/openstack/watcher
enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard
enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git
CEILOMETER_BACKEND=gnocchi CEILOMETER_BACKEND=gnocchi


@@ -56,6 +56,9 @@ Here is an example showing how you can write a plugin called ``NewStrategy``:
# filepath: thirdparty/new.py # filepath: thirdparty/new.py
# import path: thirdparty.new # import path: thirdparty.new
import abc import abc
import six
from watcher._i18n import _ from watcher._i18n import _
from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import base
@@ -300,6 +303,6 @@ Using that you can now query the values for that specific metric:
.. code-block:: py .. code-block:: py
avg_meter = self.datasource_backend.statistic_aggregation( avg_meter = self.datasource_backend.statistic_aggregation(
instance.uuid, 'instance_cpu_usage', self.periods['instance'], instance.uuid, 'cpu_util', self.periods['instance'],
self.granularity, self.granularity,
aggregation=self.aggregation_method['instance']) aggregation=self.aggregation_method['instance'])


@@ -26,7 +26,8 @@ metric service name plugins comment
``compute_monitors`` option ``compute_monitors`` option
to ``cpu.virt_driver`` in to ``cpu.virt_driver`` in
the nova.conf. the nova.conf.
``cpu`` ceilometer_ none ``cpu_util`` ceilometer_ none cpu_util has been removed
since Stein.
============================ ============ ======= =========================== ============================ ============ ======= ===========================
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute


@@ -89,9 +89,9 @@ step 2: Create audit to do optimization
.. code-block:: shell .. code-block:: shell
$ openstack optimize audittemplate create \ $ openstack optimize audittemplate create \
saving_energy_template1 saving_energy --strategy saving_energy at1 saving_energy --strategy saving_energy
$ openstack optimize audit create -a saving_energy_audit1 \ $ openstack optimize audit create -a at1 \
-p free_used_percent=20.0 -p free_used_percent=20.0
External Links External Links


@@ -22,19 +22,14 @@ The *vm_workload_consolidation* strategy requires the following metrics:
============================ ============ ======= ========================= ============================ ============ ======= =========================
metric service name plugins comment metric service name plugins comment
============================ ============ ======= ========================= ============================ ============ ======= =========================
``cpu`` ceilometer_ none ``cpu_util`` ceilometer_ none cpu_util has been removed
since Stein.
``memory.resident`` ceilometer_ none ``memory.resident`` ceilometer_ none
``memory`` ceilometer_ none ``memory`` ceilometer_ none
``disk.root.size`` ceilometer_ none ``disk.root.size`` ceilometer_ none
``compute.node.cpu.percent`` ceilometer_ none (optional) need to set the
``compute_monitors`` option
to ``cpu.virt_driver`` in the
nova.conf.
``hardware.memory.used`` ceilometer_ SNMP_ (optional)
============================ ============ ======= ========================= ============================ ============ ======= =========================
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
.. _SNMP: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#snmp-based-meters
Cluster data model Cluster data model
****************** ******************


@@ -27,8 +27,9 @@ metric service name plugins comment
to ``cpu.virt_driver`` in the to ``cpu.virt_driver`` in the
nova.conf. nova.conf.
``hardware.memory.used`` ceilometer_ SNMP_ ``hardware.memory.used`` ceilometer_ SNMP_
``cpu`` ceilometer_ none ``cpu_util`` ceilometer_ none cpu_util has been removed
``instance_ram_usage`` ceilometer_ none since Stein.
``memory.resident`` ceilometer_ none
============================ ============ ======= ============================= ============================ ============ ======= =============================
.. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute
@@ -106,10 +107,10 @@ parameter type default Value description
period of all received ones. period of all received ones.
==================== ====== ===================== ============================= ==================== ====== ===================== =============================
.. |metrics| replace:: ["instance_cpu_usage", "instance_ram_usage"] .. |metrics| replace:: ["cpu_util", "memory.resident"]
.. |thresholds| replace:: {"instance_cpu_usage": 0.2, "instance_ram_usage": 0.2} .. |thresholds| replace:: {"cpu_util": 0.2, "memory.resident": 0.2}
.. |weights| replace:: {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0} .. |weights| replace:: {"cpu_util_weight": 1.0, "memory.resident_weight": 1.0}
.. |instance_metrics| replace:: {"instance_cpu_usage": "compute.node.cpu.percent", "instance_ram_usage": "hardware.memory.used"} .. |instance_metrics| replace:: {"cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used"}
.. |periods| replace:: {"instance": 720, "node": 600} .. |periods| replace:: {"instance": 720, "node": 600}
Efficacy Indicator Efficacy Indicator
@@ -135,8 +136,8 @@ How to use it ?
at1 workload_balancing --strategy workload_stabilization at1 workload_balancing --strategy workload_stabilization
$ openstack optimize audit create -a at1 \ $ openstack optimize audit create -a at1 \
-p thresholds='{"instance_ram_usage": 0.05}' \ -p thresholds='{"memory.resident": 0.05}' \
-p metrics='["instance_ram_usage"]' -p metrics='["memory.resident"]'
External Links External Links
-------------- --------------


@@ -24,7 +24,8 @@ The *workload_balance* strategy requires the following metrics:
======================= ============ ======= ========================= ======================= ============ ======= =========================
metric service name plugins comment metric service name plugins comment
======================= ============ ======= ========================= ======================= ============ ======= =========================
``cpu`` ceilometer_ none ``cpu_util`` ceilometer_ none cpu_util has been removed
since Stein.
``memory.resident`` ceilometer_ none ``memory.resident`` ceilometer_ none
======================= ============ ======= ========================= ======================= ============ ======= =========================
@@ -64,16 +65,15 @@ Configuration
Strategy parameters are: Strategy parameters are:
============== ====== ==================== ==================================== ============== ====== ============= ====================================
parameter type default Value description parameter type default Value description
============== ====== ==================== ==================================== ============== ====== ============= ====================================
``metrics`` String 'instance_cpu_usage' Workload balance base on cpu or ram ``metrics`` String 'cpu_util' Workload balance base on cpu or ram
utilization. Choices: utilization. choice: ['cpu_util',
['instance_cpu_usage', 'memory.resident']
'instance_ram_usage'] ``threshold`` Number 25.0 Workload threshold for migration
``threshold`` Number 25.0 Workload threshold for migration ``period`` Number 300 Aggregate time period of ceilometer
``period`` Number 300 Aggregate time period of ceilometer ============== ====== ============= ====================================
============== ====== ==================== ====================================
Efficacy Indicator Efficacy Indicator
------------------ ------------------
@@ -95,7 +95,7 @@ How to use it ?
at1 workload_balancing --strategy workload_balance at1 workload_balancing --strategy workload_balance
$ openstack optimize audit create -a at1 -p threshold=26.0 \ $ openstack optimize audit create -a at1 -p threshold=26.0 \
-p period=310 -p metrics=instance_cpu_usage -p period=310 -p metrics=cpu_util
External Links External Links
-------------- --------------

lower-constraints.txt (new file)

@@ -0,0 +1,161 @@
alabaster==0.7.10
alembic==0.9.8
amqp==2.2.2
appdirs==1.4.3
APScheduler==3.5.1
asn1crypto==0.24.0
automaton==1.14.0
Babel==2.5.3
beautifulsoup4==4.6.0
cachetools==2.0.1
certifi==2018.1.18
cffi==1.11.5
chardet==3.0.4
cliff==2.11.0
cmd2==0.8.1
contextlib2==0.5.5
coverage==4.5.1
croniter==0.3.20
cryptography==2.1.4
debtcollector==1.19.0
decorator==4.2.1
deprecation==2.0
doc8==0.8.0
docutils==0.14
dogpile.cache==0.6.5
dulwich==0.19.0
enum34==1.1.6
enum-compat==0.0.2
eventlet==0.20.0
extras==1.0.0
fasteners==0.14.1
fixtures==3.0.0
freezegun==0.3.10
future==0.16.0
futurist==1.8.0
gitdb2==2.0.3
GitPython==2.1.8
gnocchiclient==7.0.1
greenlet==0.4.13
idna==2.6
imagesize==1.0.0
iso8601==0.1.12
Jinja2==2.10
jmespath==0.9.3
jsonpatch==1.21
jsonpointer==2.0
jsonschema==2.6.0
keystoneauth1==3.4.0
keystonemiddleware==4.21.0
kombu==4.1.0
linecache2==1.0.0
logutils==0.3.5
lxml==4.1.1
Mako==1.0.7
MarkupSafe==1.0
mccabe==0.2.1
microversion_parse==0.2.1
mock==2.0.0
monotonic==1.4
mox3==0.25.0
msgpack==0.5.6
munch==2.2.0
netaddr==0.7.19
netifaces==0.10.6
networkx==2.2
openstackdocstheme==1.20.0
openstacksdk==0.12.0
os-api-ref===1.4.0
os-client-config==1.29.0
os-service-types==1.2.0
os-testr==1.0.0
osc-lib==1.10.0
os-resource-classes==0.4.0
oslo.cache==1.29.0
oslo.concurrency==3.26.0
oslo.config==5.2.0
oslo.context==2.21.0
oslo.db==4.35.0
oslo.i18n==3.20.0
oslo.log==3.37.0
oslo.messaging==8.1.2
oslo.middleware==3.35.0
oslo.policy==1.34.0
oslo.reports==1.27.0
oslo.serialization==2.25.0
oslo.service==1.30.0
oslo.upgradecheck==0.1.0
oslo.utils==3.36.0
oslo.versionedobjects==1.32.0
oslotest==3.3.0
packaging==17.1
Paste==2.0.3
PasteDeploy==1.5.2
pbr==3.1.1
pecan==1.3.2
pika==0.10.0
pika-pool==0.1.3
prettytable==0.7.2
psutil==5.4.3
pycadf==2.7.0
pycparser==2.18
Pygments==2.2.0
pyinotify==0.9.6
pyOpenSSL==17.5.0
pyparsing==2.2.0
pyperclip==1.6.0
python-ceilometerclient==2.9.0
python-cinderclient==3.5.0
python-dateutil==2.7.0
python-editor==1.0.3
python-glanceclient==2.9.1
python-ironicclient==2.5.0
python-keystoneclient==3.15.0
python-mimeparse==1.6.0
python-monascaclient==1.12.0
python-neutronclient==6.7.0
python-novaclient==14.1.0
python-openstackclient==3.14.0
python-subunit==1.2.0
pytz==2018.3
PyYAML==3.12
reno==2.7.0
repoze.lru==0.7
requests==2.18.4
requestsexceptions==1.4.0
restructuredtext-lint==1.1.3
rfc3986==1.1.0
Routes==2.4.1
simplegeneric==0.8.1
simplejson==3.13.2
six==1.11.0
smmap2==2.0.3
snowballstemmer==1.2.1
Sphinx==1.6.5
sphinxcontrib-httpdomain==1.6.1
sphinxcontrib-pecanwsme==0.8.0
sphinxcontrib-websupport==1.0.1
SQLAlchemy==1.2.5
sqlalchemy-migrate==0.11.0
sqlparse==0.2.4
statsd==3.2.2
stestr==2.0.0
stevedore==1.28.0
taskflow==3.7.1
Tempita==0.5.2
tenacity==4.9.0
testresources==2.0.1
testscenarios==0.5.0
testtools==2.3.0
traceback2==1.4.0
tzlocal==1.5.1
ujson==1.35
unittest2==1.1.0
urllib3==1.22
vine==1.1.4
waitress==1.1.0
warlock==1.3.0
WebOb==1.8.5
WebTest==2.0.29
wrapt==1.10.11
WSME==0.9.2


@@ -0,0 +1,15 @@
- hosts: primary
tasks:
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/logs/**
- --include=*/
- --exclude=*
- --prune-empty-dirs


@@ -0,0 +1,60 @@
- hosts: all
name: legacy-grenade-dsvm-watcher
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
export PYTHONUNBUFFERED=true
export PROJECTS="openstack/grenade $PROJECTS"
export PROJECTS="openstack/watcher $PROJECTS"
export PROJECTS="openstack/watcher-tempest-plugin $PROJECTS"
export PROJECTS="openstack/python-watcherclient $PROJECTS"
export DEVSTACK_PROJECT_FROM_GIT="python-watcherclient $DEVSTACK_PROJECT_FROM_GIT"
export GRENADE_PLUGINRC="enable_grenade_plugin watcher https://opendev.org/openstack/watcher"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"export TEMPEST_PLUGINS='/opt/stack/new/watcher-tempest-plugin'"
export DEVSTACK_GATE_TEMPEST_NOTESTS=1
export DEVSTACK_GATE_GRENADE=pullup
export DEVSTACK_GATE_USE_PYTHON3=True
export BRANCH_OVERRIDE=default
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
# Add configuration values for enabling security features in local.conf
function pre_test_hook {
if [ -f /opt/stack/old/watcher-tempest-plugin/tools/pre_test_hook.sh ] ; then
. /opt/stack/old/watcher-tempest-plugin/tools/pre_test_hook.sh
fi
}
export -f pre_test_hook
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'


@@ -1,47 +0,0 @@
---
security:
- |
Watcher no longer forges requests on behalf of a tenant when
swapping volumes. Prior to this release Watcher had two implementations
for moving a volume: it could use Cinder's volume migrate API or its own
internal implementation that directly called Nova's volume attachment
update API. The former is safe and the recommended way to move volumes
between Cinder storage backends; the internal implementation was insecure,
fragile due to a lack of error handling, and capable of deleting user data.
Insecure: the internal volume migration operation created a new Keystone
user with a weak name and password and added it to the tenant's project
with the admin role. It then used that user to forge requests on behalf
of the tenant with admin rights to swap the volume. If the applier was
restarted during the execution of this operation, that user was never
cleaned up.
Fragile: the error handling was minimal. The swap volume API is
asynchronous, so Watcher has to poll for completion, and there was no
support for resuming if the operation was interrupted or the timeout
was exceeded.
Data loss: while the internal polling logic returned success or failure,
Watcher did not check the result; once the function returned it
unconditionally deleted the source volume. For larger volumes this
could result in irretrievable data loss.
Finally, if a volume was swapped using the internal workflow, it left
the Nova instance in an out-of-sync state. If the VM was live migrated
after the swap completed successfully but before a hard reboot,
the migration would either fail, or succeed and break tenant isolation.
See https://bugs.launchpad.net/nova/+bug/2112187 for details.
fixes:
- |
All code related to creating a Keystone user and granting roles has been
removed. The internal swap volume implementation has been removed and
replaced by Cinder's volume migrate API. Note that as part of this change
Watcher will no longer attempt volume migrations or retypes if the
instance is in the `Verify Resize` task state. This resolves several
issues related to volume migration in the zone migration and
storage capacity balance strategies. While efforts have been made
to maintain backward compatibility, these changes are required to
address a security weakness in Watcher's prior approach.
See https://bugs.launchpad.net/nova/+bug/2112187 for more context.


@@ -1,20 +0,0 @@
---
upgrade:
- |
The default value of ``[oslo_policy] policy_file`` config option has
been changed from ``policy.json`` to ``policy.yaml``.
Operators who are utilizing customized or previously generated
static policy JSON files (which are not needed by default) should
generate new policy files or convert them to YAML format. Use the
`oslopolicy-convert-json-to-yaml
<https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html>`_
tool to convert a JSON-formatted policy file to YAML in a
backward-compatible way.
deprecations:
- |
Use of JSON policy files was deprecated by the ``oslo.policy`` library
during the Victoria development cycle. As a result, this deprecation is
being noted in the Wallaby cycle with an anticipated future removal of support
by ``oslo.policy``. As such operators will need to convert to YAML policy
files. Please see the upgrade notes for details on migration of any
custom policy files.
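For reference, the conversion this removed note points at is a single run of the oslo.policy helper; a minimal sketch, with illustrative paths and an assumed watcher namespace:

    # Convert a legacy JSON policy file to YAML in a backward-compatible way.
    # Paths and the --namespace value are examples, not taken from this repo.
    oslopolicy-convert-json-to-yaml --namespace watcher \
        --policy-file /etc/watcher/policy.json \
        --output-file /etc/watcher/policy.yaml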


@@ -1,6 +0,0 @@
===========================
2023.1 Series Release Notes
===========================
.. release-notes::
:branch: stable/2023.1


@@ -1,6 +0,0 @@
===========================
2023.2 Series Release Notes
===========================
.. release-notes::
:branch: stable/2023.2


@@ -53,7 +53,8 @@ source_suffix = '.rst'
master_doc = 'index' master_doc = 'index'
# General information about the project. # General information about the project.
copyright = '2016, Watcher developers' project = u'watcher'
copyright = u'2016, Watcher developers'
# Release notes are version independent # Release notes are version independent
# The short X.Y version. # The short X.Y version.
@@ -90,15 +91,11 @@ exclude_patterns = ['_build']
#show_authors = False #show_authors = False
# The name of the Pygments (syntax highlighting) style to use. # The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native' pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting. # A list of ignored prefixes for module index sorting.
#modindex_common_prefix = [] #modindex_common_prefix = []
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/watcher'
openstackdocs_bug_project = 'watcher'
openstackdocs_bug_tag = ''
# -- Options for HTML output -------------------------------------------------- # -- Options for HTML output --------------------------------------------------
@@ -196,8 +193,8 @@ latex_elements = {
# Grouping the document tree into LaTeX files. List of tuples # Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]) # (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [ latex_documents = [
('index', 'watcher.tex', 'Watcher Documentation', ('index', 'watcher.tex', u'Watcher Documentation',
'Watcher developers', 'manual'), u'Watcher developers', 'manual'),
] ]
# The name of an image file (relative to this directory) to place at the top of # The name of an image file (relative to this directory) to place at the top of
@@ -226,8 +223,8 @@ latex_documents = [
# One entry per manual page. List of tuples # One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section). # (source start file, name, description, authors, manual section).
man_pages = [ man_pages = [
('index', 'watcher', 'Watcher Documentation', ('index', 'watcher', u'Watcher Documentation',
['Watcher developers'], 1) [u'Watcher developers'], 1)
] ]
# If true, show URL addresses after external links. # If true, show URL addresses after external links.
@@ -240,8 +237,8 @@ man_pages = [
# (source start file, target name, title, author, # (source start file, target name, title, author,
# dir menu entry, description, category) # dir menu entry, description, category)
texinfo_documents = [ texinfo_documents = [
('index', 'watcher', 'Watcher Documentation', ('index', 'watcher', u'Watcher Documentation',
'Watcher developers', 'watcher', 'One line description of project.', u'Watcher developers', 'watcher', 'One line description of project.',
'Miscellaneous'), 'Miscellaneous'),
] ]


@@ -21,14 +21,6 @@ Contents:
:maxdepth: 1 :maxdepth: 1
unreleased unreleased
2023.2
2023.1
zed
yoga
xena
wallaby
victoria
ussuri
train train
stein stein
rocky rocky

File diff suppressed because it is too large.


@@ -1,33 +0,0 @@
# Gérald LONLAS <g.lonlas@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: python-watcher\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2019-03-22 02:21+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-10-22 06:44+0000\n"
"Last-Translator: Gérald LONLAS <g.lonlas@gmail.com>\n"
"Language-Team: French\n"
"Language: fr\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
msgid "0.29.0"
msgstr "0.29.0"
msgid "Contents:"
msgstr "Contenu :"
msgid "Current Series Release Notes"
msgstr "Note de la release actuelle"
msgid "New Features"
msgstr "Nouvelles fonctionnalités"
msgid "Newton Series Release Notes"
msgstr "Note de release pour Newton"
msgid "Welcome to watcher's Release Notes documentation!"
msgstr "Bienvenue dans la documentation de la note de Release de Watcher"


@@ -1,6 +0,0 @@
===========================
Ussuri Series Release Notes
===========================
.. release-notes::
:branch: stable/ussuri


@@ -1,6 +0,0 @@
=============================
Victoria Series Release Notes
=============================
.. release-notes::
:branch: stable/victoria


@@ -1,6 +0,0 @@
============================
Wallaby Series Release Notes
============================
.. release-notes::
:branch: stable/wallaby


@@ -1,6 +0,0 @@
=========================
Xena Series Release Notes
=========================
.. release-notes::
:branch: stable/xena


@@ -1,6 +0,0 @@
=========================
Yoga Series Release Notes
=========================
.. release-notes::
:branch: stable/yoga


@@ -1,6 +0,0 @@
========================
Zed Series Release Notes
========================
.. release-notes::
:branch: stable/zed


@@ -1,35 +1,36 @@
# The order of packages is significant, because pip processes them in the order # The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration # of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
apscheduler>=3.5.1 # MIT License apscheduler>=3.5.1 # MIT License
jsonpatch>=1.21 # BSD jsonpatch>=1.21 # BSD
keystoneauth1>=3.4.0 # Apache-2.0 keystoneauth1>=3.4.0 # Apache-2.0
jsonschema>=3.2.0 # MIT jsonschema>=2.6.0 # MIT
keystonemiddleware>=4.21.0 # Apache-2.0 keystonemiddleware>=4.21.0 # Apache-2.0
lxml>=4.5.1 # BSD lxml>=4.1.1 # BSD
croniter>=0.3.20 # MIT License croniter>=0.3.20 # MIT License
os-resource-classes>=0.4.0 os-resource-classes>=0.4.0
oslo.concurrency>=3.26.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0
oslo.cache>=1.29.0 # Apache-2.0 oslo.cache>=1.29.0 # Apache-2.0
oslo.config>=6.8.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0
oslo.context>=2.21.0 # Apache-2.0 oslo.context>=2.21.0 # Apache-2.0
oslo.db>=4.44.0 # Apache-2.0 oslo.db>=4.35.0 # Apache-2.0
oslo.i18n>=3.20.0 # Apache-2.0 oslo.i18n>=3.20.0 # Apache-2.0
oslo.log>=3.37.0 # Apache-2.0 oslo.log>=3.37.0 # Apache-2.0
oslo.messaging>=14.1.0 # Apache-2.0 oslo.messaging>=8.1.2 # Apache-2.0
oslo.policy>=3.6.0 # Apache-2.0 oslo.policy>=1.34.0 # Apache-2.0
oslo.reports>=1.27.0 # Apache-2.0 oslo.reports>=1.27.0 # Apache-2.0
oslo.serialization>=2.25.0 # Apache-2.0 oslo.serialization>=2.25.0 # Apache-2.0
oslo.service>=1.30.0 # Apache-2.0 oslo.service>=1.30.0 # Apache-2.0
oslo.upgradecheck>=1.3.0 # Apache-2.0 oslo.upgradecheck>=0.1.0 # Apache-2.0
oslo.utils>=3.36.0 # Apache-2.0 oslo.utils>=3.36.0 # Apache-2.0
oslo.versionedobjects>=1.32.0 # Apache-2.0 oslo.versionedobjects>=1.32.0 # Apache-2.0
PasteDeploy>=1.5.2 # MIT PasteDeploy>=1.5.2 # MIT
pbr>=3.1.1 # Apache-2.0 pbr>=3.1.1 # Apache-2.0
pecan>=1.3.2 # BSD pecan>=1.3.2 # BSD
PrettyTable>=0.7.2 # BSD PrettyTable<0.8,>=0.7.2 # BSD
gnocchiclient>=7.0.1 # Apache-2.0 gnocchiclient>=7.0.1 # Apache-2.0
python-ceilometerclient>=2.9.0 # Apache-2.0
python-cinderclient>=3.5.0 # Apache-2.0 python-cinderclient>=3.5.0 # Apache-2.0
python-glanceclient>=2.9.1 # Apache-2.0 python-glanceclient>=2.9.1 # Apache-2.0
python-keystoneclient>=3.15.0 # Apache-2.0 python-keystoneclient>=3.15.0 # Apache-2.0
@@ -38,11 +39,12 @@ python-neutronclient>=6.7.0 # Apache-2.0
python-novaclient>=14.1.0 # Apache-2.0 python-novaclient>=14.1.0 # Apache-2.0
python-openstackclient>=3.14.0 # Apache-2.0 python-openstackclient>=3.14.0 # Apache-2.0
python-ironicclient>=2.5.0 # Apache-2.0 python-ironicclient>=2.5.0 # Apache-2.0
six>=1.11.0 # MIT
SQLAlchemy>=1.2.5 # MIT SQLAlchemy>=1.2.5 # MIT
stevedore>=1.28.0 # Apache-2.0 stevedore>=1.28.0 # Apache-2.0
taskflow>=3.8.0 # Apache-2.0 taskflow>=3.7.1 # Apache-2.0
WebOb>=1.8.5 # MIT WebOb>=1.8.5 # MIT
WSME>=0.9.2 # MIT WSME>=0.9.2 # MIT
networkx>=2.4 # BSD networkx>=2.2;python_version>='3.4' # BSD
microversion_parse>=0.2.1 # Apache-2.0 microversion_parse>=0.2.1 # Apache-2.0
futurist>=1.8.0 # Apache-2.0 futurist>=1.8.0 # Apache-2.0


@@ -1,12 +1,12 @@
[metadata] [metadata]
name = python-watcher name = python-watcher
summary = OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. summary = OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds.
description_file = description-file =
README.rst README.rst
author = OpenStack author = OpenStack
author_email = openstack-discuss@lists.openstack.org author-email = openstack-discuss@lists.openstack.org
home_page = https://docs.openstack.org/watcher/latest/ home-page = https://docs.openstack.org/watcher/latest/
python_requires = >=3.8 python-requires = >=3.6
classifier = classifier =
Environment :: OpenStack Environment :: OpenStack
Intended Audience :: Information Technology Intended Audience :: Information Technology
@@ -17,10 +17,8 @@ classifier =
Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3 Programming Language :: Python :: 3
Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
[files] [files]
packages = packages =
@@ -109,3 +107,18 @@ watcher_cluster_data_model_collectors =
compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector
storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector
baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector
[compile_catalog]
directory = watcher/locale
domain = watcher
[update_catalog]
domain = watcher
output_dir = watcher/locale
input_file = watcher/locale/watcher.pot
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext _LI _LW _LE _LC
mapping_file = babel.cfg
output_file = watcher/locale/watcher.pot


@@ -5,11 +5,12 @@
coverage>=4.5.1 # Apache-2.0 coverage>=4.5.1 # Apache-2.0
doc8>=0.8.0 # Apache-2.0 doc8>=0.8.0 # Apache-2.0
freezegun>=0.3.10 # Apache-2.0 freezegun>=0.3.10 # Apache-2.0
hacking>=3.0.1,<3.1.0 # Apache-2.0 hacking>=3.0,<3.1.0 # Apache-2.0
mock>=2.0.0 # BSD
oslotest>=3.3.0 # Apache-2.0 oslotest>=3.3.0 # Apache-2.0
os-testr>=1.0.0 # Apache-2.0
testscenarios>=0.5.0 # Apache-2.0/BSD testscenarios>=0.5.0 # Apache-2.0/BSD
testtools>=2.3.0 # MIT testtools>=2.3.0 # MIT
stestr>=2.0.0 # Apache-2.0 stestr>=2.0.0 # Apache-2.0
os-api-ref>=1.4.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0
bandit>=1.6.0 # Apache-2.0 bandit>=1.6.0 # Apache-2.0
WebTest>=2.0.27 # MIT

tox.ini

@@ -1,41 +1,37 @@
[tox] [tox]
minversion = 3.18.0 minversion = 2.0
envlist = py3,pep8 envlist = py36,py37,pep8
skipsdist = True
ignore_basepython_conflict = True ignore_basepython_conflict = True
[testenv] [testenv]
basepython = python3 basepython = python3
usedevelop = True usedevelop = True
allowlist_externals = find whitelist_externals = find
rm rm
install_command = pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2024.1} {opts} {packages} install_command = pip install {opts} {packages}
setenv = setenv =
VIRTUAL_ENV={envdir} VIRTUAL_ENV={envdir}
deps = deps =
-c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/ussuri}
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt -r{toxinidir}/requirements.txt
python-libmaas>=0.6.8
commands = commands =
rm -f .testrepository/times.dbm rm -f .testrepository/times.dbm
find . -type f -name "*.py[c|o]" -delete find . -type f -name "*.py[c|o]" -delete
stestr run {posargs} stestr run {posargs}
passenv = passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
http_proxy
HTTP_PROXY
https_proxy
HTTPS_PROXY
no_proxy
NO_PROXY
[testenv:pep8] [testenv:pep8]
commands = commands =
doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst
flake8 flake8
#bandit -r watcher -x watcher/tests/* -n5 -ll -s B320 bandit -r watcher -x watcher/tests/* -n5 -ll -s B320
[testenv:venv] [testenv:venv]
setenv = PYTHONHASHSEED=0 setenv = PYTHONHASHSEED=0
deps = deps =
-c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/ussuri}
-r{toxinidir}/doc/requirements.txt -r{toxinidir}/doc/requirements.txt
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt -r{toxinidir}/requirements.txt
@@ -53,15 +49,14 @@ commands =
[testenv:docs] [testenv:docs]
setenv = PYTHONHASHSEED=0 setenv = PYTHONHASHSEED=0
deps = deps = -r{toxinidir}/doc/requirements.txt
-r{toxinidir}/doc/requirements.txt
commands = commands =
rm -fr doc/build doc/source/api/ .autogenerated rm -fr doc/build doc/source/api/ .autogenerated
sphinx-build -W --keep-going -b html doc/source doc/build/html sphinx-build -W --keep-going -b html doc/source doc/build/html
[testenv:api-ref] [testenv:api-ref]
deps = -r{toxinidir}/doc/requirements.txt deps = -r{toxinidir}/doc/requirements.txt
allowlist_externals = bash whitelist_externals = bash
commands = commands =
bash -c 'rm -rf api-ref/build' bash -c 'rm -rf api-ref/build'
sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html
@@ -78,28 +73,6 @@ commands =
commands = commands =
oslopolicy-sample-generator --config-file etc/watcher/oslo-policy-generator/watcher-policy-generator.conf oslopolicy-sample-generator --config-file etc/watcher/oslo-policy-generator/watcher-policy-generator.conf
[testenv:wheel]
commands = python setup.py bdist_wheel
[testenv:pdf-docs]
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
allowlist_externals =
rm
make
commands =
rm -rf doc/build/pdf
sphinx-build -W --keep-going -b latex doc/source doc/build/pdf
make -C doc/build/pdf
[testenv:releasenotes]
deps = -r{toxinidir}/doc/requirements.txt
commands = sphinx-build -a -W -E -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
[testenv:bandit]
deps = -r{toxinidir}/test-requirements.txt
commands = bandit -r watcher -x watcher/tests/* -n5 -ll -s B320
[flake8] [flake8]
filename = *.py,app.wsgi filename = *.py,app.wsgi
show-source=True show-source=True
@@ -109,6 +82,9 @@ builtins= _
enable-extensions = H106,H203,H904 enable-extensions = H106,H203,H904
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes
[testenv:wheel]
commands = python setup.py bdist_wheel
[hacking] [hacking]
import_exceptions = watcher._i18n import_exceptions = watcher._i18n
@@ -129,10 +105,35 @@ extension =
N340 = checks:check_oslo_i18n_wrapper N340 = checks:check_oslo_i18n_wrapper
N341 = checks:check_builtins_gettext N341 = checks:check_builtins_gettext
N342 = checks:no_redundant_import_alias N342 = checks:no_redundant_import_alias
N366 = checks:import_stock_mock
paths = ./watcher/hacking paths = ./watcher/hacking
[doc8] [doc8]
extension=.rst extension=.rst
# todo: stop ignoring doc/source/man when https://bugs.launchpad.net/doc8/+bug/1502391 is fixed # todo: stop ignoring doc/source/man when https://bugs.launchpad.net/doc8/+bug/1502391 is fixed
ignore-path=doc/source/image_src,doc/source/man,doc/source/api ignore-path=doc/source/image_src,doc/source/man,doc/source/api
[testenv:pdf-docs]
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
whitelist_externals =
rm
make
commands =
rm -rf doc/build/pdf
sphinx-build -W --keep-going -b latex doc/source doc/build/pdf
make -C doc/build/pdf
[testenv:releasenotes]
deps = -r{toxinidir}/doc/requirements.txt
commands = sphinx-build -a -W -E -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
[testenv:bandit]
deps = -r{toxinidir}/test-requirements.txt
commands = bandit -r watcher -x watcher/tests/* -n5 -ll -s B320
[testenv:lower-constraints]
deps =
-c{toxinidir}/lower-constraints.txt
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt


@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import unicode_literals
from oslo_config import cfg from oslo_config import cfg
from watcher.api import hooks from watcher.api import hooks


@@ -57,7 +57,6 @@ are dynamically loaded by Watcher at launch time.
import datetime import datetime
from http import HTTPStatus
import pecan import pecan
from pecan import rest from pecan import rest
import wsme import wsme
@@ -363,7 +362,7 @@ class ActionsController(rest.RestController):
return Action.convert_with_links(action) return Action.convert_with_links(action)
@wsme_pecan.wsexpose(Action, body=Action, status_code=HTTPStatus.CREATED) @wsme_pecan.wsexpose(Action, body=Action, status_code=201)
def post(self, action): def post(self, action):
"""Create a new action(forbidden). """Create a new action(forbidden).
@@ -423,7 +422,7 @@ class ActionsController(rest.RestController):
action_to_update.save() action_to_update.save()
return Action.convert_with_links(action_to_update) return Action.convert_with_links(action_to_update)
@wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT) @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, action_uuid): def delete(self, action_uuid):
"""Delete a action(forbidden). """Delete a action(forbidden).


@@ -56,7 +56,6 @@ state machine <action_plan_state_machine>`.
import datetime import datetime
from http import HTTPStatus
from oslo_log import log from oslo_log import log
import pecan import pecan
from pecan import rest from pecan import rest
@@ -461,7 +460,7 @@ class ActionPlansController(rest.RestController):
return ActionPlan.convert_with_links(action_plan) return ActionPlan.convert_with_links(action_plan)
@wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT) @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, action_plan_uuid): def delete(self, action_plan_uuid):
"""Delete an action plan. """Delete an action plan.


@@ -32,7 +32,6 @@ states, visit :ref:`the Audit State machine <audit_state_machine>`.
import datetime import datetime
from dateutil import tz from dateutil import tz
from http import HTTPStatus
import pecan import pecan
from pecan import rest from pecan import rest
import wsme import wsme
@@ -596,8 +595,7 @@ class AuditsController(rest.RestController):
return Audit.convert_with_links(rpc_audit) return Audit.convert_with_links(rpc_audit)
@wsme_pecan.wsexpose(Audit, body=AuditPostType, @wsme_pecan.wsexpose(Audit, body=AuditPostType, status_code=201)
status_code=HTTPStatus.CREATED)
def post(self, audit_p): def post(self, audit_p):
"""Create a new audit. """Create a new audit.
@@ -719,7 +717,7 @@ class AuditsController(rest.RestController):
audit_to_update.save() audit_to_update.save()
return Audit.convert_with_links(audit_to_update) return Audit.convert_with_links(audit_to_update)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=HTTPStatus.NO_CONTENT) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
def delete(self, audit): def delete(self, audit):
"""Delete an audit. """Delete an audit.


@@ -45,7 +45,6 @@ will be launched automatically or will need a manual confirmation from the
import datetime import datetime
from http import HTTPStatus
import pecan import pecan
from pecan import rest from pecan import rest
import wsme import wsme
@@ -139,9 +138,6 @@ class AuditTemplatePostType(wtypes.Base):
raise exception.InvalidGoal(goal=audit_template.goal) raise exception.InvalidGoal(goal=audit_template.goal)
if audit_template.scope: if audit_template.scope:
keys = [list(s)[0] for s in audit_template.scope]
if keys[0] not in ('compute', 'storage'):
audit_template.scope = [dict(compute=audit_template.scope)]
common_utils.Draft4Validator( common_utils.Draft4Validator(
AuditTemplatePostType._build_schema() AuditTemplatePostType._build_schema()
).validate(audit_template.scope) ).validate(audit_template.scope)
@@ -162,23 +158,18 @@ class AuditTemplatePostType(wtypes.Base):
"included and excluded together")) "included and excluded together"))
if audit_template.strategy: if audit_template.strategy:
try: available_strategies = objects.Strategy.list(
if (common_utils.is_uuid_like(audit_template.strategy) or AuditTemplatePostType._ctx)
common_utils.is_int_like(audit_template.strategy)): available_strategies_map = {
strategy = objects.Strategy.get( s.uuid: s for s in available_strategies}
AuditTemplatePostType._ctx, audit_template.strategy) if audit_template.strategy not in available_strategies_map:
else:
strategy = objects.Strategy.get_by_name(
AuditTemplatePostType._ctx, audit_template.strategy)
except Exception:
raise exception.InvalidStrategy( raise exception.InvalidStrategy(
strategy=audit_template.strategy) strategy=audit_template.strategy)
strategy = available_strategies_map[audit_template.strategy]
# Check that the strategy we indicate is actually related to the # Check that the strategy we indicate is actually related to the
# specified goal # specified goal
if strategy.goal_id != goal.id: if strategy.goal_id != goal.id:
available_strategies = objects.Strategy.list(
AuditTemplatePostType._ctx)
choices = ["'%s' (%s)" % (s.uuid, s.name) choices = ["'%s' (%s)" % (s.uuid, s.name)
for s in available_strategies] for s in available_strategies]
raise exception.InvalidStrategy( raise exception.InvalidStrategy(
@@ -619,7 +610,7 @@ class AuditTemplatesController(rest.RestController):
@wsme.validate(types.uuid, AuditTemplatePostType) @wsme.validate(types.uuid, AuditTemplatePostType)
@wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplatePostType, @wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplatePostType,
status_code=HTTPStatus.CREATED) status_code=201)
def post(self, audit_template_postdata): def post(self, audit_template_postdata):
"""Create a new audit template. """Create a new audit template.
@@ -695,7 +686,7 @@ class AuditTemplatesController(rest.RestController):
audit_template_to_update.save() audit_template_to_update.save()
return AuditTemplate.convert_with_links(audit_template_to_update) return AuditTemplate.convert_with_links(audit_template_to_update)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=HTTPStatus.NO_CONTENT) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
def delete(self, audit_template): def delete(self, audit_template):
"""Delete a audit template. """Delete a audit template.

View File

@@ -19,6 +19,8 @@ Service mechanism provides ability to monitor Watcher services state.
""" """
import datetime import datetime
import six
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log from oslo_log import log
from oslo_utils import timeutils from oslo_utils import timeutils
@@ -68,7 +70,7 @@ class Service(base.APIBase):
service = objects.Service.get(pecan.request.context, id) service = objects.Service.get(pecan.request.context, id)
last_heartbeat = (service.last_seen_up or service.updated_at or last_heartbeat = (service.last_seen_up or service.updated_at or
service.created_at) service.created_at)
if isinstance(last_heartbeat, str): if isinstance(last_heartbeat, six.string_types):
# NOTE(russellb) If this service came in over rpc via # NOTE(russellb) If this service came in over rpc via
# conductor, then the timestamp will be a string and needs to be # conductor, then the timestamp will be a string and needs to be
# converted back to a datetime. # converted back to a datetime.

View File

@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
from oslo_utils import strutils from oslo_utils import strutils
import six
import wsme import wsme
from wsme import types as wtypes from wsme import types as wtypes
@@ -131,7 +132,7 @@ class JsonType(wtypes.UserType):
def __str__(self): def __str__(self):
# These are the json serializable native types # These are the json serializable native types
return ' | '.join(map(str, (wtypes.text, int, float, return ' | '.join(map(str, (wtypes.text, six.integer_types, float,
BooleanType, list, dict, None))) BooleanType, list, dict, None)))
@staticmethod @staticmethod

View File

@@ -14,7 +14,6 @@
Webhook endpoint for Watcher v1 REST API. Webhook endpoint for Watcher v1 REST API.
""" """
from http import HTTPStatus
from oslo_log import log from oslo_log import log
import pecan import pecan
from pecan import rest from pecan import rest
@@ -37,7 +36,7 @@ class WebhookController(rest.RestController):
self.dc_client = rpcapi.DecisionEngineAPI() self.dc_client = rpcapi.DecisionEngineAPI()
@wsme_pecan.wsexpose(None, wtypes.text, body=types.jsontype, @wsme_pecan.wsexpose(None, wtypes.text, body=types.jsontype,
status_code=HTTPStatus.ACCEPTED) status_code=202)
def post(self, audit_ident, body): def post(self, audit_ident, body):
"""Trigger the given audit. """Trigger the given audit.

View File

@@ -15,9 +15,9 @@
# under the License. # under the License.
from http import HTTPStatus
from oslo_config import cfg from oslo_config import cfg
from pecan import hooks from pecan import hooks
from six.moves import http_client
from watcher.common import context from watcher.common import context
@@ -91,8 +91,8 @@ class NoExceptionTracebackHook(hooks.PecanHook):
# Do nothing if there is no error. # Do nothing if there is no error.
# Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not # Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not
# an error. # an error.
if (HTTPStatus.OK <= state.response.status_int < if (http_client.OK <= state.response.status_int <
HTTPStatus.BAD_REQUEST): http_client.BAD_REQUEST):
return return
json_body = state.response.json json_body = state.response.json

View File

@@ -24,6 +24,7 @@ from xml import etree as et
from oslo_log import log from oslo_log import log
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
import six
import webob import webob
from watcher._i18n import _ from watcher._i18n import _
@@ -83,10 +84,12 @@ class ParsableErrorMiddleware(object):
'</error_message>' % state['status_code']] '</error_message>' % state['status_code']]
state['headers'].append(('Content-Type', 'application/xml')) state['headers'].append(('Content-Type', 'application/xml'))
else: else:
app_iter = [i.decode('utf-8') for i in app_iter] if six.PY3:
app_iter = [i.decode('utf-8') for i in app_iter]
body = [jsonutils.dumps( body = [jsonutils.dumps(
{'error_message': '\n'.join(app_iter)})] {'error_message': '\n'.join(app_iter)})]
body = [item.encode('utf-8') for item in body] if six.PY3:
body = [item.encode('utf-8') for item in body]
state['headers'].append(('Content-Type', 'application/json')) state['headers'].append(('Content-Type', 'application/json'))
state['headers'].append(('Content-Length', str(len(body[0])))) state['headers'].append(('Content-Length', str(len(body[0]))))
else: else:
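Note: the six.PY3 guards restored in this hunk exist because a WSGI app_iter yields byte strings on Python 3 while jsonutils.dumps expects text. A minimal sketch of that decode/encode round trip, independent of the middleware (the sample fault body is made up):

    from oslo_serialization import jsonutils

    # A WSGI app_iter yields bytes on Python 3; decode before joining, then
    # re-encode because the response body must again be bytes.
    app_iter = [b'{"faultstring": "boom"}']
    decoded = [chunk.decode('utf-8') for chunk in app_iter]
    body = jsonutils.dumps({'error_message': '\n'.join(decoded)}).encode('utf-8')
    headers = [('Content-Type', 'application/json'),
               ('Content-Length', str(len(body)))]
    print(body, headers)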

View File

@@ -20,6 +20,7 @@ import itertools
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log from oslo_log import log
from oslo_utils import timeutils from oslo_utils import timeutils
import six
from watcher.common import context as watcher_context from watcher.common import context as watcher_context
from watcher.common import scheduling from watcher.common import scheduling
@@ -82,7 +83,7 @@ class APISchedulingService(scheduling.BackgroundSchedulerService):
service = objects.Service.get(context, service_id) service = objects.Service.get(context, service_id)
last_heartbeat = (service.last_seen_up or service.updated_at or last_heartbeat = (service.last_seen_up or service.updated_at or
service.created_at) service.created_at)
if isinstance(last_heartbeat, str): if isinstance(last_heartbeat, six.string_types):
# NOTE(russellb) If this service came in over rpc via # NOTE(russellb) If this service came in over rpc via
# conductor, then the timestamp will be a string and needs to be # conductor, then the timestamp will be a string and needs to be
# converted back to a datetime. # converted back to a datetime.

View File

@@ -18,9 +18,11 @@
# #
import abc import abc
import six
class BaseActionPlanHandler(object, metaclass=abc.ABCMeta): @six.add_metaclass(abc.ABCMeta)
class BaseActionPlanHandler(object):
@abc.abstractmethod @abc.abstractmethod
def execute(self): def execute(self):
raise NotImplementedError() raise NotImplementedError()
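Note: this metaclass rewrite repeats across many of the files below. six.add_metaclass(abc.ABCMeta) and the Python 3 metaclass= keyword build the same abstract class; the decorator form is kept on stable/ussuri because Python 2.7 was still supported there. A self-contained comparison (class names are illustrative only):

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class HandlerPy2AndPy3(object):
        @abc.abstractmethod
        def execute(self):
            raise NotImplementedError()


    class HandlerPy3Only(object, metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def execute(self):
            raise NotImplementedError()


    # Both classes end up with ABCMeta as their metaclass and cannot be
    # instantiated until execute() is overridden.
    assert type(HandlerPy2AndPy3) is abc.ABCMeta
    assert type(HandlerPy3Only) is abc.ABCMeta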

View File

@@ -19,12 +19,14 @@
import abc import abc
import jsonschema import jsonschema
import six
from watcher.common import clients from watcher.common import clients
from watcher.common.loader import loadable from watcher.common.loader import loadable
class BaseAction(loadable.Loadable, metaclass=abc.ABCMeta): @six.add_metaclass(abc.ABCMeta)
class BaseAction(loadable.Loadable):
# NOTE(jed): by convention we decided # NOTE(jed): by convention we decided
# that the attribute "resource_id" is the unique id of # that the attribute "resource_id" is the unique id of
# the resource to which the Action applies to allow us to use it in the # the resource to which the Action applies to allow us to use it in the

View File

@@ -17,17 +17,17 @@
# limitations under the License. # limitations under the License.
# #
import enum
import time import time
from oslo_log import log
from watcher._i18n import _ from watcher._i18n import _
from watcher.applier.actions import base from watcher.applier.actions import base
from watcher.common import exception from watcher.common import exception
from watcher.common.metal_helper import constants as metal_constants
from watcher.common.metal_helper import factory as metal_helper_factory
LOG = log.getLogger(__name__)
class NodeState(enum.Enum):
POWERON = 'on'
POWEROFF = 'off'
class ChangeNodePowerState(base.BaseAction): class ChangeNodePowerState(base.BaseAction):
@@ -43,8 +43,8 @@ class ChangeNodePowerState(base.BaseAction):
'state': str, 'state': str,
}) })
The `resource_id` references a baremetal node id (list of available The `resource_id` references a ironic node id (list of available
ironic nodes is returned by this command: ``ironic node-list``). ironic node is returned by this command: ``ironic node-list``).
The `state` value should either be `on` or `off`. The `state` value should either be `on` or `off`.
""" """
@@ -59,14 +59,10 @@ class ChangeNodePowerState(base.BaseAction):
'type': 'string', 'type': 'string',
"minlength": 1 "minlength": 1
}, },
'resource_name': {
'type': 'string',
"minlength": 1
},
'state': { 'state': {
'type': 'string', 'type': 'string',
'enum': [metal_constants.PowerState.ON.value, 'enum': [NodeState.POWERON.value,
metal_constants.PowerState.OFF.value] NodeState.POWEROFF.value]
} }
}, },
'required': ['resource_id', 'state'], 'required': ['resource_id', 'state'],
@@ -86,10 +82,10 @@ class ChangeNodePowerState(base.BaseAction):
return self._node_manage_power(target_state) return self._node_manage_power(target_state)
def revert(self): def revert(self):
if self.state == metal_constants.PowerState.ON.value: if self.state == NodeState.POWERON.value:
target_state = metal_constants.PowerState.OFF.value target_state = NodeState.POWEROFF.value
elif self.state == metal_constants.PowerState.OFF.value: elif self.state == NodeState.POWEROFF.value:
target_state = metal_constants.PowerState.ON.value target_state = NodeState.POWERON.value
return self._node_manage_power(target_state) return self._node_manage_power(target_state)
def _node_manage_power(self, state, retry=60): def _node_manage_power(self, state, retry=60):
@@ -97,32 +93,30 @@ class ChangeNodePowerState(base.BaseAction):
             raise exception.IllegalArgumentException(
                 message=_("The target state is not defined"))
-        metal_helper = metal_helper_factory.get_helper(self.osc)
-        node = metal_helper.get_node(self.node_uuid)
-        current_state = node.get_power_state()
-        if state == current_state.value:
+        ironic_client = self.osc.ironic()
+        nova_client = self.osc.nova()
+        current_state = ironic_client.node.get(self.node_uuid).power_state
+        # power state: 'power on' or 'power off', if current node state
+        # is the same as state, just return True
+        if state in current_state:
             return True
-        if state == metal_constants.PowerState.OFF.value:
-            compute_node = node.get_hypervisor_node().to_dict()
+        if state == NodeState.POWEROFF.value:
+            node_info = ironic_client.node.get(self.node_uuid).to_dict()
+            compute_node_id = node_info['extra']['compute_node_id']
+            compute_node = nova_client.hypervisors.get(compute_node_id)
+            compute_node = compute_node.to_dict()
             if (compute_node['running_vms'] == 0):
-                node.set_power_state(state)
-            else:
-                LOG.warning(
-                    "Compute node %s has %s running vms and will "
-                    "NOT be shut off.",
-                    compute_node["hypervisor_hostname"],
-                    compute_node['running_vms'])
-                return False
+                ironic_client.node.set_power_state(
+                    self.node_uuid, state)
         else:
-            node.set_power_state(state)
+            ironic_client.node.set_power_state(self.node_uuid, state)
-        node = metal_helper.get_node(self.node_uuid)
-        while node.get_power_state() == current_state and retry:
+        ironic_node = ironic_client.node.get(self.node_uuid)
+        while ironic_node.power_state == current_state and retry:
             time.sleep(10)
             retry -= 1
-            node = metal_helper.get_node(self.node_uuid)
+            ironic_node = ironic_client.node.get(self.node_uuid)
         if retry > 0:
             return True
         else:
@@ -136,4 +130,4 @@ class ChangeNodePowerState(base.BaseAction):
     def get_description(self):
         """Description of the action"""
-        return ("Compute node power on/off through Ironic or MaaS.")
+        return ("Compute node power on/off through ironic.")

View File

@@ -15,6 +15,8 @@
# limitations under the License. # limitations under the License.
# #
from __future__ import unicode_literals
from oslo_log import log from oslo_log import log
from watcher.applier.loading import default from watcher.applier.loading import default

View File

@@ -186,7 +186,7 @@ class Migrate(base.BaseAction):
return self.migrate(destination=self.destination_node) return self.migrate(destination=self.destination_node)
def revert(self): def revert(self):
return self.migrate(destination=self.source_node) LOG.info('Migrate action do not revert!')
def abort(self): def abort(self):
nova = nova_helper.NovaHelper(osc=self.osc) nova = nova_helper.NovaHelper(osc=self.osc)

View File

@@ -95,7 +95,7 @@ class Resize(base.BaseAction):
return self.resize() return self.resize()
def revert(self): def revert(self):
LOG.warning("revert not supported") return self.migrate(destination=self.source_node)
def pre_condition(self): def pre_condition(self):
# TODO(jed): check if the instance exists / check if the instance is on # TODO(jed): check if the instance exists / check if the instance is on

View File

@@ -17,11 +17,14 @@ import jsonschema
from oslo_log import log from oslo_log import log
from cinderclient import client as cinder_client
from watcher._i18n import _ from watcher._i18n import _
from watcher.applier.actions import base from watcher.applier.actions import base
from watcher.common import cinder_helper from watcher.common import cinder_helper
from watcher.common import exception from watcher.common import exception
from watcher.common import keystone_helper
from watcher.common import nova_helper from watcher.common import nova_helper
from watcher.common import utils
from watcher import conf from watcher import conf
CONF = conf.CONF CONF = conf.CONF
@@ -67,6 +70,8 @@ class VolumeMigrate(base.BaseAction):
def __init__(self, config, osc=None): def __init__(self, config, osc=None):
super(VolumeMigrate, self).__init__(config) super(VolumeMigrate, self).__init__(config)
self.temp_username = utils.random_string(10)
self.temp_password = utils.random_string(10)
self.cinder_util = cinder_helper.CinderHelper(osc=self.osc) self.cinder_util = cinder_helper.CinderHelper(osc=self.osc)
self.nova_util = nova_helper.NovaHelper(osc=self.osc) self.nova_util = nova_helper.NovaHelper(osc=self.osc)
@@ -129,42 +134,83 @@ class VolumeMigrate(base.BaseAction):
def _can_swap(self, volume): def _can_swap(self, volume):
"""Judge volume can be swapped""" """Judge volume can be swapped"""
# TODO(sean-k-mooney): rename this to _can_migrate and update
# tests to reflect that.
# cinder volume migration can migrate volumes that are not
# attached to instances or nova can migrate the data for cinder
# if the volume is in-use. If the volume has no attachments
# allow cinder to decided if it can be migrated.
if not volume.attachments: if not volume.attachments:
LOG.debug(f"volume: {volume.id} has no attachments") return False
return True
# since it has attachments we need to validate nova's constraints
instance_id = volume.attachments[0]['server_id'] instance_id = volume.attachments[0]['server_id']
instance_status = self.nova_util.find_instance(instance_id).status instance_status = self.nova_util.find_instance(instance_id).status
LOG.debug(
f"volume: {volume.id} is attached to instance: {instance_id} " if (volume.status == 'in-use' and
f"in instance status: {instance_status}") instance_status in ('ACTIVE', 'PAUSED', 'RESIZED')):
# NOTE(sean-k-mooney): This used to allow RESIZED which return True
# is the resize_verify task state, that is not an acceptable time
# to migrate volumes, if nova does not block this in the API return False
# today that is probably a bug. PAUSED is also questionable but
# it should generally be safe. def _create_user(self, volume, user):
return (volume.status == 'in-use' and """Create user with volume attribute and user information"""
instance_status in ('ACTIVE', 'PAUSED')) keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
project_id = getattr(volume, 'os-vol-tenant-attr:tenant_id')
user['project'] = project_id
user['domain'] = keystone_util.get_project(project_id).domain_id
user['roles'] = ['admin']
return keystone_util.create_user(user)
def _get_cinder_client(self, session):
"""Get cinder client by session"""
return cinder_client.Client(
CONF.cinder_client.api_version,
session=session,
endpoint_type=CONF.cinder_client.endpoint_type)
def _swap_volume(self, volume, dest_type):
"""Swap volume to dest_type
Limitation note: only for compute libvirt driver
"""
if not dest_type:
raise exception.Invalid(
message=(_("destination type is required when "
"migration type is swap")))
if not self._can_swap(volume):
raise exception.Invalid(
message=(_("Invalid state for swapping volume")))
user_info = {
'name': self.temp_username,
'password': self.temp_password}
user = self._create_user(volume, user_info)
keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
try:
session = keystone_util.create_session(
user.id, self.temp_password)
temp_cinder = self._get_cinder_client(session)
# swap volume
new_volume = self.cinder_util.create_volume(
temp_cinder, volume, dest_type)
self.nova_util.swap_volume(volume, new_volume)
# delete old volume
self.cinder_util.delete_volume(volume)
finally:
keystone_util.delete_user(user)
return True
def _migrate(self, volume_id, dest_node, dest_type): def _migrate(self, volume_id, dest_node, dest_type):
try: try:
volume = self.cinder_util.get_volume(volume_id) volume = self.cinder_util.get_volume(volume_id)
# for backward compatibility map swap to migrate. if self.migration_type == self.SWAP:
if self.migration_type in (self.SWAP, self.MIGRATE): if dest_node:
if not self._can_swap(volume): LOG.warning("dest_node is ignored")
raise exception.Invalid( return self._swap_volume(volume, dest_type)
message=(_("Invalid state for swapping volume")))
return self.cinder_util.migrate(volume, dest_node)
elif self.migration_type == self.RETYPE: elif self.migration_type == self.RETYPE:
return self.cinder_util.retype(volume, dest_type) return self.cinder_util.retype(volume, dest_type)
elif self.migration_type == self.MIGRATE:
return self.cinder_util.migrate(volume, dest_node)
else: else:
raise exception.Invalid( raise exception.Invalid(
message=(_("Migration of type '%(migration_type)s' is not " message=(_("Migration of type '%(migration_type)s' is not "

View File

@@ -26,9 +26,11 @@ See: :doc:`../architecture` for more details on this component.
""" """
import abc import abc
import six
class BaseApplier(object, metaclass=abc.ABCMeta): @six.add_metaclass(abc.ABCMeta)
class BaseApplier(object):
@abc.abstractmethod @abc.abstractmethod
def execute(self, action_plan_uuid): def execute(self, action_plan_uuid):
raise NotImplementedError() raise NotImplementedError()

View File

@@ -11,6 +11,9 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import unicode_literals
from watcher.common.loader import default from watcher.common.loader import default

View File

@@ -17,6 +17,7 @@
# #
import abc import abc
import six
import time import time
import eventlet import eventlet
@@ -39,7 +40,8 @@ CANCEL_STATE = [objects.action_plan.State.CANCELLING,
objects.action_plan.State.CANCELLED] objects.action_plan.State.CANCELLED]
class BaseWorkFlowEngine(loadable.Loadable, metaclass=abc.ABCMeta): @six.add_metaclass(abc.ABCMeta)
class BaseWorkFlowEngine(loadable.Loadable):
def __init__(self, config, context=None, applier_manager=None): def __init__(self, config, context=None, applier_manager=None):
"""Constructor """Constructor

View File

@@ -25,11 +25,8 @@ from taskflow import task as flow_task
from watcher.applier.workflow_engine import base from watcher.applier.workflow_engine import base
from watcher.common import exception from watcher.common import exception
from watcher import conf
from watcher import objects from watcher import objects
CONF = conf.CONF
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -130,11 +127,9 @@ class DefaultWorkFlowEngine(base.BaseWorkFlowEngine):
class TaskFlowActionContainer(base.BaseTaskFlowActionContainer): class TaskFlowActionContainer(base.BaseTaskFlowActionContainer):
     def __init__(self, db_action, engine):
-        self.name = "action_type:{0} uuid:{1}".format(db_action.action_type,
-                                                      db_action.uuid)
-        super(TaskFlowActionContainer, self).__init__(self.name,
-                                                      db_action,
-                                                      engine)
+        name = "action_type:{0} uuid:{1}".format(db_action.action_type,
+                                                 db_action.uuid)
+        super(TaskFlowActionContainer, self).__init__(name, db_action, engine)
def do_pre_execute(self): def do_pre_execute(self):
db_action = self.engine.notify(self._db_action, db_action = self.engine.notify(self._db_action,
@@ -163,12 +158,6 @@ class TaskFlowActionContainer(base.BaseTaskFlowActionContainer):
self.action.post_condition() self.action.post_condition()
def do_revert(self, *args, **kwargs): def do_revert(self, *args, **kwargs):
# NOTE: Not rollback action plan
if not CONF.watcher_applier.rollback_when_actionplan_failed:
LOG.info("Failed actionplan rollback option is turned off, and "
"the following action will be skipped: %s", self.name)
return
LOG.warning("Revert action: %s", self.name) LOG.warning("Revert action: %s", self.name)
try: try:
# TODO(jed): do we need to update the states in case of failure? # TODO(jed): do we need to update the states in case of failure?

View File

@@ -18,10 +18,3 @@
import eventlet import eventlet
eventlet.monkey_patch() eventlet.monkey_patch()
# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
import __original_module_threading as orig_threading # noqa
import threading # noqa
orig_threading.current_thread.__globals__['_active'] = threading._active
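Note: the lines removed here were a later workaround for eventlet bug 1863021; the stable/ussuri module keeps only the plain monkey patch. The essential import-time pattern, sketched standalone (the helper function is illustrative, not part of Watcher):

    # Patch the stdlib before anything else imports threading or socket so
    # every service entry point shares the same (green) thread machinery.
    import eventlet

    eventlet.monkey_patch()

    import threading  # noqa: E402  imported only after patching


    def current_thread_name():
        # After monkey_patch(), current_thread() also reports green threads.
        return threading.current_thread().name


    if __name__ == '__main__':
        print(current_thread_name())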

View File

@@ -14,7 +14,6 @@
import sys import sys
from oslo_upgradecheck import common_checks
from oslo_upgradecheck import upgradecheck from oslo_upgradecheck import upgradecheck
from watcher._i18n import _ from watcher._i18n import _
@@ -44,10 +43,6 @@ class Checks(upgradecheck.UpgradeCommands):
_upgrade_checks = ( _upgrade_checks = (
# Added in Train. # Added in Train.
(_('Minimum Nova API Version'), _minimum_nova_api_version), (_('Minimum Nova API Version'), _minimum_nova_api_version),
# Added in Wallaby.
(_("Policy File JSON to YAML Migration"),
(common_checks.check_policy_json, {'conf': CONF})),
) )

View File

@@ -17,7 +17,7 @@ import time
from oslo_log import log from oslo_log import log
from cinderclient import exceptions as cinder_exception from cinderclient import exceptions as cinder_exception
from cinderclient.v3.volumes import Volume from cinderclient.v2.volumes import Volume
from watcher._i18n import _ from watcher._i18n import _
from watcher.common import clients from watcher.common import clients
from watcher.common import exception from watcher.common import exception

View File

@@ -25,7 +25,6 @@ from novaclient import api_versions as nova_api_versions
from novaclient import client as nvclient from novaclient import client as nvclient
from watcher.common import exception from watcher.common import exception
from watcher.common import utils
try: try:
from ceilometerclient import client as ceclient from ceilometerclient import client as ceclient
@@ -33,12 +32,6 @@ try:
except ImportError: except ImportError:
HAS_CEILCLIENT = False HAS_CEILCLIENT = False
try:
from maas import client as maas_client
except ImportError:
maas_client = None
CONF = cfg.CONF CONF = cfg.CONF
_CLIENTS_AUTH_GROUP = 'watcher_clients_auth' _CLIENTS_AUTH_GROUP = 'watcher_clients_auth'
@@ -81,7 +74,6 @@ class OpenStackClients(object):
self._monasca = None self._monasca = None
self._neutron = None self._neutron = None
self._ironic = None self._ironic = None
self._maas = None
self._placement = None self._placement = None
def _get_keystone_session(self): def _get_keystone_session(self):
@@ -273,23 +265,6 @@ class OpenStackClients(object):
session=self.session) session=self.session)
return self._ironic return self._ironic
def maas(self):
if self._maas:
return self._maas
if not maas_client:
raise exception.UnsupportedError(
"MAAS client unavailable. Please install python-libmaas.")
url = self._get_client_option('maas', 'url')
api_key = self._get_client_option('maas', 'api_key')
timeout = self._get_client_option('maas', 'timeout')
self._maas = utils.async_compat_call(
maas_client.connect,
url, apikey=api_key,
timeout=timeout)
return self._maas
@exception.wrap_keystone_exception @exception.wrap_keystone_exception
def placement(self): def placement(self):
if self._placement: if self._placement:

View File

@@ -11,15 +11,13 @@
# under the License. # under the License.
from oslo_context import context from oslo_context import context
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log from oslo_log import log
from oslo_utils import timeutils from oslo_utils import timeutils
import six
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@enginefacade.transaction_context_provider
class RequestContext(context.RequestContext): class RequestContext(context.RequestContext):
"""Extends security contexts from the OpenStack common library.""" """Extends security contexts from the OpenStack common library."""
@@ -71,7 +69,7 @@ class RequestContext(context.RequestContext):
self.project_id = project_id self.project_id = project_id
if not timestamp: if not timestamp:
timestamp = timeutils.utcnow() timestamp = timeutils.utcnow()
if isinstance(timestamp, str): if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_isotime(timestamp) timestamp = timeutils.parse_isotime(timestamp)
self.timestamp = timestamp self.timestamp = timestamp
self.user_name = user_name self.user_name = user_name

View File

@@ -25,7 +25,6 @@ SHOULD include dedicated exception logging.
import functools import functools
import sys import sys
from http import HTTPStatus
from keystoneclient import exceptions as keystone_exceptions from keystoneclient import exceptions as keystone_exceptions
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log from oslo_log import log
@@ -63,7 +62,7 @@ class WatcherException(Exception):
""" """
msg_fmt = _("An unknown exception occurred") msg_fmt = _("An unknown exception occurred")
code = HTTPStatus.INTERNAL_SERVER_ERROR code = 500
headers = {} headers = {}
safe = False safe = False
@@ -115,12 +114,12 @@ class UnsupportedError(WatcherException):
class NotAuthorized(WatcherException): class NotAuthorized(WatcherException):
msg_fmt = _("Not authorized") msg_fmt = _("Not authorized")
code = HTTPStatus.FORBIDDEN code = 403
class NotAcceptable(WatcherException): class NotAcceptable(WatcherException):
msg_fmt = _("Request not acceptable.") msg_fmt = _("Request not acceptable.")
code = HTTPStatus.NOT_ACCEPTABLE code = 406
class PolicyNotAuthorized(NotAuthorized): class PolicyNotAuthorized(NotAuthorized):
@@ -133,7 +132,7 @@ class OperationNotPermitted(NotAuthorized):
class Invalid(WatcherException, ValueError): class Invalid(WatcherException, ValueError):
msg_fmt = _("Unacceptable parameters") msg_fmt = _("Unacceptable parameters")
code = HTTPStatus.BAD_REQUEST code = 400
class ObjectNotFound(WatcherException): class ObjectNotFound(WatcherException):
@@ -142,12 +141,12 @@ class ObjectNotFound(WatcherException):
class Conflict(WatcherException): class Conflict(WatcherException):
msg_fmt = _('Conflict') msg_fmt = _('Conflict')
code = HTTPStatus.CONFLICT code = 409
class ResourceNotFound(ObjectNotFound): class ResourceNotFound(ObjectNotFound):
msg_fmt = _("The %(name)s resource %(id)s could not be found") msg_fmt = _("The %(name)s resource %(id)s could not be found")
code = HTTPStatus.NOT_FOUND code = 404
class InvalidParameter(Invalid): class InvalidParameter(Invalid):
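Note: the exception hunks above swap HTTPStatus members for the equivalent integer literals; the msg_fmt/code pattern itself is unchanged. A small sketch of that pattern outside Watcher (the class and values are made up):

    from http import HTTPStatus


    class ExampleNotFound(Exception):
        # Same msg_fmt/code idea as the WatcherException subclasses; the
        # member and the literal 404 used on stable/ussuri are interchangeable.
        msg_fmt = "The %(name)s resource %(id)s could not be found"
        code = HTTPStatus.NOT_FOUND

        def __init__(self, **kwargs):
            super().__init__(self.msg_fmt % kwargs)


    err = ExampleNotFound(name='audit', id='1234')
    print(int(ExampleNotFound.code), err)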

View File

@@ -15,6 +15,8 @@
from oslo_log import log from oslo_log import log
from keystoneauth1.exceptions import http as ks_exceptions from keystoneauth1.exceptions import http as ks_exceptions
from keystoneauth1 import loading
from keystoneauth1 import session
from watcher._i18n import _ from watcher._i18n import _
from watcher.common import clients from watcher.common import clients
from watcher.common import exception from watcher.common import exception
@@ -88,3 +90,35 @@ class KeystoneHelper(object):
message=(_("Domain name seems ambiguous: %s") % message=(_("Domain name seems ambiguous: %s") %
name_or_id)) name_or_id))
return domains[0] return domains[0]
def create_session(self, user_id, password):
user = self.get_user(user_id)
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
auth_url=CONF.watcher_clients_auth.auth_url,
password=password,
user_id=user_id,
project_id=user.default_project_id)
return session.Session(auth=auth)
def create_user(self, user):
project = self.get_project(user['project'])
domain = self.get_domain(user['domain'])
_user = self.keystone.users.create(
user['name'],
password=user['password'],
domain=domain,
project=project,
)
for role in user['roles']:
role = self.get_role(role)
self.keystone.roles.grant(
role.id, user=_user.id, project=project.id)
return _user
def delete_user(self, user):
try:
user = self.get_user(user)
self.keystone.users.delete(user)
except exception.Invalid:
pass

View File

@@ -14,10 +14,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import unicode_literals
import abc import abc
import six
class BaseLoader(object, metaclass=abc.ABCMeta): @six.add_metaclass(abc.ABCMeta)
class BaseLoader(object):
@abc.abstractmethod @abc.abstractmethod
def list_available(self): def list_available(self):

View File

@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import unicode_literals
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log from oslo_log import log
from stevedore import driver as drivermanager from stevedore import driver as drivermanager

View File

@@ -16,10 +16,13 @@
import abc import abc
import six
from watcher.common import service from watcher.common import service
class Loadable(object, metaclass=abc.ABCMeta): @six.add_metaclass(abc.ABCMeta)
class Loadable(object):
"""Generic interface for dynamically loading a driver/entry point. """Generic interface for dynamically loading a driver/entry point.
This defines the contract in order to let the loader manager inject This defines the contract in order to let the loader manager inject
@@ -45,7 +48,8 @@ LoadableSingletonMeta = type(
"LoadableSingletonMeta", (abc.ABCMeta, service.Singleton), {}) "LoadableSingletonMeta", (abc.ABCMeta, service.Singleton), {})
class LoadableSingleton(object, metaclass=LoadableSingletonMeta): @six.add_metaclass(LoadableSingletonMeta)
class LoadableSingleton(object):
"""Generic interface for dynamically loading a driver as a singleton. """Generic interface for dynamically loading a driver as a singleton.
This defines the contract in order to let the loader manager inject This defines the contract in order to let the loader manager inject

View File

@@ -1,81 +0,0 @@
# Copyright 2023 Cloudbase Solutions
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from watcher.common import exception
from watcher.common.metal_helper import constants as metal_constants
class BaseMetalNode(abc.ABC):
hv_up_when_powered_off = False
def __init__(self, nova_node=None):
self._nova_node = nova_node
def get_hypervisor_node(self):
if not self._nova_node:
raise exception.Invalid(message="No associated hypervisor.")
return self._nova_node
def get_hypervisor_hostname(self):
return self.get_hypervisor_node().hypervisor_hostname
@abc.abstractmethod
def get_power_state(self):
# TODO(lpetrut): document the following methods
pass
@abc.abstractmethod
def get_id(self):
"""Return the node id provided by the bare metal service."""
pass
@abc.abstractmethod
def power_on(self):
pass
@abc.abstractmethod
def power_off(self):
pass
def set_power_state(self, state):
state = metal_constants.PowerState(state)
if state == metal_constants.PowerState.ON:
self.power_on()
elif state == metal_constants.PowerState.OFF:
self.power_off()
else:
raise exception.UnsupportedActionType(
"Cannot set power state: %s" % state)
class BaseMetalHelper(abc.ABC):
def __init__(self, osc):
self._osc = osc
@property
def nova_client(self):
if not getattr(self, "_nova_client", None):
self._nova_client = self._osc.nova()
return self._nova_client
@abc.abstractmethod
def list_compute_nodes(self):
pass
@abc.abstractmethod
def get_node(self, node_id):
pass

View File

@@ -1,23 +0,0 @@
# Copyright 2023 Cloudbase Solutions
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import enum
class PowerState(str, enum.Enum):
ON = "on"
OFF = "off"
UNKNOWN = "unknown"
ERROR = "error"

View File

@@ -1,33 +0,0 @@
# Copyright 2023 Cloudbase Solutions
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from watcher.common import clients
from watcher.common.metal_helper import ironic
from watcher.common.metal_helper import maas
CONF = cfg.CONF
def get_helper(osc=None):
# TODO(lpetrut): consider caching this client.
if not osc:
osc = clients.OpenStackClients()
if CONF.maas_client.url:
return maas.MaasHelper(osc)
else:
return ironic.IronicHelper(osc)

View File

@@ -1,94 +0,0 @@
# Copyright 2023 Cloudbase Solutions
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from watcher.common.metal_helper import base
from watcher.common.metal_helper import constants as metal_constants
LOG = log.getLogger(__name__)
POWER_STATES_MAP = {
'power on': metal_constants.PowerState.ON,
'power off': metal_constants.PowerState.OFF,
# For now, we only use ON/OFF states
'rebooting': metal_constants.PowerState.ON,
'soft power off': metal_constants.PowerState.OFF,
'soft reboot': metal_constants.PowerState.ON,
}
class IronicNode(base.BaseMetalNode):
hv_up_when_powered_off = True
def __init__(self, ironic_node, nova_node, ironic_client):
super().__init__(nova_node)
self._ironic_client = ironic_client
self._ironic_node = ironic_node
def get_power_state(self):
return POWER_STATES_MAP.get(self._ironic_node.power_state,
metal_constants.PowerState.UNKNOWN)
def get_id(self):
return self._ironic_node.uuid
def power_on(self):
self._ironic_client.node.set_power_state(self.get_id(), "on")
def power_off(self):
self._ironic_client.node.set_power_state(self.get_id(), "off")
class IronicHelper(base.BaseMetalHelper):
@property
def _client(self):
if not getattr(self, "_cached_client", None):
self._cached_client = self._osc.ironic()
return self._cached_client
def list_compute_nodes(self):
out_list = []
# TODO(lpetrut): consider using "detailed=True" instead of making
# an additional GET request per node
node_list = self._client.node.list()
for node in node_list:
node_info = self._client.node.get(node.uuid)
hypervisor_id = node_info.extra.get('compute_node_id', None)
if hypervisor_id is None:
LOG.warning('Cannot find compute_node_id in extra '
'of ironic node %s', node.uuid)
continue
hypervisor_node = self.nova_client.hypervisors.get(hypervisor_id)
if hypervisor_node is None:
LOG.warning('Cannot find hypervisor %s', hypervisor_id)
continue
out_node = IronicNode(node, hypervisor_node, self._client)
out_list.append(out_node)
return out_list
def get_node(self, node_id):
ironic_node = self._client.node.get(node_id)
compute_node_id = ironic_node.extra.get('compute_node_id')
if compute_node_id:
compute_node = self.nova_client.hypervisors.get(compute_node_id)
else:
compute_node = None
return IronicNode(ironic_node, compute_node, self._client)

View File

@@ -1,125 +0,0 @@
# Copyright 2023 Cloudbase Solutions
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from watcher.common import exception
from watcher.common.metal_helper import base
from watcher.common.metal_helper import constants as metal_constants
from watcher.common import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
try:
from maas.client import enum as maas_enum
except ImportError:
maas_enum = None
class MaasNode(base.BaseMetalNode):
hv_up_when_powered_off = False
def __init__(self, maas_node, nova_node, maas_client):
super().__init__(nova_node)
self._maas_client = maas_client
self._maas_node = maas_node
def get_power_state(self):
maas_state = utils.async_compat_call(
self._maas_node.query_power_state,
timeout=CONF.maas_client.timeout)
# python-libmaas may not be available, so we'll avoid a global
# variable.
power_states_map = {
maas_enum.PowerState.ON: metal_constants.PowerState.ON,
maas_enum.PowerState.OFF: metal_constants.PowerState.OFF,
maas_enum.PowerState.ERROR: metal_constants.PowerState.ERROR,
maas_enum.PowerState.UNKNOWN: metal_constants.PowerState.UNKNOWN,
}
return power_states_map.get(maas_state,
metal_constants.PowerState.UNKNOWN)
def get_id(self):
return self._maas_node.system_id
def power_on(self):
LOG.info("Powering on MAAS node: %s %s",
self._maas_node.fqdn,
self._maas_node.system_id)
utils.async_compat_call(
self._maas_node.power_on,
timeout=CONF.maas_client.timeout)
def power_off(self):
LOG.info("Powering off MAAS node: %s %s",
self._maas_node.fqdn,
self._maas_node.system_id)
utils.async_compat_call(
self._maas_node.power_off,
timeout=CONF.maas_client.timeout)
class MaasHelper(base.BaseMetalHelper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not maas_enum:
raise exception.UnsupportedError(
"MAAS client unavailable. Please install python-libmaas.")
@property
def _client(self):
if not getattr(self, "_cached_client", None):
self._cached_client = self._osc.maas()
return self._cached_client
def list_compute_nodes(self):
out_list = []
node_list = utils.async_compat_call(
self._client.machines.list,
timeout=CONF.maas_client.timeout)
compute_nodes = self.nova_client.hypervisors.list()
compute_node_map = dict()
for compute_node in compute_nodes:
compute_node_map[compute_node.hypervisor_hostname] = compute_node
for node in node_list:
hypervisor_node = compute_node_map.get(node.fqdn)
if not hypervisor_node:
LOG.info('Cannot find hypervisor %s', node.fqdn)
continue
out_node = MaasNode(node, hypervisor_node, self._client)
out_list.append(out_node)
return out_list
def _get_compute_node_by_hostname(self, hostname):
compute_nodes = self.nova_client.hypervisors.search(
hostname, detailed=True)
for compute_node in compute_nodes:
if compute_node.hypervisor_hostname == hostname:
return compute_node
def get_node(self, node_id):
maas_node = utils.async_compat_call(
self._client.machines.get, node_id,
timeout=CONF.maas_client.timeout)
compute_node = self._get_compute_node_by_hostname(maas_node.fqdn)
return MaasNode(maas_node, compute_node, self._client)

View File

@@ -11,7 +11,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from http import HTTPStatus
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
@@ -54,7 +53,7 @@ class PlacementHelper(object):
if rp_name: if rp_name:
url += '?name=%s' % rp_name url += '?name=%s' % rp_name
resp = self.get(url) resp = self.get(url)
if resp.status_code == HTTPStatus.OK: if resp.status_code == 200:
json_resp = resp.json() json_resp = resp.json()
return json_resp['resource_providers'] return json_resp['resource_providers']
@@ -78,7 +77,7 @@ class PlacementHelper(object):
""" """
url = '/resource_providers/%s/inventories' % rp_uuid url = '/resource_providers/%s/inventories' % rp_uuid
resp = self.get(url) resp = self.get(url)
if resp.status_code == HTTPStatus.OK: if resp.status_code == 200:
json = resp.json() json = resp.json()
return json['inventories'] return json['inventories']
msg = ("Failed to get resource provider %(rp_uuid)s inventories. " msg = ("Failed to get resource provider %(rp_uuid)s inventories. "
@@ -98,7 +97,7 @@ class PlacementHelper(object):
""" """
resp = self.get("/resource_providers/%s/traits" % rp_uuid) resp = self.get("/resource_providers/%s/traits" % rp_uuid)
if resp.status_code == HTTPStatus.OK: if resp.status_code == 200:
json = resp.json() json = resp.json()
return json['traits'] return json['traits']
msg = ("Failed to get resource provider %(rp_uuid)s traits. " msg = ("Failed to get resource provider %(rp_uuid)s traits. "
@@ -119,7 +118,7 @@ class PlacementHelper(object):
""" """
url = '/allocations/%s' % consumer_uuid url = '/allocations/%s' % consumer_uuid
resp = self.get(url) resp = self.get(url)
if resp.status_code == HTTPStatus.OK: if resp.status_code == 200:
json = resp.json() json = resp.json()
return json['allocations'] return json['allocations']
msg = ("Failed to get allocations for consumer %(c_uuid). " msg = ("Failed to get allocations for consumer %(c_uuid). "
@@ -140,7 +139,7 @@ class PlacementHelper(object):
""" """
url = '/resource_providers/%s/usages' % rp_uuid url = '/resource_providers/%s/usages' % rp_uuid
resp = self.get(url) resp = self.get(url)
if resp.status_code == HTTPStatus.OK: if resp.status_code == 200:
json = resp.json() json = resp.json()
return json['usages'] return json['usages']
msg = ("Failed to get resource provider %(rp_uuid)s usages. " msg = ("Failed to get resource provider %(rp_uuid)s usages. "
@@ -165,7 +164,7 @@ class PlacementHelper(object):
""" """
url = "/allocation_candidates?%s" % resources url = "/allocation_candidates?%s" % resources
resp = self.get(url) resp = self.get(url)
if resp.status_code == HTTPStatus.OK: if resp.status_code == 200:
data = resp.json() data = resp.json()
return data['provider_summaries'] return data['provider_summaries']

View File

@@ -18,7 +18,6 @@
import sys import sys
from oslo_config import cfg from oslo_config import cfg
from oslo_policy import opts
from oslo_policy import policy from oslo_policy import policy
from watcher.common import exception from watcher.common import exception
@@ -27,12 +26,6 @@ from watcher.common import policies
_ENFORCER = None _ENFORCER = None
CONF = cfg.CONF CONF = cfg.CONF
# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
DEFAULT_POLICY_FILE = 'policy.yaml'
opts.set_defaults(CONF, DEFAULT_POLICY_FILE)
# we can get a policy enforcer by this init. # we can get a policy enforcer by this init.
# oslo policy support change policy rule dynamically. # oslo policy support change policy rule dynamically.

View File

@@ -121,40 +121,22 @@ class RequestContextSerializer(messaging.Serializer):
 def get_client(target, version_cap=None, serializer=None):
     assert TRANSPORT is not None
     serializer = RequestContextSerializer(serializer)
-    return messaging.get_rpc_client(
-        TRANSPORT,
-        target,
-        version_cap=version_cap,
-        serializer=serializer
-    )
+    return messaging.RPCClient(TRANSPORT,
+                               target,
+                               version_cap=version_cap,
+                               serializer=serializer)
 
 
 def get_server(target, endpoints, serializer=None):
     assert TRANSPORT is not None
     access_policy = dispatcher.DefaultRPCAccessPolicy
     serializer = RequestContextSerializer(serializer)
-    return messaging.get_rpc_server(
-        TRANSPORT,
-        target,
-        endpoints,
-        executor='eventlet',
-        serializer=serializer,
-        access_policy=access_policy
-    )
-
-
-def get_notification_listener(targets, endpoints, serializer=None, pool=None):
-    assert NOTIFICATION_TRANSPORT is not None
-    serializer = RequestContextSerializer(serializer)
-    return messaging.get_notification_listener(
-        NOTIFICATION_TRANSPORT,
-        targets,
-        endpoints,
-        allow_requeue=False,
-        executor='eventlet',
-        pool=pool,
-        serializer=serializer
-    )
+    return messaging.get_rpc_server(TRANSPORT,
+                                    target,
+                                    endpoints,
+                                    executor='eventlet',
+                                    serializer=serializer,
+                                    access_policy=access_policy)
 
 
 def get_notifier(publisher_id):
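Note: messaging.get_rpc_client() only exists in newer oslo.messaging releases, so this branch falls back to the RPCClient constructor; both wrap the same transport/target pair. A hedged, self-contained sketch of the constructor form (the topic and version are placeholders, and the fake:// transport avoids needing a broker):

    from oslo_config import cfg
    import oslo_messaging as messaging

    CONF = cfg.CONF


    def build_client(topic='example-topic', version='1.0'):
        transport = messaging.get_rpc_transport(CONF, url='fake://')
        target = messaging.Target(topic=topic, version=version)
        # Constructor form used on stable/ussuri; newer releases offer the
        # equivalent messaging.get_rpc_client() helper removed above.
        return messaging.RPCClient(transport, target, version_cap=version)


    print(build_client())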

View File

@@ -21,12 +21,14 @@ from oslo_concurrency import processutils
from oslo_config import cfg from oslo_config import cfg
from oslo_log import _options from oslo_log import _options
from oslo_log import log from oslo_log import log
import oslo_messaging as messaging import oslo_messaging as om
from oslo_reports import guru_meditation_report as gmr from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts from oslo_reports import opts as gmr_opts
from oslo_service import service from oslo_service import service
from oslo_service import wsgi from oslo_service import wsgi
from oslo_messaging.rpc import dispatcher
from watcher._i18n import _ from watcher._i18n import _
from watcher.api import app from watcher.api import app
from watcher.common import config from watcher.common import config
@@ -181,6 +183,11 @@ class Service(service.ServiceBase):
] ]
self.notification_endpoints = self.manager.notification_endpoints self.notification_endpoints = self.manager.notification_endpoints
self.serializer = rpc.RequestContextSerializer(
base.WatcherObjectSerializer())
self._transport = None
self._notification_transport = None
self._conductor_client = None self._conductor_client = None
self.conductor_topic_handler = None self.conductor_topic_handler = None
@@ -194,17 +201,27 @@ class Service(service.ServiceBase):
self.notification_topics, self.notification_endpoints self.notification_topics, self.notification_endpoints
) )
@property
def transport(self):
if self._transport is None:
self._transport = om.get_rpc_transport(CONF)
return self._transport
@property
def notification_transport(self):
if self._notification_transport is None:
self._notification_transport = om.get_notification_transport(CONF)
return self._notification_transport
     @property
     def conductor_client(self):
         if self._conductor_client is None:
-            target = messaging.Target(
+            target = om.Target(
                 topic=self.conductor_topic,
                 version=self.API_VERSION,
             )
-            self._conductor_client = rpc.get_client(
-                target,
-                serializer=base.WatcherObjectSerializer()
-            )
+            self._conductor_client = om.RPCClient(
+                self.transport, target, serializer=self.serializer)
         return self._conductor_client
@conductor_client.setter @conductor_client.setter
@@ -212,18 +229,21 @@ class Service(service.ServiceBase):
         self.conductor_client = c
 
     def build_topic_handler(self, topic_name, endpoints=()):
-        target = messaging.Target(
+        access_policy = dispatcher.DefaultRPCAccessPolicy
+        serializer = rpc.RequestContextSerializer(rpc.JsonPayloadSerializer())
+        target = om.Target(
             topic=topic_name,
             # For compatibility, we can override it with 'host' opt
             server=CONF.host or socket.gethostname(),
             version=self.api_version,
         )
-        return rpc.get_server(
-            target, endpoints,
-            serializer=rpc.JsonPayloadSerializer()
-        )
+        return om.get_rpc_server(
+            self.transport, target, endpoints,
+            executor='eventlet', serializer=serializer,
+            access_policy=access_policy)
 
     def build_notification_handler(self, topic_names, endpoints=()):
+        serializer = rpc.RequestContextSerializer(rpc.JsonPayloadSerializer())
         targets = []
         for topic in topic_names:
             kwargs = {}
@@ -231,13 +251,11 @@ class Service(service.ServiceBase):
                 exchange, topic = topic.split('.')
                 kwargs['exchange'] = exchange
             kwargs['topic'] = topic
-            targets.append(messaging.Target(**kwargs))
-
-        return rpc.get_notification_listener(
-            targets, endpoints,
-            serializer=rpc.JsonPayloadSerializer(),
-            pool=CONF.host
-        )
+            targets.append(om.Target(**kwargs))
+        return om.get_notification_listener(
+            self.notification_transport, targets, endpoints,
+            executor='eventlet', serializer=serializer,
+            allow_requeue=False, pool=CONF.host)
def start(self): def start(self):
LOG.debug("Connecting to '%s'", CONF.transport_url) LOG.debug("Connecting to '%s'", CONF.transport_url)

View File

@@ -15,9 +15,11 @@
# under the License. # under the License.
import abc import abc
import six
class ServiceManager(object, metaclass=abc.ABCMeta): @six.add_metaclass(abc.ABCMeta)
class ServiceManager(object):
@abc.abstractproperty @abc.abstractproperty
def service_name(self): def service_name(self):

View File

@@ -16,20 +16,19 @@
"""Utilities and helper functions.""" """Utilities and helper functions."""
import asyncio
import datetime import datetime
import inspect import random
import re import re
import string
from croniter import croniter from croniter import croniter
import eventlet
from eventlet import tpool
from jsonschema import validators from jsonschema import validators
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log from oslo_log import log
from oslo_utils import strutils from oslo_utils import strutils
from oslo_utils import uuidutils from oslo_utils import uuidutils
import six
from watcher.common import exception from watcher.common import exception
@@ -83,7 +82,7 @@ def safe_rstrip(value, chars=None):
:return: Stripped value. :return: Stripped value.
""" """
if not isinstance(value, str): if not isinstance(value, six.string_types):
LOG.warning( LOG.warning(
"Failed to remove trailing character. Returning original object." "Failed to remove trailing character. Returning original object."
"Supplied object is not a string: %s,", value) "Supplied object is not a string: %s,", value)
@@ -105,7 +104,7 @@ def is_hostname_safe(hostname):
""" """
m = r'^[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?$' m = r'^[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?$'
return (isinstance(hostname, str) and return (isinstance(hostname, six.string_types) and
(re.match(m, hostname) is not None)) (re.match(m, hostname) is not None))
@@ -158,39 +157,9 @@ def extend_with_strict_schema(validator_class):
 StrictDefaultValidatingDraft4Validator = extend_with_default(
     extend_with_strict_schema(validators.Draft4Validator))
 Draft4Validator = validators.Draft4Validator
 
-# Some clients (e.g. MAAS) use asyncio, which isn't compatible with Eventlet.
-# As a workaround, we're delegating such calls to a native thread.
-def async_compat_call(f, *args, **kwargs):
-    timeout = kwargs.pop('timeout', None)
-
-    async def async_wrapper():
-        ret = f(*args, **kwargs)
-        if inspect.isawaitable(ret):
-            return await asyncio.wait_for(ret, timeout)
-        return ret
-
-    def tpool_wrapper():
-        # This will run in a separate native thread. Ideally, there should be
-        # a single thread permanently running an asyncio loop, but for
-        # convenience we'll use eventlet.tpool, which leverages a thread pool.
-        #
-        # That being considered, we're setting up a temporary asyncio loop to
-        # handle this call.
-        loop = asyncio.new_event_loop()
-        try:
-            asyncio.set_event_loop(loop)
-            return loop.run_until_complete(async_wrapper())
-        finally:
-            loop.close()
-
-    # We'll use eventlet timeouts as an extra precaution and asyncio timeouts
-    # to avoid lingering threads. For consistency, we'll convert eventlet
-    # timeout exceptions to asyncio timeout errors.
-    with eventlet.timeout.Timeout(
-            seconds=timeout,
-            exception=asyncio.TimeoutError("Timeout: %ss" % timeout)):
-        return tpool.execute(tpool_wrapper)
+def random_string(n):
+    return ''.join([random.choice(
+        string.ascii_letters + string.digits) for i in range(n)])
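Note: the helper removed above existed to let asyncio-based clients run under an eventlet-patched service by pushing the event loop into a native thread. A simplified sketch of that approach using only eventlet.tpool (the coroutine is a stand-in for a real client call):

    import asyncio

    from eventlet import tpool


    async def _slow_lookup(value):
        # Stand-in for an asyncio-based client call.
        await asyncio.sleep(0.1)
        return value * 2


    def compat_call(coro_fn, *args):
        # Run the coroutine on a private event loop inside a native thread,
        # mirroring what the removed async_compat_call() did.
        def _runner():
            loop = asyncio.new_event_loop()
            try:
                asyncio.set_event_loop(loop)
                return loop.run_until_complete(coro_fn(*args))
            finally:
                loop.close()

        return tpool.execute(_runner)


    print(compat_call(_slow_lookup, 21))  # 42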

View File

@@ -35,7 +35,6 @@ from watcher.conf import grafana_client
from watcher.conf import grafana_translators from watcher.conf import grafana_translators
from watcher.conf import ironic_client from watcher.conf import ironic_client
from watcher.conf import keystone_client from watcher.conf import keystone_client
from watcher.conf import maas_client
from watcher.conf import monasca_client from watcher.conf import monasca_client
from watcher.conf import neutron_client from watcher.conf import neutron_client
from watcher.conf import nova_client from watcher.conf import nova_client
@@ -55,7 +54,6 @@ db.register_opts(CONF)
planner.register_opts(CONF) planner.register_opts(CONF)
applier.register_opts(CONF) applier.register_opts(CONF)
decision_engine.register_opts(CONF) decision_engine.register_opts(CONF)
maas_client.register_opts(CONF)
monasca_client.register_opts(CONF) monasca_client.register_opts(CONF)
nova_client.register_opts(CONF) nova_client.register_opts(CONF)
glance_client.register_opts(CONF) glance_client.register_opts(CONF)

View File

@@ -43,20 +43,11 @@ APPLIER_MANAGER_OPTS = [
help='Select the engine to use to execute the workflow'), help='Select the engine to use to execute the workflow'),
] ]
APPLIER_OPTS = [
cfg.BoolOpt('rollback_when_actionplan_failed',
default=False,
help='If set True, the failed actionplan will rollback '
'when executing. Default value is False.'),
]
def register_opts(conf): def register_opts(conf):
conf.register_group(watcher_applier) conf.register_group(watcher_applier)
conf.register_opts(APPLIER_MANAGER_OPTS, group=watcher_applier) conf.register_opts(APPLIER_MANAGER_OPTS, group=watcher_applier)
conf.register_opts(APPLIER_OPTS, group=watcher_applier)
def list_opts(): def list_opts():
return [(watcher_applier, APPLIER_MANAGER_OPTS), return [(watcher_applier, APPLIER_MANAGER_OPTS)]
(watcher_applier, APPLIER_OPTS)]

View File

@@ -134,13 +134,7 @@ GRAFANA_CLIENT_OPTS = [
                     "InfluxDB this will be the retention period. "
                     "These queries will need to be constructed using tools "
                     "such as Postman. Example: SELECT cpu FROM {4}."
-                    "cpu_percent WHERE host == '{1}' AND time > now()-{2}s"),
-    cfg.IntOpt('http_timeout',
-               min=0,
-               default=60,
-               mutable=True,
-               help='Timeout for Grafana request')
-]
+                    "cpu_percent WHERE host == '{1}' AND time > now()-{2}s")]
 def register_opts(conf):
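Note on this hunk: the removed http_timeout option bounds Grafana HTTP requests. A hedged sketch of the kind of request it would guard (URL layout and parameters illustrative):

import requests
from oslo_config import cfg

CONF = cfg.CONF

def grafana_query(base_url, project_id, params, headers):
    # requests.exceptions.Timeout is raised when the configured limit is hit.
    return requests.get(base_url + str(project_id) + '/query',
                        params=params, headers=headers,
                        timeout=CONF.grafana_client.http_timeout)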

View File

@@ -1,38 +0,0 @@
-# Copyright 2023 Cloudbase Solutions
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-maas_client = cfg.OptGroup(name='maas_client',
-                           title='Configuration Options for MaaS')
-
-MAAS_CLIENT_OPTS = [
-    cfg.StrOpt('url',
-               help='MaaS URL, example: http://1.2.3.4:5240/MAAS'),
-    cfg.StrOpt('api_key',
-               help='MaaS API authentication key.'),
-    cfg.IntOpt('timeout',
-               default=60,
-               help='MaaS client operation timeout in seconds.')]
-
-
-def register_opts(conf):
-    conf.register_group(maas_client)
-    conf.register_opts(MAAS_CLIENT_OPTS, group=maas_client)
-
-
-def list_opts():
-    return [(maas_client, MAAS_CLIENT_OPTS)]
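Note on this hunk (the whole watcher/conf/maas_client.py module is deleted): for reference, a hedged sketch of how the [maas_client] options it registered would be consumed; the helper function name is illustrative:

from oslo_config import cfg

CONF = cfg.CONF

def maas_connection_params():
    # Values come from the [maas_client] section of watcher.conf.
    return {
        'url': CONF.maas_client.url,          # e.g. http://1.2.3.4:5240/MAAS
        'api_key': CONF.maas_client.api_key,
        'timeout': CONF.maas_client.timeout,  # seconds, defaults to 60
    }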

View File

@@ -18,6 +18,7 @@ Base classes for storage engines
 import abc
 from oslo_config import cfg
 from oslo_db import api as db_api
+import six
 _BACKEND_MAPPING = {'sqlalchemy': 'watcher.db.sqlalchemy.api'}
 IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
@@ -29,7 +30,8 @@ def get_instance():
     return IMPL
-class BaseConnection(object, metaclass=abc.ABCMeta):
+@six.add_metaclass(abc.ABCMeta)
+class BaseConnection(object):
     """Base class for storage system connections."""
     @abc.abstractmethod

View File

@@ -15,6 +15,8 @@
 # limitations under the License.
 #
+from __future__ import print_function
+
 import collections
 import datetime
 import itertools
@@ -23,6 +25,7 @@ import sys
 from oslo_log import log
 from oslo_utils import strutils
 import prettytable as ptable
+from six.moves import input
 from watcher._i18n import _
 from watcher._i18n import lazy_translation_enabled

View File

@@ -13,8 +13,8 @@
 from logging import config as log_config
 from alembic import context
-from oslo_db.sqlalchemy import enginefacade
+from watcher.db.sqlalchemy import api as sqla_api
 from watcher.db.sqlalchemy import models
 # this is the Alembic Config object, which provides
@@ -43,7 +43,7 @@ def run_migrations_online():
     and associate a connection with the context.
     """
-    engine = enginefacade.writer.get_engine()
+    engine = sqla_api.get_engine()
     with engine.connect() as connection:
         context.configure(connection=connection,
                           target_metadata=target_metadata)

View File

@@ -6,7 +6,6 @@ Create Date: 2017-03-24 11:21:29.036532
 """
 from alembic import op
-from sqlalchemy import inspect
 import sqlalchemy as sa
 from watcher.db.sqlalchemy import models
@@ -15,17 +14,8 @@ from watcher.db.sqlalchemy import models
 revision = '0f6042416884'
 down_revision = '001'
-def _table_exists(table_name):
-    bind = op.get_context().bind
-    insp = inspect(bind)
-    names = insp.get_table_names()
-    return any(t == table_name for t in names)
-
-
 def upgrade():
-    if _table_exists('apscheduler_jobs'):
-        return
-
     op.create_table(
         'apscheduler_jobs',
         sa.Column('id', sa.Unicode(191, _warn_on_bytestring=False),
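Note on this hunk: the removed guard made the migration idempotent when the apscheduler_jobs table already exists. A hedged sketch of the same pattern in isolation (table name taken from the diff, the create_table body elided):

from alembic import op
from sqlalchemy import inspect

def _table_exists(table_name):
    # Inspect the migration's live connection for existing tables.
    insp = inspect(op.get_context().bind)
    return table_name in insp.get_table_names()

def upgrade():
    if _table_exists('apscheduler_jobs'):
        return  # table already present, nothing to create
    # op.create_table('apscheduler_jobs', ...) would follow here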

View File

@@ -19,12 +19,10 @@
 import collections
 import datetime
 import operator
-import threading
 from oslo_config import cfg
-from oslo_db import api as oslo_db_api
 from oslo_db import exception as db_exc
-from oslo_db.sqlalchemy import enginefacade
+from oslo_db.sqlalchemy import session as db_session
 from oslo_db.sqlalchemy import utils as db_utils
 from oslo_utils import timeutils
 from sqlalchemy.inspection import inspect
@@ -40,7 +38,24 @@ from watcher import objects
 CONF = cfg.CONF
-_CONTEXT = threading.local()
+_FACADE = None
+
+
+def _create_facade_lazily():
+    global _FACADE
+    if _FACADE is None:
+        _FACADE = db_session.EngineFacade.from_config(CONF)
+    return _FACADE
+
+
+def get_engine():
+    facade = _create_facade_lazily()
+    return facade.get_engine()
+
+
+def get_session(**kwargs):
+    facade = _create_facade_lazily()
+    return facade.get_session(**kwargs)
 def get_backend():
@@ -48,15 +63,14 @@ def get_backend():
     return Connection()
-def _session_for_read():
-    return enginefacade.reader.using(_CONTEXT)
-
-
-# NOTE(tylerchristie) Please add @oslo_db_api.retry_on_deadlock decorator to
-# any new methods using _session_for_write (as deadlocks happen on write), so
-# that oslo_db is able to retry in case of deadlocks.
-def _session_for_write():
-    return enginefacade.writer.using(_CONTEXT)
+def model_query(model, *args, **kwargs):
+    """Query helper for simpler session usage.
+
+    :param session: if present, the session to use
+    """
+    session = kwargs.get('session') or get_session()
+    query = session.query(model, *args)
+    return query
 def add_identity_filter(query, value):
@@ -79,6 +93,8 @@ def add_identity_filter(query, value):
 def _paginate_query(model, limit=None, marker=None, sort_key=None,
                     sort_dir=None, query=None):
+    if not query:
+        query = model_query(model)
     sort_keys = ['id']
     if sort_key and sort_key not in sort_keys:
         sort_keys.insert(0, sort_key)
@@ -231,54 +247,49 @@ class Connection(api.BaseConnection):
             query = query.options(joinedload(relationship.key))
         return query
-    @oslo_db_api.retry_on_deadlock
     def _create(self, model, values):
-        with _session_for_write() as session:
-            obj = model()
-            cleaned_values = {k: v for k, v in values.items()
-                              if k not in self._get_relationships(model)}
-            obj.update(cleaned_values)
-            session.add(obj)
-            session.flush()
-            return obj
+        obj = model()
+        cleaned_values = {k: v for k, v in values.items()
+                          if k not in self._get_relationships(model)}
+        obj.update(cleaned_values)
+        obj.save()
+        return obj
     def _get(self, context, model, fieldname, value, eager):
-        with _session_for_read() as session:
-            query = session.query(model)
-            if eager:
-                query = self._set_eager_options(model, query)
-            query = query.filter(getattr(model, fieldname) == value)
-            if not context.show_deleted:
-                query = query.filter(model.deleted_at.is_(None))
-            try:
-                obj = query.one()
-            except exc.NoResultFound:
-                raise exception.ResourceNotFound(name=model.__name__, id=value)
-            return obj
+        query = model_query(model)
+        if eager:
+            query = self._set_eager_options(model, query)
+        query = query.filter(getattr(model, fieldname) == value)
+        if not context.show_deleted:
+            query = query.filter(model.deleted_at.is_(None))
+        try:
+            obj = query.one()
+        except exc.NoResultFound:
+            raise exception.ResourceNotFound(name=model.__name__, id=value)
+        return obj
     @staticmethod
-    @oslo_db_api.retry_on_deadlock
     def _update(model, id_, values):
-        with _session_for_write() as session:
-            query = session.query(model)
+        session = get_session()
+        with session.begin():
+            query = model_query(model, session=session)
             query = add_identity_filter(query, id_)
             try:
-                ref = query.with_for_update().one()
+                ref = query.with_lockmode('update').one()
             except exc.NoResultFound:
                 raise exception.ResourceNotFound(name=model.__name__, id=id_)
             ref.update(values)
-        return ref
+            return ref
     @staticmethod
-    @oslo_db_api.retry_on_deadlock
     def _soft_delete(model, id_):
-        with _session_for_write() as session:
-            query = session.query(model)
+        session = get_session()
+        with session.begin():
+            query = model_query(model, session=session)
            query = add_identity_filter(query, id_)
            try:
                row = query.one()
@@ -290,10 +301,10 @@ class Connection(api.BaseConnection):
             return row
     @staticmethod
-    @oslo_db_api.retry_on_deadlock
     def _destroy(model, id_):
-        with _session_for_write() as session:
-            query = session.query(model)
+        session = get_session()
+        with session.begin():
+            query = model_query(model, session=session)
             query = add_identity_filter(query, id_)
             try:
@@ -306,15 +317,14 @@ class Connection(api.BaseConnection):
     def _get_model_list(self, model, add_filters_func, context, filters=None,
                         limit=None, marker=None, sort_key=None, sort_dir=None,
                         eager=False):
-        with _session_for_read() as session:
-            query = session.query(model)
-            if eager:
-                query = self._set_eager_options(model, query)
-            query = add_filters_func(query, filters)
-            if not context.show_deleted:
-                query = query.filter(model.deleted_at.is_(None))
-            return _paginate_query(model, limit, marker,
-                                   sort_key, sort_dir, query)
+        query = model_query(model)
+        if eager:
+            query = self._set_eager_options(model, query)
+        query = add_filters_func(query, filters)
+        if not context.show_deleted:
+            query = query.filter(model.deleted_at.is_(None))
+        return _paginate_query(model, limit, marker,
+                               sort_key, sort_dir, query)
     # NOTE(erakli): _add_..._filters methods should be refactored to have same
     # content. join_fieldmap should be filled with JoinMap instead of dict
@@ -409,12 +419,11 @@ class Connection(api.BaseConnection):
             plain_fields=plain_fields, join_fieldmap=join_fieldmap)
         if 'audit_uuid' in filters:
-            with _session_for_read() as session:
-                stmt = session.query(models.ActionPlan).join(
-                    models.Audit,
-                    models.Audit.id == models.ActionPlan.audit_id)\
-                    .filter_by(uuid=filters['audit_uuid']).subquery()
-                query = query.filter_by(action_plan_id=stmt.c.id)
+            stmt = model_query(models.ActionPlan).join(
+                models.Audit,
+                models.Audit.id == models.ActionPlan.audit_id)\
+                .filter_by(uuid=filters['audit_uuid']).subquery()
+            query = query.filter_by(action_plan_id=stmt.c.id)
         return query
@@ -592,21 +601,20 @@ class Connection(api.BaseConnection):
         if not values.get('uuid'):
             values['uuid'] = utils.generate_uuid()
-        with _session_for_write() as session:
-            query = session.query(models.AuditTemplate)
-            query = query.filter_by(name=values.get('name'),
-                                    deleted_at=None)
+        query = model_query(models.AuditTemplate)
+        query = query.filter_by(name=values.get('name'),
+                                deleted_at=None)
         if len(query.all()) > 0:
             raise exception.AuditTemplateAlreadyExists(
                 audit_template=values['name'])
         try:
             audit_template = self._create(models.AuditTemplate, values)
         except db_exc.DBDuplicateEntry:
             raise exception.AuditTemplateAlreadyExists(
                 audit_template=values['name'])
         return audit_template
     def _get_audit_template(self, context, fieldname, value, eager):
         try:
@@ -668,26 +676,25 @@ class Connection(api.BaseConnection):
         if not values.get('uuid'):
             values['uuid'] = utils.generate_uuid()
-        with _session_for_write() as session:
-            query = session.query(models.Audit)
-            query = query.filter_by(name=values.get('name'),
-                                    deleted_at=None)
+        query = model_query(models.Audit)
+        query = query.filter_by(name=values.get('name'),
+                                deleted_at=None)
         if len(query.all()) > 0:
             raise exception.AuditAlreadyExists(
                 audit=values['name'])
         if values.get('state') is None:
             values['state'] = objects.audit.State.PENDING
         if not values.get('auto_trigger'):
             values['auto_trigger'] = False
         try:
             audit = self._create(models.Audit, values)
         except db_exc.DBDuplicateEntry:
             raise exception.AuditAlreadyExists(audit=values['uuid'])
         return audit
     def _get_audit(self, context, fieldname, value, eager):
         try:
@@ -711,13 +718,14 @@ class Connection(api.BaseConnection):
     def destroy_audit(self, audit_id):
         def is_audit_referenced(session, audit_id):
             """Checks whether the audit is referenced by action_plan(s)."""
-            query = session.query(models.ActionPlan)
+            query = model_query(models.ActionPlan, session=session)
             query = self._add_action_plans_filters(
                 query, {'audit_id': audit_id})
             return query.count() != 0
-        with _session_for_write() as session:
-            query = session.query(models.Audit)
+        session = get_session()
+        with session.begin():
+            query = model_query(models.Audit, session=session)
             query = add_identity_filter(query, audit_id)
             try:
@@ -784,8 +792,9 @@ class Connection(api.BaseConnection):
             context, fieldname="uuid", value=action_uuid, eager=eager)
     def destroy_action(self, action_id):
-        with _session_for_write() as session:
-            query = session.query(models.Action)
+        session = get_session()
+        with session.begin():
+            query = model_query(models.Action, session=session)
             query = add_identity_filter(query, action_id)
             count = query.delete()
             if count != 1:
@@ -801,16 +810,17 @@ class Connection(api.BaseConnection):
     @staticmethod
     def _do_update_action(action_id, values):
-        with _session_for_write() as session:
-            query = session.query(models.Action)
+        session = get_session()
+        with session.begin():
+            query = model_query(models.Action, session=session)
             query = add_identity_filter(query, action_id)
             try:
-                ref = query.with_for_update().one()
+                ref = query.with_lockmode('update').one()
             except exc.NoResultFound:
                 raise exception.ActionNotFound(action=action_id)
             ref.update(values)
         return ref
     def soft_delete_action(self, action_id):
         try:
@@ -854,13 +864,14 @@ class Connection(api.BaseConnection):
     def destroy_action_plan(self, action_plan_id):
         def is_action_plan_referenced(session, action_plan_id):
             """Checks whether the action_plan is referenced by action(s)."""
-            query = session.query(models.Action)
+            query = model_query(models.Action, session=session)
             query = self._add_actions_filters(
                 query, {'action_plan_id': action_plan_id})
             return query.count() != 0
-        with _session_for_write() as session:
-            query = session.query(models.ActionPlan)
+        session = get_session()
+        with session.begin():
+            query = model_query(models.ActionPlan, session=session)
             query = add_identity_filter(query, action_plan_id)
             try:
@@ -884,16 +895,17 @@ class Connection(api.BaseConnection):
     @staticmethod
     def _do_update_action_plan(action_plan_id, values):
-        with _session_for_write() as session:
-            query = session.query(models.ActionPlan)
+        session = get_session()
+        with session.begin():
+            query = model_query(models.ActionPlan, session=session)
             query = add_identity_filter(query, action_plan_id)
             try:
-                ref = query.with_for_update().one()
+                ref = query.with_lockmode('update').one()
             except exc.NoResultFound:
                 raise exception.ActionPlanNotFound(action_plan=action_plan_id)
             ref.update(values)
             return ref
     def soft_delete_action_plan(self, action_plan_id):
         try:
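Note on this hunk: it swaps the oslo.db enginefacade transaction context managers (used on the newer branch) back to the legacy EngineFacade/get_session pattern kept on stable/ussuri. A compressed, hedged sketch of the two styles, assuming oslo.db is already configured with a database connection and `model` is any mapped class:

import threading

from oslo_config import cfg
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import session as db_session

CONF = cfg.CONF
_CONTEXT = threading.local()   # per-thread transaction context, as in the removed code

def read_rows_new(model):
    # Newer (removed) pattern: reader/writer transaction context managers.
    with enginefacade.reader.using(_CONTEXT) as session:
        return session.query(model).all()

def read_rows_old(model):
    # Ussuri (restored) pattern: a lazily created EngineFacade hands out
    # sessions explicitly.
    facade = db_session.EngineFacade.from_config(CONF)
    session = facade.get_session()
    return session.query(model).all()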

View File

@@ -20,9 +20,9 @@ import alembic
 from alembic import config as alembic_config
 import alembic.migration as alembic_migration
 from oslo_db import exception as db_exc
-from oslo_db.sqlalchemy import enginefacade
 from watcher._i18n import _
+from watcher.db.sqlalchemy import api as sqla_api
 from watcher.db.sqlalchemy import models
@@ -39,7 +39,7 @@ def version(engine=None):
     :rtype: string
     """
     if engine is None:
-        engine = enginefacade.reader.get_engine()
+        engine = sqla_api.get_engine()
     with engine.connect() as conn:
         context = alembic_migration.MigrationContext.configure(conn)
         return context.get_current_revision()
@@ -63,7 +63,7 @@ def create_schema(config=None, engine=None):
     Can be used for initial installation instead of upgrade('head').
     """
     if engine is None:
-        engine = enginefacade.writer.get_engine()
+        engine = sqla_api.get_engine()
     # NOTE(viktors): If we will use metadata.create_all() for non empty db
     #                schema, it will only add the new tables, but leave

View File

@@ -18,6 +18,7 @@ SQLAlchemy models for watcher service
 from oslo_db.sqlalchemy import models
 from oslo_serialization import jsonutils
+import six.moves.urllib.parse as urlparse
 from sqlalchemy import Boolean
 from sqlalchemy import Column
 from sqlalchemy import DateTime
@@ -32,7 +33,7 @@ from sqlalchemy import String
 from sqlalchemy import Text
 from sqlalchemy.types import TypeDecorator, TEXT
 from sqlalchemy import UniqueConstraint
-import urllib.parse as urlparse
 from watcher import conf
 CONF = conf.CONF
@@ -93,6 +94,14 @@ class WatcherBase(models.SoftDeleteMixin,
         d[c.name] = self[c.name]
         return d
+    def save(self, session=None):
+        import watcher.db.sqlalchemy.api as db_api
+
+        if session is None:
+            session = db_api.get_session()
+
+        super(WatcherBase, self).save(session)
 Base = declarative_base(cls=WatcherBase)
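Note on this hunk: the restored save() override lets model instances persist themselves on an implicitly created session. A hedged sketch (field values are illustrative and a configured database is assumed):

from watcher.db.sqlalchemy import models

# Declarative models accept column keyword arguments; values here are placeholders.
service = models.Service(name='watcher_applier', host='node-1')
service.save()   # no session passed, so the override falls back to db_api.get_session()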

View File

@@ -18,6 +18,7 @@
 # limitations under the License.
 #
 import abc
+import six
 from oslo_config import cfg
 from oslo_log import log
@@ -35,11 +36,9 @@ CONF = cfg.CONF
 LOG = log.getLogger(__name__)
-class BaseMetaClass(service.Singleton, abc.ABCMeta):
-    pass
-
-
-class BaseAuditHandler(object, metaclass=BaseMetaClass):
+@six.add_metaclass(abc.ABCMeta)
+@six.add_metaclass(service.Singleton)
+class BaseAuditHandler(object):
     @abc.abstractmethod
     def execute(self, audit, request_context):
@@ -58,7 +57,8 @@ class BaseAuditHandler(object, metaclass=BaseMetaClass):
     raise NotImplementedError()
-class AuditHandler(BaseAuditHandler, metaclass=abc.ABCMeta):
+@six.add_metaclass(abc.ABCMeta)
+class AuditHandler(BaseAuditHandler):
     def __init__(self):
         super(AuditHandler, self).__init__()
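Note on this hunk: it trades the Python 3 metaclass keyword for six decorators (the removed code also combined service.Singleton and abc.ABCMeta into one explicit metaclass, while the restored code stacks two decorators). A minimal sketch of the two spellings, with illustrative class names:

import abc

import six

class Py3Style(object, metaclass=abc.ABCMeta):
    """Python 3 only: the metaclass is passed in the class header."""

@six.add_metaclass(abc.ABCMeta)
class SixStyle(object):
    """Python 2/3 compatible: six rebuilds the class with the metaclass."""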

View File

@@ -52,7 +52,7 @@ class ContinuousAuditHandler(base.AuditHandler):
         self._audit_scheduler = scheduling.BackgroundSchedulerService(
             jobstores={
                 'default': job_store.WatcherJobStore(
-                    engine=sq_api.enginefacade.writer.get_engine()),
+                    engine=sq_api.get_engine()),
             }
         )
         return self._audit_scheduler

View File

@@ -19,8 +19,6 @@ import time
 from oslo_config import cfg
 from oslo_log import log
-from watcher.common import exception
 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
@@ -56,14 +54,7 @@ class DataSourceBase(object):
         instance_root_disk_size=None,
     )
-    def _get_meter(self, meter_name):
-        """Retrieve the meter from the metric map or raise error"""
-        meter = self.METRIC_MAP.get(meter_name)
-        if meter is None:
-            raise exception.MetricNotAvailable(metric=meter_name)
-        return meter
-
-    def query_retry(self, f, *args, ignored_exc=None, **kwargs):
+    def query_retry(self, f, *args, **kwargs):
         """Attempts to retrieve metrics from the external service
         Attempts to access data from the external service and handles
@@ -71,23 +62,15 @@
         to the value of query_max_retries
         :param f: The method that performs the actual querying for metrics
         :param args: Array of arguments supplied to the method
-        :param ignored_exc: An exception or tuple of exceptions that shouldn't
-                            be retried, for example "NotFound" exceptions.
         :param kwargs: The amount of arguments supplied to the method
         :return: The value as retrieved from the external service
         """
         num_retries = CONF.watcher_datasources.query_max_retries
         timeout = CONF.watcher_datasources.query_timeout
-        ignored_exc = ignored_exc or tuple()
         for i in range(num_retries):
             try:
                 return f(*args, **kwargs)
-            except ignored_exc as e:
-                LOG.debug("Got an ignored exception (%s) while calling: %s ",
-                          e, f)
-                return
             except Exception as e:
                 LOG.exception(e)
                 self.query_retry_reset(e)
@@ -139,30 +122,6 @@
         pass
-    @abc.abstractmethod
-    def statistic_series(self, resource=None, resource_type=None,
-                         meter_name=None, start_time=None, end_time=None,
-                         granularity=300):
-        """Retrieves metrics based on the specified parameters over a period
-
-        :param resource: Resource object as defined in watcher models such as
-                         ComputeNode and Instance
-        :param resource_type: Indicates which type of object is supplied
-                              to the resource parameter
-        :param meter_name: The desired metric to retrieve as key from
-                           METRIC_MAP
-        :param start_time: The datetime to start retrieving metrics for
-        :type start_time: datetime.datetime
-        :param end_time: The datetime to limit the retrieval of metrics to
-        :type end_time: datetime.datetime
-        :param granularity: Interval between samples in measurements in
-                            seconds
-        :return: Dictionary of key value pairs with timestamps and metric
-                 values
-        """
-        pass
     @abc.abstractmethod
     def get_host_cpu_usage(self, resource, period, aggregate,
                            granularity=None):
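Note on this hunk: the removed ignored_exc parameter of query_retry let callers skip retries for exceptions that signal a permanent condition (such as a missing resource). A hedged sketch of how a datasource helper would use it; the helper instance and metric name are illustrative:

from gnocchiclient import exceptions as gnc_exc

def fetch_measures(helper, metric, resource_id):
    # A NotFound from Gnocchi is not retried: query_retry logs it at debug
    # level and returns None; any other exception still goes through the
    # retry loop up to query_max_retries times.
    return helper.query_retry(
        f=helper.gnocchi.metric.get_measures,
        ignored_exc=gnc_exc.NotFound,
        metric=metric,
        resource_id=resource_id)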

View File

@@ -161,7 +161,9 @@ class CeilometerHelper(base.DataSourceBase):
         end_time = datetime.datetime.utcnow()
         start_time = end_time - datetime.timedelta(seconds=int(period))
-        meter = self._get_meter(meter_name)
+        meter = self.METRIC_MAP.get(meter_name)
+        if meter is None:
+            raise exception.MetricNotAvailable(metric=meter_name)
         if aggregate == 'mean':
             aggregate = 'avg'
@@ -192,12 +194,6 @@ class CeilometerHelper(base.DataSourceBase):
             item_value *= 10
         return item_value
-    def statistic_series(self, resource=None, resource_type=None,
-                         meter_name=None, start_time=None, end_time=None,
-                         granularity=300):
-        raise NotImplementedError(
-            _('Ceilometer helper does not support statistic series method'))
     def get_host_cpu_usage(self, resource, period,
                            aggregate, granularity=None):

View File

@@ -19,11 +19,11 @@
 from datetime import datetime
 from datetime import timedelta
-from gnocchiclient import exceptions as gnc_exc
 from oslo_config import cfg
 from oslo_log import log
 from watcher.common import clients
+from watcher.common import exception
 from watcher.decision_engine.datasources import base
 CONF = cfg.CONF
@@ -39,7 +39,7 @@ class GnocchiHelper(base.DataSourceBase):
         host_inlet_temp='hardware.ipmi.node.temperature',
         host_airflow='hardware.ipmi.node.airflow',
         host_power='hardware.ipmi.node.power',
-        instance_cpu_usage='cpu',
+        instance_cpu_usage='cpu_util',
         instance_ram_usage='memory.resident',
         instance_ram_allocated='memory',
         instance_l3_cache_usage='cpu_l3_cache',
@@ -72,7 +72,9 @@ class GnocchiHelper(base.DataSourceBase):
             stop_time = datetime.utcnow()
             start_time = stop_time - timedelta(seconds=(int(period)))
-        meter = self._get_meter(meter_name)
+        meter = self.METRIC_MAP.get(meter_name)
+        if meter is None:
+            raise exception.MetricNotAvailable(metric=meter_name)
         if aggregate == 'count':
             aggregate = 'mean'
@@ -85,9 +87,7 @@ class GnocchiHelper(base.DataSourceBase):
             kwargs = dict(query={"=": {"original_resource_id": resource_id}},
                           limit=1)
             resources = self.query_retry(
-                f=self.gnocchi.resource.search,
-                ignored_exc=gnc_exc.NotFound,
-                **kwargs)
+                f=self.gnocchi.resource.search, **kwargs)
             if not resources:
                 LOG.warning("The {0} resource {1} could not be "
@@ -96,25 +96,6 @@ class GnocchiHelper(base.DataSourceBase):
             resource_id = resources[0]['id']
-        if meter_name == "instance_cpu_usage":
-            if resource_type != "instance":
-                LOG.warning("Unsupported resource type for metric "
-                            "'instance_cpu_usage': ", resource_type)
-                return
-
-            # The "cpu_util" gauge (percentage) metric has been removed.
-            # We're going to obtain the same result by using the rate of change
-            # aggregate operation.
-            if aggregate not in ("mean", "rate:mean"):
-                LOG.warning("Unsupported aggregate for instance_cpu_usage "
-                            "metric: %s. "
-                            "Supported aggregates: mean, rate:mean ",
-                            aggregate)
-                return
-
-            # TODO(lpetrut): consider supporting other aggregates.
-            aggregate = "rate:mean"
-
         raw_kwargs = dict(
             metric=meter,
             start=start_time,
@@ -127,9 +108,7 @@ class GnocchiHelper(base.DataSourceBase):
         kwargs = {k: v for k, v in raw_kwargs.items() if k and v}
         statistics = self.query_retry(
-            f=self.gnocchi.metric.get_measures,
-            ignored_exc=gnc_exc.NotFound,
-            **kwargs)
+            f=self.gnocchi.metric.get_measures, **kwargs)
         return_value = None
         if statistics:
@@ -141,67 +120,6 @@
             # Airflow from hardware.ipmi.node.airflow is reported as
             # 1/10 th of actual CFM
             return_value *= 10
-            if meter_name == "instance_cpu_usage":
-                # "rate:mean" can return negative values for migrated vms.
-                return_value = max(0, return_value)
-
-                # We're converting the cumulative cpu time (ns) to cpu usage
-                # percentage.
-                vcpus = resource.vcpus
-                if not vcpus:
-                    LOG.warning("instance vcpu count not set, assuming 1")
-                    vcpus = 1
-                return_value *= 100 / (granularity * 10e+8) / vcpus
-
-        return return_value
-
-    def statistic_series(self, resource=None, resource_type=None,
-                         meter_name=None, start_time=None, end_time=None,
-                         granularity=300):
-        meter = self._get_meter(meter_name)
-        resource_id = resource.uuid
-
-        if resource_type == 'compute_node':
-            resource_id = "%s_%s" % (resource.hostname, resource.hostname)
-            kwargs = dict(query={"=": {"original_resource_id": resource_id}},
-                          limit=1)
-            resources = self.query_retry(
-                f=self.gnocchi.resource.search,
-                ignored_exc=gnc_exc.NotFound,
-                **kwargs)
-
-            if not resources:
-                LOG.warning("The {0} resource {1} could not be "
-                            "found".format(self.NAME, resource_id))
-                return
-
-            resource_id = resources[0]['id']
-
-        raw_kwargs = dict(
-            metric=meter,
-            start=start_time,
-            stop=end_time,
-            resource_id=resource_id,
-            granularity=granularity,
-        )
-
-        kwargs = {k: v for k, v in raw_kwargs.items() if k and v}
-
-        statistics = self.query_retry(
-            f=self.gnocchi.metric.get_measures,
-            ignored_exc=gnc_exc.NotFound,
-            **kwargs)
-
-        return_value = None
-        if statistics:
-            # measure has structure [time, granularity, value]
-            if meter_name == 'host_airflow':
-                # Airflow from hardware.ipmi.node.airflow is reported as
-                # 1/10 th of actual CFM
-                return_value = {s[0]: s[2]*10 for s in statistics}
-            else:
-                return_value = {s[0]: s[2] for s in statistics}
-
-        return return_value
         return return_value
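Note on this hunk: the removed block converts the cumulative 'cpu' metric (nanoseconds of CPU time, aggregated with rate:mean) back into the utilization percentage that the dropped 'cpu_util' gauge used to report. A worked example of that conversion under assumed figures (not taken from the diff):

# Assumed inputs: 300 s granularity, 2 vCPUs, and rate:mean reporting that the
# instance consumed 3.0e+11 ns of CPU time during the window.
granularity = 300
vcpus = 2
rate_mean_ns = 3.0e+11

# 100 / (granularity * 1e9) turns "ns of CPU per window" into a percentage of
# one vCPU (note 10e+8 == 1e9); dividing by vcpus averages across all vCPUs.
cpu_percent = rate_mean_ns * 100 / (granularity * 10e+8) / vcpus
print(cpu_percent)  # 50.0 -> the instance averaged 50% of its 2 vCPUs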

View File

@@ -16,13 +16,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from urllib import parse as urlparse
-from http import HTTPStatus
-
 from oslo_config import cfg
 from oslo_log import log
+import six.moves.urllib.parse as urlparse
-from watcher._i18n import _
 from watcher.common import clients
 from watcher.common import exception
 from watcher.decision_engine.datasources import base
@@ -138,13 +135,12 @@ class GrafanaHelper(base.DataSourceBase):
             raise exception.DataSourceNotAvailable(self.NAME)
         resp = requests.get(self._base_url + str(project_id) + '/query',
-                            params=params, headers=self._headers,
-                            timeout=CONF.grafana_client.http_timeout)
-        if resp.status_code == HTTPStatus.OK:
+                            params=params, headers=self._headers)
+        if resp.status_code == 200:
             return resp
-        elif resp.status_code == HTTPStatus.BAD_REQUEST:
+        elif resp.status_code == 400:
             LOG.error("Query for metric is invalid")
-        elif resp.status_code == HTTPStatus.UNAUTHORIZED:
+        elif resp.status_code == 401:
             LOG.error("Authorization token is invalid")
         raise exception.DataSourceNotAvailable(self.NAME)
@@ -191,12 +187,6 @@ class GrafanaHelper(base.DataSourceBase):
         return result
-    def statistic_series(self, resource=None, resource_type=None,
-                         meter_name=None, start_time=None, end_time=None,
-                         granularity=300):
-        raise NotImplementedError(
-            _('Grafana helper does not support statistic series method'))
     def get_host_cpu_usage(self, resource, period=300,
                            aggregate="mean", granularity=None):
         return self.statistic_aggregation(
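Note on this hunk: the symbolic http.HTTPStatus names dropped here are exactly the literal codes the ussuri code keeps using, so the behaviour is unchanged. A minimal sketch of the equivalence:

from http import HTTPStatus

assert HTTPStatus.OK == 200
assert HTTPStatus.BAD_REQUEST == 400
assert HTTPStatus.UNAUTHORIZED == 401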

Some files were not shown because too many files have changed in this diff.