Compare commits

..

8 Commits

Author SHA1 Message Date
Zuul
e37bbf3be3 Merge "Don't throw exception when missing metrics" into stable/train 2019-10-21 02:07:33 +00:00
licanwei
8e143ca8bf Remove print()
Change-Id: Ida31237b77e98c803cb1ccb3bd5b190289434207
(cherry picked from commit aa36e6a881)
2019-10-18 06:28:45 +00:00
licanwei
e5884a963b Don't throw exception when missing metrics
When querying data from datasource, it's possible to miss some data.
In this case if we throw an exception, Audit will fail because of
the exception. We should remove the exception and give the decision
to the strategy.

Change-Id: I1b0e6b78b3bba4df9ba16e093b3910aab1de922e
Closes-Bug: #1847434
(cherry picked from commit f685bf62ab)
2019-10-18 02:54:10 +00:00
Zuul
85763ccfce Merge "Update TOX/UPPER_CONSTRAINTS_FILE for stable/train" into stable/train 2019-09-27 01:41:35 +00:00
Zuul
1ffb7ef0e7 Merge "Update .gitreview for stable/train" into stable/train 2019-09-27 01:41:35 +00:00
OpenStack Proposal Bot
c02ddd58a1 Imported Translations from Zanata
For more information about this automatic import see:
https://docs.openstack.org/i18n/latest/reviewing-translation-import.html

Change-Id: Ia813b39b68e523facde25a9130020767cc7ab1fb
2019-09-26 08:42:58 +00:00
OpenStack Release Bot
7abb57dcd3 Update TOX/UPPER_CONSTRAINTS_FILE for stable/train
Update the URL to the upper-constraints file to point to the redirect
rule on releases.openstack.org so that anyone working on this branch
will switch to the correct upper-constraints list automatically when
the requirements repository branches.

Until the requirements repository has a stable/train branch, tests will
continue to use the upper-constraints list on master.

Change-Id: Ia9e0181e1013445b62abd8128c189eafa83906db
2019-09-25 08:46:31 +00:00
OpenStack Release Bot
963d026d06 Update .gitreview for stable/train
Change-Id: I9a272eb7c13c274cd8e362838ee3168295bd3b84
2019-09-25 08:46:29 +00:00
102 changed files with 568 additions and 2348 deletions

View File

@@ -2,3 +2,4 @@
host=review.opendev.org host=review.opendev.org
port=29418 port=29418
project=openstack/watcher.git project=openstack/watcher.git
defaultbranch=stable/train

View File

@@ -3,7 +3,8 @@
- check-requirements - check-requirements
- openstack-cover-jobs - openstack-cover-jobs
- openstack-lower-constraints-jobs - openstack-lower-constraints-jobs
- openstack-python3-ussuri-jobs - openstack-python-jobs
- openstack-python3-train-jobs
- publish-openstack-docs-pti - publish-openstack-docs-pti
- release-notes-jobs-python3 - release-notes-jobs-python3
check: check:
@@ -181,7 +182,6 @@
s-proxy: false s-proxy: false
devstack_localrc: devstack_localrc:
TEMPEST_PLUGINS: /opt/stack/watcher-tempest-plugin TEMPEST_PLUGINS: /opt/stack/watcher-tempest-plugin
USE_PYTHON3: true
tempest_test_regex: watcher_tempest_plugin.tests.api tempest_test_regex: watcher_tempest_plugin.tests.api
tox_envlist: all tox_envlist: all
tox_environment: tox_environment:

View File

@@ -1,6 +1,6 @@
======= ========================
Watcher Team and repository tags
======= ========================
.. image:: https://governance.openstack.org/tc/badges/watcher.svg .. image:: https://governance.openstack.org/tc/badges/watcher.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html :target: https://governance.openstack.org/tc/reference/tags/index.html
@@ -13,6 +13,10 @@ Watcher
https://creativecommons.org/licenses/by/3.0/ https://creativecommons.org/licenses/by/3.0/
=======
Watcher
=======
OpenStack Watcher provides a flexible and scalable resource optimization OpenStack Watcher provides a flexible and scalable resource optimization
service for multi-tenant OpenStack-based clouds. service for multi-tenant OpenStack-based clouds.
Watcher provides a robust framework to realize a wide range of cloud Watcher provides a robust framework to realize a wide range of cloud

View File

@@ -16,4 +16,3 @@ Watcher API
.. include:: watcher-api-v1-services.inc .. include:: watcher-api-v1-services.inc
.. include:: watcher-api-v1-scoring_engines.inc .. include:: watcher-api-v1-scoring_engines.inc
.. include:: watcher-api-v1-datamodel.inc .. include:: watcher-api-v1-datamodel.inc
.. include:: watcher-api-v1-webhooks.inc

View File

@@ -4,8 +4,6 @@
Data Model Data Model
========== ==========
.. versionadded:: 1.3
``Data Model`` is very important for Watcher to generate resource ``Data Model`` is very important for Watcher to generate resource
optimization solutions. Users can easily view the data model by the optimization solutions. Users can easily view the data model by the
API. API.
@@ -20,7 +18,7 @@ Returns the information about Data Model.
Normal response codes: 200 Normal response codes: 200
Error codes: 400,401,406 Error codes: 400,401
Request Request
------- -------

View File

@@ -1,26 +0,0 @@
.. -*- rst -*-
========
Webhooks
========
.. versionadded:: 1.4
Triggers an event based Audit.
Trigger EVENT Audit
===================
.. rest_method:: POST /v1/webhooks/{audit_ident}
Normal response codes: 202
Error codes: 400,404
Request
-------
.. rest_parameters:: parameters.yaml
- audit_ident: audit_ident

View File

@@ -298,7 +298,7 @@ function start_watcher_api {
service_protocol="http" service_protocol="http"
fi fi
if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then
run_process "watcher-api" "$WATCHER_BIN_DIR/uwsgi --procname-prefix watcher-api --ini $WATCHER_UWSGI_CONF" run_process "watcher-api" "$WATCHER_BIN_DIR/uwsgi --ini $WATCHER_UWSGI_CONF"
watcher_url=$service_protocol://$SERVICE_HOST/infra-optim watcher_url=$service_protocol://$SERVICE_HOST/infra-optim
else else
watcher_url=$service_protocol://$SERVICE_HOST:$service_port watcher_url=$service_protocol://$SERVICE_HOST:$service_port

View File

@@ -2,7 +2,8 @@
# of appearance. Changing the order has an impact on the overall integration # of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
openstackdocstheme>=1.20.0 # Apache-2.0 openstackdocstheme>=1.20.0 # Apache-2.0
sphinx>=1.8.0,!=2.1.0,!=3.0.0 # BSD sphinx>=1.6.5,!=1.6.6,!=1.6.7,<2.0.0;python_version=='2.7' # BSD
sphinx>=1.6.5,!=1.6.6,!=1.6.7,!=2.1.0;python_version>='3.4' # BSD
sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0 sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
reno>=2.7.0 # Apache-2.0 reno>=2.7.0 # Apache-2.0

View File

@@ -8,7 +8,6 @@ Administrator Guide
apache-mod-wsgi apache-mod-wsgi
gmr gmr
policy policy
ways-to-install
../strategies/index ../strategies/index
../datasources/index ../datasources/index
../contributor/notifications
../contributor/concurrency

View File

@@ -281,13 +281,11 @@ previously created :ref:`Audit template <audit_template_definition>`:
:width: 100% :width: 100%
The :ref:`Administrator <administrator_definition>` also can specify type of The :ref:`Administrator <administrator_definition>` also can specify type of
Audit and interval (in case of CONTINUOUS type). There is three types of Audit: Audit and interval (in case of CONTINUOUS type). There is two types of Audit:
ONESHOT, CONTINUOUS and EVENT. ONESHOT Audit is launched once and if it ONESHOT and CONTINUOUS. Oneshot Audit is launched once and if it succeeded
succeeded executed new action plan list will be provided; CONTINUOUS Audit executed new action plan list will be provided. Continuous Audit creates
creates action plans with specified interval (in seconds or cron format, cron action plans with specified interval (in seconds); if action plan
interval can be used like: `*/5 * * * *`), if action plan has been created, all previous action plans get CANCELLED state.
has been created, all previous action plans get CANCELLED state;
EVENT audit is launched when receiving webhooks API.
A message is sent on the :ref:`AMQP bus <amqp_bus_definition>` which triggers A message is sent on the :ref:`AMQP bus <amqp_bus_definition>` which triggers
the Audit in the the Audit in the

View File

@@ -36,6 +36,7 @@ extensions = [
'sphinxcontrib.httpdomain', 'sphinxcontrib.httpdomain',
'sphinxcontrib.pecanwsme.rest', 'sphinxcontrib.pecanwsme.rest',
'stevedore.sphinxext', 'stevedore.sphinxext',
'wsmeext.sphinxext',
'ext.term', 'ext.term',
'ext.versioned_notifications', 'ext.versioned_notifications',
'oslo_config.sphinxconfiggen', 'oslo_config.sphinxconfiggen',

View File

@@ -1,248 +0,0 @@
===========
Concurrency
===========
Introduction
************
Modern processors typically contain multiple cores all capable of executing
instructions in parallel. Ensuring applications can fully utilize modern
underlying hardware requires developing with these concepts in mind. The
OpenStack foundation maintains a number of libraries to facilitate this
utilization, combined with constructs like CPython's GIL_ the proper use of
these concepts becomes more straightforward compared to other programming
languages.
The primary libraries maintained by OpenStack to facilitate concurrency are
futurist_ and taskflow_. Here futurist is a more straightforward and
lightweight library while taskflow is more advanced supporting features like
rollback mechanisms. Within Watcher both libraries are used to facilitate
concurrency.
.. _GIL: https://wiki.python.org/moin/GlobalInterpreterLock
.. _futurist: https://docs.openstack.org/futurist/latest/
.. _taskflow: https://docs.openstack.org/taskflow/latest/
Threadpool
**********
A threadpool is a collection of one or more threads typically called *workers*
to which tasks can be submitted. These submitted tasks will be scheduled by a
threadpool and subsequently executed. In the case of Python tasks typically are
bounded or unbounded methods while other programming languages like Java
require implementing an interface.
The order and amount of concurrency with which these tasks are executed is up
to the threadpool to decide. Some libraries like taskflow allow for either
strong or loose ordering of tasks while others like futurist might only support
loose ordering. Taskflow supports building tree-based hierarchies of dependent
tasks for example.
Upon submission of a task to a threadpool a so called future_ is returned.
These objects allow to determine information about the task such as if it is
currently being executed or if it has finished execution. When the task has
finished execution the future can also be used to retrieve what was returned by
the method.
Some libraries like futurist provide synchronization primitives for collections
of futures such as wait_for_any_. The following sections will cover different
types of concurrency used in various services of Watcher.
.. _future: https://docs.python.org/3/library/concurrent.futures.html
.. _wait_for_any: https://docs.openstack.org/futurist/latest/reference/index.html#waiters
Decision engine concurrency
***************************
The concurrency in the decision engine is governed by two independent
threadpools. Both of these threadpools are GreenThreadPoolExecutor_ from the
futurist_ library. One of these is used automatically and most contributors
will not interact with it while developing new features. The other threadpool
can frequently be used while developing new features or updating existing ones.
It is known as the DecisionEngineThreadpool and allows to achieve performance
improvements in network or I/O bound operations.
.. _GreenThreadPoolExecutor: https://docs.openstack.org/futurist/latest/reference/index.html#executors
AuditEndpoint
#############
The first threadpool is used to allow multiple audits to be run in parallel.
In practice, however, only one audit can run at a time. This is due to
the data model used by audits being a singleton. To prevent audits destroying
each others data model one must wait for the other to complete before being
allowed to access this data model. A performance improvement could be achieved
by being more intelligent in the use, caching and construction of these
data models.
DecisionEngineThreadPool
########################
The second threadpool is used for generic tasks, typically networking and I/O
could benefit the most of this threadpool. Upon execution of an audit this
threadpool can be utilized to retrieve information from the Nova compute
service for instance. This second threadpool is a singleton and is shared
amongst concurrently running audits as a result the amount of workers is static
and independent from the amount of workers in the first threadpool. The use of
the :class:`~.DecisionEngineThreadpool` while building the Nova compute data
model is demonstrated to show how it can effectively be used.
In the following example a reference to the
:class:`~.DecisionEngineThreadpool` is stored in ``self.executor``. Here two
tasks are submitted one with function ``self._collect_aggregates`` and the
other function ``self._collect_zones``. With both ``self.executor.submit``
calls subsequent arguments are passed to the function. All subsequent arguments
are passed to the function being submitted as task following the common
``(fn, *args, **kwargs)`` signature. One of the original signatures would be
``def _collect_aggregates(host_aggregates, compute_nodes)`` for example.
.. code-block:: python
zone_aggregate_futures = {
self.executor.submit(
self._collect_aggregates, host_aggregates, compute_nodes),
self.executor.submit(
self._collect_zones, availability_zones, compute_nodes)
}
waiters.wait_for_all(zone_aggregate_futures)
The last statement of the example above waits on all futures to complete.
Similarly, ``waiters.wait_for_any`` will wait for any future of the specified
collection to complete. To simplify the usage of ``wait_for_any`` the
:class:`~.DecisionEngineThreadpool` defines a ``do_while_futures`` method.
This method will iterate in a do_while loop over a collection of futures until
all of them have completed. The advantage of ``do_while_futures`` is that it
allows to immediately call a method as soon as a future finishes. The arguments
for this callback method can be supplied when calling ``do_while_futures``,
however, the first argument to the callback is always the future itself! If
the collection of futures can safely be modified ``do_while_futures_modify``
can be used and should have slightly better performance. The following example
will show how ``do_while_futures`` is used in the decision engine.
.. code-block:: python
# For every compute node from compute_nodes submit a task to gather the node's information.
# List comprehension is used to store all the futures of the submitted tasks in node_futures.
node_futures = [self.executor.submit(
self.nova_helper.get_compute_node_by_name,
node, servers=True, detailed=True)
for node in compute_nodes]
LOG.debug("submitted {0} jobs".format(len(compute_nodes)))
future_instances = []
# do_while iterate over node_futures and upon completion of a future call
# self._compute_node_future with the future and future_instances as arguments.
self.executor.do_while_futures_modify(
node_futures, self._compute_node_future, future_instances)
# Wait for all instance jobs to finish
waiters.wait_for_all(future_instances)
Finally, let's demonstrate how powerful this ``do_while_futures`` can be by
showing what the ``compute_node_future`` callback does. First, it retrieves the
result from the future and adds the compute node to the data model. Afterwards,
it checks if the compute node has any associated instances and if so it submits
an additional task to the :class:`~.DecisionEngineThreadpool`. The future is
appended to the ``future_instances`` so ``waiters.wait_for_all`` can be called
on this list. This is important as otherwise the building of the data model
might return before all tasks for instances have finished.
.. code-block:: python
# Get the result from the future.
node_info = future.result()[0]
# Filter out baremetal nodes.
if node_info.hypervisor_type == 'ironic':
LOG.debug("filtering out baremetal node: %s", node_info)
return
# Add the compute node to the data model.
self.add_compute_node(node_info)
# Get the instances from the compute node.
instances = getattr(node_info, "servers", None)
# Do not submit job if there are no instances on compute node.
if instances is None:
LOG.info("No instances on compute_node: {0}".format(node_info))
return
# Submit a job to retrieve detailed information about the instances.
future_instances.append(
self.executor.submit(
self.add_instance_node, node_info, instances)
)
Without ``do_while_futures`` an additional ``waiters.wait_for_all`` would be
required in between the compute node tasks and the instance tasks. This would
cause the progress of the decision engine to stall as fewer and fewer tasks
remain active before the instance tasks could be submitted. This demonstrates
how ``do_while_futures`` can be used to achieve more constant utilization of
the underlying hardware.
Applier concurrency
*******************
The applier does not use the futurist_ GreenThreadPoolExecutor_ directly but
instead uses taskflow_. However, taskflow still utilizes a greenthreadpool.
This threadpool is initialized in the workflow engine called
:class:`~.DefaultWorkFlowEngine`. Currently Watcher supports one workflow
engine but the base class allows contributors to develop other workflow engines
as well. In taskflow tasks are created using different types of flows such as a
linear, unordered or a graph flow. The linear and graph flow allow for strong
ordering between individual tasks and it is for this reason that the workflow
engine utilizes a graph flow. The creation of tasks, subsequently linking them
into a graph like structure and submitting them is shown below.
.. code-block:: python
self.execution_rule = self.get_execution_rule(actions)
flow = gf.Flow("watcher_flow")
actions_uuid = {}
for a in actions:
task = TaskFlowActionContainer(a, self)
flow.add(task)
actions_uuid[a.uuid] = task
for a in actions:
for parent_id in a.parents:
flow.link(actions_uuid[parent_id], actions_uuid[a.uuid],
decider=self.decider)
e = engines.load(
flow, executor='greenthreaded', engine='parallel',
max_workers=self.config.max_workers)
e.run()
return flow
In the applier tasks are contained in a :class:`~.TaskFlowActionContainer`
which allows them to trigger events in the workflow engine. This way the
workflow engine can halt or take other actions while the action plan is being
executed based on the success or failure of individual actions. However, the
base workflow engine simply uses these notifies to store the result of
individual actions in the database. Additionally, since taskflow uses a graph
flow, if any of the tasks fails, all children of that task will not be executed
while ``do_revert`` will be triggered for all parents.
.. code-block:: python
class TaskFlowActionContainer(...):
...
def do_execute(self, *args, **kwargs):
...
result = self.action.execute()
if result is True:
return self.engine.notify(self._db_action,
objects.action.State.SUCCEEDED)
else:
self.engine.notify(self._db_action,
objects.action.State.FAILED)
class BaseWorkFlowEngine(...):
...
def notify(self, action, state):
db_action = objects.Action.get_by_uuid(self.context, action.uuid,
eager=True)
db_action.state = state
db_action.save()
return db_action

View File

@@ -1,111 +1,71 @@
============================ ..
So You Want to Contribute... Except where otherwise noted, this document is licensed under Creative
============================ Commons Attribution 3.0 License. You can view the license at:
For general information on contributing to OpenStack, please check out the https://creativecommons.org/licenses/by/3.0/
`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
It covers all the basics that are common to all OpenStack projects:
the accounts you need, the basics of interacting with our Gerrit review system,
how we communicate as a community, etc.
Below will cover the more project specific information you need to get started .. _contributing:
with Watcher.
Communication =======================
~~~~~~~~~~~~~~ Contributing to Watcher
.. This would be a good place to put the channel you chat in as a project; when/ =======================
where your meeting is, the tags you prepend to your ML threads, etc.
If you're interested in contributing to the Watcher project,
the following will help get you started.
Contributor License Agreement
-----------------------------
.. index::
single: license; agreement
In order to contribute to the Watcher project, you need to have
signed OpenStack's contributor's agreement.
.. seealso::
* https://docs.openstack.org/infra/manual/developers.html
* https://wiki.openstack.org/CLA
LaunchPad Project
-----------------
Most of the tools used for OpenStack depend on a launchpad.net ID for
authentication. After signing up for a launchpad account, join the
"openstack" team to have access to the mailing list and receive
notifications of important events.
.. seealso::
* https://launchpad.net
* https://launchpad.net/watcher
* https://launchpad.net/openstack
Project Hosting Details
-----------------------
Bug tracker
https://launchpad.net/watcher
Mailing list (prefix subjects with ``[watcher]`` for faster responses)
http://lists.openstack.org/pipermail/openstack-discuss/
Wiki
https://wiki.openstack.org/Watcher
Code Hosting
https://opendev.org/openstack/watcher
Code Review
https://review.opendev.org/#/q/status:open+project:openstack/watcher,n,z
IRC Channel IRC Channel
``#openstack-watcher`` (changelog_) ``#openstack-watcher`` (changelog_)
Mailing list(prefix subjects with ``[watcher]``)
http://lists.openstack.org/pipermail/openstack-discuss/
Weekly Meetings Weekly Meetings
Bi-weekly, on Wednesdays at 08:00 UTC on odd weeks in the Bi-weekly, on Wednesdays at 08:00 UTC on odd weeks in the
``#openstack-meeting-alt`` IRC channel (`meetings logs`_) ``#openstack-meeting-alt`` IRC channel (`meetings logs`_)
Meeting Agenda
https://wiki.openstack.org/wiki/Watcher_Meeting_Agenda
.. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/ .. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/
.. _meetings logs: http://eavesdrop.openstack.org/meetings/watcher/ .. _meetings logs: http://eavesdrop.openstack.org/meetings/watcher/
Contacting the Core Team
~~~~~~~~~~~~~~~~~~~~~~~~~
.. This section should list the core team, their irc nicks, emails, timezones etc.
If all this info is maintained elsewhere (i.e. a wiki), you can link to that
instead of enumerating everyone here.
+--------------------+---------------+------------------------------------+
| Name | IRC | Email |
+====================+===============+====================================+
| `Li Canwei`_ | licanwei | li.canwei2@zte.com.cn |
+--------------------+---------------+------------------------------------+
| `chen ke`_ | chenke | chen.ke14@zte.com.cn |
+--------------------+---------------+------------------------------------+
| `Corne Lukken`_ | dantalion | info@dantalion.nl |
+--------------------+---------------+------------------------------------+
| `su zhengwei`_ | suzhengwei | sugar-2008@163.com |
+--------------------+---------------+------------------------------------+
| `Yumeng Bao`_ | Yumeng | yumeng_bao@yahoo.com |
+--------------------+---------------+------------------------------------+
.. _Corne Lukken: https://launchpad.net/~dantalion
.. _Li Canwei: https://launchpad.net/~li-canwei2
.. _su zhengwei: https://launchpad.net/~sue.sam
.. _Yumeng Bao: https://launchpad.net/~yumeng-bao
.. _chen ke: https://launchpad.net/~chenker
New Feature Planning
~~~~~~~~~~~~~~~~~~~~
.. This section is for talking about the process to get a new feature in. Some
projects use blueprints, some want specs, some want both! Some projects
stick to a strict schedule when selecting what new features will be reviewed
for a release.
New feature will be discussed via IRC or ML (with [Watcher] prefix).
Watcher team uses blueprints in `Launchpad`_ to manage the new features.
.. _Launchpad: https://launchpad.net/watcher
Task Tracking
~~~~~~~~~~~~~~
.. This section is about where you track tasks- launchpad? storyboard?
is there more than one launchpad project? what's the name of the project
group in storyboard?
We track our tasks in Launchpad.
If you're looking for some smaller, easier work item to pick up and get started
on, search for the 'low-hanging-fruit' tag.
.. NOTE: If your tag is not 'low-hanging-fruit' please change the text above.
Reporting a Bug
~~~~~~~~~~~~~~~
.. Pretty self explanatory section, link directly to where people should report bugs for
your project.
You found an issue and want to make sure we are aware of it? You can do so
`HERE`_.
.. _HERE: https://bugs.launchpad.net/watcher
Getting Your Patch Merged
~~~~~~~~~~~~~~~~~~~~~~~~~
.. This section should have info about what it takes to get something merged.
Do you require one or two +2's before +W? Do some of your repos require
unit test changes with all patches? etc.
Due to the small number of core reviewers of the Watcher project,
we only need one +2 before +W (merge). All patches except documentation
or typo fixes must have unit tests.
Project Team Lead Duties
------------------------
.. this section is where you can put PTL specific duties not already listed in
the common PTL guide (linked below) or if you already have them written
up elsewhere, you can link to that doc here.
All common PTL duties are enumerated here in the `PTL guide <https://docs.openstack.org/project-team-guide/ptl.html>`_.

View File

@@ -1,12 +1,8 @@
==================
Contribution Guide
==================
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 1
contributing
environment environment
devstack devstack
notifications
testing testing
rally_link rally_link

View File

@@ -1,7 +1,3 @@
============
Plugin Guide
============
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1

View File

@@ -4,9 +4,9 @@
https://creativecommons.org/licenses/by/3.0/ https://creativecommons.org/licenses/by/3.0/
================= =======
Developer Testing Testing
================= =======
.. _unit_tests: .. _unit_tests:
@@ -15,7 +15,7 @@ Unit tests
All unit tests should be run using `tox`_. Before running the unit tests, you All unit tests should be run using `tox`_. Before running the unit tests, you
should download the latest `watcher`_ from the github. To run the same unit should download the latest `watcher`_ from the github. To run the same unit
tests that are executing onto `Gerrit`_ which includes ``py36``, ``py37`` and tests that are executing onto `Gerrit`_ which includes ``py35``, ``py27`` and
``pep8``, you can issue the following command:: ``pep8``, you can issue the following command::
$ git clone https://opendev.org/openstack/watcher $ git clone https://opendev.org/openstack/watcher
@@ -26,8 +26,8 @@ tests that are executing onto `Gerrit`_ which includes ``py36``, ``py37`` and
If you only want to run one of the aforementioned, you can then issue one of If you only want to run one of the aforementioned, you can then issue one of
the following:: the following::
$ tox -e py36 $ tox -e py35
$ tox -e py37 $ tox -e py27
$ tox -e pep8 $ tox -e pep8
.. _tox: https://tox.readthedocs.org/ .. _tox: https://tox.readthedocs.org/
@@ -38,7 +38,7 @@ If you only want to run specific unit test code and don't like to waste time
waiting for all unit tests to execute, you can add parameters ``--`` followed waiting for all unit tests to execute, you can add parameters ``--`` followed
by a regex string:: by a regex string::
$ tox -e py37 -- watcher.tests.api $ tox -e py27 -- watcher.tests.api
.. _tempest_tests: .. _tempest_tests:

View File

@@ -32,21 +32,91 @@ specific prior release.
.. _python-watcherclient: https://opendev.org/openstack/python-watcherclient/ .. _python-watcherclient: https://opendev.org/openstack/python-watcherclient/
.. _watcher-dashboard: https://opendev.org/openstack/watcher-dashboard/ .. _watcher-dashboard: https://opendev.org/openstack/watcher-dashboard/
Developer Guide
===============
Introduction
------------
.. toctree::
:maxdepth: 1
glossary
architecture
contributor/contributing
Getting Started
---------------
.. toctree::
:maxdepth: 1
contributor/index
Installation
============
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 2
architecture
contributor/index
install/index install/index
Admin Guide
===========
.. toctree::
:maxdepth: 2
admin/index admin/index
User Guide
==========
.. toctree::
:maxdepth: 2
user/index user/index
configuration/index
contributor/plugin/index API References
man/index ==============
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
API Reference <https://docs.openstack.org/api-ref/resource-optimization/> API Reference <https://docs.openstack.org/api-ref/resource-optimization/>
Watcher API Microversion History </contributor/api_microversion_history> Watcher API Microversion History </contributor/api_microversion_history>
glossary
Plugins
-------
.. toctree::
:maxdepth: 1
contributor/plugin/index
Watcher Configuration Options
=============================
.. toctree::
:maxdepth: 2
configuration/index
Watcher Manual Pages
====================
.. toctree::
:glob:
:maxdepth: 1
man/index
.. only:: html
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@@ -1,6 +1,6 @@
============= ===================================
Install Guide Infrastructure Optimization service
============= ===================================
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 2

View File

@@ -1,7 +1,3 @@
====================
Watcher Manual Pages
====================
.. toctree:: .. toctree::
:glob: :glob:
:maxdepth: 1 :maxdepth: 1

View File

@@ -1,195 +0,0 @@
..
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
======================
Audit using Aodh alarm
======================
An audit with EVENT type can be triggered by a special alarm. This guide walks
you through the steps to build an event-driven optimization solution by
integrating Watcher with Ceilometer/Aodh.
Step 1: Create an audit with EVENT type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The first step is to create an audit with EVENT type,
you can create an audit template firstly:
.. code-block:: bash
$ openstack optimize audittemplate create your_template_name <your_goal> \
--strategy <your_strategy>
or create an audit directly with special goal and strategy:
.. code-block:: bash
$ openstack optimize audit create --goal <your_goal> \
--strategy <your_strategy> --audit_type EVENT
This is an example for creating an audit with dummy strategy:
.. code-block:: bash
$ openstack optimize audit create --goal dummy \
--strategy dummy --audit_type EVENT
+---------------+--------------------------------------+
| Field | Value |
+---------------+--------------------------------------+
| UUID | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 |
| Name | dummy-2020-01-14T03:21:19.168467 |
| Created At | 2020-01-14T03:21:19.200279+00:00 |
| Updated At | None |
| Deleted At | None |
| State | PENDING |
| Audit Type | EVENT |
| Parameters | {u'para2': u'hello', u'para1': 3.2} |
| Interval | None |
| Goal | dummy |
| Strategy | dummy |
| Audit Scope | [] |
| Auto Trigger | False |
| Next Run Time | None |
| Hostname | None |
| Start Time | None |
| End Time | None |
| Force | False |
+---------------+--------------------------------------+
We need to build Aodh action url using Watcher webhook API.
For convenience we export the url into an environment variable:
.. code-block:: bash
$ export AUDIT_UUID=a3326a6a-c18e-4e8e-adba-d0c61ad404c5
$ export ALARM_URL="trust+http://localhost/infra-optim/v1/webhooks/$AUDIT_UUID"
Step 2: Create Aodh Alarm
~~~~~~~~~~~~~~~~~~~~~~~~~
Once we have the audit created, we can continue to create Aodh alarm and
set the alarm action to the Watcher webhook API. The alarm type can be event
(e.g. ``compute.instance.create.end``) or gnocchi_resources_threshold (e.g.
``cpu_util``); for more information, refer to alarm-creation_
For example:
.. code-block:: bash
$ openstack alarm create \
--type event --name instance_create \
--event-type "compute.instance.create.end" \
--enable True --repeat-actions False \
--alarm-action $ALARM_URL
+---------------------------+------------------------------------------------------------------------------------------+
| Field | Value |
+---------------------------+------------------------------------------------------------------------------------------+
| alarm_actions | [u'trust+http://localhost/infra-optim/v1/webhooks/a3326a6a-c18e-4e8e-adba-d0c61ad404c5'] |
| alarm_id | b9e381fc-8e3e-4943-82ee-647e7a2ef644 |
| description | Alarm when compute.instance.create.end event occurred. |
| enabled | True |
| event_type | compute.instance.create.end |
| insufficient_data_actions | [] |
| name | instance_create |
| ok_actions | [] |
| project_id | 728d66e18c914af1a41e2a585cf766af |
| query | |
| repeat_actions | False |
| severity | low |
| state | insufficient data |
| state_reason | Not evaluated yet |
| state_timestamp | 2020-01-14T03:56:26.894416 |
| time_constraints | [] |
| timestamp | 2020-01-14T03:56:26.894416 |
| type | event |
| user_id | 88c40156af7445cc80580a1e7e3ba308 |
+---------------------------+------------------------------------------------------------------------------------------+
.. _alarm-creation: https://docs.openstack.org/aodh/latest/admin/telemetry-alarms.html#alarm-creation
Step 3: Trigger the alarm
~~~~~~~~~~~~~~~~~~~~~~~~~
In this example, you can create a new instance to trigger the alarm.
The alarm state will transition from ``insufficient data`` to ``alarm``.
.. code-block:: bash
$ openstack alarm show b9e381fc-8e3e-4943-82ee-647e7a2ef644
+---------------------------+-------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+---------------------------+-------------------------------------------------------------------------------------------------------------------+
| alarm_actions | [u'trust+http://localhost/infra-optim/v1/webhooks/a3326a6a-c18e-4e8e-adba-d0c61ad404c5'] |
| alarm_id | b9e381fc-8e3e-4943-82ee-647e7a2ef644 |
| description | Alarm when compute.instance.create.end event occurred. |
| enabled | True |
| event_type | compute.instance.create.end |
| insufficient_data_actions | [] |
| name | instance_create |
| ok_actions | [] |
| project_id | 728d66e18c914af1a41e2a585cf766af |
| query | |
| repeat_actions | False |
| severity | low |
| state | alarm |
| state_reason | Event <id=67dd0afa-2082-45a4-8825-9573b2cc60e5,event_type=compute.instance.create.end> hits the query <query=[]>. |
| state_timestamp | 2020-01-14T03:56:26.894416 |
| time_constraints | [] |
| timestamp | 2020-01-14T06:17:40.350649 |
| type | event |
| user_id | 88c40156af7445cc80580a1e7e3ba308 |
+---------------------------+-------------------------------------------------------------------------------------------------------------------+
Step 4: Verify the audit
~~~~~~~~~~~~~~~~~~~~~~~~
You can verify the audit by checking that its state is ``SUCCEEDED``:
.. code-block:: bash
$ openstack optimize audit show a3326a6a-c18e-4e8e-adba-d0c61ad404c5
+---------------+--------------------------------------+
| Field | Value |
+---------------+--------------------------------------+
| UUID | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 |
| Name | dummy-2020-01-14T03:21:19.168467 |
| Created At | 2020-01-14T03:21:19+00:00 |
| Updated At | 2020-01-14T06:26:40+00:00 |
| Deleted At | None |
| State | SUCCEEDED |
| Audit Type | EVENT |
| Parameters | {u'para2': u'hello', u'para1': 3.2} |
| Interval | None |
| Goal | dummy |
| Strategy | dummy |
| Audit Scope | [] |
| Auto Trigger | False |
| Next Run Time | None |
| Hostname | ubuntudbs |
| Start Time | None |
| End Time | None |
| Force | False |
+---------------+--------------------------------------+
and you can use the following command to check if the action plan
was created:
.. code-block:: bash
$ openstack optimize actionplan list --audit a3326a6a-c18e-4e8e-adba-d0c61ad404c5
+--------------------------------------+--------------------------------------+-------------+------------+-----------------+
| UUID | Audit | State | Updated At | Global efficacy |
+--------------------------------------+--------------------------------------+-------------+------------+-----------------+
| 673b3fcb-8c16-4a41-9ee3-2956d9f6ca9e | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 | RECOMMENDED | None | |
+--------------------------------------+--------------------------------------+-------------+------------+-----------------+

View File

@@ -1,10 +1,4 @@
==========
User Guide
==========
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 2
ways-to-install
user-guide user-guide
event_type_audit

View File

@@ -4,6 +4,8 @@
https://creativecommons.org/licenses/by/3.0/ https://creativecommons.org/licenses/by/3.0/
.. _user-guide:
================== ==================
Watcher User Guide Watcher User Guide
================== ==================
@@ -58,8 +60,8 @@ plugin installation guide`_.
.. _`OpenStack CLI`: https://docs.openstack.org/python-openstackclient/latest/cli/man/openstack.html .. _`OpenStack CLI`: https://docs.openstack.org/python-openstackclient/latest/cli/man/openstack.html
.. _`Watcher CLI`: https://docs.openstack.org/python-watcherclient/latest/cli/index.html .. _`Watcher CLI`: https://docs.openstack.org/python-watcherclient/latest/cli/index.html
Watcher CLI Command Seeing what the Watcher CLI can do ?
------------------- ------------------------------------
We can see all of the commands available with Watcher CLI by running the We can see all of the commands available with Watcher CLI by running the
watcher binary without options. watcher binary without options.
@@ -67,8 +69,8 @@ watcher binary without options.
$ openstack help optimize $ openstack help optimize
Running an audit of the cluster How do I run an audit of my cluster ?
------------------------------- -------------------------------------
First, you need to find the :ref:`goal <goal_definition>` you want to achieve: First, you need to find the :ref:`goal <goal_definition>` you want to achieve:

View File

@@ -30,6 +30,7 @@ eventlet==0.20.0
extras==1.0.0 extras==1.0.0
fasteners==0.14.1 fasteners==0.14.1
fixtures==3.0.0 fixtures==3.0.0
flake8==2.5.5
freezegun==0.3.10 freezegun==0.3.10
future==0.16.0 future==0.16.0
futurist==1.8.0 futurist==1.8.0
@@ -37,6 +38,7 @@ gitdb2==2.0.3
GitPython==2.1.8 GitPython==2.1.8
gnocchiclient==7.0.1 gnocchiclient==7.0.1
greenlet==0.4.13 greenlet==0.4.13
hacking==0.12.0
idna==2.6 idna==2.6
imagesize==1.0.0 imagesize==1.0.0
iso8601==0.1.12 iso8601==0.1.12
@@ -62,7 +64,7 @@ msgpack==0.5.6
munch==2.2.0 munch==2.2.0
netaddr==0.7.19 netaddr==0.7.19
netifaces==0.10.6 netifaces==0.10.6
networkx==2.2 networkx==1.11
openstackdocstheme==1.20.0 openstackdocstheme==1.20.0
openstacksdk==0.12.0 openstacksdk==0.12.0
os-api-ref===1.4.0 os-api-ref===1.4.0
@@ -93,12 +95,14 @@ Paste==2.0.3
PasteDeploy==1.5.2 PasteDeploy==1.5.2
pbr==3.1.1 pbr==3.1.1
pecan==1.3.2 pecan==1.3.2
pep8==1.5.7
pika==0.10.0 pika==0.10.0
pika-pool==0.1.3 pika-pool==0.1.3
prettytable==0.7.2 prettytable==0.7.2
psutil==5.4.3 psutil==5.4.3
pycadf==2.7.0 pycadf==2.7.0
pycparser==2.18 pycparser==2.18
pyflakes==0.8.1
Pygments==2.2.0 Pygments==2.2.0
pyinotify==0.9.6 pyinotify==0.9.6
pyOpenSSL==17.5.0 pyOpenSSL==17.5.0
@@ -141,7 +145,7 @@ sqlparse==0.2.4
statsd==3.2.2 statsd==3.2.2
stestr==2.0.0 stestr==2.0.0
stevedore==1.28.0 stevedore==1.28.0
taskflow==3.7.1 taskflow==3.1.0
Tempita==0.5.2 Tempita==0.5.2
tenacity==4.9.0 tenacity==4.9.0
testresources==2.0.1 testresources==2.0.1

View File

@@ -40,7 +40,7 @@
export DEVSTACK_GATE_TEMPEST_NOTESTS=1 export DEVSTACK_GATE_TEMPEST_NOTESTS=1
export DEVSTACK_GATE_GRENADE=pullup export DEVSTACK_GATE_GRENADE=pullup
export DEVSTACK_GATE_USE_PYTHON3=True
export BRANCH_OVERRIDE=default export BRANCH_OVERRIDE=default
if [ "$BRANCH_OVERRIDE" != "default" ] ; then if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE

View File

@@ -1,6 +0,0 @@
---
upgrade:
- |
Python 2.7 support has been dropped. Last release of Watcher
to support py2.7 is OpenStack Train. The minimum version of Python now
supported by Watcher is Python 3.6.

View File

@@ -1,8 +0,0 @@
---
features:
- |
Add a new webhook API and a new audit type EVENT, the microversion is 1.4.
Now Watcher user can create audit with EVENT type and the audit will be
triggered by webhook API.
The user guide is available online:
https://docs.openstack.org/watcher/latest/user/event_type_audit.html

View File

@@ -1,20 +0,0 @@
---
prelude: >
Many operations in the decision engine will block on I/O. Such I/O
operations can stall the execution of a sequential application
significantly. To reduce the potential bottleneck of many operations
the general purpose decision engine threadpool is introduced.
features:
- |
A new threadpool for the decision engine that contributors can use to
improve the performance of many operations, primarily I/O bound ones.
The amount of workers used by the decision engine threadpool can be
configured to scale according to the available infrastructure using
the `watcher_decision_engine.max_general_workers` config option.
Documentation for contributors to effectively use this threadpool is
available online:
https://docs.openstack.org/watcher/latest/contributor/concurrency.html
- |
The building of the compute (Nova) data model will be done using the
decision engine threadpool, thereby, significantly reducing the total
time required to build it.

View File

@@ -21,7 +21,6 @@ Contents:
:maxdepth: 1 :maxdepth: 1
unreleased unreleased
train
stein stein
rocky rocky
queens queens

View File

@@ -1,651 +0,0 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
msgid ""
msgstr ""
"Project-Id-Version: python-watcher\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-11-08 01:22+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2018-11-07 06:15+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid "0.29.0"
msgstr "0.29.0"
msgid "0.34.0"
msgstr "0.34.0"
msgid "1.0.0"
msgstr "1.0.0"
msgid "1.1.0"
msgstr "1.1.0"
msgid "1.10.0"
msgstr "1.10.0"
msgid "1.11.0"
msgstr "1.11.0"
msgid "1.3.0"
msgstr "1.3.0"
msgid "1.4.0"
msgstr "1.4.0"
msgid "1.4.1"
msgstr "1.4.1"
msgid "1.5.0"
msgstr "1.5.0"
msgid "1.6.0"
msgstr "1.6.0"
msgid "1.7.0"
msgstr "1.7.0"
msgid "1.9.0"
msgstr "1.9.0"
msgid "Add a service supervisor to watch Watcher deamons."
msgstr "Add a service supervisor to watch Watcher daemons."
msgid "Add action for compute node power on/off"
msgstr "Add action for compute node power on/off"
msgid ""
"Add description property for dynamic action. Admin can see detail "
"information of any specify action."
msgstr ""
"Add description property for dynamic action. Admin can see detail "
"information of any specify action."
msgid "Add notifications related to Action object."
msgstr "Add notifications related to Action object."
msgid "Add notifications related to Action plan object."
msgstr "Add notifications related to Action plan object."
msgid "Add notifications related to Audit object."
msgstr "Add notifications related to Audit object."
msgid "Add notifications related to Service object."
msgstr "Add notifications related to Service object."
msgid ""
"Add start_time and end_time fields in audits table. User can set the start "
"time and/or end time when creating CONTINUOUS audit."
msgstr ""
"Add start_time and end_time fields in audits table. User can set the start "
"time and/or end time when creating CONTINUOUS audit."
msgid ""
"Add superseded state for an action plan if the cluster data model has "
"changed after it has been created."
msgstr ""
"Add superseded state for an action plan if the cluster data model has "
"changed after it has been created."
msgid "Added SUSPENDED audit state"
msgstr "Added SUSPENDED audit state"
msgid ""
"Added a generic scoring engine module, which will standarize interactions "
"with scoring engines through the common API. It is possible to use the "
"scoring engine by different Strategies, which improve the code and data "
"model re-use."
msgstr ""
"Added a generic scoring engine module, which will standardise interactions "
"with scoring engines through the common API. It is possible to use the "
"scoring engine by different Strategies, which improve the code and data "
"model re-use."
msgid ""
"Added a new strategy based on the airflow of servers. This strategy makes "
"decisions to migrate VMs to make the airflow uniform."
msgstr ""
"Added a new strategy based on the airflow of servers. This strategy makes "
"decisions to migrate VMs to make the airflow uniform."
msgid ""
"Added a standard way to both declare and fetch configuration options so that "
"whenever the administrator generates the Watcher configuration sample file, "
"it contains the configuration options of the plugins that are currently "
"available."
msgstr ""
"Added a standard way to both declare and fetch configuration options so that "
"whenever the administrator generates the Watcher configuration sample file, "
"it contains the configuration options of the plugins that are currently "
"available."
msgid ""
"Added a strategy based on the VM workloads of hypervisors. This strategy "
"makes decisions to migrate workloads to make the total VM workloads of each "
"hypervisor balanced, when the total VM workloads of hypervisor reaches "
"threshold."
msgstr ""
"Added a strategy based on the VM workloads of hypervisors. This strategy "
"makes decisions to migrate workloads to make the total VM workloads of each "
"hypervisor balanced, when the total VM workloads of hypervisor reaches "
"threshold."
msgid ""
"Added a strategy for one compute node maintenance, without having the user's "
"application been interrupted. If given one backup node, the strategy will "
"firstly migrate all instances from the maintenance node to the backup node. "
"If the backup node is not provided, it will migrate all instances, relying "
"on nova-scheduler."
msgstr ""
"Added a strategy for one compute node maintenance, without having the user's "
"application been interrupted. If given one backup node, the strategy will "
"firstly migrate all instances from the maintenance node to the backup node. "
"If the backup node is not provided, it will migrate all instances, relying "
"on nova-scheduler."
msgid ""
"Added a strategy that monitors if there is a higher load on some hosts "
"compared to other hosts in the cluster and re-balances the work across hosts "
"to minimize the standard deviation of the loads in the cluster."
msgstr ""
"Added a strategy that monitors if there is a higher load on some hosts "
"compared to other hosts in the cluster and re-balances the work across hosts "
"to minimise the standard deviation of the loads in the cluster."
msgid ""
"Added a way to add a new action without having to amend the source code of "
"the default planner."
msgstr ""
"Added a way to add a new action without having to amend the source code of "
"the default planner."
msgid ""
"Added a way to check state of strategy before audit's execution. "
"Administrator can use \"watcher strategy state <strategy_name>\" command to "
"get information about metrics' availability, datasource's availability and "
"CDM's availability."
msgstr ""
"Added a way to check state of strategy before audit's execution. "
"Administrator can use \"watcher strategy state <strategy_name>\" command to "
"get information about metrics' availability, datasource's availability and "
"CDM's availability."
msgid ""
"Added a way to compare the efficacy of different strategies for a give "
"optimization goal."
msgstr ""
"Added a way to compare the efficacy of different strategies for a give "
"optimisation goal."
msgid ""
"Added a way to create periodic audit to be able to optimize continuously the "
"cloud infrastructure."
msgstr ""
"Added a way to create periodic audit to be able to continuously optimise the "
"cloud infrastructure."
msgid ""
"Added a way to return the of available goals depending on which strategies "
"have been deployed on the node where the decison engine is running."
msgstr ""
"Added a way to return the of available goals depending on which strategies "
"have been deployed on the node where the decision engine is running."
msgid ""
"Added an in-memory cache of the cluster model built up and kept fresh via "
"notifications from services of interest in addition to periodic syncing "
"logic."
msgstr ""
"Added an in-memory cache of the cluster model built up and kept fresh via "
"notifications from services of interest in addition to periodic syncing "
"logic."
msgid ""
"Added binding between apscheduler job and Watcher decision engine service. "
"It will allow to provide HA support in the future."
msgstr ""
"Added binding between apscheduler job and Watcher decision engine service. "
"It will allow to provide HA support in the future."
msgid "Added cinder cluster data model"
msgstr "Added cinder cluster data model"
msgid ""
"Added gnocchi support as data source for metrics. Administrator can change "
"data source for each strategy using config file."
msgstr ""
"Added Gnocchi support as data source for metrics. Administrator can change "
"data source for each strategy using config file."
msgid "Added new tool ``watcher-status upgrade check``."
msgstr "Added new tool ``watcher-status upgrade check``."
msgid ""
"Added notifications about cancelling of action plan. Now event based plugins "
"know when action plan cancel started and completed."
msgstr ""
"Added notifications about cancelling of action plan. Now event based plugins "
"know when action plan cancel started and completed."
msgid "Added policies to handle user rights to access Watcher API."
msgstr "Added policies to handle user rights to access Watcher API."
msgid "Added storage capacity balance strategy."
msgstr "Added storage capacity balance strategy."
msgid ""
"Added strategy \"Zone migration\" and it's goal \"Hardware maintenance\". "
"The strategy migrates many instances and volumes efficiently with minimum "
"downtime automatically."
msgstr ""
"Added strategy \"Zone migration\" and it's goal \"Hardware maintenance\". "
"The strategy migrates many instances and volumes efficiently with minimum "
"downtime automatically."
msgid ""
"Added strategy to identify and migrate a Noisy Neighbor - a low priority VM "
"that negatively affects peformance of a high priority VM by over utilizing "
"Last Level Cache."
msgstr ""
"Added strategy to identify and migrate a Noisy Neighbour - a low priority VM "
"that negatively affects performance of a high priority VM by over utilising "
"Last Level Cache."
msgid ""
"Added the functionality to filter out instances which have metadata field "
"'optimize' set to False. For now, this is only available for the "
"basic_consolidation strategy (if \"check_optimize_metadata\" configuration "
"option is enabled)."
msgstr ""
"Added the functionality to filter out instances which have metadata field "
"'optimize' set to False. For now, this is only available for the "
"basic_consolidation strategy (if \"check_optimize_metadata\" configuration "
"option is enabled)."
msgid "Added using of JSONSchema instead of voluptuous to validate Actions."
msgstr "Added using of JSONSchema instead of voluptuous to validate Actions."
msgid "Added volume migrate action"
msgstr "Added volume migrate action"
msgid ""
"Adds audit scoper for storage data model, now watcher users can specify "
"audit scope for storage CDM in the same manner as compute scope."
msgstr ""
"Adds audit scoper for storage data model, now watcher users can specify "
"audit scope for storage CDM in the same manner as compute scope."
msgid "Adds baremetal data model in Watcher"
msgstr "Adds baremetal data model in Watcher"
msgid ""
"Allow decision engine to pass strategy parameters, like optimization "
"threshold, to selected strategy, also strategy to provide parameters info to "
"end user."
msgstr ""
"Allow decision engine to pass strategy parameters, like optimisation "
"threshold, to selected strategy, also strategy to provide parameters info to "
"end user."
msgid ""
"Audits have 'name' field now, that is more friendly to end users. Audit's "
"name can't exceed 63 characters."
msgstr ""
"Audits have 'name' field now, that is more friendly to end users. Audit's "
"name can't exceed 63 characters."
msgid "Bug Fixes"
msgstr "Bug Fixes"
msgid "Centralize all configuration options for Watcher."
msgstr "Centralise all configuration options for Watcher."
msgid "Contents:"
msgstr "Contents:"
msgid ""
"Copy all audit templates parameters into audit instead of having a reference "
"to the audit template."
msgstr ""
"Copy all audit templates parameters into audit instead of having a reference "
"to the audit template."
msgid "Current Series Release Notes"
msgstr "Current Series Release Notes"
msgid "Deprecation Notes"
msgstr "Deprecation Notes"
msgid ""
"Each CDM collector can have its own CDM scoper now. This changed Scope JSON "
"schema definition for the audit template POST data. Please see audit "
"template create help message in python-watcherclient."
msgstr ""
"Each CDM collector can have its own CDM scoper now. This changed Scope JSON "
"schema definition for the audit template POST data. Please see audit "
"template create help message in python-watcherclient."
msgid ""
"Enhancement of vm_workload_consolidation strategy by using 'memory.resident' "
"metric in place of 'memory.usage', as memory.usage shows the memory usage "
"inside guest-os and memory.resident represents volume of RAM used by "
"instance on host machine."
msgstr ""
"Enhancement of vm_workload_consolidation strategy by using 'memory.resident' "
"metric in place of 'memory.usage', as memory.usage shows the memory usage "
"inside guest-os and memory.resident represents volume of RAM used by "
"instance on host machine."
msgid ""
"Existing workload_balance strategy based on the VM workloads of CPU. This "
"feature improves the strategy. By the input parameter \"metrics\", it makes "
"decision to migrate a VM base on CPU or memory utilization."
msgstr ""
"Existing workload_balance strategy based on the VM workloads of CPU. This "
"feature improves the strategy. By the input parameter \"metrics\", it makes "
"decision to migrate a VM base on CPU or memory utilisation."
msgid ""
"Feature to exclude instances from audit scope based on project_id is added. "
"Now instances from particular project in OpenStack can be excluded from "
"audit defining scope in audit templates."
msgstr ""
"Feature to exclude instances from audit scope based on project_id is added. "
"Now instances from particular project in OpenStack can be excluded from "
"audit defining scope in audit templates."
msgid ""
"Instance cold migration logic is now replaced with using Nova migrate "
"Server(migrate Action) API which has host option since v2.56."
msgstr ""
"Instance cold migration logic is now replaced with using Nova migrate "
"Server(migrate Action) API which has host option since v2.56."
msgid "New Features"
msgstr "New Features"
msgid ""
"New framework for ``watcher-status upgrade check`` command is added. This "
"framework allows adding various checks which can be run before a Watcher "
"upgrade to ensure if the upgrade can be performed safely."
msgstr ""
"New framework for ``watcher-status upgrade check`` command is added. This "
"framework allows adding various checks which can be run before a Watcher "
"upgrade to ensure if the upgrade can be performed safely."
msgid "Newton Series Release Notes"
msgstr "Newton Series Release Notes"
msgid ""
"Nova API version is now set to 2.56 by default. This needs the migrate "
"action of migration type cold with destination_node parameter to work."
msgstr ""
"Nova API version is now set to 2.56 by default. This needs the migrate "
"action of migration type cold with destination_node parameter to work."
msgid "Ocata Series Release Notes"
msgstr "Ocata Series Release Notes"
msgid ""
"Operator can now use new CLI tool ``watcher-status upgrade check`` to check "
"if Watcher deployment can be safely upgraded from N-1 to N release."
msgstr ""
"Operator can now use new CLI tool ``watcher-status upgrade check`` to check "
"if Watcher deployment can be safely upgraded from N-1 to N release."
msgid "Pike Series Release Notes"
msgstr "Pike Series Release Notes"
msgid "Prelude"
msgstr "Prelude"
msgid ""
"Provide a notification mechanism into Watcher that supports versioning. "
"Whenever a Watcher object is created, updated or deleted, a versioned "
"notification will, if it's relevant, be automatically sent to notify in "
"order to allow an event-driven style of architecture within Watcher. "
"Moreover, it will also give other services and/or 3rd party softwares (e.g. "
"monitoring solutions or rules engines) the ability to react to such events."
msgstr ""
"Provide a notification mechanism into Watcher that supports versioning. "
"Whenever a Watcher object is created, updated or deleted, a versioned "
"notification will, if it's relevant, be automatically sent to notify in "
"order to allow an event-driven style of architecture within Watcher. "
"Moreover, it will also give other services and/or 3rd party software (e.g. "
"monitoring solutions or rules engines) the ability to react to such events."
msgid ""
"Provides a generic way to define the scope of an audit. The set of audited "
"resources will be called \"Audit scope\" and will be defined in each audit "
"template (which contains the audit settings)."
msgstr ""
"Provides a generic way to define the scope of an audit. The set of audited "
"resources will be called \"Audit scope\" and will be defined in each audit "
"template (which contains the audit settings)."
msgid "Queens Series Release Notes"
msgstr "Queens Series Release Notes"
msgid "Rocky Series Release Notes"
msgstr "Rocky Series Release Notes"
msgid ""
"The graph model describes how VMs are associated to compute hosts. This "
"allows for seeing relationships upfront between the entities and hence can "
"be used to identify hot/cold spots in the data center and influence a "
"strategy decision."
msgstr ""
"The graph model describes how VMs are associated to compute hosts. This "
"allows for seeing relationships upfront between the entities and hence can "
"be used to identify hot/cold spots in the data centre and influence a "
"strategy decision."
msgid ""
"The migrate action of migration type cold with destination_node parameter "
"was fixed. Before fixing, it booted an instance in the service project as a "
"migrated instance."
msgstr ""
"The migrate action of migration type cold with destination_node parameter "
"was fixed. Before fixing, it booted an instance in the service project as a "
"migrated instance."
msgid ""
"There is new ability to create Watcher continuous audits with cron interval. "
"It means you may use, for example, optional argument '--interval \"\\*/5 \\* "
"\\* \\* \\*\"' to launch audit every 5 minutes. These jobs are executed on a "
"best effort basis and therefore, we recommend you to use a minimal cron "
"interval of at least one minute."
msgstr ""
"There is new ability to create Watcher continuous audits with cron interval. "
"It means you may use, for example, optional argument '--interval \"\\*/5 \\* "
"\\* \\* \\*\"' to launch audit every 5 minutes. These jobs are executed on a "
"best effort basis and therefore, we recommend you to use a minimal cron "
"interval of at least one minute."
msgid "Upgrade Notes"
msgstr "Upgrade Notes"
msgid ""
"Watcher can continuously optimize the OpenStack cloud for a specific "
"strategy or goal by triggering an audit periodically which generates an "
"action plan and run it automatically."
msgstr ""
"Watcher can continuously optimise the OpenStack cloud for a specific "
"strategy or goal by triggering an audit periodically which generates an "
"action plan and run it automatically."
msgid ""
"Watcher can now run specific actions in parallel improving the performances "
"dramatically when executing an action plan."
msgstr ""
"Watcher can now run specific actions in parallel improving the performance "
"dramatically when executing an action plan."
msgid ""
"Watcher consumes Nova notifications to update its internal Compute "
"CDM(Cluster Data Model). All the notifications as below"
msgstr ""
"Watcher consumes Nova notifications to update its internal Compute "
"CDM(Cluster Data Model). All the notifications as below"
msgid "Watcher database can now be upgraded thanks to Alembic."
msgstr "Watcher database can now be upgraded thanks to Alembic."
msgid ""
"Watcher got an ability to calculate multiple global efficacy indicators "
"during audit's execution. Now global efficacy can be calculated for many "
"resource types (like volumes, instances, network) if strategy supports "
"efficacy indicators."
msgstr ""
"Watcher got an ability to calculate multiple global efficacy indicators "
"during audit's execution. Now global efficacy can be calculated for many "
"resource types (like volumes, instances, network) if strategy supports "
"efficacy indicators."
msgid ""
"Watcher has a whole scope of the cluster, when building compute CDM which "
"includes all instances. It filters excluded instances when migration during "
"the audit."
msgstr ""
"Watcher has a whole scope of the cluster, when building compute CDM which "
"includes all instances. It filters excluded instances when migration during "
"the audit."
msgid ""
"Watcher removes the support to Nova legacy notifications because of Nova "
"will deprecate them."
msgstr ""
"Watcher removes the support to Nova legacy notifications because of Nova "
"will deprecate them."
msgid ""
"Watcher services can be launched in HA mode. From now on Watcher Decision "
"Engine and Watcher Applier services may be deployed on different nodes to "
"run in active-active or active-passive mode. Any ONGOING Audits or Action "
"Plans will be CANCELLED if service they are executed on is restarted."
msgstr ""
"Watcher services can be launched in HA mode. From now on Watcher Decision "
"Engine and Watcher Applier services may be deployed on different nodes to "
"run in active-active or active-passive mode. Any ONGOING Audits or Action "
"Plans will be CANCELLED if service they are executed on is restarted."
msgid ""
"Watcher starts to support API microversions since Stein cycle. From now "
"onwards all API changes should be made with saving backward compatibility. "
"To specify API version operator should use OpenStack-API-Version HTTP "
"header. If operator wants to know the mininum and maximum supported versions "
"by API, he/she can access /v1 resource and Watcher API will return "
"appropriate headers in response."
msgstr ""
"Watcher starts to support API microversions since the Stein cycle. From now "
"onwards all API changes should be made with saving backward compatibility. "
"To specify API version operator should use OpenStack-API-Version HTTP "
"header. If operator wants to know the minimum and maximum supported versions "
"by API, he/she can access /v1 resource and Watcher API will return "
"appropriate headers in response."
msgid ""
"Watcher supports multiple metrics backend and relies on Ceilometer and "
"Monasca."
msgstr ""
"Watcher supports multiple metrics backend and relies on Ceilometer and "
"Monasca."
msgid "Welcome to watcher's Release Notes documentation!"
msgstr "Welcome to watcher's Release Notes documentation!"
msgid ""
"all Watcher objects have been refactored to support OVO (oslo."
"versionedobjects) which was a prerequisite step in order to implement "
"versioned notifications."
msgstr ""
"all Watcher objects have been refactored to support OVO (oslo."
"versionedobjects) which was a prerequisite step in order to implement "
"versioned notifications."
msgid "instance.create.end"
msgstr "instance.create.end"
msgid "instance.delete.end"
msgstr "instance.delete.end"
msgid "instance.live_migration_force_complete.end"
msgstr "instance.live_migration_force_complete.end"
msgid "instance.live_migration_post_dest.end"
msgstr "instance.live_migration_post_dest.end"
msgid "instance.lock"
msgstr "instance.lock"
msgid "instance.pause.end"
msgstr "instance.pause.end"
msgid "instance.power_off.end"
msgstr "instance.power_off.end"
msgid "instance.power_on.end"
msgstr "instance.power_on.end"
msgid "instance.rebuild.end"
msgstr "instance.rebuild.end"
msgid "instance.rescue.end"
msgstr "instance.rescue.end"
msgid "instance.resize_confirm.end"
msgstr "instance.resize_confirm.end"
msgid "instance.restore.end"
msgstr "instance.restore.end"
msgid "instance.resume.end"
msgstr "instance.resume.end"
msgid "instance.shelve.end"
msgstr "instance.shelve.end"
msgid "instance.shutdown.end"
msgstr "instance.shutdown.end"
msgid "instance.soft_delete.end"
msgstr "instance.soft_delete.end"
msgid "instance.suspend.end"
msgstr "instance.suspend.end"
msgid "instance.unlock"
msgstr "instance.unlock"
msgid "instance.unpause.end"
msgstr "instance.unpause.end"
msgid "instance.unrescue.end"
msgstr "instance.unrescue.end"
msgid "instance.unshelve.end"
msgstr "instance.unshelve.end"
msgid "instance.update"
msgstr "instance.update"
msgid "new:"
msgstr "new:"
msgid "pre-existing:"
msgstr "pre-existing:"
msgid "service.create"
msgstr "service.create"
msgid "service.delete"
msgstr "service.delete"
msgid "service.update"
msgstr "service.update"

View File

@@ -1,33 +0,0 @@
# Gérald LONLAS <g.lonlas@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: python-watcher\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2019-03-22 02:21+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-10-22 06:44+0000\n"
"Last-Translator: Gérald LONLAS <g.lonlas@gmail.com>\n"
"Language-Team: French\n"
"Language: fr\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
msgid "0.29.0"
msgstr "0.29.0"
msgid "Contents:"
msgstr "Contenu :"
msgid "Current Series Release Notes"
msgstr "Note de la release actuelle"
msgid "New Features"
msgstr "Nouvelles fonctionnalités"
msgid "Newton Series Release Notes"
msgstr "Note de release pour Newton"
msgid "Welcome to watcher's Release Notes documentation!"
msgstr "Bienvenue dans la documentation de la note de Release de Watcher"

View File

@@ -1,6 +0,0 @@
==========================
Train Series Release Notes
==========================
.. release-notes::
:branch: stable/train

View File

@@ -3,6 +3,7 @@
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
apscheduler>=3.5.1 # MIT License apscheduler>=3.5.1 # MIT License
enum34>=1.1.6;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
jsonpatch>=1.21 # BSD jsonpatch>=1.21 # BSD
keystoneauth1>=3.4.0 # Apache-2.0 keystoneauth1>=3.4.0 # Apache-2.0
jsonschema>=2.6.0 # MIT jsonschema>=2.6.0 # MIT
@@ -42,9 +43,11 @@ python-ironicclient>=2.5.0 # Apache-2.0
six>=1.11.0 # MIT six>=1.11.0 # MIT
SQLAlchemy>=1.2.5 # MIT SQLAlchemy>=1.2.5 # MIT
stevedore>=1.28.0 # Apache-2.0 stevedore>=1.28.0 # Apache-2.0
taskflow>=3.7.1 # Apache-2.0 taskflow>=3.1.0 # Apache-2.0
WebOb>=1.8.5 # MIT WebOb>=1.8.5 # MIT
WSME>=0.9.2 # MIT WSME>=0.9.2 # MIT
networkx>=2.2;python_version>='3.4' # BSD # NOTE(fdegir): NetworkX 2.3 dropped support for Python 2
networkx>=1.11,<2.3;python_version<'3.0' # BSD
networkx>=1.11;python_version>='3.4' # BSD
microversion_parse>=0.2.1 # Apache-2.0 microversion_parse>=0.2.1 # Apache-2.0
futurist>=1.8.0 # Apache-2.0 futurist>=1.8.0 # Apache-2.0

View File

@@ -6,7 +6,6 @@ description-file =
author = OpenStack author = OpenStack
author-email = openstack-discuss@lists.openstack.org author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/watcher/latest/ home-page = https://docs.openstack.org/watcher/latest/
python-requires = >=3.6
classifier = classifier =
Environment :: OpenStack Environment :: OpenStack
Intended Audience :: Information Technology Intended Audience :: Information Technology
@@ -14,8 +13,8 @@ classifier =
License :: OSI Approved :: Apache Software License License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux Operating System :: POSIX :: Linux
Programming Language :: Python Programming Language :: Python
Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 2
Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 2.7
Programming Language :: Python :: 3 Programming Language :: Python :: 3
Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.7
@@ -26,6 +25,10 @@ packages =
data_files = data_files =
etc/ = etc/* etc/ = etc/*
[global]
setup-hooks =
pbr.hooks.setup_hook
[entry_points] [entry_points]
oslo.config.opts = oslo.config.opts =
watcher = watcher.conf.opts:list_opts watcher = watcher.conf.opts:list_opts

View File

@@ -13,8 +13,17 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup( setuptools.setup(
setup_requires=['pbr>=2.0.0'], setup_requires=['pbr>=2.0.0'],
pbr=True) pbr=True)

View File

@@ -5,7 +5,7 @@
coverage>=4.5.1 # Apache-2.0 coverage>=4.5.1 # Apache-2.0
doc8>=0.8.0 # Apache-2.0 doc8>=0.8.0 # Apache-2.0
freezegun>=0.3.10 # Apache-2.0 freezegun>=0.3.10 # Apache-2.0
hacking>=3.0,<3.1.0 # Apache-2.0 hacking>=1.1.0,<1.2.0 # Apache-2.0
mock>=2.0.0 # BSD mock>=2.0.0 # BSD
oslotest>=3.3.0 # Apache-2.0 oslotest>=3.3.0 # Apache-2.0
os-testr>=1.0.0 # Apache-2.0 os-testr>=1.0.0 # Apache-2.0

53
tox.ini
View File

@@ -1,11 +1,9 @@
[tox] [tox]
minversion = 2.0 minversion = 2.0
envlist = py36,py37,pep8 envlist = py36,py37,py27,pep8
skipsdist = True skipsdist = True
ignore_basepython_conflict = True
[testenv] [testenv]
basepython = python3
usedevelop = True usedevelop = True
whitelist_externals = find whitelist_externals = find
rm rm
@@ -13,7 +11,7 @@ install_command = pip install {opts} {packages}
setenv = setenv =
VIRTUAL_ENV={envdir} VIRTUAL_ENV={envdir}
deps = deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/train}
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt -r{toxinidir}/requirements.txt
commands = commands =
@@ -23,21 +21,24 @@ commands =
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
[testenv:pep8] [testenv:pep8]
basepython = python3
commands = commands =
doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst
flake8 flake8
bandit -r watcher -x watcher/tests/* -n5 -ll -s B320 bandit -r watcher -x watcher/tests/* -n5 -ll -s B320
[testenv:venv] [testenv:venv]
basepython = python3
setenv = PYTHONHASHSEED=0 setenv = PYTHONHASHSEED=0
deps = deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/train}
-r{toxinidir}/doc/requirements.txt -r{toxinidir}/doc/requirements.txt
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt -r{toxinidir}/requirements.txt
commands = {posargs} commands = {posargs}
[testenv:cover] [testenv:cover]
basepython = python3
setenv = setenv =
PYTHON=coverage run --source watcher --parallel-mode PYTHON=coverage run --source watcher --parallel-mode
commands = commands =
@@ -48,65 +49,51 @@ commands =
coverage report coverage report
[testenv:docs] [testenv:docs]
basepython = python3
setenv = PYTHONHASHSEED=0 setenv = PYTHONHASHSEED=0
deps = -r{toxinidir}/doc/requirements.txt deps = -r{toxinidir}/doc/requirements.txt
commands = commands =
rm -fr doc/build doc/source/api/ .autogenerated rm -fr doc/build doc/source/api/ .autogenerated
sphinx-build -W --keep-going -b html doc/source doc/build/html sphinx-build -W -b html doc/source doc/build/html
[testenv:api-ref] [testenv:api-ref]
basepython = python3
deps = -r{toxinidir}/doc/requirements.txt deps = -r{toxinidir}/doc/requirements.txt
whitelist_externals = bash whitelist_externals = bash
commands = commands =
bash -c 'rm -rf api-ref/build' bash -c 'rm -rf api-ref/build'
sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html
[testenv:debug] [testenv:debug]
basepython = python3
commands = oslo_debug_helper -t watcher/tests {posargs} commands = oslo_debug_helper -t watcher/tests {posargs}
[testenv:genconfig] [testenv:genconfig]
basepython = python3
sitepackages = False sitepackages = False
commands = commands =
oslo-config-generator --config-file etc/watcher/oslo-config-generator/watcher.conf oslo-config-generator --config-file etc/watcher/oslo-config-generator/watcher.conf
[testenv:genpolicy] [testenv:genpolicy]
basepython = python3
commands = commands =
oslopolicy-sample-generator --config-file etc/watcher/oslo-policy-generator/watcher-policy-generator.conf oslopolicy-sample-generator --config-file etc/watcher/oslo-policy-generator/watcher-policy-generator.conf
[flake8] [flake8]
filename = *.py,app.wsgi filename = *.py,app.wsgi
show-source=True show-source=True
# W504 line break after binary operator ignore= H105,E123,E226,N320,H202
ignore= H105,E123,E226,N320,H202,W504
builtins= _ builtins= _
enable-extensions = H106,H203,H904 enable-extensions = H106,H203,H904
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes
[testenv:wheel] [testenv:wheel]
basepython = python3
commands = python setup.py bdist_wheel commands = python setup.py bdist_wheel
[hacking] [hacking]
import_exceptions = watcher._i18n import_exceptions = watcher._i18n
local-check-factory = watcher.hacking.checks.factory
[flake8:local-plugins]
extension =
N319 = checks:no_translate_debug_logs
N321 = checks:use_jsonutils
N322 = checks:check_assert_called_once_with
N325 = checks:check_python3_xrange
N326 = checks:check_no_basestring
N327 = checks:check_python3_no_iteritems
N328 = checks:check_asserttrue
N329 = checks:check_assertfalse
N330 = checks:check_assertempty
N331 = checks:check_assertisinstance
N332 = checks:check_assertequal_for_httpcode
N333 = checks:check_log_warn_deprecated
N340 = checks:check_oslo_i18n_wrapper
N341 = checks:check_builtins_gettext
N342 = checks:no_redundant_import_alias
paths = ./watcher/hacking
[doc8] [doc8]
extension=.rst extension=.rst
@@ -114,6 +101,7 @@ extension=.rst
ignore-path=doc/source/image_src,doc/source/man,doc/source/api ignore-path=doc/source/image_src,doc/source/man,doc/source/api
[testenv:pdf-docs] [testenv:pdf-docs]
basepython = python3
envdir = {toxworkdir}/docs envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps} deps = {[testenv:docs]deps}
whitelist_externals = whitelist_externals =
@@ -121,18 +109,21 @@ whitelist_externals =
make make
commands = commands =
rm -rf doc/build/pdf rm -rf doc/build/pdf
sphinx-build -W --keep-going -b latex doc/source doc/build/pdf sphinx-build -W -b latex doc/source doc/build/pdf
make -C doc/build/pdf make -C doc/build/pdf
[testenv:releasenotes] [testenv:releasenotes]
basepython = python3
deps = -r{toxinidir}/doc/requirements.txt deps = -r{toxinidir}/doc/requirements.txt
commands = sphinx-build -a -W -E -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:bandit] [testenv:bandit]
basepython = python3
deps = -r{toxinidir}/test-requirements.txt deps = -r{toxinidir}/test-requirements.txt
commands = bandit -r watcher -x watcher/tests/* -n5 -ll -s B320 commands = bandit -r watcher -x watcher/tests/* -n5 -ll -s B320
[testenv:lower-constraints] [testenv:lower-constraints]
basepython = python3
deps = deps =
-c{toxinidir}/lower-constraints.txt -c{toxinidir}/lower-constraints.txt
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt

View File

@@ -37,5 +37,5 @@ def install(app, conf, public_routes):
if not CONF.get('enable_authentication'): if not CONF.get('enable_authentication'):
return app return app
return auth_token.AuthTokenMiddleware(app, return auth_token.AuthTokenMiddleware(app,
conf=dict(conf.keystone_authtoken), conf=dict(conf),
public_api_routes=public_routes) public_api_routes=public_routes)

View File

@@ -27,10 +27,6 @@ server = {
# Pecan Application Configurations # Pecan Application Configurations
# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa # See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa
acl_public_routes = ['/']
if not cfg.CONF.api.get("enable_webhooks_auth"):
acl_public_routes.append('/v1/webhooks/.*')
app = { app = {
'root': 'watcher.api.controllers.root.RootController', 'root': 'watcher.api.controllers.root.RootController',
'modules': ['watcher.api'], 'modules': ['watcher.api'],
@@ -40,7 +36,9 @@ app = {
], ],
'static_root': '%(confdir)s/public', 'static_root': '%(confdir)s/public',
'enable_acl': True, 'enable_acl': True,
'acl_public_routes': acl_public_routes, 'acl_public_routes': [
'/',
],
} }
# WSME Configurations # WSME Configurations

View File

@@ -23,7 +23,7 @@ from watcher.api.controllers import base
def build_url(resource, resource_args, bookmark=False, base_url=None): def build_url(resource, resource_args, bookmark=False, base_url=None):
if base_url is None: if base_url is None:
base_url = pecan.request.application_url base_url = pecan.request.host_url
template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s'
# FIXME(lucasagomes): I'm getting a 404 when doing a GET on # FIXME(lucasagomes): I'm getting a 404 when doing a GET on

View File

@@ -30,12 +30,3 @@ audits.
--- ---
Added ``force`` into create audit request. If ``force`` is true, Added ``force`` into create audit request. If ``force`` is true,
audit will be executed despite of ongoing actionplan. audit will be executed despite of ongoing actionplan.
1.3
---
Added list data model API.
1.4
---
Added Watcher webhook API. It can be used to trigger audit
with ``event`` type.

View File

@@ -59,8 +59,7 @@ class Version(base.APIBase):
version.status = status version.status = status
version.max_version = v.max_version_string() version.max_version = v.max_version_string()
version.min_version = v.min_version_string() version.min_version = v.min_version_string()
version.links = [link.Link.make_link('self', version.links = [link.Link.make_link('self', pecan.request.host_url,
pecan.request.application_url,
id, '', bookmark=True)] id, '', bookmark=True)]
return version return version

View File

@@ -40,9 +40,7 @@ from watcher.api.controllers.v1 import goal
from watcher.api.controllers.v1 import scoring_engine from watcher.api.controllers.v1 import scoring_engine
from watcher.api.controllers.v1 import service from watcher.api.controllers.v1 import service
from watcher.api.controllers.v1 import strategy from watcher.api.controllers.v1 import strategy
from watcher.api.controllers.v1 import utils
from watcher.api.controllers.v1 import versions from watcher.api.controllers.v1 import versions
from watcher.api.controllers.v1 import webhooks
def min_version(): def min_version():
@@ -132,9 +130,6 @@ class V1(APIBase):
services = [link.Link] services = [link.Link]
"""Links to the services resource""" """Links to the services resource"""
webhooks = [link.Link]
"""Links to the webhooks resource"""
links = [link.Link] links = [link.Link]
"""Links that point to a specific URL for this version and documentation""" """Links that point to a specific URL for this version and documentation"""
@@ -142,8 +137,7 @@ class V1(APIBase):
def convert(): def convert():
v1 = V1() v1 = V1()
v1.id = "v1" v1.id = "v1"
base_url = pecan.request.application_url v1.links = [link.Link.make_link('self', pecan.request.host_url,
v1.links = [link.Link.make_link('self', base_url,
'v1', '', bookmark=True), 'v1', '', bookmark=True),
link.Link.make_link('describedby', link.Link.make_link('describedby',
'http://docs.openstack.org', 'http://docs.openstack.org',
@@ -154,66 +148,57 @@ class V1(APIBase):
v1.media_types = [MediaType('application/json', v1.media_types = [MediaType('application/json',
'application/vnd.openstack.watcher.v1+json')] 'application/vnd.openstack.watcher.v1+json')]
v1.audit_templates = [link.Link.make_link('self', v1.audit_templates = [link.Link.make_link('self',
base_url, pecan.request.host_url,
'audit_templates', ''), 'audit_templates', ''),
link.Link.make_link('bookmark', link.Link.make_link('bookmark',
base_url, pecan.request.host_url,
'audit_templates', '', 'audit_templates', '',
bookmark=True) bookmark=True)
] ]
v1.audits = [link.Link.make_link('self', base_url, v1.audits = [link.Link.make_link('self', pecan.request.host_url,
'audits', ''), 'audits', ''),
link.Link.make_link('bookmark', link.Link.make_link('bookmark',
base_url, pecan.request.host_url,
'audits', '', 'audits', '',
bookmark=True) bookmark=True)
] ]
if utils.allow_list_datamodel(): v1.data_model = [link.Link.make_link('self', pecan.request.host_url,
v1.data_model = [link.Link.make_link('self', base_url, 'data_model', ''),
'data_model', ''), link.Link.make_link('bookmark',
link.Link.make_link('bookmark', pecan.request.host_url,
base_url, 'data_model', '',
'data_model', '', bookmark=True)
bookmark=True) ]
] v1.actions = [link.Link.make_link('self', pecan.request.host_url,
v1.actions = [link.Link.make_link('self', base_url,
'actions', ''), 'actions', ''),
link.Link.make_link('bookmark', link.Link.make_link('bookmark',
base_url, pecan.request.host_url,
'actions', '', 'actions', '',
bookmark=True) bookmark=True)
] ]
v1.action_plans = [link.Link.make_link( v1.action_plans = [link.Link.make_link(
'self', base_url, 'action_plans', ''), 'self', pecan.request.host_url, 'action_plans', ''),
link.Link.make_link('bookmark', link.Link.make_link('bookmark',
base_url, pecan.request.host_url,
'action_plans', '', 'action_plans', '',
bookmark=True) bookmark=True)
] ]
v1.scoring_engines = [link.Link.make_link( v1.scoring_engines = [link.Link.make_link(
'self', base_url, 'scoring_engines', ''), 'self', pecan.request.host_url, 'scoring_engines', ''),
link.Link.make_link('bookmark', link.Link.make_link('bookmark',
base_url, pecan.request.host_url,
'scoring_engines', '', 'scoring_engines', '',
bookmark=True) bookmark=True)
] ]
v1.services = [link.Link.make_link( v1.services = [link.Link.make_link(
'self', base_url, 'services', ''), 'self', pecan.request.host_url, 'services', ''),
link.Link.make_link('bookmark', link.Link.make_link('bookmark',
base_url, pecan.request.host_url,
'services', '', 'services', '',
bookmark=True) bookmark=True)
] ]
if utils.allow_webhook_api():
v1.webhooks = [link.Link.make_link(
'self', base_url, 'webhooks', ''),
link.Link.make_link('bookmark',
base_url,
'webhooks', '',
bookmark=True)
]
return v1 return v1
@@ -229,7 +214,6 @@ class Controller(rest.RestController):
services = service.ServicesController() services = service.ServicesController()
strategies = strategy.StrategiesController() strategies = strategy.StrategiesController()
data_model = data_model.DataModelController() data_model = data_model.DataModelController()
webhooks = webhooks.WebhookController()
@wsme_pecan.wsexpose(V1) @wsme_pecan.wsexpose(V1)
def get(self): def get(self):

View File

@@ -165,7 +165,7 @@ class ActionPlan(base.APIBase):
name=indicator.name, name=indicator.name,
description=indicator.description, description=indicator.description,
unit=indicator.unit, unit=indicator.unit,
value=float(indicator.value), value=indicator.value,
) )
efficacy_indicators.append(efficacy_indicator.as_dict()) efficacy_indicators.append(efficacy_indicator.as_dict())
self._efficacy_indicators = efficacy_indicators self._efficacy_indicators = efficacy_indicators

View File

@@ -24,7 +24,6 @@ from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan import wsmeext.pecan as wsme_pecan
from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils
from watcher.common import exception from watcher.common import exception
from watcher.common import policy from watcher.common import policy
from watcher.decision_engine import rpcapi from watcher.decision_engine import rpcapi
@@ -50,8 +49,6 @@ class DataModelController(rest.RestController):
:param audit_uuid: The UUID of the audit, used to filter data model :param audit_uuid: The UUID of the audit, used to filter data model
by the scope in audit. by the scope in audit.
""" """
if not utils.allow_list_datamodel():
raise exception.NotAcceptable
if self.from_data_model: if self.from_data_model:
raise exception.OperationNotPermitted raise exception.OperationNotPermitted
allowed_data_model_type = [ allowed_data_model_type = [

View File

@@ -184,7 +184,7 @@ class MultiType(wtypes.UserType):
class JsonPatchType(wtypes.Base): class JsonPatchType(wtypes.Base):
"""A complex type that represents a single json-patch operation.""" """A complex type that represents a single json-patch operation."""
path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'), path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'),
mandatory=True) mandatory=True)
op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'),
mandatory=True) mandatory=True)

View File

@@ -164,8 +164,7 @@ def allow_start_end_audit_time():
Version 1.1 of the API added support for start and end time of continuous Version 1.1 of the API added support for start and end time of continuous
audits. audits.
""" """
return pecan.request.version.minor >= ( return pecan.request.version.minor >= versions.MINOR_1_START_END_TIMING
versions.VERSIONS.MINOR_1_START_END_TIMING.value)
def allow_force(): def allow_force():
@@ -174,23 +173,4 @@ def allow_force():
Version 1.2 of the API added support for forced audits that allows to Version 1.2 of the API added support for forced audits that allows to
launch audit when other action plan is ongoing. launch audit when other action plan is ongoing.
""" """
return pecan.request.version.minor >= ( return pecan.request.version.minor >= versions.MINOR_2_FORCE
versions.VERSIONS.MINOR_2_FORCE.value)
def allow_list_datamodel():
"""Check if we should support list data model API.
Version 1.3 of the API added support to list data model.
"""
return pecan.request.version.minor >= (
versions.VERSIONS.MINOR_3_DATAMODEL.value)
def allow_webhook_api():
"""Check if we should support webhook API.
Version 1.4 of the API added support to trigger webhook.
"""
return pecan.request.version.minor >= (
versions.VERSIONS.MINOR_4_WEBHOOK_API.value)

View File

@@ -14,25 +14,25 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import enum
class VERSIONS(enum.Enum):
MINOR_0_ROCKY = 0 # v1.0: corresponds to Rocky API
MINOR_1_START_END_TIMING = 1 # v1.1: Add start/end timei for audit
MINOR_2_FORCE = 2 # v1.2: Add force field to audit
MINOR_3_DATAMODEL = 3 # v1.3: Add list datamodel API
MINOR_4_WEBHOOK_API = 4 # v1.4: Add webhook trigger API
MINOR_MAX_VERSION = 4
# This is the version 1 API # This is the version 1 API
BASE_VERSION = 1 BASE_VERSION = 1
# Here goes a short log of changes in every version.
#
# v1.0: corresponds to Rocky API
# v1.1: Add start/end time for continuous audit
# v1.2: Add force field to audit
MINOR_0_ROCKY = 0
MINOR_1_START_END_TIMING = 1
MINOR_2_FORCE = 2
MINOR_MAX_VERSION = MINOR_2_FORCE
# String representations of the minor and maximum versions # String representations of the minor and maximum versions
_MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, _MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_0_ROCKY)
VERSIONS.MINOR_0_ROCKY.value) _MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_MAX_VERSION)
_MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION,
VERSIONS.MINOR_MAX_VERSION.value)
def service_type_string(): def service_type_string():

View File

@@ -1,62 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Webhook endpoint for Watcher v1 REST API.
"""
from oslo_log import log
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils
from watcher.common import exception
from watcher.decision_engine import rpcapi
from watcher import objects
LOG = log.getLogger(__name__)
class WebhookController(rest.RestController):
"""REST controller for webhooks resource."""
def __init__(self):
super(WebhookController, self).__init__()
self.dc_client = rpcapi.DecisionEngineAPI()
@wsme_pecan.wsexpose(None, wtypes.text, body=types.jsontype,
status_code=202)
def post(self, audit_ident, body):
"""Trigger the given audit.
:param audit_ident: UUID or name of an audit.
"""
LOG.debug("Webhook trigger Audit: %s.", audit_ident)
context = pecan.request.context
audit = utils.get_resource('Audit', audit_ident)
if audit is None:
raise exception.AuditNotFound(audit=audit_ident)
if audit.audit_type != objects.audit.AuditType.EVENT.value:
raise exception.AuditTypeNotAllowed(audit_type=audit.audit_type)
allowed_state = (
objects.audit.State.PENDING,
objects.audit.State.SUCCEEDED,
)
if audit.state not in allowed_state:
raise exception.AuditStateNotAllowed(state=audit.state)
# trigger decision-engine to run the audit
self.dc_client.trigger_audit(context, audit.uuid)

View File

@@ -34,7 +34,7 @@ class AuthTokenMiddleware(auth_token.AuthProtocol):
""" """
def __init__(self, app, conf, public_api_routes=()): def __init__(self, app, conf, public_api_routes=()):
route_pattern_tpl = r'%s(\.json|\.xml)?$' route_pattern_tpl = '%s(\.json|\.xml)?$'
try: try:
self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl) self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)

View File

@@ -140,7 +140,7 @@ class BaseAction(loadable.Loadable):
raise NotImplementedError() raise NotImplementedError()
def check_abort(self): def check_abort(self):
if self.__class__.__name__ == 'Migrate': if self.__class__.__name__ is 'Migrate':
if self.migration_type == self.LIVE_MIGRATION: if self.migration_type == self.LIVE_MIGRATION:
return True return True
else: else:

View File

@@ -47,24 +47,24 @@ class Resize(base.BaseAction):
@property @property
def schema(self): def schema(self):
return { return {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
'resource_id': { 'resource_id': {
'type': 'string', 'type': 'string',
'minlength': 1, 'minlength': 1,
'pattern': ('^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-' 'pattern': ('^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-'
'([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-' '([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-'
'([a-fA-F0-9]){12}$') '([a-fA-F0-9]){12}$')
},
'flavor': {
'type': 'string',
'minlength': 1,
},
}, },
'flavor': { 'required': ['resource_id', 'flavor'],
'type': 'string', 'additionalProperties': False,
'minlength': 1, }
},
},
'required': ['resource_id', 'flavor'],
'additionalProperties': False,
}
@property @property
def instance_uuid(self): def instance_uuid(self):

View File

@@ -112,7 +112,7 @@ class DefaultWorkFlowEngine(base.BaseWorkFlowEngine):
return flow return flow
except exception.ActionPlanCancelled: except exception.ActionPlanCancelled as e:
raise raise
except tf_exception.WrappedFailure as e: except tf_exception.WrappedFailure as e:

View File

@@ -15,6 +15,7 @@
import sys import sys
from oslo_upgradecheck import upgradecheck from oslo_upgradecheck import upgradecheck
import six
from watcher._i18n import _ from watcher._i18n import _
from watcher.common import clients from watcher.common import clients
@@ -37,7 +38,7 @@ class Checks(upgradecheck.UpgradeCommands):
clients.check_min_nova_api_version(CONF.nova_client.api_version) clients.check_min_nova_api_version(CONF.nova_client.api_version)
except ValueError as e: except ValueError as e:
return upgradecheck.Result( return upgradecheck.Result(
upgradecheck.Code.FAILURE, str(e)) upgradecheck.Code.FAILURE, six.text_type(e))
return upgradecheck.Result(upgradecheck.Code.SUCCESS) return upgradecheck.Result(upgradecheck.Code.SUCCESS)
_upgrade_checks = ( _upgrade_checks = (

View File

@@ -28,6 +28,7 @@ import sys
from keystoneclient import exceptions as keystone_exceptions from keystoneclient import exceptions as keystone_exceptions
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log from oslo_log import log
import six
from watcher._i18n import _ from watcher._i18n import _
@@ -96,16 +97,19 @@ class WatcherException(Exception):
def __str__(self): def __str__(self):
"""Encode to utf-8 then wsme api can consume it as well""" """Encode to utf-8 then wsme api can consume it as well"""
return self.args[0] if not six.PY3:
return six.text_type(self.args[0]).encode('utf-8')
else:
return self.args[0]
def __unicode__(self): def __unicode__(self):
return str(self.args[0]) return six.text_type(self.args[0])
def format_message(self): def format_message(self):
if self.__class__.__name__.endswith('_Remote'): if self.__class__.__name__.endswith('_Remote'):
return self.args[0] return self.args[0]
else: else:
return str(self) return six.text_type(self)
class UnsupportedError(WatcherException): class UnsupportedError(WatcherException):
@@ -239,14 +243,6 @@ class AuditTypeNotFound(Invalid):
msg_fmt = _("Audit type %(audit_type)s could not be found") msg_fmt = _("Audit type %(audit_type)s could not be found")
class AuditTypeNotAllowed(Invalid):
msg_fmt = _("Audit type %(audit_type)s is disallowed.")
class AuditStateNotAllowed(Invalid):
msg_fmt = _("Audit state %(state)s is disallowed.")
class AuditParameterNotAllowed(Invalid): class AuditParameterNotAllowed(Invalid):
msg_fmt = _("Audit parameter %(parameter)s are not allowed") msg_fmt = _("Audit parameter %(parameter)s are not allowed")

View File

@@ -37,7 +37,6 @@ class GreenThreadPoolExecutor(BasePoolExecutor):
pool = futurist.GreenThreadPoolExecutor(int(max_workers)) pool = futurist.GreenThreadPoolExecutor(int(max_workers))
super(GreenThreadPoolExecutor, self).__init__(pool) super(GreenThreadPoolExecutor, self).__init__(pool)
executors = { executors = {
'default': GreenThreadPoolExecutor(), 'default': GreenThreadPoolExecutor(),
} }

View File

@@ -153,7 +153,6 @@ def extend_with_strict_schema(validator_class):
return validators.extend(validator_class, {"properties": strict_schema}) return validators.extend(validator_class, {"properties": strict_schema})
StrictDefaultValidatingDraft4Validator = extend_with_default( StrictDefaultValidatingDraft4Validator = extend_with_default(
extend_with_strict_schema(validators.Draft4Validator)) extend_with_strict_schema(validators.Draft4Validator))

View File

@@ -55,11 +55,6 @@ API_SERVICE_OPTS = [
"the service, this option should be False; note, you " "the service, this option should be False; note, you "
"will want to change public API endpoint to represent " "will want to change public API endpoint to represent "
"SSL termination URL with 'public_endpoint' option."), "SSL termination URL with 'public_endpoint' option."),
cfg.BoolOpt('enable_webhooks_auth',
default=True,
help='This option enables or disables webhook request '
'authentication via keystone. Default value is True.'),
] ]

View File

@@ -40,18 +40,11 @@ WATCHER_DECISION_ENGINE_OPTS = [
default='watcher.decision.api', default='watcher.decision.api',
help='The identifier used by the Watcher ' help='The identifier used by the Watcher '
'module on the message broker'), 'module on the message broker'),
cfg.IntOpt('max_audit_workers', cfg.IntOpt('max_workers',
default=2, default=2,
required=True, required=True,
help='The maximum number of threads that can be used to ' help='The maximum number of threads that can be used to '
'execute audits in parallel.'), 'execute strategies'),
cfg.IntOpt('max_general_workers',
default=4,
required=True,
help='The maximum number of threads that can be used to '
'execute general tasks in parallel. The number of general '
'workers will not increase depending on the number of '
'audit workers!'),
cfg.IntOpt('action_plan_expiry', cfg.IntOpt('action_plan_expiry',
default=24, default=24,
mutable=True, mutable=True,

View File

@@ -1125,8 +1125,8 @@ class Connection(api.BaseConnection):
def get_action_description_by_id(self, context, def get_action_description_by_id(self, context,
action_id, eager=False): action_id, eager=False):
return self._get_action_description( return self._get_action_description(
context, fieldname="id", value=action_id, eager=eager) context, fieldname="id", value=action_id, eager=eager)
def get_action_description_by_type(self, context, def get_action_description_by_type(self, context,
action_type, eager=False): action_type, eager=False):

View File

@@ -1,27 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 ZTE Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from watcher.decision_engine.audit import base
from watcher import objects
class EventAuditHandler(base.AuditHandler):
def post_execute(self, audit, solution, request_context):
super(EventAuditHandler, self).post_execute(audit, solution,
request_context)
# change state of the audit to SUCCEEDED
self.update_audit_state(audit, objects.audit.State.SUCCEEDED)

View File

@@ -188,7 +188,7 @@ class CeilometerHelper(base.DataSourceBase):
item_value = None item_value = None
if statistic: if statistic:
item_value = statistic[-1]._info.get('aggregate').get(aggregate) item_value = statistic[-1]._info.get('aggregate').get(aggregate)
if meter_name == 'host_airflow': if meter_name is 'host_airflow':
# Airflow from hardware.ipmi.node.airflow is reported as # Airflow from hardware.ipmi.node.airflow is reported as
# 1/10 th of actual CFM # 1/10 th of actual CFM
item_value *= 10 item_value *= 10

View File

@@ -116,7 +116,7 @@ class GnocchiHelper(base.DataSourceBase):
# measure has structure [time, granularity, value] # measure has structure [time, granularity, value]
return_value = statistics[-1][2] return_value = statistics[-1][2]
if meter_name == 'host_airflow': if meter_name is 'host_airflow':
# Airflow from hardware.ipmi.node.airflow is reported as # Airflow from hardware.ipmi.node.airflow is reported as
# 1/10 th of actual CFM # 1/10 th of actual CFM
return_value *= 10 return_value *= 10

View File

@@ -72,7 +72,7 @@ class GrafanaHelper(base.DataSourceBase):
# Very basic url parsing # Very basic url parsing
parse = urlparse.urlparse(self._base_url) parse = urlparse.urlparse(self._base_url)
if parse.scheme == '' or parse.netloc == '' or parse.path == '': if parse.scheme is '' or parse.netloc is '' or parse.path is '':
LOG.critical("GrafanaHelper url not properly configured, " LOG.critical("GrafanaHelper url not properly configured, "
"check base_url and project_id") "check base_url and project_id")
return return

View File

@@ -112,10 +112,10 @@ class DataSourceManager(object):
datasource is attempted. datasource is attempted.
""" """
if not self.datasources or len(self.datasources) == 0: if not self.datasources or len(self.datasources) is 0:
raise exception.NoDatasourceAvailable raise exception.NoDatasourceAvailable
if not metrics or len(metrics) == 0: if not metrics or len(metrics) is 0:
LOG.critical("Can not retrieve datasource without specifying " LOG.critical("Can not retrieve datasource without specifying "
"list of required metrics.") "list of required metrics.")
raise exception.InvalidParameter(parameter='metrics', raise exception.InvalidParameter(parameter='metrics',
@@ -125,11 +125,11 @@ class DataSourceManager(object):
no_metric = False no_metric = False
for metric in metrics: for metric in metrics:
if (metric not in self.metric_map[datasource] or if (metric not in self.metric_map[datasource] or
self.metric_map[datasource].get(metric) is None): self.metric_map[datasource].get(metric) is None):
no_metric = True no_metric = True
LOG.warning("Datasource: {0} could not be used due to " LOG.warning("Datasource: {0} could not be used due to "
"metric: {1}".format(datasource, metric)) "metric: {1}".format(datasource, metric))
break break
if not no_metric: if not no_metric:
# Try to use a specific datasource but attempt additional # Try to use a specific datasource but attempt additional
# datasources upon exceptions (if config has more datasources) # datasources upon exceptions (if config has more datasources)

View File

@@ -22,7 +22,6 @@ from oslo_config import cfg
from oslo_log import log from oslo_log import log
from watcher.decision_engine.audit import continuous as c_handler from watcher.decision_engine.audit import continuous as c_handler
from watcher.decision_engine.audit import event as e_handler
from watcher.decision_engine.audit import oneshot as o_handler from watcher.decision_engine.audit import oneshot as o_handler
from watcher import objects from watcher import objects
@@ -36,10 +35,9 @@ class AuditEndpoint(object):
def __init__(self, messaging): def __init__(self, messaging):
self._messaging = messaging self._messaging = messaging
self._executor = futurist.GreenThreadPoolExecutor( self._executor = futurist.GreenThreadPoolExecutor(
max_workers=CONF.watcher_decision_engine.max_audit_workers) max_workers=CONF.watcher_decision_engine.max_workers)
self._oneshot_handler = o_handler.OneShotAuditHandler() self._oneshot_handler = o_handler.OneShotAuditHandler()
self._continuous_handler = c_handler.ContinuousAuditHandler().start() self._continuous_handler = c_handler.ContinuousAuditHandler().start()
self._event_handler = e_handler.EventAuditHandler()
@property @property
def executor(self): def executor(self):
@@ -47,10 +45,7 @@ class AuditEndpoint(object):
def do_trigger_audit(self, context, audit_uuid): def do_trigger_audit(self, context, audit_uuid):
audit = objects.Audit.get_by_uuid(context, audit_uuid, eager=True) audit = objects.Audit.get_by_uuid(context, audit_uuid, eager=True)
if audit.audit_type == objects.audit.AuditType.ONESHOT.value: self._oneshot_handler.execute(audit, context)
self._oneshot_handler.execute(audit, context)
if audit.audit_type == objects.audit.AuditType.EVENT.value:
self._event_handler.execute(audit, context)
def trigger_audit(self, context, audit_uuid): def trigger_audit(self, context, audit_uuid):
LOG.debug("Trigger audit %s", audit_uuid) LOG.debug("Trigger audit %s", audit_uuid)

View File

@@ -16,8 +16,6 @@
import os_resource_classes as orc import os_resource_classes as orc
from oslo_log import log from oslo_log import log
from futurist import waiters
from watcher.common import nova_helper from watcher.common import nova_helper
from watcher.common import placement_helper from watcher.common import placement_helper
from watcher.decision_engine.model.collector import base from watcher.decision_engine.model.collector import base
@@ -25,7 +23,6 @@ from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root from watcher.decision_engine.model import model_root
from watcher.decision_engine.model.notification import nova from watcher.decision_engine.model.notification import nova
from watcher.decision_engine.scope import compute as compute_scope from watcher.decision_engine.scope import compute as compute_scope
from watcher.decision_engine import threading
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@@ -215,12 +212,8 @@ class NovaModelBuilder(base.BaseModelBuilder):
self.nova = osc.nova() self.nova = osc.nova()
self.nova_helper = nova_helper.NovaHelper(osc=self.osc) self.nova_helper = nova_helper.NovaHelper(osc=self.osc)
self.placement_helper = placement_helper.PlacementHelper(osc=self.osc) self.placement_helper = placement_helper.PlacementHelper(osc=self.osc)
self.executor = threading.DecisionEngineThreadPool()
def _collect_aggregates(self, host_aggregates, _nodes): def _collect_aggregates(self, host_aggregates, _nodes):
if not host_aggregates:
return
aggregate_list = self.call_retry(f=self.nova_helper.get_aggregate_list) aggregate_list = self.call_retry(f=self.nova_helper.get_aggregate_list)
aggregate_ids = [aggregate['id'] for aggregate aggregate_ids = [aggregate['id'] for aggregate
in host_aggregates if 'id' in aggregate] in host_aggregates if 'id' in aggregate]
@@ -236,9 +229,6 @@ class NovaModelBuilder(base.BaseModelBuilder):
_nodes.update(aggregate.hosts) _nodes.update(aggregate.hosts)
def _collect_zones(self, availability_zones, _nodes): def _collect_zones(self, availability_zones, _nodes):
if not availability_zones:
return
service_list = self.call_retry(f=self.nova_helper.get_service_list) service_list = self.call_retry(f=self.nova_helper.get_service_list)
zone_names = [zone['name'] for zone zone_names = [zone['name'] for zone
in availability_zones] in availability_zones]
@@ -249,71 +239,20 @@ class NovaModelBuilder(base.BaseModelBuilder):
if service.zone in zone_names or include_all_nodes: if service.zone in zone_names or include_all_nodes:
_nodes.add(service.host) _nodes.add(service.host)
def _compute_node_future(self, future, future_instances):
"""Add compute node information to model and schedule instance info job
:param future: The future from the finished execution
:rtype future: :py:class:`futurist.GreenFuture`
:param future_instances: list of futures for instance jobs
:rtype future_instances: list :py:class:`futurist.GreenFuture`
"""
try:
node_info = future.result()[0]
# filter out baremetal node
if node_info.hypervisor_type == 'ironic':
LOG.debug("filtering out baremetal node: %s", node_info)
return
self.add_compute_node(node_info)
# node.servers is a list of server objects
# New in nova version 2.53
instances = getattr(node_info, "servers", None)
# Do not submit job if there are no instances on compute node
if instances is None:
LOG.info("No instances on compute_node: {0}".format(node_info))
return
future_instances.append(
self.executor.submit(
self.add_instance_node, node_info, instances)
)
except Exception:
LOG.error("compute node from aggregate / "
"availability_zone could not be found")
def _add_physical_layer(self): def _add_physical_layer(self):
"""Collects all information on compute nodes and instances """Add the physical layer of the graph.
Will collect all required compute node and instance information based This includes components which represent actual infrastructure
on the host aggregates and availability zones. If aggregates and zones hardware.
do not specify any compute nodes all nodes are retrieved instead.
The collection of information happens concurrently using the
DecisionEngineThreadpool. The collection is parallelized in three steps
first information about aggregates and zones is gathered. Secondly,
for each of the compute nodes a tasks is submitted to get detailed
information about the compute node. Finally, Each of these submitted
tasks will submit an additional task if the compute node contains
instances. Before returning from this function all instance tasks are
waited upon to complete.
""" """
compute_nodes = set() compute_nodes = set()
host_aggregates = self.model_scope.get("host_aggregates") host_aggregates = self.model_scope.get("host_aggregates")
availability_zones = self.model_scope.get("availability_zones") availability_zones = self.model_scope.get("availability_zones")
if host_aggregates:
self._collect_aggregates(host_aggregates, compute_nodes)
if availability_zones:
self._collect_zones(availability_zones, compute_nodes)
"""Submit tasks to gather compute nodes from availability zones and
host aggregates. Each task adds compute nodes to the set, this set is
threadsafe under the assumption that CPython is used with the GIL
enabled."""
zone_aggregate_futures = {
self.executor.submit(
self._collect_aggregates, host_aggregates, compute_nodes),
self.executor.submit(
self._collect_zones, availability_zones, compute_nodes)
}
waiters.wait_for_all(zone_aggregate_futures)
# if zones and aggregates did not contain any nodes get every node.
if not compute_nodes: if not compute_nodes:
self.no_model_scope_flag = True self.no_model_scope_flag = True
all_nodes = self.call_retry( all_nodes = self.call_retry(
@@ -321,20 +260,24 @@ class NovaModelBuilder(base.BaseModelBuilder):
compute_nodes = set( compute_nodes = set(
[node.hypervisor_hostname for node in all_nodes]) [node.hypervisor_hostname for node in all_nodes])
LOG.debug("compute nodes: %s", compute_nodes) LOG.debug("compute nodes: %s", compute_nodes)
for node_name in compute_nodes:
node_futures = [self.executor.submit( cnode = self.call_retry(
self.nova_helper.get_compute_node_by_name, self.nova_helper.get_compute_node_by_name,
node, servers=True, detailed=True) node_name, servers=True, detailed=True)
for node in compute_nodes] if cnode:
LOG.debug("submitted {0} jobs".format(len(compute_nodes))) node_info = cnode[0]
# filter out baremetal node
# Futures will concurrently be added, only safe with CPython GIL if node_info.hypervisor_type == 'ironic':
future_instances = [] LOG.debug("filtering out baremetal node: %s", node_name)
self.executor.do_while_futures_modify( continue
node_futures, self._compute_node_future, future_instances) self.add_compute_node(node_info)
# node.servers is a list of server objects
# Wait for all instance jobs to finish # New in nova version 2.53
waiters.wait_for_all(future_instances) instances = getattr(node_info, "servers", None)
self.add_instance_node(node_info, instances)
else:
LOG.error("compute_node from aggregate / availability_zone "
"could not be found: {0}".format(node_name))
def add_compute_node(self, node): def add_compute_node(self, node):
# Build and add base node. # Build and add base node.

View File

@@ -171,7 +171,7 @@ class ModelRoot(nx.DiGraph, base.Model):
def _get_by_uuid(self, uuid): def _get_by_uuid(self, uuid):
try: try:
return self.nodes[uuid]['attr'] return self.node[uuid]['attr']
except Exception as exc: except Exception as exc:
LOG.exception(exc) LOG.exception(exc)
raise exception.ComputeResourceNotFound(name=uuid) raise exception.ComputeResourceNotFound(name=uuid)
@@ -466,14 +466,14 @@ class StorageModelRoot(nx.DiGraph, base.Model):
def _get_by_uuid(self, uuid): def _get_by_uuid(self, uuid):
try: try:
return self.nodes[uuid]['attr'] return self.node[uuid]['attr']
except Exception as exc: except Exception as exc:
LOG.exception(exc) LOG.exception(exc)
raise exception.StorageResourceNotFound(name=uuid) raise exception.StorageResourceNotFound(name=uuid)
def _get_by_name(self, name): def _get_by_name(self, name):
try: try:
return self.nodes[name]['attr'] return self.node[name]['attr']
except Exception as exc: except Exception as exc:
LOG.exception(exc) LOG.exception(exc)
raise exception.StorageResourceNotFound(name=name) raise exception.StorageResourceNotFound(name=name)
@@ -648,7 +648,7 @@ class BaremetalModelRoot(nx.DiGraph, base.Model):
def _get_by_uuid(self, uuid): def _get_by_uuid(self, uuid):
try: try:
return self.nodes[uuid]['attr'] return self.node[uuid]['attr']
except Exception as exc: except Exception as exc:
LOG.exception(exc) LOG.exception(exc)
raise exception.BaremetalResourceNotFound(name=uuid) raise exception.BaremetalResourceNotFound(name=uuid)

View File

@@ -201,13 +201,19 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
LOG.debug('Migrate instance %s from %s to %s', LOG.debug('Migrate instance %s from %s to %s',
instance_to_migrate, source_node, destination_node) instance_to_migrate, source_node, destination_node)
used_resources = self.compute_model.get_node_used_resources( total_cores = 0
destination_node) total_disk = 0
total_mem = 0
for instance in self.compute_model.get_node_instances(
destination_node):
total_cores += instance.vcpus
total_disk += instance.disk
total_mem += instance.memory
# capacity requested by the compute node # capacity requested by the compute node
total_cores = used_resources['vcpu'] + instance_to_migrate.vcpus total_cores += instance_to_migrate.vcpus
total_disk = used_resources['disk'] + instance_to_migrate.disk total_disk += instance_to_migrate.disk
total_mem = used_resources['memory'] + instance_to_migrate.memory total_mem += instance_to_migrate.memory
return self.check_threshold(destination_node, total_cores, total_disk, return self.check_threshold(destination_node, total_cores, total_disk,
total_mem) total_mem)
@@ -401,7 +407,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
self._pre_execute() self._pre_execute()
# backwards compatibility for node parameter. # backwards compatibility for node parameter.
if self.aggregation_method['node'] != '': if self.aggregation_method['node'] is not '':
LOG.warning('Parameter node has been renamed to compute_node and ' LOG.warning('Parameter node has been renamed to compute_node and '
'will be removed in next release.') 'will be removed in next release.')
self.aggregation_method['compute_node'] = \ self.aggregation_method['compute_node'] = \

View File

@@ -137,6 +137,37 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
ram=node.memory_mb_capacity, ram=node.memory_mb_capacity,
disk=node.disk_gb_capacity) disk=node.disk_gb_capacity)
def get_node_used(self, node):
"""Collect cpu, ram and disk used of a node.
:param node: node object
:return: dict(cpu(cores), ram(MB), disk(B))
"""
vcpus_used = 0
memory_used = 0
disk_used = 0
for instance in self.compute_model.get_node_instances(node):
vcpus_used += instance.vcpus
memory_used += instance.memory
disk_used += instance.disk
return dict(cpu=vcpus_used,
ram=memory_used,
disk=disk_used)
def get_node_free(self, node):
"""Collect cpu, ram and disk free of a node.
:param node: node object
:return: dict(cpu(cores), ram(MB), disk(B))
"""
node_capacity = self.get_node_capacity(node)
node_used = self.get_node_used(node)
return dict(cpu=node_capacity['cpu']-node_used['cpu'],
ram=node_capacity['ram']-node_used['ram'],
disk=node_capacity['disk']-node_used['disk'],
)
def host_fits(self, source_node, destination_node): def host_fits(self, source_node, destination_node):
"""check host fits """check host fits
@@ -144,11 +175,9 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
from source_node to destination_node. from source_node to destination_node.
""" """
source_node_used = self.compute_model.get_node_used_resources( source_node_used = self.get_node_used(source_node)
source_node) destination_node_free = self.get_node_free(destination_node)
destination_node_free = self.compute_model.get_node_free_resources( metrics = ['cpu', 'ram']
destination_node)
metrics = ['vcpu', 'memory']
for m in metrics: for m in metrics:
if source_node_used[m] > destination_node_free[m]: if source_node_used[m] > destination_node_free[m]:
return False return False

View File

@@ -208,6 +208,19 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
return hosts_need_release, hosts_target return hosts_need_release, hosts_target
def calc_used_resource(self, node):
"""Calculate the used vcpus, memory and disk based on VM flavors"""
instances = self.compute_model.get_node_instances(node)
vcpus_used = 0
memory_mb_used = 0
disk_gb_used = 0
for instance in instances:
vcpus_used += instance.vcpus
memory_mb_used += instance.memory
disk_gb_used += instance.disk
return vcpus_used, memory_mb_used, disk_gb_used
def filter_dest_servers(self, hosts, instance_to_migrate): def filter_dest_servers(self, hosts, instance_to_migrate):
required_cores = instance_to_migrate.vcpus required_cores = instance_to_migrate.vcpus
required_disk = instance_to_migrate.disk required_disk = instance_to_migrate.disk
@@ -215,9 +228,12 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
dest_servers = [] dest_servers = []
for host in hosts: for host in hosts:
free_res = self.compute_model.get_node_free_resources(host) cores_used, mem_used, disk_used = self.calc_used_resource(host)
if (free_res['vcpu'] >= required_cores and free_res['disk'] >= cores_available = host.vcpu_capacity - cores_used
required_disk and free_res['memory'] >= required_memory): disk_available = host.disk_gb_capacity - disk_used
mem_available = host.memory_mb_capacity - mem_used
if (cores_available >= required_cores and disk_available >=
required_disk and mem_available >= required_memory):
dest_servers.append(host) dest_servers.append(host)
return dest_servers return dest_servers

View File

@@ -140,6 +140,19 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
if cn.state == element.ServiceState.ONLINE.value and if cn.state == element.ServiceState.ONLINE.value and
cn.status in default_node_scope} cn.status in default_node_scope}
def calc_used_resource(self, node):
"""Calculate the used vcpus, memory and disk based on VM flavors"""
instances = self.compute_model.get_node_instances(node)
vcpus_used = 0
memory_mb_used = 0
disk_gb_used = 0
for instance in instances:
vcpus_used += instance.vcpus
memory_mb_used += instance.memory
disk_gb_used += instance.disk
return vcpus_used, memory_mb_used, disk_gb_used
def group_hosts_by_outlet_temp(self): def group_hosts_by_outlet_temp(self):
"""Group hosts based on outlet temp meters""" """Group hosts based on outlet temp meters"""
nodes = self.get_available_compute_nodes() nodes = self.get_available_compute_nodes()
@@ -209,9 +222,13 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
for instance_data in hosts: for instance_data in hosts:
host = instance_data['compute_node'] host = instance_data['compute_node']
# available # available
free_res = self.compute_model.get_node_free_resources(host) cores_used, mem_used, disk_used = self.calc_used_resource(host)
if (free_res['vcpu'] >= required_cores and free_res['disk'] >= cores_available = host.vcpu_capacity - cores_used
required_disk and free_res['memory'] >= required_memory): disk_available = host.disk_gb_capacity - disk_used
mem_available = host.memory_mb_capacity - mem_used
if cores_available >= required_cores \
and disk_available >= required_disk \
and mem_available >= required_memory:
dest_servers.append(instance_data) dest_servers.append(instance_data)
return dest_servers return dest_servers

View File

@@ -135,12 +135,19 @@ class UniformAirflow(base.BaseStrategy):
def calculate_used_resource(self, node): def calculate_used_resource(self, node):
"""Compute the used vcpus, memory and disk based on instance flavors""" """Compute the used vcpus, memory and disk based on instance flavors"""
used_res = self.compute_model.get_node_used_resources(node) instances = self.compute_model.get_node_instances(node)
vcpus_used = 0
memory_mb_used = 0
disk_gb_used = 0
for instance in instances:
vcpus_used += instance.vcpus
memory_mb_used += instance.memory
disk_gb_used += instance.disk
return used_res['vcpu'], used_res['memory'], used_res['disk'] return vcpus_used, memory_mb_used, disk_gb_used
def choose_instance_to_migrate(self, hosts): def choose_instance_to_migrate(self, hosts):
"""Pick up an active instance to migrate from provided hosts """Pick up an active instance instance to migrate from provided hosts
:param hosts: the array of dict which contains node object :param hosts: the array of dict which contains node object
""" """

View File

@@ -48,7 +48,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
A capacity coefficients (cc) might be used to adjust optimization A capacity coefficients (cc) might be used to adjust optimization
thresholds. Different resources may require different coefficient thresholds. Different resources may require different coefficient
values as well as setting up different coefficient values in both values as well as setting up different coefficient values in both
phases may lead to more efficient consolidation in the end. phases may lead to to more efficient consolidation in the end.
If the cc equals 1 the full resource capacity may be used, cc If the cc equals 1 the full resource capacity may be used, cc
values lower than 1 will lead to resource under utilization and values lower than 1 will lead to resource under utilization and
values higher than 1 will lead to resource overbooking. values higher than 1 will lead to resource overbooking.

View File

@@ -132,8 +132,21 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
if cn.state == element.ServiceState.ONLINE.value and if cn.state == element.ServiceState.ONLINE.value and
cn.status in default_node_scope} cn.status in default_node_scope}
def calculate_used_resource(self, node):
"""Calculate the used vcpus, memory and disk based on VM flavors"""
instances = self.compute_model.get_node_instances(node)
vcpus_used = 0
memory_mb_used = 0
disk_gb_used = 0
for instance in instances:
vcpus_used += instance.vcpus
memory_mb_used += instance.memory
disk_gb_used += instance.disk
return vcpus_used, memory_mb_used, disk_gb_used
def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache): def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache):
"""Pick up an active instance to migrate from provided hosts """Pick up an active instance instance to migrate from provided hosts
:param hosts: the array of dict which contains node object :param hosts: the array of dict which contains node object
:param avg_workload: the average workload value of all nodes :param avg_workload: the average workload value of all nodes
@@ -190,10 +203,14 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
host = instance_data['compute_node'] host = instance_data['compute_node']
workload = instance_data['workload'] workload = instance_data['workload']
# calculate the available resources # calculate the available resources
free_res = self.compute_model.get_node_free_resources(host) cores_used, mem_used, disk_used = self.calculate_used_resource(
if (free_res['vcpu'] >= required_cores and host)
free_res['memory'] >= required_mem and cores_available = host.vcpu_capacity - cores_used
free_res['disk'] >= required_disk): disk_available = host.disk_gb_capacity - disk_used
mem_available = host.memory_mb_capacity - mem_used
if (cores_available >= required_cores and
mem_available >= required_mem and
disk_available >= required_disk):
if (self._meter == 'instance_cpu_usage' and if (self._meter == 'instance_cpu_usage' and
((src_instance_workload + workload) < ((src_instance_workload + workload) <
self.threshold / 100 * host.vcpus)): self.threshold / 100 * host.vcpus)):

View File

@@ -514,7 +514,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
self.aggregation_method['node'] self.aggregation_method['node']
# backwards compatibility for node parameter with period. # backwards compatibility for node parameter with period.
if self.periods['node'] != 0: if self.periods['node'] is not 0:
LOG.warning('Parameter node has been renamed to compute_node and ' LOG.warning('Parameter node has been renamed to compute_node and '
'will be removed in next release.') 'will be removed in next release.')
self.periods['compute_node'] = self.periods['node'] self.periods['compute_node'] = self.periods['node']

View File

@@ -1,98 +0,0 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 European Organization for Nuclear Research (CERN)
#
# Authors: Corne Lukken <info@dantalion.nl>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import futurist
from futurist import waiters
import six
from oslo_config import cfg
from oslo_log import log
from oslo_service import service
# Module-level oslo.config handle and logger for this module.
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@six.add_metaclass(service.Singleton)
class DecisionEngineThreadPool(object):
    """Singleton threadpool to submit general tasks to"""

    def __init__(self):
        # Pool size is operator-configurable via
        # [watcher_decision_engine] max_general_workers.
        self.amount_workers = CONF.watcher_decision_engine.max_general_workers
        self._threadpool = futurist.GreenThreadPoolExecutor(
            max_workers=self.amount_workers)

    def submit(self, fn, *args, **kwargs):
        """Submit the job to the underlying threadpool.

        :param fn: function to execute in another thread
        :param args: positional arguments for the function
        :param kwargs: keyword arguments for the function
        :return: future to monitor progress of execution
        :rtype: :py:class:`futurist.GreenFuture`
        """
        return self._threadpool.submit(fn, *args, **kwargs)

    @staticmethod
    def do_while_futures(futures, fn, *args, **kwargs):
        """Execute a function as futures from a collection complete.

        Will execute the specified function with its arguments when one of
        the futures from the passed collection finishes. Additionally, the
        future is passed as first argument to the function. Does not modify
        the passed collection of futures.

        :param futures: list, set or dictionary of futures
        :type futures: list :py:class:`futurist.GreenFuture`
        :param fn: function to execute upon the future finishing execution
        :param args: positional arguments for the function
        :param kwargs: keyword arguments for the function
        """
        # shallow copy the collection to not modify it outside of this method.
        # shallow copy must be used because the type of collection needs to be
        # determined at runtime (can be both list, set and dict).
        futures = copy.copy(futures)
        DecisionEngineThreadPool.do_while_futures_modify(
            futures, fn, *args, **kwargs)

    @staticmethod
    def do_while_futures_modify(futures, fn, *args, **kwargs):
        """Execute a function as futures from a collection complete.

        Will execute the specified function with its arguments when one of
        the futures from the passed collection finishes. Additionally, the
        future is passed as first argument to the function. Modifies the
        collection by removing completed elements.

        :param futures: list, set or dictionary of futures
        :type futures: list :py:class:`futurist.GreenFuture`
        :param fn: function to execute upon the future finishing execution
        :param args: positional arguments for the function
        :param kwargs: keyword arguments for the function
        """
        # wait_for_any returns a (done, not_done) pair of sets; loop until
        # both are empty, i.e. every future has been handled and removed.
        # NOTE(review): assumes the caller passes a non-empty collection —
        # the behavior of waiters.wait_for_any on an empty collection is not
        # shown here; TODO confirm before calling with no futures.
        waits = waiters.wait_for_any(futures)
        while len(waits[0]) > 0 or len(waits[1]) > 0:
            # Invoke the callback for every future that is currently done,
            # passing the future as the first argument, then drop it so it
            # is not processed twice.
            for future in waiters.wait_for_any(futures)[0]:
                fn(future, *args, **kwargs)
                futures.remove(future)
            waits = waiters.wait_for_any(futures)

View File

@@ -23,8 +23,6 @@ def flake8ext(f):
only for unit tests to know which are watcher flake8 extensions. only for unit tests to know which are watcher flake8 extensions.
""" """
f.name = __name__ f.name = __name__
f.version = '0.0.1'
f.skip_on_py3 = False
return f return f
@@ -164,11 +162,11 @@ def check_asserttrue(logical_line, filename):
def check_assertfalse(logical_line, filename): def check_assertfalse(logical_line, filename):
if 'watcher/tests/' in filename: if 'watcher/tests/' in filename:
if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line): if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line):
msg = ("N329: Use assertFalse(observed) instead of " msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)") "assertEqual(False, observed)")
yield (0, msg) yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line): if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line):
msg = ("N329: Use assertFalse(observed) instead of " msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)") "assertEqual(False, observed)")
yield (0, msg) yield (0, msg)
@@ -285,3 +283,21 @@ def no_redundant_import_alias(logical_line):
""" """
if re.match(re_redundant_import_alias, logical_line): if re.match(re_redundant_import_alias, logical_line):
yield(0, "N342: No redundant import alias.") yield(0, "N342: No redundant import alias.")
def factory(register):
register(use_jsonutils)
register(check_assert_called_once_with)
register(no_translate_debug_logs)
register(check_python3_xrange)
register(check_no_basestring)
register(check_python3_no_iteritems)
register(check_asserttrue)
register(check_assertfalse)
register(check_assertempty)
register(check_assertisinstance)
register(check_assertequal_for_httpcode)
register(check_log_warn_deprecated)
register(check_oslo_i18n_wrapper)
register(check_builtins_gettext)
register(no_redundant_import_alias)

View File

@@ -1,14 +1,15 @@
# Frank Kloeker <eumel@arcor.de>, 2018. #zanata # Frank Kloeker <eumel@arcor.de>, 2018. #zanata
# Andreas Jaeger <jaegerandi@gmail.com>, 2019. #zanata
msgid "" msgid ""
msgstr "" msgstr ""
"Project-Id-Version: watcher VERSION\n" "Project-Id-Version: watcher VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2019-03-22 02:21+0000\n" "POT-Creation-Date: 2019-09-23 08:56+0000\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2018-03-06 07:56+0000\n" "PO-Revision-Date: 2019-09-25 09:29+0000\n"
"Last-Translator: Frank Kloeker <eumel@arcor.de>\n" "Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
"Language-Team: German\n" "Language-Team: German\n"
"Language: de\n" "Language: de\n"
"X-Generator: Zanata 4.3.3\n" "X-Generator: Zanata 4.3.3\n"
@@ -62,10 +63,6 @@ msgstr "Der Argumenttyp 'obj' ist nicht gültig"
msgid "'obj' argument type is not valid: %s" msgid "'obj' argument type is not valid: %s"
msgstr "Der Argumenttyp 'obj' ist nicht gültig: %s" msgstr "Der Argumenttyp 'obj' ist nicht gültig: %s"
#, python-format
msgid "A datetime.datetime is required here. Got %s"
msgstr "Eine datetime.datetime ist hier erforderlich. Bekam %s"
#, python-format #, python-format
msgid "A goal with UUID %(uuid)s already exists" msgid "A goal with UUID %(uuid)s already exists"
msgstr "Ein Ziel mit UUID %(uuid)s ist bereits vorhanden" msgstr "Ein Ziel mit UUID %(uuid)s ist bereits vorhanden"
@@ -169,10 +166,6 @@ msgstr ""
msgid "An indicator value should be a number" msgid "An indicator value should be a number"
msgstr "Ein Indikatorwert sollte eine Zahl sein" msgstr "Ein Indikatorwert sollte eine Zahl sein"
#, python-format
msgid "An object of class %s is required here"
msgstr "Ein Objekt der Klasse %s ist hier erforderlich"
msgid "An unknown exception occurred" msgid "An unknown exception occurred"
msgstr "Eine unbekannte Ausnahme ist aufgetreten" msgstr "Eine unbekannte Ausnahme ist aufgetreten"
@@ -211,13 +204,6 @@ msgstr "Audit-Typ %(audit_type)s konnte nicht gefunden werden"
msgid "AuditTemplate %(audit_template)s could not be found" msgid "AuditTemplate %(audit_template)s could not be found"
msgstr "AuditTemplate %(audit_template)s konnte nicht gefunden werden" msgstr "AuditTemplate %(audit_template)s konnte nicht gefunden werden"
#, python-format
msgid ""
"AuditTemplate %(audit_template)s is referenced by one or multiple audits"
msgstr ""
"AuditTemplate %(audit_template)s wird von einem oder mehreren Audits "
"referenziert"
msgid "Audits" msgid "Audits"
msgstr "Audits" msgstr "Audits"
@@ -445,9 +431,6 @@ msgstr "Ungültiger Status: %(state)s"
msgid "JSON list expected in feature argument" msgid "JSON list expected in feature argument"
msgstr "JSON-Liste in Feature-Argument erwartet" msgstr "JSON-Liste in Feature-Argument erwartet"
msgid "Keystone API endpoint is missing"
msgstr "Der Keystone-API-Endpunkt fehlt"
msgid "Limit must be positive" msgid "Limit must be positive"
msgstr "Limit muss positiv sein" msgstr "Limit muss positiv sein"
@@ -461,26 +444,19 @@ msgstr "Maximale Zeit seit dem letzten Check-in für den Up-Service."
msgid "Migration of type '%(migration_type)s' is not supported." msgid "Migration of type '%(migration_type)s' is not supported."
msgstr "Die Migration vom Typ '%(migration_type)s' wird nicht unterstützt." msgstr "Die Migration vom Typ '%(migration_type)s' wird nicht unterstützt."
msgid "Minimum Nova API Version"
msgstr "Minimale Nova API Version"
#, python-format #, python-format
msgid "No %(metric)s metric for %(host)s found." msgid "No %(metric)s metric for %(host)s found."
msgstr "Keine %(metric)s Metrik für %(host)s gefunden." msgstr "Keine %(metric)s Metrik für %(host)s gefunden."
msgid "No rows were returned"
msgstr "Es wurden keine Zeilen zurückgegeben"
#, python-format #, python-format
msgid "No strategy could be found to achieve the '%(goal)s' goal." msgid "No strategy could be found to achieve the '%(goal)s' goal."
msgstr "" msgstr ""
"Es konnte keine Strategie gefunden werden, um das Ziel '%(goal)s' zu " "Es konnte keine Strategie gefunden werden, um das Ziel '%(goal)s' zu "
"erreichen." "erreichen."
msgid "No such metric"
msgstr "Keine solche Metrik"
#, python-format
msgid "No values returned by %(resource_id)s for %(metric_name)s."
msgstr "Keine Werte von %(resource_id)s für %(metric_name)s zurückgegeben."
msgid "Noisy Neighbor" msgid "Noisy Neighbor"
msgstr "Lauter Nachbar" msgstr "Lauter Nachbar"
@@ -648,10 +624,6 @@ msgstr "Die UUID oder der Name der Überprüfungsvorlage ist ungültig"
msgid "The baremetal resource '%(name)s' could not be found" msgid "The baremetal resource '%(name)s' could not be found"
msgstr "Die Barmetal-Ressource '%(name)s' konnte nicht gefunden werden" msgstr "Die Barmetal-Ressource '%(name)s' konnte nicht gefunden werden"
#, python-format
msgid "The capacity %(capacity)s is not defined for '%(resource)s'"
msgstr "Die Kapazität %(capacity)s ist nicht definiert für '%(resource)s'"
#, python-format #, python-format
msgid "The cluster data model '%(cdm)s' could not be built" msgid "The cluster data model '%(cdm)s' could not be built"
msgstr "Das Clusterdatenmodell '%(cdm)s' konnte nicht erstellt werden" msgstr "Das Clusterdatenmodell '%(cdm)s' konnte nicht erstellt werden"
@@ -690,9 +662,6 @@ msgstr "Die Instanz '%(name)s' konnte nicht gefunden werden"
msgid "The ironic node %(uuid)s could not be found" msgid "The ironic node %(uuid)s could not be found"
msgstr "Der Ironic Knoten %(uuid)s konnte nicht gefunden werden" msgstr "Der Ironic Knoten %(uuid)s konnte nicht gefunden werden"
msgid "The metrics resource collector is not defined"
msgstr "Der Metrikressourcen-Collector ist nicht definiert"
msgid "The number of VM migrations to be performed." msgid "The number of VM migrations to be performed."
msgstr "Die Anzahl der VM-Migrationen, die ausgeführt werden sollen." msgstr "Die Anzahl der VM-Migrationen, die ausgeführt werden sollen."

View File

@@ -4,7 +4,7 @@ msgid ""
msgstr "" msgstr ""
"Project-Id-Version: watcher VERSION\n" "Project-Id-Version: watcher VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2019-03-22 02:21+0000\n" "POT-Creation-Date: 2019-09-23 08:56+0000\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
@@ -63,10 +63,6 @@ msgstr "'obj' argument type is not valid"
msgid "'obj' argument type is not valid: %s" msgid "'obj' argument type is not valid: %s"
msgstr "'obj' argument type is not valid: %s" msgstr "'obj' argument type is not valid: %s"
#, python-format
msgid "A datetime.datetime is required here. Got %s"
msgstr "A datetime.datetime is required here. Got %s"
#, python-format #, python-format
msgid "A goal with UUID %(uuid)s already exists" msgid "A goal with UUID %(uuid)s already exists"
msgstr "A goal with UUID %(uuid)s already exists" msgstr "A goal with UUID %(uuid)s already exists"
@@ -162,10 +158,6 @@ msgstr "An audit_template with UUID or name %(audit_template)s already exists"
msgid "An indicator value should be a number" msgid "An indicator value should be a number"
msgstr "An indicator value should be a number" msgstr "An indicator value should be a number"
#, python-format
msgid "An object of class %s is required here"
msgstr "An object of class %s is required here"
msgid "An unknown exception occurred" msgid "An unknown exception occurred"
msgstr "An unknown exception occurred" msgstr "An unknown exception occurred"
@@ -203,12 +195,6 @@ msgstr "Audit type %(audit_type)s could not be found"
msgid "AuditTemplate %(audit_template)s could not be found" msgid "AuditTemplate %(audit_template)s could not be found"
msgstr "AuditTemplate %(audit_template)s could not be found" msgstr "AuditTemplate %(audit_template)s could not be found"
#, python-format
msgid ""
"AuditTemplate %(audit_template)s is referenced by one or multiple audits"
msgstr ""
"AuditTemplate %(audit_template)s is referenced by one or multiple audits"
msgid "Audits" msgid "Audits"
msgstr "Audits" msgstr "Audits"
@@ -439,9 +425,6 @@ msgstr "Invalid state: %(state)s"
msgid "JSON list expected in feature argument" msgid "JSON list expected in feature argument"
msgstr "JSON list expected in feature argument" msgstr "JSON list expected in feature argument"
msgid "Keystone API endpoint is missing"
msgstr "Keystone API endpoint is missing"
msgid "Limit must be positive" msgid "Limit must be positive"
msgstr "Limit must be positive" msgstr "Limit must be positive"
@@ -468,20 +451,10 @@ msgstr ""
msgid "No %(metric)s metric for %(host)s found." msgid "No %(metric)s metric for %(host)s found."
msgstr "No %(metric)s metric for %(host)s found." msgstr "No %(metric)s metric for %(host)s found."
msgid "No rows were returned"
msgstr "No rows were returned"
#, python-format #, python-format
msgid "No strategy could be found to achieve the '%(goal)s' goal." msgid "No strategy could be found to achieve the '%(goal)s' goal."
msgstr "No strategy could be found to achieve the '%(goal)s' goal." msgstr "No strategy could be found to achieve the '%(goal)s' goal."
msgid "No such metric"
msgstr "No such metric"
#, python-format
msgid "No values returned by %(resource_id)s for %(metric_name)s."
msgstr "No values returned by %(resource_id)s for %(metric_name)s."
msgid "Noisy Neighbor" msgid "Noisy Neighbor"
msgstr "Noisy Neighbour" msgstr "Noisy Neighbour"
@@ -575,9 +548,6 @@ msgstr "Role name seems ambiguous: %s"
msgid "Role not Found: %s" msgid "Role not Found: %s"
msgstr "Role not Found: %s" msgstr "Role not Found: %s"
msgid "Sample Check"
msgstr "Sample Check"
msgid "Saving Energy" msgid "Saving Energy"
msgstr "Saving Energy" msgstr "Saving Energy"
@@ -667,10 +637,6 @@ msgstr "The audit template UUID or name specified is invalid"
msgid "The baremetal resource '%(name)s' could not be found" msgid "The baremetal resource '%(name)s' could not be found"
msgstr "The baremetal resource '%(name)s' could not be found" msgstr "The baremetal resource '%(name)s' could not be found"
#, python-format
msgid "The capacity %(capacity)s is not defined for '%(resource)s'"
msgstr "The capacity %(capacity)s is not defined for '%(resource)s'"
#, python-format #, python-format
msgid "The cluster data model '%(cdm)s' could not be built" msgid "The cluster data model '%(cdm)s' could not be built"
msgstr "The cluster data model '%(cdm)s' could not be built" msgstr "The cluster data model '%(cdm)s' could not be built"
@@ -709,9 +675,6 @@ msgstr "The instance '%(name)s' could not be found"
msgid "The ironic node %(uuid)s could not be found" msgid "The ironic node %(uuid)s could not be found"
msgstr "The Ironic node %(uuid)s could not be found" msgstr "The Ironic node %(uuid)s could not be found"
msgid "The metrics resource collector is not defined"
msgstr "The metrics resource collector is not defined"
msgid "The number of VM migrations to be performed." msgid "The number of VM migrations to be performed."
msgstr "The number of VM migrations to be performed." msgstr "The number of VM migrations to be performed."

View File

@@ -13,6 +13,8 @@
import inspect import inspect
import sys import sys
import six
from watcher.notifications import base as notificationbase from watcher.notifications import base as notificationbase
from watcher.objects import base from watcher.objects import base
from watcher.objects import fields as wfields from watcher.objects import fields as wfields
@@ -40,7 +42,7 @@ class ExceptionPayload(notificationbase.NotificationPayloadBase):
function_name=trace[3], function_name=trace[3],
module_name=inspect.getmodule(trace[0]).__name__, module_name=inspect.getmodule(trace[0]).__name__,
exception=fault.__class__.__name__, exception=fault.__class__.__name__,
exception_message=str(fault)) exception_message=six.text_type(fault))
@notificationbase.notification_sample('infra-optim-exception.json') @notificationbase.notification_sample('infra-optim-exception.json')

View File

@@ -75,7 +75,6 @@ class State(object):
class AuditType(enum.Enum): class AuditType(enum.Enum):
ONESHOT = 'ONESHOT' ONESHOT = 'ONESHOT'
CONTINUOUS = 'CONTINUOUS' CONTINUOUS = 'CONTINUOUS'
EVENT = 'EVENT'
@base.WatcherObjectRegistry.register @base.WatcherObjectRegistry.register

View File

@@ -1,35 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import importlib

from oslo_config import cfg

from watcher.api import config as api_config
from watcher.tests.api import base
class TestRoot(base.FunctionalTest):
    """Check that [api] enable_webhooks_auth controls the public routes."""

    def _reload_api_config(self):
        # Re-evaluate the API config module so it picks up the overridden
        # option. imp.reload is deprecated (the imp module was removed in
        # Python 3.12); importlib.reload is the supported equivalent.
        importlib.reload(api_config)

    def test_config_enable_webhooks_auth(self):
        """Only '/' is public when webhook auth is enabled."""
        acl_public_routes = ['/']
        cfg.CONF.set_override('enable_webhooks_auth', True, 'api')
        self._reload_api_config()
        self.assertEqual(acl_public_routes,
                         api_config.app['acl_public_routes'])

    def test_config_disable_webhooks_auth(self):
        """Webhook routes are public when webhook auth is disabled."""
        acl_public_routes = ['/', '/v1/webhooks/.*']
        cfg.CONF.set_override('enable_webhooks_auth', False, 'api')
        self._reload_api_config()
        self.assertEqual(acl_public_routes,
                         api_config.app['acl_public_routes'])

View File

@@ -27,9 +27,8 @@ class TestRoot(base.FunctionalTest):
class TestV1Root(base.FunctionalTest): class TestV1Root(base.FunctionalTest):
def test_get_v1_root_all(self): def test_get_v1_root(self):
data = self.get_json( data = self.get_json('/')
'/', headers={'OpenStack-API-Version': 'infra-optim 1.4'})
self.assertEqual('v1', data['id']) self.assertEqual('v1', data['id'])
# Check fields are not empty # Check fields are not empty
for f in data.keys(): for f in data.keys():
@@ -39,24 +38,6 @@ class TestV1Root(base.FunctionalTest):
actual_resources = tuple(set(data.keys()) - set(not_resources)) actual_resources = tuple(set(data.keys()) - set(not_resources))
expected_resources = ('audit_templates', 'audits', 'actions', expected_resources = ('audit_templates', 'audits', 'actions',
'action_plans', 'data_model', 'scoring_engines', 'action_plans', 'data_model', 'scoring_engines',
'services', 'webhooks')
self.assertEqual(sorted(expected_resources), sorted(actual_resources))
self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json',
'base': 'application/json'}, data['media_types'])
def test_get_v1_root_without_datamodel(self):
data = self.get_json(
'/', headers={'OpenStack-API-Version': 'infra-optim 1.2'})
self.assertEqual('v1', data['id'])
# Check fields are not empty
for f in data.keys():
self.assertNotIn(f, ['', []])
# Check if all known resources are present and there are no extra ones.
not_resources = ('id', 'links', 'media_types')
actual_resources = tuple(set(data.keys()) - set(not_resources))
expected_resources = ('audit_templates', 'audits', 'actions',
'action_plans', 'scoring_engines',
'services') 'services')
self.assertEqual(sorted(expected_resources), sorted(actual_resources)) self.assertEqual(sorted(expected_resources), sorted(actual_resources))

View File

@@ -32,18 +32,9 @@ class TestListDataModel(api_base.FunctionalTest):
self.addCleanup(p_dcapi.stop) self.addCleanup(p_dcapi.stop)
def test_get_all(self): def test_get_all(self):
response = self.get_json( response = self.get_json('/data_model/?data_model_type=compute')
'/data_model/?data_model_type=compute',
headers={'OpenStack-API-Version': 'infra-optim 1.3'})
self.assertEqual('fake_response_value', response) self.assertEqual('fake_response_value', response)
def test_get_all_not_acceptable(self):
response = self.get_json(
'/data_model/?data_model_type=compute',
headers={'OpenStack-API-Version': 'infra-optim 1.2'},
expect_errors=True)
self.assertEqual(406, response.status_int)
class TestDataModelPolicyEnforcement(api_base.FunctionalTest): class TestDataModelPolicyEnforcement(api_base.FunctionalTest):
@@ -69,7 +60,6 @@ class TestDataModelPolicyEnforcement(api_base.FunctionalTest):
self._common_policy_check( self._common_policy_check(
"data_model:get_all", self.get_json, "data_model:get_all", self.get_json,
"/data_model/?data_model_type=compute", "/data_model/?data_model_type=compute",
headers={'OpenStack-API-Version': 'infra-optim 1.3'},
expect_errors=True) expect_errors=True)

View File

@@ -1,71 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from watcher.decision_engine import rpcapi as deapi
from watcher import objects
from watcher.tests.api import base as api_base
from watcher.tests.objects import utils as obj_utils
class TestPost(api_base.FunctionalTest):
    """Tests for POSTing to the /webhooks/<audit uuid> endpoint."""

    def setUp(self):
        super(TestPost, self).setUp()
        # A goal, strategy and audit template are created up front as
        # prerequisites for the audits built in the individual tests.
        obj_utils.create_test_goal(self.context)
        obj_utils.create_test_strategy(self.context)
        obj_utils.create_test_audit_template(self.context)

    @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit')
    def test_trigger_audit(self, mock_trigger_audit):
        """POST to an EVENT audit returns 202 and triggers the audit."""
        audit = obj_utils.create_test_audit(
            self.context,
            audit_type=objects.audit.AuditType.EVENT.value)
        response = self.post_json(
            '/webhooks/%s' % audit['uuid'], {},
            headers={'OpenStack-API-Version': 'infra-optim 1.4'})
        self.assertEqual(202, response.status_int)
        # The decision engine must have been asked to run exactly this audit.
        mock_trigger_audit.assert_called_once_with(
            mock.ANY, audit['uuid'])

    def test_trigger_audit_with_no_audit(self):
        """POST to an unknown audit identifier returns 404."""
        response = self.post_json(
            '/webhooks/no-audit', {},
            headers={'OpenStack-API-Version': 'infra-optim 1.4'},
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_trigger_audit_with_not_allowed_audittype(self):
        """POST to an audit whose type is not accepted returns 400."""
        # NOTE(review): presumably create_test_audit defaults to a non-EVENT
        # audit type, which this endpoint rejects — verify against obj_utils.
        audit = obj_utils.create_test_audit(self.context)
        response = self.post_json(
            '/webhooks/%s' % audit['uuid'], {},
            headers={'OpenStack-API-Version': 'infra-optim 1.4'},
            expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_trigger_audit_with_not_allowed_audit_state(self):
        """POST to an EVENT audit in the FAILED state returns 400."""
        audit = obj_utils.create_test_audit(
            self.context,
            audit_type=objects.audit.AuditType.EVENT.value,
            state=objects.audit.State.FAILED)
        response = self.post_json(
            '/webhooks/%s' % audit['uuid'], {},
            headers={'OpenStack-API-Version': 'infra-optim 1.4'},
            expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

View File

@@ -27,6 +27,7 @@ from monascaclient.v2_0 import client as monclient_v2
from neutronclient.neutron import client as netclient from neutronclient.neutron import client as netclient
from neutronclient.v2_0 import client as netclient_v2 from neutronclient.v2_0 import client as netclient_v2
from novaclient import client as nvclient from novaclient import client as nvclient
import six
from watcher.common import clients from watcher.common import clients
from watcher import conf from watcher import conf
@@ -138,7 +139,8 @@ class TestClients(base.TestCase):
osc = clients.OpenStackClients() osc = clients.OpenStackClients()
osc._nova = None osc._nova = None
ex = self.assertRaises(ValueError, osc.nova) ex = self.assertRaises(ValueError, osc.nova)
self.assertIn('Invalid nova_client.api_version 2.47', str(ex)) self.assertIn('Invalid nova_client.api_version 2.47',
six.text_type(ex))
@mock.patch.object(clients.OpenStackClients, 'session') @mock.patch.object(clients.OpenStackClients, 'session')
def test_clients_nova_diff_endpoint(self, mock_session): def test_clients_nova_diff_endpoint(self, mock_session):

View File

@@ -16,6 +16,7 @@
"""Tests for manipulating Action via the DB API""" """Tests for manipulating Action via the DB API"""
import freezegun import freezegun
import six
from watcher.common import exception from watcher.common import exception
from watcher.common import utils as w_utils from watcher.common import utils as w_utils
@@ -236,7 +237,7 @@ class DbActionTestCase(base.DbTestCase):
uuids = [] uuids = []
for _ in range(1, 4): for _ in range(1, 4):
action = utils.create_test_action(uuid=w_utils.generate_uuid()) action = utils.create_test_action(uuid=w_utils.generate_uuid())
uuids.append(str(action['uuid'])) uuids.append(six.text_type(action['uuid']))
actions = self.dbapi.get_action_list(self.context) actions = self.dbapi.get_action_list(self.context)
action_uuids = [a.uuid for a in actions] action_uuids = [a.uuid for a in actions]
self.assertEqual(3, len(action_uuids)) self.assertEqual(3, len(action_uuids))
@@ -253,7 +254,7 @@ class DbActionTestCase(base.DbTestCase):
action = utils.create_test_action( action = utils.create_test_action(
id=i, uuid=w_utils.generate_uuid(), id=i, uuid=w_utils.generate_uuid(),
action_plan_id=action_plan.id) action_plan_id=action_plan.id)
uuids.append(str(action['uuid'])) uuids.append(six.text_type(action['uuid']))
actions = self.dbapi.get_action_list(self.context, eager=True) actions = self.dbapi.get_action_list(self.context, eager=True)
action_map = {a.uuid: a for a in actions} action_map = {a.uuid: a for a in actions}
self.assertEqual(sorted(uuids), sorted(action_map.keys())) self.assertEqual(sorted(uuids), sorted(action_map.keys()))

View File

@@ -16,6 +16,7 @@
"""Tests for manipulating ActionPlan via the DB API""" """Tests for manipulating ActionPlan via the DB API"""
import freezegun import freezegun
import six
from watcher.common import exception from watcher.common import exception
from watcher.common import utils as w_utils from watcher.common import utils as w_utils
@@ -234,7 +235,7 @@ class DbActionPlanTestCase(base.DbTestCase):
for _ in range(1, 4): for _ in range(1, 4):
action_plan = utils.create_test_action_plan( action_plan = utils.create_test_action_plan(
uuid=w_utils.generate_uuid()) uuid=w_utils.generate_uuid())
uuids.append(str(action_plan['uuid'])) uuids.append(six.text_type(action_plan['uuid']))
action_plans = self.dbapi.get_action_plan_list(self.context) action_plans = self.dbapi.get_action_plan_list(self.context)
action_plan_uuids = [ap.uuid for ap in action_plans] action_plan_uuids = [ap.uuid for ap in action_plans]
self.assertEqual(sorted(uuids), sorted(action_plan_uuids)) self.assertEqual(sorted(uuids), sorted(action_plan_uuids))
@@ -252,7 +253,7 @@ class DbActionPlanTestCase(base.DbTestCase):
for _ in range(1, 4): for _ in range(1, 4):
action_plan = utils.create_test_action_plan( action_plan = utils.create_test_action_plan(
uuid=w_utils.generate_uuid()) uuid=w_utils.generate_uuid())
uuids.append(str(action_plan['uuid'])) uuids.append(six.text_type(action_plan['uuid']))
action_plans = self.dbapi.get_action_plan_list( action_plans = self.dbapi.get_action_plan_list(
self.context, eager=True) self.context, eager=True)
action_plan_map = {a.uuid: a for a in action_plans} action_plan_map = {a.uuid: a for a in action_plans}

View File

@@ -16,6 +16,7 @@
"""Tests for manipulating Audit via the DB API""" """Tests for manipulating Audit via the DB API"""
import freezegun import freezegun
import six
from watcher.common import exception from watcher.common import exception
from watcher.common import utils as w_utils from watcher.common import utils as w_utils
@@ -272,7 +273,7 @@ class DbAuditTestCase(base.DbTestCase):
for id_ in range(1, 4): for id_ in range(1, 4):
audit = utils.create_test_audit(uuid=w_utils.generate_uuid(), audit = utils.create_test_audit(uuid=w_utils.generate_uuid(),
name='My Audit {0}'.format(id_)) name='My Audit {0}'.format(id_))
uuids.append(str(audit['uuid'])) uuids.append(six.text_type(audit['uuid']))
audits = self.dbapi.get_audit_list(self.context) audits = self.dbapi.get_audit_list(self.context)
audit_uuids = [a.uuid for a in audits] audit_uuids = [a.uuid for a in audits]
self.assertEqual(sorted(uuids), sorted(audit_uuids)) self.assertEqual(sorted(uuids), sorted(audit_uuids))
@@ -292,7 +293,7 @@ class DbAuditTestCase(base.DbTestCase):
id=i, uuid=w_utils.generate_uuid(), id=i, uuid=w_utils.generate_uuid(),
name='My Audit {0}'.format(i), name='My Audit {0}'.format(i),
goal_id=goal.id, strategy_id=strategy.id) goal_id=goal.id, strategy_id=strategy.id)
uuids.append(str(audit['uuid'])) uuids.append(six.text_type(audit['uuid']))
audits = self.dbapi.get_audit_list(self.context, eager=True) audits = self.dbapi.get_audit_list(self.context, eager=True)
audit_map = {a.uuid: a for a in audits} audit_map = {a.uuid: a for a in audits}
self.assertEqual(sorted(uuids), sorted(audit_map.keys())) self.assertEqual(sorted(uuids), sorted(audit_map.keys()))

View File

@@ -16,6 +16,7 @@
"""Tests for manipulating AuditTemplate via the DB API""" """Tests for manipulating AuditTemplate via the DB API"""
import freezegun import freezegun
import six
from watcher.common import exception from watcher.common import exception
from watcher.common import utils as w_utils from watcher.common import utils as w_utils
@@ -234,7 +235,7 @@ class DbAuditTemplateTestCase(base.DbTestCase):
id=i, id=i,
uuid=w_utils.generate_uuid(), uuid=w_utils.generate_uuid(),
name='My Audit Template {0}'.format(i)) name='My Audit Template {0}'.format(i))
uuids.append(str(audit_template['uuid'])) uuids.append(six.text_type(audit_template['uuid']))
audit_templates = self.dbapi.get_audit_template_list(self.context) audit_templates = self.dbapi.get_audit_template_list(self.context)
audit_template_uuids = [at.uuid for at in audit_templates] audit_template_uuids = [at.uuid for at in audit_templates]
self.assertEqual(sorted(uuids), sorted(audit_template_uuids)) self.assertEqual(sorted(uuids), sorted(audit_template_uuids))
@@ -254,7 +255,7 @@ class DbAuditTemplateTestCase(base.DbTestCase):
id=i, uuid=w_utils.generate_uuid(), id=i, uuid=w_utils.generate_uuid(),
name='My Audit Template {0}'.format(i), name='My Audit Template {0}'.format(i),
goal_id=goal.id, strategy_id=strategy.id) goal_id=goal.id, strategy_id=strategy.id)
uuids.append(str(audit_template['uuid'])) uuids.append(six.text_type(audit_template['uuid']))
audit_templates = self.dbapi.get_audit_template_list( audit_templates = self.dbapi.get_audit_template_list(
self.context, eager=True) self.context, eager=True)
audit_template_map = {a.uuid: a for a in audit_templates} audit_template_map = {a.uuid: a for a in audit_templates}

View File

@@ -16,6 +16,7 @@
"""Tests for manipulating EfficacyIndicator via the DB API""" """Tests for manipulating EfficacyIndicator via the DB API"""
import freezegun import freezegun
import six
from watcher.common import exception from watcher.common import exception
from watcher.common import utils as w_utils from watcher.common import utils as w_utils
@@ -248,7 +249,7 @@ class DbEfficacyIndicatorTestCase(base.DbTestCase):
efficacy_indicator = utils.create_test_efficacy_indicator( efficacy_indicator = utils.create_test_efficacy_indicator(
action_plan_id=action_plan.id, id=id_, uuid=None, action_plan_id=action_plan.id, id=id_, uuid=None,
name="efficacy_indicator", description="Test Indicator ") name="efficacy_indicator", description="Test Indicator ")
uuids.append(str(efficacy_indicator['uuid'])) uuids.append(six.text_type(efficacy_indicator['uuid']))
efficacy_indicators = self.dbapi.get_efficacy_indicator_list( efficacy_indicators = self.dbapi.get_efficacy_indicator_list(
self.context) self.context)
efficacy_indicator_uuids = [ei.uuid for ei in efficacy_indicators] efficacy_indicator_uuids = [ei.uuid for ei in efficacy_indicators]
@@ -265,7 +266,7 @@ class DbEfficacyIndicatorTestCase(base.DbTestCase):
efficacy_indicator = utils.create_test_efficacy_indicator( efficacy_indicator = utils.create_test_efficacy_indicator(
id=i, uuid=w_utils.generate_uuid(), id=i, uuid=w_utils.generate_uuid(),
action_plan_id=action_plan.id) action_plan_id=action_plan.id)
uuids.append(str(efficacy_indicator['uuid'])) uuids.append(six.text_type(efficacy_indicator['uuid']))
efficacy_indicators = self.dbapi.get_efficacy_indicator_list( efficacy_indicators = self.dbapi.get_efficacy_indicator_list(
self.context, eager=True) self.context, eager=True)
efficacy_indicator_map = {a.uuid: a for a in efficacy_indicators} efficacy_indicator_map = {a.uuid: a for a in efficacy_indicators}

View File

@@ -16,6 +16,7 @@
"""Tests for manipulating Goal via the DB API""" """Tests for manipulating Goal via the DB API"""
import freezegun import freezegun
import six
from watcher.common import exception from watcher.common import exception
from watcher.common import utils as w_utils from watcher.common import utils as w_utils
@@ -230,7 +231,7 @@ class DbGoalTestCase(base.DbTestCase):
uuid=w_utils.generate_uuid(), uuid=w_utils.generate_uuid(),
name="GOAL_%s" % i, name="GOAL_%s" % i,
display_name='My Goal %s' % i) display_name='My Goal %s' % i)
uuids.append(str(goal['uuid'])) uuids.append(six.text_type(goal['uuid']))
goals = self.dbapi.get_goal_list(self.context) goals = self.dbapi.get_goal_list(self.context)
goal_uuids = [g.uuid for g in goals] goal_uuids = [g.uuid for g in goals]
self.assertEqual(sorted(uuids), sorted(goal_uuids)) self.assertEqual(sorted(uuids), sorted(goal_uuids))

View File

@@ -18,6 +18,7 @@
"""Tests for manipulating ScoringEngine via the DB API""" """Tests for manipulating ScoringEngine via the DB API"""
import freezegun import freezegun
import six
from watcher.common import exception from watcher.common import exception
from watcher.common import utils as w_utils from watcher.common import utils as w_utils
@@ -236,7 +237,7 @@ class DbScoringEngineTestCase(base.DbTestCase):
name="SE_ID_%s" % i, name="SE_ID_%s" % i,
description='My ScoringEngine {0}'.format(i), description='My ScoringEngine {0}'.format(i),
metainfo='a{0}=b{0}'.format(i)) metainfo='a{0}=b{0}'.format(i))
names.append(str(scoring_engine['name'])) names.append(six.text_type(scoring_engine['name']))
scoring_engines = self.dbapi.get_scoring_engine_list(self.context) scoring_engines = self.dbapi.get_scoring_engine_list(self.context)
scoring_engines_names = [se.name for se in scoring_engines] scoring_engines_names = [se.name for se in scoring_engines]
self.assertEqual(sorted(names), sorted(scoring_engines_names)) self.assertEqual(sorted(names), sorted(scoring_engines_names))

View File

@@ -18,6 +18,7 @@
"""Tests for manipulating Strategy via the DB API""" """Tests for manipulating Strategy via the DB API"""
import freezegun import freezegun
import six
from watcher.common import exception from watcher.common import exception
from watcher.common import utils as w_utils from watcher.common import utils as w_utils
@@ -246,7 +247,7 @@ class DbStrategyTestCase(base.DbTestCase):
uuid=w_utils.generate_uuid(), uuid=w_utils.generate_uuid(),
name="STRATEGY_ID_%s" % i, name="STRATEGY_ID_%s" % i,
display_name='My Strategy {0}'.format(i)) display_name='My Strategy {0}'.format(i))
uuids.append(str(strategy['uuid'])) uuids.append(six.text_type(strategy['uuid']))
strategies = self.dbapi.get_strategy_list(self.context) strategies = self.dbapi.get_strategy_list(self.context)
strategy_uuids = [s.uuid for s in strategies] strategy_uuids = [s.uuid for s in strategies]
self.assertEqual(sorted(uuids), sorted(strategy_uuids)) self.assertEqual(sorted(uuids), sorted(strategy_uuids))
@@ -264,7 +265,7 @@ class DbStrategyTestCase(base.DbTestCase):
name="STRATEGY_ID_%s" % i, name="STRATEGY_ID_%s" % i,
display_name='My Strategy {0}'.format(i), display_name='My Strategy {0}'.format(i),
goal_id=goal.id) goal_id=goal.id)
uuids.append(str(strategy['uuid'])) uuids.append(six.text_type(strategy['uuid']))
strategys = self.dbapi.get_strategy_list(self.context, eager=True) strategys = self.dbapi.get_strategy_list(self.context, eager=True)
strategy_map = {a.uuid: a for a in strategys} strategy_map = {a.uuid: a for a in strategys}
self.assertEqual(sorted(uuids), sorted(strategy_map.keys())) self.assertEqual(sorted(uuids), sorted(strategy_map.keys()))

View File

@@ -291,15 +291,6 @@ class TestNovaModelBuilder(base.TestCase):
self.assertEqual(set(['hostone', 'hosttwo']), result) self.assertEqual(set(['hostone', 'hosttwo']), result)
@mock.patch.object(nova_helper, 'NovaHelper')
def test_collect_aggregates_none(self, m_nova):
"""Test collect_aggregates with host_aggregates None"""
result = set()
t_nova_cluster = nova.NovaModelBuilder(mock.Mock())
t_nova_cluster._collect_aggregates(None, result)
self.assertEqual(set(), result)
@mock.patch.object(nova_helper, 'NovaHelper') @mock.patch.object(nova_helper, 'NovaHelper')
def test_collect_zones(self, m_nova): def test_collect_zones(self, m_nova):
"""""" """"""
@@ -319,35 +310,8 @@ class TestNovaModelBuilder(base.TestCase):
self.assertEqual(set(['hostone']), result) self.assertEqual(set(['hostone']), result)
@mock.patch.object(nova_helper, 'NovaHelper') @mock.patch.object(nova_helper, 'NovaHelper')
def test_collect_zones_none(self, m_nova): def test_add_physical_layer(self, m_nova):
"""Test collect_zones with availability_zones None""" """"""
result = set()
t_nova_cluster = nova.NovaModelBuilder(mock.Mock())
t_nova_cluster._collect_zones(None, result)
self.assertEqual(set(), result)
@mock.patch.object(placement_helper, 'PlacementHelper')
@mock.patch.object(nova_helper, 'NovaHelper')
def test_add_physical_layer(self, m_nova, m_placement):
"""Ensure all three steps of the physical layer are fully executed
First the return value for get_aggregate_list and get_service_list are
mocked. These return 3 hosts of which hostone is returned by both the
aggregate and service call. This will help verify the elimination of
duplicates. The scope is setup so that only hostone and hosttwo should
remain.
There will be 2 simulated compute nodes and 2 associated instances.
These will be returned by their matching calls in nova helper. The
calls to get_compute_node_by_name and get_instance_list are asserted
as to verify the correct operation of add_physical_layer.
"""
mock_placement = mock.Mock(name="placement_helper")
mock_placement.get_inventories.return_value = dict()
mock_placement.get_usages_for_resource_provider.return_value = None
m_placement.return_value = mock_placement
m_nova.return_value.get_aggregate_list.return_value = \ m_nova.return_value.get_aggregate_list.return_value = \
[mock.Mock(id=1, name='example'), [mock.Mock(id=1, name='example'),
@@ -357,69 +321,7 @@ class TestNovaModelBuilder(base.TestCase):
[mock.Mock(zone='av_b', host='hostthree'), [mock.Mock(zone='av_b', host='hostthree'),
mock.Mock(zone='av_a', host='hostone')] mock.Mock(zone='av_a', host='hostone')]
compute_node_one = mock.Mock( m_nova.return_value.get_compute_node_by_name.return_value = False
id='796fee99-65dd-4262-aa-fd2a1143faa6',
hypervisor_hostname='hostone',
hypervisor_type='QEMU',
state='TEST_STATE',
status='TEST_STATUS',
memory_mb=333,
memory_mb_used=100,
free_disk_gb=222,
local_gb=111,
local_gb_used=10,
vcpus=4,
vcpus_used=0,
servers=[
{'name': 'fake_instance',
'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'}
],
service={'id': 123, 'host': 'hostone',
'disabled_reason': ''},
)
compute_node_two = mock.Mock(
id='756fef99-65dd-4262-aa-fd2a1143faa6',
hypervisor_hostname='hosttwo',
hypervisor_type='QEMU',
state='TEST_STATE',
status='TEST_STATUS',
memory_mb=333,
memory_mb_used=100,
free_disk_gb=222,
local_gb=111,
local_gb_used=10,
vcpus=4,
vcpus_used=0,
servers=[
{'name': 'fake_instance2',
'uuid': 'ef500f7e-dac8-47f0-960c-169486fce71b'}
],
service={'id': 123, 'host': 'hosttwo',
'disabled_reason': ''},
)
m_nova.return_value.get_compute_node_by_name.side_effect = [
[compute_node_one], [compute_node_two]
]
fake_instance_one = mock.Mock(
id='796fee99-65dd-4262-aa-fd2a1143faa6',
name='fake_instance',
flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1},
metadata={'hi': 'hello'},
tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b',
)
fake_instance_two = mock.Mock(
id='ef500f7e-dac8-47f0-960c-169486fce71b',
name='fake_instance2',
flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1},
metadata={'hi': 'hello'},
tenant_id='756fef99-65dd-4262-aa-fd2a1143faa6',
)
m_nova.return_value.get_instance_list.side_effect = [
[fake_instance_one], [fake_instance_two]
]
m_scope = [{"compute": [ m_scope = [{"compute": [
{"host_aggregates": [{"id": 5}]}, {"host_aggregates": [{"id": 5}]},
@@ -435,13 +337,6 @@ class TestNovaModelBuilder(base.TestCase):
self.assertEqual( self.assertEqual(
m_nova.return_value.get_compute_node_by_name.call_count, 2) m_nova.return_value.get_compute_node_by_name.call_count, 2)
m_nova.return_value.get_instance_list.assert_any_call(
filters={'host': 'hostone'}, limit=1)
m_nova.return_value.get_instance_list.assert_any_call(
filters={'host': 'hosttwo'}, limit=1)
self.assertEqual(
m_nova.return_value.get_instance_list.call_count, 2)
@mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(placement_helper, 'PlacementHelper')
@mock.patch.object(nova_helper, 'NovaHelper') @mock.patch.object(nova_helper, 'NovaHelper')
def test_add_physical_layer_with_baremetal_node(self, m_nova, def test_add_physical_layer_with_baremetal_node(self, m_nova,

View File

@@ -70,6 +70,22 @@ class TestHostMaintenance(TestBaseStrategy):
self.assertEqual(node_capacity, self.assertEqual(node_capacity,
self.strategy.get_node_capacity(node_0)) self.strategy.get_node_capacity(node_0))
def test_get_node_used(self):
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid("Node_0")
node_used = dict(cpu=20, ram=4, disk=40)
self.assertEqual(node_used,
self.strategy.get_node_used(node_0))
def test_get_node_free(self):
model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model
node_0 = model.get_node_by_uuid("Node_0")
node_free = dict(cpu=20, ram=128, disk=210)
self.assertEqual(node_free,
self.strategy.get_node_free(node_0))
def test_host_fits(self): def test_host_fits(self):
model = self.fake_c_cluster.generate_scenario_1() model = self.fake_c_cluster.generate_scenario_1()
self.m_c_model.return_value = model self.m_c_model.return_value = model

View File

@@ -60,6 +60,15 @@ class TestNoisyNeighbor(TestBaseStrategy):
self.strategy.input_parameters.update({'period': 100}) self.strategy.input_parameters.update({'period': 100})
self.strategy.threshold = 100 self.strategy.threshold = 100
def test_calc_used_resource(self):
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
node = model.get_node_by_uuid("fa69c544-906b-4a6a-a9c6-c1f7a8078c73")
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
node)
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
def test_group_hosts(self): def test_group_hosts(self):
self.strategy.cache_threshold = 35 self.strategy.cache_threshold = 35
self.strategy.period = 100 self.strategy.period = 100

View File

@@ -60,6 +60,15 @@ class TestOutletTempControl(TestBaseStrategy):
self.strategy.input_parameters.update({'threshold': 34.3}) self.strategy.input_parameters.update({'threshold': 34.3})
self.strategy.threshold = 34.3 self.strategy.threshold = 34.3
def test_calc_used_resource(self):
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model
node = model.get_node_by_uuid("fa69c544-906b-4a6a-a9c6-c1f7a8078c73")
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
node)
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
def test_group_hosts_by_outlet_temp(self): def test_group_hosts_by_outlet_temp(self):
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
self.m_c_model.return_value = model self.m_c_model.return_value = model

View File

@@ -64,6 +64,15 @@ class TestWorkloadBalance(TestBaseStrategy):
self.strategy._meter = 'instance_cpu_usage' self.strategy._meter = 'instance_cpu_usage'
self.strategy._granularity = 300 self.strategy._granularity = 300
def test_calc_used_resource(self):
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model
node = model.get_node_by_uuid('Node_0')
cores_used, mem_used, disk_used = (
self.strategy.calculate_used_resource(node))
self.assertEqual((cores_used, mem_used, disk_used), (20, 64, 40))
def test_group_hosts_by_cpu_util(self): def test_group_hosts_by_cpu_util(self):
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
self.m_c_model.return_value = model self.m_c_model.return_value = model

Some files were not shown because too many files have changed in this diff Show More