Compare commits
35 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
46d5094add | ||
|
|
783c7c0177 | ||
|
|
6d0717199c | ||
|
|
8b77e78f3d | ||
|
|
22c9c4df87 | ||
|
|
99ff6d3348 | ||
|
|
c67f83cce0 | ||
|
|
397bb3497e | ||
|
|
4c924fc505 | ||
|
|
4c5ecc808d | ||
|
|
64b5a7c3e4 | ||
|
|
40bb92f749 | ||
|
|
92bd06cf94 | ||
|
|
c9e0dfd3f5 | ||
|
|
446fe1307a | ||
|
|
2836f460e3 | ||
|
|
cb9bb7301b | ||
|
|
cb644fcef9 | ||
|
|
0a7c87eebf | ||
|
|
d7f4f42772 | ||
|
|
bdc0eb196a | ||
|
|
59427eb0d9 | ||
|
|
b6801b192a | ||
|
|
0a6c2c16a4 | ||
|
|
9a44941c66 | ||
|
|
a6508a0013 | ||
|
|
c3db66ca09 | ||
|
|
5d0fe553c4 | ||
|
|
8b8239c3d8 | ||
|
|
920bd502ec | ||
|
|
c68d33f341 | ||
|
|
8e8fdbd809 | ||
|
|
681536c8c7 | ||
|
|
083b170083 | ||
|
|
c440cdd69f |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -44,6 +44,8 @@ output/*/index.html
|
||||
# Sphinx
|
||||
doc/build
|
||||
doc/source/api
|
||||
doc/source/samples
|
||||
doc/source/watcher.conf.sample
|
||||
|
||||
# pbr generates these
|
||||
AUTHORS
|
||||
|
||||
@@ -10,12 +10,12 @@ Watcher
|
||||
|
||||
OpenStack Watcher provides a flexible and scalable resource optimization
|
||||
service for multi-tenant OpenStack-based clouds.
|
||||
Watcher provides a complete optimization loop—including everything from a
|
||||
Watcher provides a complete optimization loop-including everything from a
|
||||
metrics receiver, complex event processor and profiler, optimization processor
|
||||
and an action plan applier. This provides a robust framework to realize a wide
|
||||
range of cloud optimization goals, including the reduction of data center
|
||||
operating costs, increased system performance via intelligent virtual machine
|
||||
migration, increased energy efficiency—and more!
|
||||
migration, increased energy efficiency-and more!
|
||||
|
||||
* Free software: Apache license
|
||||
* Wiki: http://wiki.openstack.org/wiki/Watcher
|
||||
|
||||
@@ -361,6 +361,28 @@ State Machine diagrams
|
||||
Audit State Machine
|
||||
-------------------
|
||||
|
||||
An :ref:`Audit <audit_definition>` has a life-cycle and its current state may
|
||||
be one of the following:
|
||||
|
||||
- **PENDING** : a request for an :ref:`Audit <audit_definition>` has been
|
||||
submitted (either manually by the
|
||||
:ref:`Administrator <administrator_definition>` or automatically via some
|
||||
event handling mechanism) and is in the queue for being processed by the
|
||||
:ref:`Watcher Decision Engine <watcher_decision_engine_definition>`
|
||||
- **ONGOING** : the :ref:`Audit <audit_definition>` is currently being
|
||||
processed by the
|
||||
:ref:`Watcher Decision Engine <watcher_decision_engine_definition>`
|
||||
- **SUCCEEDED** : the :ref:`Audit <audit_definition>` has been executed
|
||||
successfully and at least one solution was found
|
||||
- **FAILED** : an error occurred while executing the
|
||||
:ref:`Audit <audit_definition>`
|
||||
- **DELETED** : the :ref:`Audit <audit_definition>` is still stored in the
|
||||
:ref:`Watcher database <watcher_database_definition>` but is not returned
|
||||
any more through the Watcher APIs.
|
||||
- **CANCELLED** : the :ref:`Audit <audit_definition>` was in **PENDING** or
|
||||
**ONGOING** state and was cancelled by the
|
||||
:ref:`Administrator <administrator_definition>`
|
||||
|
||||
The following diagram shows the different possible states of an
|
||||
:ref:`Audit <audit_definition>` and what event makes the state change to a new
|
||||
value:
|
||||
@@ -373,6 +395,31 @@ value:
|
||||
Action Plan State Machine
|
||||
-------------------------
|
||||
|
||||
An :ref:`Action Plan <action_plan_definition>` has a life-cycle and its current
|
||||
state may be one of the following:
|
||||
|
||||
- **RECOMMENDED** : the :ref:`Action Plan <action_plan_definition>` is waiting
|
||||
for a validation from the :ref:`Administrator <administrator_definition>`
|
||||
- **PENDING** : a request for an :ref:`Action Plan <action_plan_definition>`
|
||||
has been submitted (due to an
|
||||
:ref:`Administrator <administrator_definition>` executing an
|
||||
:ref:`Audit <audit_definition>`) and is in the queue for
|
||||
being processed by the :ref:`Watcher Applier <watcher_applier_definition>`
|
||||
- **ONGOING** : the :ref:`Action Plan <action_plan_definition>` is currently
|
||||
being processed by the :ref:`Watcher Applier <watcher_applier_definition>`
|
||||
- **SUCCEEDED** : the :ref:`Action Plan <action_plan_definition>` has been
|
||||
executed successfully (i.e. all :ref:`Actions <action_definition>` that it
|
||||
contains have been executed successfully)
|
||||
- **FAILED** : an error occurred while executing the
|
||||
:ref:`Action Plan <action_plan_definition>`
|
||||
- **DELETED** : the :ref:`Action Plan <action_plan_definition>` is still
|
||||
stored in the :ref:`Watcher database <watcher_database_definition>` but is
|
||||
not returned any more through the Watcher APIs.
|
||||
- **CANCELLED** : the :ref:`Action Plan <action_plan_definition>` was in
|
||||
**PENDING** or **ONGOING** state and was cancelled by the
|
||||
:ref:`Administrator <administrator_definition>`
|
||||
|
||||
|
||||
The following diagram shows the different possible states of an
|
||||
:ref:`Action Plan <action_plan_definition>` and what event makes the state
|
||||
change to a new value:
|
||||
|
||||
@@ -18,6 +18,7 @@ from watcher import version as watcher_version
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = [
|
||||
'oslo_config.sphinxconfiggen',
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.viewcode',
|
||||
'sphinxcontrib.httpdomain',
|
||||
@@ -28,7 +29,8 @@ extensions = [
|
||||
]
|
||||
|
||||
wsme_protocols = ['restjson']
|
||||
|
||||
config_generator_config_file = '../../etc/watcher/watcher-config-generator.conf'
|
||||
sample_config_basename = 'watcher'
|
||||
|
||||
# autodoc generation is a bit aggressive and a nuisance when doing heavy
|
||||
# text edit cycles.
|
||||
|
||||
1
doc/source/config-generator.conf
Normal file
1
doc/source/config-generator.conf
Normal file
@@ -0,0 +1 @@
|
||||
../../etc/watcher/watcher-config-generator.conf
|
||||
14
doc/source/deploy/conf-files.rst
Normal file
14
doc/source/deploy/conf-files.rst
Normal file
@@ -0,0 +1,14 @@
|
||||
.. _watcher_sample_configuration_files:
|
||||
|
||||
==================================
|
||||
Watcher sample configuration files
|
||||
==================================
|
||||
|
||||
watcher.conf
|
||||
~~~~~~~~~~~~
|
||||
|
||||
The ``watcher.conf`` file contains most of the options to configure the
|
||||
Watcher services.
|
||||
|
||||
.. literalinclude:: ../watcher.conf.sample
|
||||
:language: ini
|
||||
@@ -163,6 +163,16 @@ Configure the Watcher service
|
||||
The Watcher service is configured via its configuration file. This file
|
||||
is typically located at ``/etc/watcher/watcher.conf``.
|
||||
|
||||
You can easily generate and update a sample configuration file
|
||||
named :ref:`watcher.conf.sample <watcher_sample_configuration_files>` by using
|
||||
these following commands::
|
||||
|
||||
$ git clone git://git.openstack.org/openstack/watcher
|
||||
$ cd watcher/
|
||||
$ tox -econfig
|
||||
$ vi etc/watcher/watcher.conf.sample
|
||||
|
||||
|
||||
The configuration file is organized into the following sections:
|
||||
|
||||
* ``[DEFAULT]`` - General configuration
|
||||
|
||||
@@ -50,6 +50,8 @@ Here is an example showing how you can write a plugin called ``DummyAction``:
|
||||
|
||||
# Filepath = <PROJECT_DIR>/thirdparty/dummy.py
|
||||
# Import path = thirdparty.dummy
|
||||
import voluptuous
|
||||
|
||||
from watcher.applier.actions import base
|
||||
|
||||
|
||||
@@ -57,7 +59,7 @@ Here is an example showing how you can write a plugin called ``DummyAction``:
|
||||
|
||||
@property
|
||||
def schema(self):
|
||||
return Schema({})
|
||||
return voluptuous.Schema({})
|
||||
|
||||
def execute(self):
|
||||
# Does nothing
|
||||
@@ -83,6 +85,16 @@ To get a better understanding on how to implement a more advanced action,
|
||||
have a look at the :py:class:`~watcher.applier.actions.migration.Migrate`
|
||||
class.
|
||||
|
||||
Input validation
|
||||
----------------
|
||||
|
||||
As you can see in the previous example, we are using `Voluptuous`_ to validate
|
||||
the input parameters of an action. So if you want to learn more about how to
|
||||
work with `Voluptuous`_, you can have a look at their `documentation`_ here:
|
||||
|
||||
.. _Voluptuous: https://github.com/alecthomas/voluptuous
|
||||
.. _documentation: https://github.com/alecthomas/voluptuous/blob/master/README.md
|
||||
|
||||
Abstract Plugin Class
|
||||
=====================
|
||||
|
||||
|
||||
@@ -213,27 +213,27 @@ Here are some examples of
|
||||
|
||||
It can be any of the `the official list of available resource types defined in OpenStack for HEAT <http://docs.openstack.org/developer/heat/template_guide/openstack.html>`_.
|
||||
|
||||
.. _efficiency_definition:
|
||||
.. _efficacy_definition:
|
||||
|
||||
Optimization Efficiency
|
||||
=======================
|
||||
Optimization Efficacy
|
||||
=====================
|
||||
|
||||
The :ref:`Optimization Efficiency <efficiency_definition>` is the objective
|
||||
The :ref:`Optimization Efficacy <efficacy_definition>` is the objective
|
||||
measure of how much of the :ref:`Goal <goal_definition>` has been achieved in
|
||||
respect with constraints and :ref:`SLAs <sla_definition>` defined by the
|
||||
:ref:`Customer <customer_definition>`.
|
||||
|
||||
The way efficiency is evaluated will depend on the
|
||||
The way efficacy is evaluated will depend on the
|
||||
:ref:`Goal <goal_definition>` to achieve.
|
||||
|
||||
Of course, the efficiency will be relevant only as long as the
|
||||
Of course, the efficacy will be relevant only as long as the
|
||||
:ref:`Action Plan <action_plan_definition>` is relevant
|
||||
(i.e., the current state of the :ref:`Cluster <cluster_definition>`
|
||||
has not changed in a way that a new :ref:`Audit <audit_definition>` would need
|
||||
to be launched).
|
||||
|
||||
For example, if the :ref:`Goal <goal_definition>` is to lower the energy
|
||||
consumption, the :ref:`Efficiency <efficiency_definition>` will be computed
|
||||
consumption, the :ref:`Efficacy <efficacy_definition>` will be computed
|
||||
using several indicators (KPIs):
|
||||
|
||||
- the percentage of energy gain (which must be the highest possible)
|
||||
@@ -244,7 +244,7 @@ using several indicators (KPIs):
|
||||
All those indicators (KPIs) are computed within a given timeframe, which is the
|
||||
time taken to execute the whole :ref:`Action Plan <action_plan_definition>`.
|
||||
|
||||
The efficiency also enables the :ref:`Administrator <administrator_definition>`
|
||||
The efficacy also enables the :ref:`Administrator <administrator_definition>`
|
||||
to objectively compare different :ref:`Strategies <strategy_definition>` for
|
||||
the same goal and same workload of the :ref:`Cluster <cluster_definition>`.
|
||||
|
||||
|
||||
@@ -55,9 +55,9 @@ Getting Started
|
||||
dev/environment
|
||||
dev/devstack
|
||||
deploy/configuration
|
||||
deploy/conf-files
|
||||
dev/testing
|
||||
|
||||
|
||||
API References
|
||||
--------------
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ run the following::
|
||||
|
||||
Show the program's version number and exit.
|
||||
|
||||
.. option:: upgrade, downgrade, stamp, revision, version, create_schema
|
||||
.. option:: upgrade, downgrade, stamp, revision, version, create_schema, purge
|
||||
|
||||
The :ref:`command <db-manage_cmds>` to run.
|
||||
|
||||
@@ -219,3 +219,42 @@ version
|
||||
Show help for version and exit.
|
||||
|
||||
This command will output the current database version.
|
||||
|
||||
purge
|
||||
-----
|
||||
|
||||
.. program:: purge
|
||||
|
||||
.. option:: -h, --help
|
||||
|
||||
Show help for purge and exit.
|
||||
|
||||
.. option:: -d, --age-in-days
|
||||
|
||||
The number of days (starting from today) before which we consider soft
|
||||
deleted objects as expired and should hence be erased. By default, all
|
||||
objects soft deleted are considered expired. This can be useful as removing
|
||||
a significant number of objects may cause performance issues.
|
||||
|
||||
.. option:: -n, --max-number
|
||||
|
||||
The maximum number of database objects we expect to be deleted. If exceeded,
|
||||
this will prevent any deletion.
|
||||
|
||||
.. option:: -t, --audit-template
|
||||
|
||||
Either the UUID or name of the soft deleted audit template to purge. This
|
||||
will also include any related objects with it.
|
||||
|
||||
.. option:: -e, --exclude-orphans
|
||||
|
||||
This is a flag to indicate when we want to exclude orphan objects from
|
||||
deletion.
|
||||
|
||||
.. option:: --dry-run
|
||||
|
||||
This is a flag to indicate when we want to perform a dry run. This will show
|
||||
the objects that would be deleted instead of actually deleting them.
|
||||
|
||||
This command will purge the current database by removing both its soft deleted
|
||||
and orphan objects.
|
||||
|
||||
4
etc/watcher/README-watcher.conf.txt
Normal file
4
etc/watcher/README-watcher.conf.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
To generate the sample watcher.conf file, run the following
|
||||
command from the top level of the watcher directory:
|
||||
|
||||
tox -econfig
|
||||
9
etc/watcher/watcher-config-generator.conf
Normal file
9
etc/watcher/watcher-config-generator.conf
Normal file
@@ -0,0 +1,9 @@
|
||||
[DEFAULT]
|
||||
output_file = etc/watcher/watcher.conf.sample
|
||||
wrap_width = 79
|
||||
|
||||
namespace = watcher
|
||||
namespace = keystonemiddleware.auth_token
|
||||
namespace = oslo.log
|
||||
namespace = oslo.db
|
||||
namespace = oslo.messaging
|
||||
@@ -1,962 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
#
|
||||
# From oslo.log
|
||||
#
|
||||
|
||||
# Defines the format string for %(user_identity)s that is used in
|
||||
# logging_context_format_string. (string value)
|
||||
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
|
||||
|
||||
# List of package logging levels in logger=LEVEL pairs. This option is
|
||||
# ignored if log_config_append is set. (list value)
|
||||
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN
|
||||
|
||||
# Enables or disables publication of error events. (boolean value)
|
||||
#publish_errors = false
|
||||
|
||||
# If set to true, the logging level will be set to DEBUG instead of
|
||||
# the default INFO level. (boolean value)
|
||||
#debug = false
|
||||
|
||||
# The format for an instance that is passed with the log message.
|
||||
# (string value)
|
||||
#instance_format = "[instance: %(uuid)s] "
|
||||
|
||||
# The format for an instance UUID that is passed with the log message.
|
||||
# (string value)
|
||||
#instance_uuid_format = "[instance: %(uuid)s] "
|
||||
|
||||
# If set to false, the logging level will be set to WARNING instead of
|
||||
# the default INFO level. (boolean value)
|
||||
# This option is deprecated for removal.
|
||||
# Its value may be silently ignored in the future.
|
||||
#verbose = true
|
||||
|
||||
# Enables or disables fatal status of deprecations. (boolean value)
|
||||
#fatal_deprecations = false
|
||||
|
||||
# The name of a logging configuration file. This file is appended to
|
||||
# any existing logging configuration files. For details about logging
|
||||
# configuration files, see the Python logging module documentation.
|
||||
# Note that when logging configuration files are used all logging
|
||||
# configuration is defined in the configuration file and other logging
|
||||
# configuration options are ignored (for example, log_format). (string
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/log_config
|
||||
#log_config_append = <None>
|
||||
|
||||
# DEPRECATED. A logging.Formatter log message format string which may
|
||||
# use any of the available logging.LogRecord attributes. This option
|
||||
# is deprecated. Please use logging_context_format_string and
|
||||
# logging_default_format_string instead. This option is ignored if
|
||||
# log_config_append is set. (string value)
|
||||
#log_format = <None>
|
||||
|
||||
# Defines the format string for %%(asctime)s in log records. Default:
|
||||
# %(default)s . This option is ignored if log_config_append is set.
|
||||
# (string value)
|
||||
#log_date_format = %Y-%m-%d %H:%M:%S
|
||||
|
||||
# (Optional) Name of log file to send logging output to. If no default
|
||||
# is set, logging will go to stderr as defined by use_stderr. This
|
||||
# option is ignored if log_config_append is set. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/logfile
|
||||
#log_file = <None>
|
||||
|
||||
# (Optional) The base directory used for relative log_file paths.
|
||||
# This option is ignored if log_config_append is set. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/logdir
|
||||
#log_dir = <None>
|
||||
|
||||
# Uses logging handler designed to watch file system. When log file is
|
||||
# moved or removed this handler will open a new log file with
|
||||
# specified path instantaneously. It makes sense only if log_file
|
||||
# option is specified and Linux platform is used. This option is
|
||||
# ignored if log_config_append is set. (boolean value)
|
||||
#watch_log_file = false
|
||||
|
||||
# Use syslog for logging. Existing syslog format is DEPRECATED and
|
||||
# will be changed later to honor RFC5424. This option is ignored if
|
||||
# log_config_append is set. (boolean value)
|
||||
#use_syslog = false
|
||||
|
||||
# Enables or disables syslog rfc5424 format for logging. If enabled,
|
||||
# prefixes the MSG part of the syslog message with APP-NAME (RFC5424).
|
||||
# The format without the APP-NAME is deprecated in Kilo, and will be
|
||||
# removed in Mitaka, along with this option. This option is ignored if
|
||||
# log_config_append is set. (boolean value)
|
||||
# This option is deprecated for removal.
|
||||
# Its value may be silently ignored in the future.
|
||||
#use_syslog_rfc_format = true
|
||||
|
||||
# Syslog facility to receive log lines. This option is ignored if
|
||||
# log_config_append is set. (string value)
|
||||
#syslog_log_facility = LOG_USER
|
||||
|
||||
# Log output to standard error. This option is ignored if
|
||||
# log_config_append is set. (boolean value)
|
||||
#use_stderr = true
|
||||
|
||||
# Format string to use for log messages with context. (string value)
|
||||
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
|
||||
|
||||
# Format string to use for log messages when context is undefined.
|
||||
# (string value)
|
||||
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
|
||||
|
||||
# Additional data to append to log message when logging level for the
|
||||
# message is DEBUG. (string value)
|
||||
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
|
||||
|
||||
# Prefix each line of exception output with this format. (string
|
||||
# value)
|
||||
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
|
||||
|
||||
#
|
||||
# From oslo.messaging
|
||||
#
|
||||
|
||||
# Size of executor thread pool. (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
|
||||
#executor_thread_pool_size = 64
|
||||
|
||||
# Minimal port number for random ports range. (port value)
|
||||
# Minimum value: 0
|
||||
# Maximum value: 65535
|
||||
#rpc_zmq_min_port = 49152
|
||||
|
||||
# Seconds to wait for a response from a call. (integer value)
|
||||
#rpc_response_timeout = 60
|
||||
|
||||
# Size of RPC connection pool. (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
|
||||
#rpc_conn_pool_size = 30
|
||||
|
||||
# A URL representing the messaging driver to use and its full
|
||||
# configuration. If not set, we fall back to the rpc_backend option
|
||||
# and driver specific configuration. (string value)
|
||||
#transport_url = <None>
|
||||
|
||||
# Number of retries to find free port number before fail with
|
||||
# ZMQBindError. (integer value)
|
||||
#rpc_zmq_bind_port_retries = 100
|
||||
|
||||
# The messaging driver to use, defaults to rabbit. Other drivers
|
||||
# include amqp and zmq. (string value)
|
||||
#rpc_backend = rabbit
|
||||
|
||||
# Host to locate redis. (string value)
|
||||
#host = 127.0.0.1
|
||||
|
||||
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
|
||||
# interface, or IP. The "host" option should point or resolve to this
|
||||
# address. (string value)
|
||||
#rpc_zmq_bind_address = *
|
||||
|
||||
# MatchMaker driver. (string value)
|
||||
#rpc_zmq_matchmaker = redis
|
||||
|
||||
# The default exchange under which topics are scoped. May be
|
||||
# overridden by an exchange name specified in the transport_url
|
||||
# option. (string value)
|
||||
#control_exchange = openstack
|
||||
|
||||
# Use this port to connect to redis host. (port value)
|
||||
# Minimum value: 0
|
||||
# Maximum value: 65535
|
||||
#port = 6379
|
||||
|
||||
# Type of concurrency used. Either "native" or "eventlet" (string
|
||||
# value)
|
||||
#rpc_zmq_concurrency = eventlet
|
||||
|
||||
# Password for Redis server (optional). (string value)
|
||||
#password =
|
||||
|
||||
# Number of ZeroMQ contexts, defaults to 1. (integer value)
|
||||
#rpc_zmq_contexts = 1
|
||||
|
||||
# Maximum number of ingress messages to locally buffer per topic.
|
||||
# Default is unlimited. (integer value)
|
||||
#rpc_zmq_topic_backlog = <None>
|
||||
|
||||
# List of Redis Sentinel hosts (fault tolerance mode) e.g.
|
||||
# [host:port, host1:port ... ] (list value)
|
||||
#sentinel_hosts =
|
||||
|
||||
# Directory for holding IPC sockets. (string value)
|
||||
#rpc_zmq_ipc_dir = /var/run/openstack
|
||||
|
||||
# Name of this node. Must be a valid hostname, FQDN, or IP address.
|
||||
# Must match "host" option, if running Nova. (string value)
|
||||
#rpc_zmq_host = localhost
|
||||
|
||||
# Redis replica set name. (string value)
|
||||
#sentinel_group_name = oslo-messaging-zeromq
|
||||
|
||||
# Seconds to wait before a cast expires (TTL). Only supported by
|
||||
# impl_zmq. (integer value)
|
||||
#rpc_cast_timeout = 30
|
||||
|
||||
# Time in ms to wait between connection attempts. (integer value)
|
||||
#wait_timeout = 500
|
||||
|
||||
# The default number of seconds that poll should wait. Poll raises
|
||||
# timeout exception when timeout expired. (integer value)
|
||||
#rpc_poll_timeout = 1
|
||||
|
||||
# Time in ms to wait before the transaction is killed. (integer value)
|
||||
#check_timeout = 20000
|
||||
|
||||
# Expiration timeout in seconds of a name service record about
|
||||
# existing target ( < 0 means no timeout). (integer value)
|
||||
#zmq_target_expire = 120
|
||||
|
||||
# Timeout in ms on blocking socket operations (integer value)
|
||||
#socket_timeout = 1000
|
||||
|
||||
# Maximal port number for random ports range. (integer value)
|
||||
# Minimum value: 1
|
||||
# Maximum value: 65536
|
||||
#rpc_zmq_max_port = 65536
|
||||
|
||||
# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy.
|
||||
# (boolean value)
|
||||
#use_pub_sub = true
|
||||
|
||||
|
||||
[api]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# The port for the watcher API server (integer value)
|
||||
#port = 9322
|
||||
|
||||
# The maximum number of items returned in a single response from a
|
||||
# collection resource. (integer value)
|
||||
#max_limit = 1000
|
||||
|
||||
# The listen IP for the watcher API server (string value)
|
||||
#host = 0.0.0.0
|
||||
|
||||
|
||||
[ceilometer_client]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# Version of Ceilometer API to use in ceilometerclient. (string value)
|
||||
#api_version = 2
|
||||
|
||||
|
||||
[cinder_client]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# Version of Cinder API to use in cinderclient. (string value)
|
||||
#api_version = 2
|
||||
|
||||
|
||||
[database]
|
||||
|
||||
#
|
||||
# From oslo.db
|
||||
#
|
||||
|
||||
# If set, use this value for max_overflow with SQLAlchemy. (integer
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_max_overflow
|
||||
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
|
||||
#max_overflow = <None>
|
||||
|
||||
# Add Python stack traces to SQL as comment strings. (boolean value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_connection_trace
|
||||
#connection_trace = false
|
||||
|
||||
# The SQLAlchemy connection string to use to connect to the database.
|
||||
# (string value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_connection
|
||||
# Deprecated group/name - [DATABASE]/sql_connection
|
||||
# Deprecated group/name - [sql]/connection
|
||||
#connection = <None>
|
||||
|
||||
# If db_inc_retry_interval is set, the maximum seconds between retries
|
||||
# of a database operation. (integer value)
|
||||
#db_max_retry_interval = 10
|
||||
|
||||
# Interval between retries of opening a SQL connection. (integer
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_retry_interval
|
||||
# Deprecated group/name - [DATABASE]/reconnect_interval
|
||||
#retry_interval = 10
|
||||
|
||||
# The file name to use with SQLite. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/sqlite_db
|
||||
#sqlite_db = oslo.sqlite
|
||||
|
||||
# Timeout before idle SQL connections are reaped. (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
|
||||
# Deprecated group/name - [DATABASE]/sql_idle_timeout
|
||||
# Deprecated group/name - [sql]/idle_timeout
|
||||
#idle_timeout = 3600
|
||||
|
||||
# If set, use this value for pool_timeout with SQLAlchemy. (integer
|
||||
# value)
|
||||
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
|
||||
#pool_timeout = <None>
|
||||
|
||||
# Maximum number of database connection retries during startup. Set to
|
||||
# -1 to specify an infinite retry count. (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_max_retries
|
||||
# Deprecated group/name - [DATABASE]/sql_max_retries
|
||||
#max_retries = 10
|
||||
|
||||
# Maximum retries in case of connection error or deadlock error before
|
||||
# error is raised. Set to -1 to specify an infinite retry count.
|
||||
# (integer value)
|
||||
#db_max_retries = 20
|
||||
|
||||
# Enable the experimental use of database reconnect on connection
|
||||
# lost. (boolean value)
|
||||
#use_db_reconnect = false
|
||||
|
||||
# The SQLAlchemy connection string to use to connect to the slave
|
||||
# database. (string value)
|
||||
#slave_connection = <None>
|
||||
|
||||
# Minimum number of SQL connections to keep open in a pool. (integer
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
|
||||
# Deprecated group/name - [DATABASE]/sql_min_pool_size
|
||||
#min_pool_size = 1
|
||||
|
||||
# Maximum number of SQL connections to keep open in a pool. (integer
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
|
||||
# Deprecated group/name - [DATABASE]/sql_max_pool_size
|
||||
#max_pool_size = <None>
|
||||
|
||||
# If True, SQLite uses synchronous mode. (boolean value)
|
||||
# Deprecated group/name - [DEFAULT]/sqlite_synchronous
|
||||
#sqlite_synchronous = true
|
||||
|
||||
# The SQL mode to be used for MySQL sessions. This option, including
|
||||
# the default, overrides any server-set SQL mode. To use whatever SQL
|
||||
# mode is set by the server configuration, set this to no value.
|
||||
# Example: mysql_sql_mode= (string value)
|
||||
#mysql_sql_mode = TRADITIONAL
|
||||
|
||||
# Seconds between retries of a database transaction. (integer value)
|
||||
#db_retry_interval = 1
|
||||
|
||||
# Verbosity of SQL debugging information: 0=None, 100=Everything.
|
||||
# (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/sql_connection_debug
|
||||
#connection_debug = 0
|
||||
|
||||
# The back end to use for the database. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/db_backend
|
||||
#backend = sqlalchemy
|
||||
|
||||
# If True, increases the interval between retries of a database
|
||||
# operation up to db_max_retry_interval. (boolean value)
|
||||
#db_inc_retry_interval = true
|
||||
|
||||
|
||||
[glance_client]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# Version of Glance API to use in glanceclient. (string value)
|
||||
#api_version = 2
|
||||
|
||||
|
||||
[keystone_authtoken]
|
||||
|
||||
#
|
||||
# From keystonemiddleware.auth_token
|
||||
#
|
||||
|
||||
# (Optional, mandatory if memcache_security_strategy is defined) This
|
||||
# string is used for key derivation. (string value)
|
||||
#memcache_secret_key = <None>
|
||||
|
||||
# In order to prevent excessive effort spent validating tokens, the
|
||||
# middleware caches previously-seen tokens for a configurable duration
|
||||
# (in seconds). Set to -1 to disable caching completely. (integer
|
||||
# value)
|
||||
#token_cache_time = 300
|
||||
|
||||
# Determines the frequency at which the list of revoked tokens is
|
||||
# retrieved from the Identity service (in seconds). A high number of
|
||||
# revocation events combined with a low cache duration may
|
||||
# significantly reduce performance. (integer value)
|
||||
#revocation_cache_time = 10
|
||||
|
||||
# (Optional) If defined, indicate whether token data should be
|
||||
# authenticated or authenticated and encrypted. If MAC, token data is
|
||||
# authenticated (with HMAC) in the cache. If ENCRYPT, token data is
|
||||
# encrypted and authenticated in the cache. If the value is not one of
|
||||
# these options or empty, auth_token will raise an exception on
|
||||
# initialization. (string value)
|
||||
# Allowed values: None, MAC, ENCRYPT
|
||||
#memcache_security_strategy = None
|
||||
|
||||
# (Optional) Number of seconds memcached server is considered dead
|
||||
# before it is tried again. (integer value)
|
||||
#memcache_pool_dead_retry = 300
|
||||
|
||||
# (Optional) Maximum total number of open connections to every
|
||||
# memcached server. (integer value)
|
||||
#memcache_pool_maxsize = 10
|
||||
|
||||
# Complete public Identity API endpoint. (string value)
|
||||
#auth_uri = <None>
|
||||
|
||||
# (Optional) Socket timeout in seconds for communicating with a
|
||||
# memcached server. (integer value)
|
||||
#memcache_pool_socket_timeout = 3
|
||||
|
||||
# (Optional) Number of seconds a connection to memcached is held
|
||||
# unused in the pool before it is closed. (integer value)
|
||||
#memcache_pool_unused_timeout = 60
|
||||
|
||||
# API version of the admin Identity API endpoint. (string value)
|
||||
#auth_version = <None>
|
||||
|
||||
# (Optional) Number of seconds that an operation will wait to get a
|
||||
# memcached client connection from the pool. (integer value)
|
||||
#memcache_pool_conn_get_timeout = 10
|
||||
|
||||
# Do not handle authorization requests within the middleware, but
|
||||
# delegate the authorization decision to downstream WSGI components.
|
||||
# (boolean value)
|
||||
#delay_auth_decision = false
|
||||
|
||||
# (Optional) Use the advanced (eventlet safe) memcached client pool.
|
||||
# The advanced pool will only work under python 2.x. (boolean value)
|
||||
#memcache_use_advanced_pool = false
|
||||
|
||||
# Request timeout value for communicating with Identity API server.
|
||||
# (integer value)
|
||||
#http_connect_timeout = <None>
|
||||
|
||||
# (Optional) Indicate whether to set the X-Service-Catalog header. If
|
||||
# False, middleware will not ask for service catalog on token
|
||||
# validation and will not set the X-Service-Catalog header. (boolean
|
||||
# value)
|
||||
#include_service_catalog = true
|
||||
|
||||
# How many times are we trying to reconnect when communicating with
|
||||
# Identity API Server. (integer value)
|
||||
#http_request_max_retries = 3
|
||||
|
||||
# Used to control the use and type of token binding. Can be set to:
|
||||
# "disabled" to not check token binding. "permissive" (default) to
|
||||
# validate binding information if the bind type is of a form known to
|
||||
# the server and ignore it if not. "strict" like "permissive" but if
|
||||
# the bind type is unknown the token will be rejected. "required" any
|
||||
# form of token binding is needed to be allowed. Finally the name of a
|
||||
# binding method that must be present in tokens. (string value)
|
||||
#enforce_token_bind = permissive
|
||||
|
||||
# Env key for the swift cache. (string value)
|
||||
#cache = <None>
|
||||
|
||||
# If true, the revocation list will be checked for cached tokens. This
|
||||
# requires that PKI tokens are configured on the identity server.
|
||||
# (boolean value)
|
||||
#check_revocations_for_cached = false
|
||||
|
||||
# Required if identity server requires client certificate (string
|
||||
# value)
|
||||
#certfile = <None>
|
||||
|
||||
# Hash algorithms to use for hashing PKI tokens. This may be a single
|
||||
# algorithm or multiple. The algorithms are those supported by Python
|
||||
# standard hashlib.new(). The hashes will be tried in the order given,
|
||||
# so put the preferred one first for performance. The result of the
|
||||
# first hash will be stored in the cache. This will typically be set
|
||||
# to multiple values only while migrating from a less secure algorithm
|
||||
# to a more secure one. Once all the old tokens are expired this
|
||||
# option should be set to a single value for better performance. (list
|
||||
# value)
|
||||
#hash_algorithms = md5
|
||||
|
||||
# Required if identity server requires client certificate (string
|
||||
# value)
|
||||
#keyfile = <None>
|
||||
|
||||
# A PEM encoded Certificate Authority to use when verifying HTTPs
|
||||
# connections. Defaults to system CAs. (string value)
|
||||
#cafile = <None>
|
||||
|
||||
# Authentication type to load (unknown value)
|
||||
# Deprecated group/name - [DEFAULT]/auth_plugin
|
||||
#auth_type = <None>
|
||||
|
||||
# Config Section from which to load plugin specific options (unknown
|
||||
# value)
|
||||
#auth_section = <None>
|
||||
|
||||
# Verify HTTPS connections. (boolean value)
|
||||
#insecure = false
|
||||
|
||||
# The region in which the identity server can be found. (string value)
|
||||
#region_name = <None>
|
||||
|
||||
# Directory used to cache files related to PKI tokens. (string value)
|
||||
#signing_dir = <None>
|
||||
|
||||
# Optionally specify a list of memcached server(s) to use for caching.
|
||||
# If left undefined, tokens will instead be cached in-process. (list
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/memcache_servers
|
||||
#memcached_servers = <None>
|
||||
|
||||
|
||||
[matchmaker_redis]
|
||||
|
||||
#
|
||||
# From oslo.messaging
|
||||
#
|
||||
|
||||
# Time in ms to wait before the transaction is killed. (integer value)
|
||||
#check_timeout = 20000
|
||||
|
||||
# Password for Redis server (optional). (string value)
|
||||
#password =
|
||||
|
||||
# Timeout in ms on blocking socket operations (integer value)
|
||||
#socket_timeout = 1000
|
||||
|
||||
# List of Redis Sentinel hosts (fault tolerance mode) e.g.
|
||||
# [host:port, host1:port ... ] (list value)
|
||||
#sentinel_hosts =
|
||||
|
||||
# Redis replica set name. (string value)
|
||||
#sentinel_group_name = oslo-messaging-zeromq
|
||||
|
||||
# Host to locate redis. (string value)
|
||||
#host = 127.0.0.1
|
||||
|
||||
# Time in ms to wait between connection attempts. (integer value)
|
||||
#wait_timeout = 500
|
||||
|
||||
# Use this port to connect to redis host. (port value)
|
||||
# Minimum value: 0
|
||||
# Maximum value: 65535
|
||||
#port = 6379
|
||||
|
||||
|
||||
[neutron_client]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# Version of Neutron API to use in neutronclient. (string value)
|
||||
#api_version = 2
|
||||
|
||||
|
||||
[nova_client]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# Version of Nova API to use in novaclient. (string value)
|
||||
#api_version = 2
|
||||
|
||||
|
||||
[oslo_messaging_amqp]
|
||||
|
||||
#
|
||||
# From oslo.messaging
|
||||
#
|
||||
|
||||
# CA certificate PEM file to verify server certificate (string value)
|
||||
# Deprecated group/name - [amqp1]/ssl_ca_file
|
||||
#ssl_ca_file =
|
||||
|
||||
# Private key PEM file used to sign cert_file certificate (string
|
||||
# value)
|
||||
# Deprecated group/name - [amqp1]/ssl_key_file
|
||||
#ssl_key_file =
|
||||
|
||||
# User name for message broker authentication (string value)
|
||||
# Deprecated group/name - [amqp1]/username
|
||||
#username =
|
||||
|
||||
# Name for the AMQP container (string value)
|
||||
# Deprecated group/name - [amqp1]/container_name
|
||||
#container_name = <None>
|
||||
|
||||
# Space separated list of acceptable SASL mechanisms (string value)
|
||||
# Deprecated group/name - [amqp1]/sasl_mechanisms
|
||||
#sasl_mechanisms =
|
||||
|
||||
# address prefix used when sending to a specific server (string value)
|
||||
# Deprecated group/name - [amqp1]/server_request_prefix
|
||||
#server_request_prefix = exclusive
|
||||
|
||||
# Password for decrypting ssl_key_file (if encrypted) (string value)
|
||||
# Deprecated group/name - [amqp1]/ssl_key_password
|
||||
#ssl_key_password = <None>
|
||||
|
||||
# Timeout for inactive connections (in seconds) (integer value)
|
||||
# Deprecated group/name - [amqp1]/idle_timeout
|
||||
#idle_timeout = 0
|
||||
|
||||
# Identifying certificate PEM file to present to clients (string
|
||||
# value)
|
||||
# Deprecated group/name - [amqp1]/ssl_cert_file
|
||||
#ssl_cert_file =
|
||||
|
||||
# address prefix used when broadcasting to all servers (string value)
|
||||
# Deprecated group/name - [amqp1]/broadcast_prefix
|
||||
#broadcast_prefix = broadcast
|
||||
|
||||
# Debug: dump AMQP frames to stdout (boolean value)
|
||||
# Deprecated group/name - [amqp1]/trace
|
||||
#trace = false
|
||||
|
||||
# Password for message broker authentication (string value)
|
||||
# Deprecated group/name - [amqp1]/password
|
||||
#password =
|
||||
|
||||
# Accept clients using either SSL or plain TCP (boolean value)
|
||||
# Deprecated group/name - [amqp1]/allow_insecure_clients
|
||||
#allow_insecure_clients = false
|
||||
|
||||
# Name of configuration file (without .conf suffix) (string value)
|
||||
# Deprecated group/name - [amqp1]/sasl_config_name
|
||||
#sasl_config_name =
|
||||
|
||||
# Path to directory that contains the SASL configuration (string
|
||||
# value)
|
||||
# Deprecated group/name - [amqp1]/sasl_config_dir
|
||||
#sasl_config_dir =
|
||||
|
||||
# address prefix when sending to any server in group (string value)
|
||||
# Deprecated group/name - [amqp1]/group_request_prefix
|
||||
#group_request_prefix = unicast
|
||||
|
||||
|
||||
[oslo_messaging_notifications]
|
||||
|
||||
#
|
||||
# From oslo.messaging
|
||||
#
|
||||
|
||||
# The Drivers(s) to handle sending notifications. Possible values are
|
||||
# messaging, messagingv2, routing, log, test, noop (multi valued)
|
||||
# Deprecated group/name - [DEFAULT]/notification_driver
|
||||
#driver =
|
||||
|
||||
# A URL representing the messaging driver to use for notifications. If
|
||||
# not set, we fall back to the same configuration used for RPC.
|
||||
# (string value)
|
||||
# Deprecated group/name - [DEFAULT]/notification_transport_url
|
||||
#transport_url = <None>
|
||||
|
||||
# AMQP topic used for OpenStack notifications. (list value)
|
||||
# Deprecated group/name - [rpc_notifier2]/topics
|
||||
# Deprecated group/name - [DEFAULT]/notification_topics
|
||||
#topics = notifications
|
||||
|
||||
|
||||
[oslo_messaging_rabbit]
|
||||
|
||||
#
|
||||
# From oslo.messaging
|
||||
#
|
||||
|
||||
# How often times during the heartbeat_timeout_threshold we check the
|
||||
# heartbeat. (integer value)
|
||||
#heartbeat_rate = 2
|
||||
|
||||
# Connect over SSL for RabbitMQ. (boolean value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
|
||||
#rabbit_use_ssl = false
|
||||
|
||||
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
|
||||
# (boolean value)
|
||||
# Deprecated group/name - [DEFAULT]/fake_rabbit
|
||||
#fake_rabbit = false
|
||||
|
||||
# The RabbitMQ userid. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_userid
|
||||
#rabbit_userid = guest
|
||||
|
||||
# The RabbitMQ broker address where a single node is used. (string
|
||||
# value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_host
|
||||
#rabbit_host = localhost
|
||||
|
||||
# The RabbitMQ password. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_password
|
||||
#rabbit_password = guest
|
||||
|
||||
# Use durable queues in AMQP. (boolean value)
|
||||
# Deprecated group/name - [DEFAULT]/amqp_durable_queues
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
|
||||
#amqp_durable_queues = false
|
||||
|
||||
# The RabbitMQ login method. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_login_method
|
||||
#rabbit_login_method = AMQPLAIN
|
||||
|
||||
# Maximum number of RabbitMQ connection retries. Default is 0
|
||||
# (infinite retry count). (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_max_retries
|
||||
#rabbit_max_retries = 0
|
||||
|
||||
# Auto-delete queues in AMQP. (boolean value)
|
||||
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
|
||||
#amqp_auto_delete = false
|
||||
|
||||
# The RabbitMQ virtual host. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
|
||||
#rabbit_virtual_host = /
|
||||
|
||||
# SSL version to use (valid only if SSL enabled). Valid values are
|
||||
# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
|
||||
# available on some distributions. (string value)
|
||||
# Deprecated group/name - [DEFAULT]/kombu_ssl_version
|
||||
#kombu_ssl_version =
|
||||
|
||||
# How frequently to retry connecting with RabbitMQ. (integer value)
|
||||
#rabbit_retry_interval = 1
|
||||
|
||||
# SSL key file (valid only if SSL enabled). (string value)
|
||||
# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
|
||||
#kombu_ssl_keyfile =
|
||||
|
||||
# Maximum interval of RabbitMQ connection retries. Default is 30
|
||||
# seconds. (integer value)
|
||||
#rabbit_interval_max = 30
|
||||
|
||||
# How long to backoff for between retries when connecting to RabbitMQ.
|
||||
# (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
|
||||
#rabbit_retry_backoff = 2
|
||||
|
||||
# SSL certification authority file (valid only if SSL enabled).
|
||||
# (string value)
|
||||
# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
|
||||
#kombu_ssl_ca_certs =
|
||||
|
||||
# Positive integer representing duration in seconds for queue TTL
|
||||
# (x-expires). Queues which are unused for the duration of the TTL are
|
||||
# automatically deleted. The parameter affects only reply and fanout
|
||||
# queues. (integer value)
|
||||
# Minimum value: 1
|
||||
#rabbit_transient_queues_ttl = 600
|
||||
|
||||
# How long to wait before reconnecting in response to an AMQP consumer
|
||||
# cancel notification. (floating point value)
|
||||
# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
|
||||
#kombu_reconnect_delay = 1.0
|
||||
|
||||
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this
|
||||
# option, you must wipe the RabbitMQ database. (boolean value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
|
||||
#rabbit_ha_queues = false
|
||||
|
||||
# How long to wait a missing client beforce abandoning to send it its
|
||||
# replies. This value should not be longer than rpc_response_timeout.
|
||||
# (integer value)
|
||||
# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
|
||||
#kombu_missing_consumer_retry_timeout = 60
|
||||
|
||||
# Determines how the next RabbitMQ node is chosen in case the one we
|
||||
# are currently connected to becomes unavailable. Takes effect only if
|
||||
# more than one RabbitMQ node is provided in config. (string value)
|
||||
# Allowed values: round-robin, shuffle
|
||||
#kombu_failover_strategy = round-robin
|
||||
|
||||
# Specifies the number of messages to prefetch. Setting to zero allows
|
||||
# unlimited messages. (integer value)
|
||||
#rabbit_qos_prefetch_count = 0
|
||||
|
||||
# The RabbitMQ broker port where a single node is used. (port value)
|
||||
# Minimum value: 0
|
||||
# Maximum value: 65535
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_port
|
||||
#rabbit_port = 5672
|
||||
|
||||
# SSL cert file (valid only if SSL enabled). (string value)
|
||||
# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
|
||||
#kombu_ssl_certfile =
|
||||
|
||||
# Number of seconds after which the Rabbit broker is considered down
|
||||
# if heartbeat's keep-alive fails (0 disable the heartbeat).
|
||||
# EXPERIMENTAL (integer value)
|
||||
#heartbeat_timeout_threshold = 60
|
||||
|
||||
# RabbitMQ HA cluster host:port pairs. (list value)
|
||||
# Deprecated group/name - [DEFAULT]/rabbit_hosts
|
||||
#rabbit_hosts = $rabbit_host:$rabbit_port
|
||||
|
||||
|
||||
[watcher_applier]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# The topic name used for status events, this topic is used so as to
|
||||
# notifythe others components of the system (string value)
|
||||
#status_topic = watcher.applier.status
|
||||
|
||||
# Select the engine to use to execute the workflow (string value)
|
||||
#workflow_engine = taskflow
|
||||
|
||||
# The topic name used forcontrol events, this topic used for rpc call
|
||||
# (string value)
|
||||
#conductor_topic = watcher.applier.control
|
||||
|
||||
# Number of workers for applier, default value is 1. (integer value)
|
||||
# Minimum value: 1
|
||||
#workers = 1
|
||||
|
||||
# The identifier used by watcher module on the message broker (string
|
||||
# value)
|
||||
#publisher_id = watcher.applier.api
|
||||
|
||||
|
||||
[watcher_clients_auth]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# Optional domain name to use with v3 API and v2 parameters. It will
|
||||
# be used for both the user and project domain in v3 and ignored in v2
|
||||
# authentication. (unknown value)
|
||||
#default_domain_name = <None>
|
||||
|
||||
# Authentication URL (unknown value)
|
||||
#auth_url = <None>
|
||||
|
||||
# Domain ID to scope to (unknown value)
|
||||
#domain_id = <None>
|
||||
|
||||
# Domain name to scope to (unknown value)
|
||||
#domain_name = <None>
|
||||
|
||||
# Project ID to scope to (unknown value)
|
||||
# Deprecated group/name - [DEFAULT]/tenant-id
|
||||
#project_id = <None>
|
||||
|
||||
# Project name to scope to (unknown value)
|
||||
# Deprecated group/name - [DEFAULT]/tenant-name
|
||||
#project_name = <None>
|
||||
|
||||
# Domain ID containing project (unknown value)
|
||||
#project_domain_id = <None>
|
||||
|
||||
# PEM encoded client certificate cert file (string value)
|
||||
#certfile = <None>
|
||||
|
||||
# Domain name containing project (unknown value)
|
||||
#project_domain_name = <None>
|
||||
|
||||
# Trust ID (unknown value)
|
||||
#trust_id = <None>
|
||||
|
||||
# Optional domain ID to use with v3 and v2 parameters. It will be used
|
||||
# for both the user and project domain in v3 and ignored in v2
|
||||
# authentication. (unknown value)
|
||||
#default_domain_id = <None>
|
||||
|
||||
# Verify HTTPS connections. (boolean value)
|
||||
#insecure = false
|
||||
|
||||
# User id (unknown value)
|
||||
#user_id = <None>
|
||||
|
||||
# PEM encoded client certificate key file (string value)
|
||||
#keyfile = <None>
|
||||
|
||||
# Username (unknown value)
|
||||
# Deprecated group/name - [DEFAULT]/username
|
||||
#username = <None>
|
||||
|
||||
# User's domain id (unknown value)
|
||||
#user_domain_id = <None>
|
||||
|
||||
# User's domain name (unknown value)
|
||||
#user_domain_name = <None>
|
||||
|
||||
# Timeout value for http requests (integer value)
|
||||
#timeout = <None>
|
||||
|
||||
# User's password (unknown value)
|
||||
#password = <None>
|
||||
|
||||
# Authentication type to load (unknown value)
|
||||
# Deprecated group/name - [DEFAULT]/auth_plugin
|
||||
#auth_type = <None>
|
||||
|
||||
# Config Section from which to load plugin specific options (unknown
|
||||
# value)
|
||||
#auth_section = <None>
|
||||
|
||||
# PEM encoded Certificate Authority to use when verifying HTTPs
|
||||
# connections. (string value)
|
||||
#cafile = <None>
|
||||
|
||||
|
||||
[watcher_decision_engine]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# The maximum number of threads that can be used to execute strategies
|
||||
# (integer value)
|
||||
#max_workers = 2
|
||||
|
||||
# The topic name used for status events, this topic is used so as to
|
||||
# notifythe others components of the system (string value)
|
||||
#status_topic = watcher.decision.status
|
||||
|
||||
# The topic name used forcontrol events, this topic used for rpc call
|
||||
# (string value)
|
||||
#conductor_topic = watcher.decision.control
|
||||
|
||||
# The identifier used by watcher module on the message broker (string
|
||||
# value)
|
||||
#publisher_id = watcher.decision.api
|
||||
|
||||
|
||||
[watcher_goals]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# Goals used for the optimization. Maps each goal to an associated
|
||||
# strategy (for example: BASIC_CONSOLIDATION:basic,
|
||||
# MY_GOAL:my_strategy_1) (dict value)
|
||||
#goals = DUMMY:dummy
|
||||
|
||||
|
||||
[watcher_planner]
|
||||
|
||||
#
|
||||
# From watcher
|
||||
#
|
||||
|
||||
# The selected planner used to schedule the actions (string value)
|
||||
#planner = default
|
||||
@@ -7,6 +7,7 @@ jsonpatch>=1.1 # BSD
|
||||
keystoneauth1>=2.1.0 # Apache-2.0
|
||||
keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0
|
||||
oslo.config>=3.7.0 # Apache-2.0
|
||||
oslo.context>=0.2.0 # Apache-2.0
|
||||
oslo.db>=4.1.0 # Apache-2.0
|
||||
oslo.i18n>=2.1.0 # Apache-2.0
|
||||
oslo.log>=1.14.0 # Apache-2.0
|
||||
@@ -17,12 +18,13 @@ oslo.utils>=3.5.0 # Apache-2.0
|
||||
PasteDeploy>=1.5.0 # MIT
|
||||
pbr>=1.6 # Apache-2.0
|
||||
pecan>=1.0.0 # BSD
|
||||
PrettyTable<0.8,>=0.7 # BSD
|
||||
voluptuous>=0.8.6 # BSD License
|
||||
python-ceilometerclient>=2.2.1 # Apache-2.0
|
||||
python-cinderclient>=1.3.1 # Apache-2.0
|
||||
python-glanceclient>=1.2.0 # Apache-2.0
|
||||
python-glanceclient>=2.0.0 # Apache-2.0
|
||||
python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0
|
||||
python-neutronclient>=2.6.0 # Apache-2.0
|
||||
python-neutronclient!=4.1.0,>=2.6.0 # Apache-2.0
|
||||
python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
|
||||
python-openstackclient>=2.1.0 # Apache-2.0
|
||||
six>=1.9.0 # MIT
|
||||
|
||||
@@ -49,6 +49,7 @@ watcher_strategies =
|
||||
dummy = watcher.decision_engine.strategy.strategies.dummy_strategy:DummyStrategy
|
||||
basic = watcher.decision_engine.strategy.strategies.basic_consolidation:BasicConsolidation
|
||||
outlet_temp_control = watcher.decision_engine.strategy.strategies.outlet_temp_control:OutletTempControl
|
||||
vm_workload_consolidation = watcher.decision_engine.strategy.strategies.vm_workload_consolidation:VMWorkloadConsolidation
|
||||
|
||||
watcher_actions =
|
||||
migrate = watcher.applier.actions.migration:Migrate
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
coverage>=3.6 # Apache-2.0
|
||||
discover # BSD
|
||||
doc8 # Apache-2.0
|
||||
freezegun # Apache-2.0
|
||||
hacking<0.11,>=0.10.2
|
||||
mock>=1.2 # BSD
|
||||
oslotest>=1.10.0 # Apache-2.0
|
||||
|
||||
7
tox.ini
7
tox.ini
@@ -40,12 +40,7 @@ commands = oslo_debug_helper -t watcher/tests {posargs}
|
||||
[testenv:config]
|
||||
sitepackages = False
|
||||
commands =
|
||||
oslo-config-generator --namespace watcher \
|
||||
--namespace keystonemiddleware.auth_token \
|
||||
--namespace oslo.log \
|
||||
--namespace oslo.db \
|
||||
--namespace oslo.messaging \
|
||||
--output-file etc/watcher/watcher.conf.sample
|
||||
oslo-config-generator --config-file etc/watcher/watcher-config-generator.conf
|
||||
|
||||
[flake8]
|
||||
show-source=True
|
||||
|
||||
@@ -49,6 +49,10 @@ be one of the following:
|
||||
- **CANCELLED** : the :ref:`Action <action_definition>` was in **PENDING** or
|
||||
**ONGOING** state and was cancelled by the
|
||||
:ref:`Administrator <administrator_definition>`
|
||||
|
||||
:ref:`Some default implementations are provided <watcher_planners>`, but it is
|
||||
possible to :ref:`develop new implementations <implement_action_plugin>` which
|
||||
are dynamically loaded by Watcher at launch time.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
@@ -359,6 +363,10 @@ class ActionsController(rest.RestController):
|
||||
|
||||
:param action: a action within the request body.
|
||||
"""
|
||||
# FIXME: blueprint edit-action-plan-flow
|
||||
raise exception.OperationNotPermitted(
|
||||
_("Cannot create an action directly"))
|
||||
|
||||
if self.from_actions:
|
||||
raise exception.OperationNotPermitted
|
||||
|
||||
@@ -379,6 +387,10 @@ class ActionsController(rest.RestController):
|
||||
:param action_uuid: UUID of a action.
|
||||
:param patch: a json PATCH document to apply to this action.
|
||||
"""
|
||||
# FIXME: blueprint edit-action-plan-flow
|
||||
raise exception.OperationNotPermitted(
|
||||
_("Cannot modify an action directly"))
|
||||
|
||||
if self.from_actions:
|
||||
raise exception.OperationNotPermitted
|
||||
|
||||
@@ -411,6 +423,9 @@ class ActionsController(rest.RestController):
|
||||
|
||||
:param action_uuid: UUID of a action.
|
||||
"""
|
||||
# FIXME: blueprint edit-action-plan-flow
|
||||
raise exception.OperationNotPermitted(
|
||||
_("Cannot delete an action directly"))
|
||||
|
||||
action_to_delete = objects.Action.get_by_uuid(
|
||||
pecan.request.context,
|
||||
|
||||
@@ -49,24 +49,9 @@ standard workflow model description formats such as
|
||||
`Business Process Model and Notation 2.0 (BPMN 2.0) <http://www.omg.org/spec/BPMN/2.0/>`_
|
||||
or `Unified Modeling Language (UML) <http://www.uml.org/>`_.
|
||||
|
||||
An :ref:`Action Plan <action_plan_definition>` has a life-cycle and its current
|
||||
state may be one of the following:
|
||||
|
||||
- **RECOMMENDED** : the :ref:`Action Plan <action_plan_definition>` is waiting
|
||||
for a validation from the :ref:`Administrator <administrator_definition>`
|
||||
- **ONGOING** : the :ref:`Action Plan <action_plan_definition>` is currently
|
||||
being processed by the :ref:`Watcher Applier <watcher_applier_definition>`
|
||||
- **SUCCEEDED** : the :ref:`Action Plan <action_plan_definition>` has been
|
||||
executed successfully (i.e. all :ref:`Actions <action_definition>` that it
|
||||
contains have been executed successfully)
|
||||
- **FAILED** : an error occured while executing the
|
||||
:ref:`Action Plan <action_plan_definition>`
|
||||
- **DELETED** : the :ref:`Action Plan <action_plan_definition>` is still
|
||||
stored in the :ref:`Watcher database <watcher_database_definition>` but is
|
||||
not returned any more through the Watcher APIs.
|
||||
- **CANCELLED** : the :ref:`Action Plan <action_plan_definition>` was in
|
||||
**PENDING** or **ONGOING** state and was cancelled by the
|
||||
:ref:`Administrator <administrator_definition>`
|
||||
To see the life-cycle and description of
|
||||
:ref:`Action Plan <action_plan_definition>` states, visit :ref:`the Action Plan state
|
||||
machine <action_plan_state_machine>`.
|
||||
""" # noqa
|
||||
|
||||
import datetime
|
||||
|
||||
@@ -25,28 +25,8 @@ on a given :ref:`Cluster <cluster_definition>`.
|
||||
For each :ref:`Audit <audit_definition>`, the Watcher system generates an
|
||||
:ref:`Action Plan <action_plan_definition>`.
|
||||
|
||||
An :ref:`Audit <audit_definition>` has a life-cycle and its current state may
|
||||
be one of the following:
|
||||
|
||||
- **PENDING** : a request for an :ref:`Audit <audit_definition>` has been
|
||||
submitted (either manually by the
|
||||
:ref:`Administrator <administrator_definition>` or automatically via some
|
||||
event handling mechanism) and is in the queue for being processed by the
|
||||
:ref:`Watcher Decision Engine <watcher_decision_engine_definition>`
|
||||
- **ONGOING** : the :ref:`Audit <audit_definition>` is currently being
|
||||
processed by the
|
||||
:ref:`Watcher Decision Engine <watcher_decision_engine_definition>`
|
||||
- **SUCCEEDED** : the :ref:`Audit <audit_definition>` has been executed
|
||||
successfully (note that it may not necessarily produce a
|
||||
:ref:`Solution <solution_definition>`).
|
||||
- **FAILED** : an error occured while executing the
|
||||
:ref:`Audit <audit_definition>`
|
||||
- **DELETED** : the :ref:`Audit <audit_definition>` is still stored in the
|
||||
:ref:`Watcher database <watcher_database_definition>` but is not returned
|
||||
any more through the Watcher APIs.
|
||||
- **CANCELLED** : the :ref:`Audit <audit_definition>` was in **PENDING** or
|
||||
**ONGOING** state and was cancelled by the
|
||||
:ref:`Administrator <administrator_definition>`
|
||||
To see the life-cycle and description of an :ref:`Audit <audit_definition>`
|
||||
states, visit :ref:`the Audit State machine <audit_state_machine>`.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
@@ -50,6 +50,7 @@ provided as a list of key-value pairs.
|
||||
|
||||
import datetime
|
||||
|
||||
from oslo_config import cfg
|
||||
import pecan
|
||||
from pecan import rest
|
||||
import wsme
|
||||
@@ -67,11 +68,25 @@ from watcher import objects
|
||||
|
||||
|
||||
class AuditTemplatePatchType(types.JsonPatchType):
|
||||
|
||||
@staticmethod
|
||||
def mandatory_attrs():
|
||||
return []
|
||||
|
||||
@staticmethod
|
||||
def validate(patch):
|
||||
if patch.path == "/goal":
|
||||
AuditTemplatePatchType._validate_goal(patch)
|
||||
return types.JsonPatchType.validate(patch)
|
||||
|
||||
@staticmethod
|
||||
def _validate_goal(patch):
|
||||
serialized_patch = {'path': patch.path, 'op': patch.op}
|
||||
if patch.value is not wsme.Unset:
|
||||
serialized_patch['value'] = patch.value
|
||||
new_goal = patch.value
|
||||
if new_goal and new_goal not in cfg.CONF.watcher_goals.goals.keys():
|
||||
raise exception.InvalidGoal(goal=new_goal)
|
||||
|
||||
|
||||
class AuditTemplate(base.APIBase):
|
||||
"""API representation of a audit template.
|
||||
@@ -156,6 +171,12 @@ class AuditTemplate(base.APIBase):
|
||||
updated_at=datetime.datetime.utcnow())
|
||||
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
|
||||
|
||||
@staticmethod
|
||||
def validate(audit_template):
|
||||
if audit_template.goal not in cfg.CONF.watcher_goals.goals.keys():
|
||||
raise exception.InvalidGoal(audit_template.goal)
|
||||
return audit_template
|
||||
|
||||
|
||||
class AuditTemplateCollection(collection.Collection):
|
||||
"""API representation of a collection of audit templates."""
|
||||
@@ -287,12 +308,14 @@ class AuditTemplatesController(rest.RestController):
|
||||
|
||||
return AuditTemplate.convert_with_links(rpc_audit_template)
|
||||
|
||||
@wsme.validate(types.uuid, AuditTemplate)
|
||||
@wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplate, status_code=201)
|
||||
def post(self, audit_template):
|
||||
"""Create a new audit template.
|
||||
|
||||
:param audit template: a audit template within the request body.
|
||||
"""
|
||||
|
||||
if self.from_audit_templates:
|
||||
raise exception.OperationNotPermitted
|
||||
|
||||
|
||||
@@ -53,16 +53,15 @@ class DefaultActionPlanHandler(base.BaseActionPlanHandler):
|
||||
event_types.EventTypes.LAUNCH_ACTION_PLAN,
|
||||
ap_objects.State.ONGOING)
|
||||
applier = default.DefaultApplier(self.ctx, self.applier_manager)
|
||||
result = applier.execute(self.action_plan_uuid)
|
||||
applier.execute(self.action_plan_uuid)
|
||||
state = ap_objects.State.SUCCEEDED
|
||||
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
result = False
|
||||
state = ap_objects.State.FAILED
|
||||
|
||||
finally:
|
||||
if result is True:
|
||||
status = ap_objects.State.SUCCEEDED
|
||||
else:
|
||||
status = ap_objects.State.FAILED
|
||||
# update state
|
||||
self.notify(self.action_plan_uuid,
|
||||
event_types.EventTypes.LAUNCH_ACTION_PLAN,
|
||||
status)
|
||||
state)
|
||||
|
||||
@@ -22,6 +22,7 @@ from taskflow import task
|
||||
|
||||
from watcher._i18n import _LE, _LW, _LC
|
||||
from watcher.applier.workflow_engine import base
|
||||
from watcher.common import exception
|
||||
from watcher.objects import action as obj_action
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
@@ -77,10 +78,9 @@ class DefaultWorkFlowEngine(base.BaseWorkFlowEngine):
|
||||
|
||||
e = engines.load(flow)
|
||||
e.run()
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
return False
|
||||
raise exception.WorkflowExecutionException(error=e)
|
||||
|
||||
|
||||
class TaskFlowActionContainer(task.Task):
|
||||
@@ -121,14 +121,9 @@ class TaskFlowActionContainer(task.Task):
|
||||
try:
|
||||
LOG.debug("Running action %s", self.name)
|
||||
|
||||
# todo(jed) remove return (true or false) raise an Exception
|
||||
result = self.action.execute()
|
||||
if result is not True:
|
||||
self.engine.notify(self._db_action,
|
||||
obj_action.State.FAILED)
|
||||
else:
|
||||
self.engine.notify(self._db_action,
|
||||
obj_action.State.SUCCEEDED)
|
||||
self.action.execute()
|
||||
self.engine.notify(self._db_action,
|
||||
obj_action.State.SUCCEEDED)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
LOG.error(_LE('The WorkFlow Engine has failed '
|
||||
|
||||
@@ -25,7 +25,7 @@ from oslo_config import cfg
|
||||
|
||||
from watcher.common import service
|
||||
from watcher.db import migration
|
||||
|
||||
from watcher.db import purge
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
@@ -56,6 +56,12 @@ class DBCommand(object):
|
||||
def create_schema():
|
||||
migration.create_schema()
|
||||
|
||||
@staticmethod
|
||||
def purge():
|
||||
purge.purge(CONF.command.age_in_days, CONF.command.max_number,
|
||||
CONF.command.audit_template, CONF.command.exclude_orphans,
|
||||
CONF.command.dry_run)
|
||||
|
||||
|
||||
def add_command_parsers(subparsers):
|
||||
parser = subparsers.add_parser(
|
||||
@@ -96,6 +102,33 @@ def add_command_parsers(subparsers):
|
||||
help="Create the database schema.")
|
||||
parser.set_defaults(func=DBCommand.create_schema)
|
||||
|
||||
parser = subparsers.add_parser(
|
||||
'purge',
|
||||
help="Purge the database.")
|
||||
parser.add_argument('-d', '--age-in-days',
|
||||
help="Number of days since deletion (from today) "
|
||||
"to exclude from the purge. If None, everything "
|
||||
"will be purged.",
|
||||
type=int, default=None, nargs='?')
|
||||
parser.add_argument('-n', '--max-number',
|
||||
help="Max number of objects expected to be deleted. "
|
||||
"Prevents the deletion if exceeded. No limit if "
|
||||
"set to None.",
|
||||
type=int, default=None, nargs='?')
|
||||
parser.add_argument('-t', '--audit-template',
|
||||
help="UUID or name of the audit template to purge.",
|
||||
type=str, default=None, nargs='?')
|
||||
parser.add_argument('-e', '--exclude-orphans', action='store_true',
|
||||
help="Flag to indicate whether or not you want to "
|
||||
"exclude orphans from deletion (default: False).",
|
||||
default=False)
|
||||
parser.add_argument('--dry-run', action='store_true',
|
||||
help="Flag to indicate whether or not you want to "
|
||||
"perform a dry run (no deletion).",
|
||||
default=False)
|
||||
|
||||
parser.set_defaults(func=DBCommand.purge)
|
||||
|
||||
|
||||
command_opt = cfg.SubCommandOpt('command',
|
||||
title='Command',
|
||||
@@ -114,6 +147,7 @@ def main():
|
||||
valid_commands = set([
|
||||
'upgrade', 'downgrade', 'revision',
|
||||
'version', 'stamp', 'create_schema',
|
||||
'purge',
|
||||
])
|
||||
if not set(sys.argv).intersection(valid_commands):
|
||||
sys.argv.append('upgrade')
|
||||
|
||||
@@ -22,6 +22,8 @@ from watcher import version
|
||||
|
||||
|
||||
def parse_args(argv, default_config_files=None):
|
||||
default_config_files = (default_config_files or
|
||||
cfg.find_config_files(project='watcher'))
|
||||
rpc.set_defaults(control_exchange='watcher')
|
||||
cfg.CONF(argv[1:],
|
||||
project='python-watcher',
|
||||
|
||||
@@ -234,6 +234,9 @@ class PatchError(Invalid):
|
||||
|
||||
# decision engine
|
||||
|
||||
class WorkflowExecutionException(WatcherException):
|
||||
msg_fmt = _('Workflow execution error: %(error)s')
|
||||
|
||||
|
||||
class IllegalArgumentException(WatcherException):
|
||||
msg_fmt = _('Illegal argument')
|
||||
@@ -283,3 +286,11 @@ class LoadingError(WatcherException):
|
||||
|
||||
class ReservedWord(WatcherException):
|
||||
msg_fmt = _("The identifier '%(name)s' is a reserved word")
|
||||
|
||||
|
||||
class NotSoftDeletedStateError(WatcherException):
|
||||
msg_fmt = _("The %(name)s resource %(id)s is not soft deleted")
|
||||
|
||||
|
||||
class NegativeLimitError(WatcherException):
|
||||
msg_fmt = _("Limit should be positive")
|
||||
|
||||
410
watcher/db/purge.py
Normal file
410
watcher/db/purge.py
Normal file
@@ -0,0 +1,410 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2016 b<>com
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import collections
|
||||
import datetime
|
||||
import itertools
|
||||
import sys
|
||||
|
||||
from oslo_log import log
|
||||
from oslo_utils import strutils
|
||||
import prettytable as ptable
|
||||
from six.moves import input
|
||||
|
||||
from watcher._i18n import _, _LI
|
||||
from watcher.common import context
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils
|
||||
from watcher import objects
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class WatcherObjectsMap(object):
|
||||
"""Wrapper to deal with watcher objects per type
|
||||
|
||||
This wrapper object contains a list of watcher objects per type.
|
||||
Its main use is to simplify the merge of watcher objects by avoiding
|
||||
duplicates, but also for representing the relationships between these
|
||||
objects.
|
||||
"""
|
||||
|
||||
# This is for generating the .pot translations
|
||||
keymap = collections.OrderedDict([
|
||||
("audit_templates", _("Audit Templates")),
|
||||
("audits", _("Audits")),
|
||||
("action_plans", _("Action Plans")),
|
||||
("actions", _("Actions")),
|
||||
])
|
||||
|
||||
def __init__(self):
|
||||
for attr_name in self.__class__.keys():
|
||||
setattr(self, attr_name, [])
|
||||
|
||||
def values(self):
|
||||
return (getattr(self, key) for key in self.__class__.keys())
|
||||
|
||||
@classmethod
|
||||
def keys(cls):
|
||||
return cls.keymap.keys()
|
||||
|
||||
def __iter__(self):
|
||||
return itertools.chain(*self.values())
|
||||
|
||||
def __add__(self, other):
|
||||
new_map = self.__class__()
|
||||
|
||||
# Merge the 2 items dicts into a new object (and avoid dupes)
|
||||
for attr_name, initials, others in zip(self.keys(), self.values(),
|
||||
other.values()):
|
||||
# Creates a copy
|
||||
merged = initials[:]
|
||||
initials_ids = [item.id for item in initials]
|
||||
non_dupes = [item for item in others
|
||||
if item.id not in initials_ids]
|
||||
merged += non_dupes
|
||||
|
||||
setattr(new_map, attr_name, merged)
|
||||
|
||||
return new_map
|
||||
|
||||
def __str__(self):
|
||||
out = ""
|
||||
for key, vals in zip(self.keys(), self.values()):
|
||||
ids = [val.id for val in vals]
|
||||
out += "%(key)s: %(val)s" % (dict(key=key, val=ids))
|
||||
out += "\n"
|
||||
return out
|
||||
|
||||
def __len__(self):
|
||||
return sum(len(getattr(self, key)) for key in self.keys())
|
||||
|
||||
def get_count_table(self):
|
||||
headers = list(self.keymap.values())
|
||||
headers.append(_("Total")) # We also add a total count
|
||||
counters = [len(cat_vals) for cat_vals in self.values()] + [len(self)]
|
||||
table = ptable.PrettyTable(field_names=headers)
|
||||
table.add_row(counters)
|
||||
return table.get_string()
|
||||
|
||||
|
||||
class PurgeCommand(object):
|
||||
"""Purges the DB by removing soft deleted entries
|
||||
|
||||
The workflow for this purge is the following:
|
||||
|
||||
# Find soft deleted objects which are expired
|
||||
# Find orphan objects
|
||||
# Find their related objects whether they are expired or not
|
||||
# Merge them together
|
||||
# If it does not exceed the limit, destroy them all
|
||||
"""
|
||||
|
||||
ctx = context.make_context(show_deleted=True)
|
||||
|
||||
def __init__(self, age_in_days=None, max_number=None,
|
||||
uuid=None, exclude_orphans=False, dry_run=None):
|
||||
self.age_in_days = age_in_days
|
||||
self.max_number = max_number
|
||||
self.uuid = uuid
|
||||
self.exclude_orphans = exclude_orphans
|
||||
self.dry_run = dry_run
|
||||
|
||||
self._delete_up_to_max = None
|
||||
self._objects_map = WatcherObjectsMap()
|
||||
|
||||
def get_expiry_date(self):
|
||||
if not self.age_in_days:
|
||||
return None
|
||||
today = datetime.datetime.today()
|
||||
expiry_date = today - datetime.timedelta(days=self.age_in_days)
|
||||
return expiry_date
|
||||
|
||||
@classmethod
|
||||
def get_audit_template_uuid(cls, uuid_or_name):
|
||||
if uuid_or_name is None:
|
||||
return
|
||||
|
||||
query_func = None
|
||||
if not utils.is_uuid_like(uuid_or_name):
|
||||
query_func = objects.audit_template.AuditTemplate.get_by_name
|
||||
else:
|
||||
query_func = objects.audit_template.AuditTemplate.get_by_uuid
|
||||
|
||||
try:
|
||||
audit_template = query_func(cls.ctx, uuid_or_name)
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
raise exception.AuditTemplateNotFound(audit_template=uuid_or_name)
|
||||
|
||||
if not audit_template.deleted_at:
|
||||
raise exception.NotSoftDeletedStateError(
|
||||
name=_('Audit Template'), id=uuid_or_name)
|
||||
|
||||
return audit_template.uuid
|
||||
|
||||
def _find_audit_templates(self, filters=None):
|
||||
return objects.audit_template.AuditTemplate.list(
|
||||
self.ctx, filters=filters)
|
||||
|
||||
def _find_audits(self, filters=None):
|
||||
return objects.audit.Audit.list(self.ctx, filters=filters)
|
||||
|
||||
def _find_action_plans(self, filters=None):
|
||||
return objects.action_plan.ActionPlan.list(self.ctx, filters=filters)
|
||||
|
||||
def _find_actions(self, filters=None):
|
||||
return objects.action.Action.list(self.ctx, filters=filters)
|
||||
|
||||
def _find_orphans(self):
|
||||
orphans = WatcherObjectsMap()
|
||||
|
||||
filters = dict(deleted=False)
|
||||
audit_templates = objects.audit_template.AuditTemplate.list(
|
||||
self.ctx, filters=filters)
|
||||
audits = objects.audit.Audit.list(self.ctx, filters=filters)
|
||||
action_plans = objects.action_plan.ActionPlan.list(
|
||||
self.ctx, filters=filters)
|
||||
actions = objects.action.Action.list(self.ctx, filters=filters)
|
||||
|
||||
audit_template_ids = set(at.id for at in audit_templates)
|
||||
orphans.audits = [
|
||||
audit for audit in audits
|
||||
if audit.audit_template_id not in audit_template_ids]
|
||||
|
||||
# Objects with orphan parents are themselves orphans
|
||||
audit_ids = [audit.id for audit in (a for a in audits
|
||||
if a not in orphans.audits)]
|
||||
orphans.action_plans = [
|
||||
ap for ap in action_plans
|
||||
if ap.audit_id not in audit_ids]
|
||||
|
||||
# Objects with orphan parents are themselves orphans
|
||||
action_plan_ids = [ap.id for ap in (a for a in action_plans
|
||||
if a not in orphans.action_plans)]
|
||||
orphans.actions = [
|
||||
action for action in actions
|
||||
if action.action_plan_id not in action_plan_ids]
|
||||
|
||||
LOG.debug("Orphans found:\n%s", orphans)
|
||||
LOG.info(_LI("Orphans found:\n%s"), orphans.get_count_table())
|
||||
|
||||
return orphans
|
||||
|
||||
def _find_soft_deleted_objects(self):
|
||||
to_be_deleted = WatcherObjectsMap()
|
||||
|
||||
expiry_date = self.get_expiry_date()
|
||||
filters = dict(deleted=True)
|
||||
if self.uuid:
|
||||
filters["uuid"] = self.uuid
|
||||
if expiry_date:
|
||||
filters.update(dict(deleted_at__lt=expiry_date))
|
||||
|
||||
to_be_deleted.audit_templates.extend(
|
||||
self._find_audit_templates(filters))
|
||||
to_be_deleted.audits.extend(self._find_audits(filters))
|
||||
to_be_deleted.action_plans.extend(self._find_action_plans(filters))
|
||||
to_be_deleted.actions.extend(self._find_actions(filters))
|
||||
|
||||
soft_deleted_objs = self._find_related_objects(
|
||||
to_be_deleted, base_filters=dict(deleted=True))
|
||||
|
||||
LOG.debug("Soft deleted objects:\n%s", soft_deleted_objs)
|
||||
|
||||
return soft_deleted_objs
|
||||
|
||||
def _find_related_objects(self, objects_map, base_filters=None):
|
||||
base_filters = base_filters or {}
|
||||
|
||||
for audit_template in objects_map.audit_templates:
|
||||
filters = {}
|
||||
filters.update(base_filters)
|
||||
filters.update(dict(audit_template_id=audit_template.id))
|
||||
related_objs = WatcherObjectsMap()
|
||||
related_objs.audits = self._find_audits(filters)
|
||||
objects_map += related_objs
|
||||
|
||||
for audit in objects_map.audits:
|
||||
filters = {}
|
||||
filters.update(base_filters)
|
||||
filters.update(dict(audit_id=audit.id))
|
||||
related_objs = WatcherObjectsMap()
|
||||
related_objs.action_plans = self._find_action_plans(filters)
|
||||
objects_map += related_objs
|
||||
|
||||
for action_plan in objects_map.action_plans:
|
||||
filters = {}
|
||||
filters.update(base_filters)
|
||||
filters.update(dict(action_plan_id=action_plan.id))
|
||||
related_objs = WatcherObjectsMap()
|
||||
related_objs.actions = self._find_actions(filters)
|
||||
objects_map += related_objs
|
||||
|
||||
return objects_map
|
||||
|
||||
def confirmation_prompt(self):
|
||||
print(self._objects_map.get_count_table())
|
||||
raw_val = input(
|
||||
_("There are %(count)d objects set for deletion. "
|
||||
"Continue? [y/N]") % dict(count=len(self._objects_map)))
|
||||
|
||||
return strutils.bool_from_string(raw_val)
|
||||
|
||||
def delete_up_to_max_prompt(self, objects_map):
|
||||
print(objects_map.get_count_table())
|
||||
print(_("The number of objects (%(num)s) to delete from the database "
|
||||
"exceeds the maximum number of objects (%(max_number)s) "
|
||||
"specified.") % dict(max_number=self.max_number,
|
||||
num=len(objects_map)))
|
||||
raw_val = input(
|
||||
_("Do you want to delete objects up to the specified maximum "
|
||||
"number? [y/N]"))
|
||||
|
||||
self._delete_up_to_max = strutils.bool_from_string(raw_val)
|
||||
|
||||
return self._delete_up_to_max
|
||||
|
||||
def _aggregate_objects(self):
|
||||
"""Objects aggregated on a 'per audit template' basis"""
|
||||
# todo: aggregate orphans as well
|
||||
aggregate = []
|
||||
for audit_template in self._objects_map.audit_templates:
|
||||
related_objs = WatcherObjectsMap()
|
||||
related_objs.audit_templates = [audit_template]
|
||||
related_objs.audits = [
|
||||
audit for audit in self._objects_map.audits
|
||||
if audit.audit_template_id == audit_template.id
|
||||
]
|
||||
audit_ids = [audit.id for audit in related_objs.audits]
|
||||
related_objs.action_plans = [
|
||||
action_plan for action_plan in self._objects_map.action_plans
|
||||
if action_plan.audit_id in audit_ids
|
||||
]
|
||||
action_plan_ids = [
|
||||
action_plan.id for action_plan in related_objs.action_plans
|
||||
]
|
||||
related_objs.actions = [
|
||||
action for action in self._objects_map.actions
|
||||
if action.action_plan_id in action_plan_ids
|
||||
]
|
||||
aggregate.append(related_objs)
|
||||
|
||||
return aggregate
|
||||
|
||||
def _get_objects_up_to_limit(self):
|
||||
aggregated_objects = self._aggregate_objects()
|
||||
to_be_deleted_subset = WatcherObjectsMap()
|
||||
|
||||
for aggregate in aggregated_objects:
|
||||
if len(aggregate) + len(to_be_deleted_subset) <= self.max_number:
|
||||
to_be_deleted_subset += aggregate
|
||||
else:
|
||||
break
|
||||
|
||||
LOG.debug(to_be_deleted_subset)
|
||||
return to_be_deleted_subset
|
||||
|
||||
def find_objects_to_delete(self):
|
||||
"""Finds all the objects to be purged
|
||||
|
||||
:returns: A mapping with all the Watcher objects to purged
|
||||
:rtype: :py:class:`~.WatcherObjectsMap` instance
|
||||
"""
|
||||
to_be_deleted = self._find_soft_deleted_objects()
|
||||
|
||||
if not self.exclude_orphans:
|
||||
to_be_deleted += self._find_orphans()
|
||||
|
||||
LOG.debug("Objects to be deleted:\n%s", to_be_deleted)
|
||||
|
||||
return to_be_deleted
|
||||
|
||||
def do_delete(self):
|
||||
LOG.info(_LI("Deleting..."))
|
||||
# Reversed to avoid errors with foreign keys
|
||||
for entry in reversed(list(self._objects_map)):
|
||||
entry.destroy()
|
||||
|
||||
def execute(self):
|
||||
LOG.info(_LI("Starting purge command"))
|
||||
self._objects_map = self.find_objects_to_delete()
|
||||
|
||||
if (self.max_number is not None and
|
||||
len(self._objects_map) > self.max_number):
|
||||
if self.delete_up_to_max_prompt(self._objects_map):
|
||||
self._objects_map = self._get_objects_up_to_limit()
|
||||
else:
|
||||
return
|
||||
|
||||
_orphans_note = (_(" (orphans excluded)") if self.exclude_orphans
|
||||
else _(" (may include orphans)"))
|
||||
if not self.dry_run and self.confirmation_prompt():
|
||||
self.do_delete()
|
||||
print(_("Purge results summary%s:") % _orphans_note)
|
||||
LOG.info(_LI("Purge results summary%s:"), _orphans_note)
|
||||
else:
|
||||
LOG.debug(self._objects_map)
|
||||
print(_("Here below is a table containing the objects "
|
||||
"that can be purged%s:") % _orphans_note)
|
||||
|
||||
LOG.info("\n%s", self._objects_map.get_count_table())
|
||||
print(self._objects_map.get_count_table())
|
||||
LOG.info(_LI("Purge process completed"))
|
||||
|
||||
|
||||
def purge(age_in_days, max_number, audit_template, exclude_orphans, dry_run):
|
||||
"""Removes soft deleted objects from the database
|
||||
|
||||
:param age_in_days: Number of days since deletion (from today)
|
||||
to exclude from the purge. If None, everything will be purged.
|
||||
:type age_in_days: int
|
||||
:param max_number: Max number of objects expected to be deleted.
|
||||
Prevents the deletion if exceeded. No limit if set to None.
|
||||
:type max_number: int
|
||||
:param audit_template: UUID or name of the audit template to purge.
|
||||
:type audit_template: str
|
||||
:param exclude_orphans: Flag to indicate whether or not you want to
|
||||
exclude orphans from deletion (default: False).
|
||||
:type exclude_orphans: bool
|
||||
:param dry_run: Flag to indicate whether or not you want to perform
|
||||
a dry run (no deletion).
|
||||
:type dry_run: bool
|
||||
"""
|
||||
try:
|
||||
if max_number and max_number < 0:
|
||||
raise exception.NegativeLimitError
|
||||
|
||||
LOG.info("[options] age_in_days = %s", age_in_days)
|
||||
LOG.info("[options] max_number = %s", max_number)
|
||||
LOG.info("[options] audit_template = %s", audit_template)
|
||||
LOG.info("[options] exclude_orphans = %s", exclude_orphans)
|
||||
LOG.info("[options] dry_run = %s", dry_run)
|
||||
|
||||
uuid = PurgeCommand.get_audit_template_uuid(audit_template)
|
||||
|
||||
cmd = PurgeCommand(age_in_days, max_number, uuid,
|
||||
exclude_orphans, dry_run)
|
||||
|
||||
cmd.execute()
|
||||
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
print(exc)
|
||||
sys.exit(1)
|
||||
@@ -29,7 +29,10 @@ from watcher.common import exception
|
||||
from watcher.common import utils
|
||||
from watcher.db import api
|
||||
from watcher.db.sqlalchemy import models
|
||||
from watcher.objects import action as action_objects
|
||||
from watcher.objects import action_plan as ap_objects
|
||||
from watcher.objects import audit as audit_objects
|
||||
from watcher.objects import utils as objutils
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
@@ -105,12 +108,88 @@ class Connection(api.BaseConnection):
|
||||
"""SqlAlchemy connection."""
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
super(Connection, self).__init__()
|
||||
|
||||
def __add_soft_delete_mixin_filters(self, query, filters, model):
|
||||
if 'deleted' in filters:
|
||||
if bool(filters['deleted']):
|
||||
query = query.filter(model.deleted != 0)
|
||||
else:
|
||||
query = query.filter(model.deleted == 0)
|
||||
if 'deleted_at__eq' in filters:
|
||||
query = query.filter(
|
||||
model.deleted_at == objutils.datetime_or_str_or_none(
|
||||
filters['deleted_at__eq']))
|
||||
if 'deleted_at__gt' in filters:
|
||||
query = query.filter(
|
||||
model.deleted_at > objutils.datetime_or_str_or_none(
|
||||
filters['deleted_at__gt']))
|
||||
if 'deleted_at__gte' in filters:
|
||||
query = query.filter(
|
||||
model.deleted_at >= objutils.datetime_or_str_or_none(
|
||||
filters['deleted_at__gte']))
|
||||
if 'deleted_at__lt' in filters:
|
||||
query = query.filter(
|
||||
model.deleted_at < objutils.datetime_or_str_or_none(
|
||||
filters['deleted_at__lt']))
|
||||
if 'deleted_at__lte' in filters:
|
||||
query = query.filter(
|
||||
model.deleted_at <= objutils.datetime_or_str_or_none(
|
||||
filters['deleted_at__lte']))
|
||||
|
||||
return query
|
||||
|
||||
def __add_timestamp_mixin_filters(self, query, filters, model):
|
||||
if 'created_at__eq' in filters:
|
||||
query = query.filter(
|
||||
model.created_at == objutils.datetime_or_str_or_none(
|
||||
filters['created_at__eq']))
|
||||
if 'created_at__gt' in filters:
|
||||
query = query.filter(
|
||||
model.created_at > objutils.datetime_or_str_or_none(
|
||||
filters['created_at__gt']))
|
||||
if 'created_at__gte' in filters:
|
||||
query = query.filter(
|
||||
model.created_at >= objutils.datetime_or_str_or_none(
|
||||
filters['created_at__gte']))
|
||||
if 'created_at__lt' in filters:
|
||||
query = query.filter(
|
||||
model.created_at < objutils.datetime_or_str_or_none(
|
||||
filters['created_at__lt']))
|
||||
if 'created_at__lte' in filters:
|
||||
query = query.filter(
|
||||
model.created_at <= objutils.datetime_or_str_or_none(
|
||||
filters['created_at__lte']))
|
||||
|
||||
if 'updated_at__eq' in filters:
|
||||
query = query.filter(
|
||||
model.updated_at == objutils.datetime_or_str_or_none(
|
||||
filters['updated_at__eq']))
|
||||
if 'updated_at__gt' in filters:
|
||||
query = query.filter(
|
||||
model.updated_at > objutils.datetime_or_str_or_none(
|
||||
filters['updated_at__gt']))
|
||||
if 'updated_at__gte' in filters:
|
||||
query = query.filter(
|
||||
model.updated_at >= objutils.datetime_or_str_or_none(
|
||||
filters['updated_at__gte']))
|
||||
if 'updated_at__lt' in filters:
|
||||
query = query.filter(
|
||||
model.updated_at < objutils.datetime_or_str_or_none(
|
||||
filters['updated_at__lt']))
|
||||
if 'updated_at__lte' in filters:
|
||||
query = query.filter(
|
||||
model.updated_at <= objutils.datetime_or_str_or_none(
|
||||
filters['updated_at__lte']))
|
||||
|
||||
return query
|
||||
|
||||
def _add_audit_templates_filters(self, query, filters):
|
||||
if filters is None:
|
||||
filters = []
|
||||
|
||||
if 'uuid' in filters:
|
||||
query = query.filter_by(uuid=filters['uuid'])
|
||||
if 'name' in filters:
|
||||
query = query.filter_by(name=filters['name'])
|
||||
if 'host_aggregate' in filters:
|
||||
@@ -118,12 +197,19 @@ class Connection(api.BaseConnection):
|
||||
if 'goal' in filters:
|
||||
query = query.filter_by(goal=filters['goal'])
|
||||
|
||||
query = self.__add_soft_delete_mixin_filters(
|
||||
query, filters, models.AuditTemplate)
|
||||
query = self.__add_timestamp_mixin_filters(
|
||||
query, filters, models.AuditTemplate)
|
||||
|
||||
return query
|
||||
|
||||
def _add_audits_filters(self, query, filters):
|
||||
if filters is None:
|
||||
filters = []
|
||||
|
||||
if 'uuid' in filters:
|
||||
query = query.filter_by(uuid=filters['uuid'])
|
||||
if 'type' in filters:
|
||||
query = query.filter_by(type=filters['type'])
|
||||
if 'state' in filters:
|
||||
@@ -144,12 +230,20 @@ class Connection(api.BaseConnection):
|
||||
query = query.filter(
|
||||
models.AuditTemplate.name ==
|
||||
filters['audit_template_name'])
|
||||
|
||||
query = self.__add_soft_delete_mixin_filters(
|
||||
query, filters, models.Audit)
|
||||
query = self.__add_timestamp_mixin_filters(
|
||||
query, filters, models.Audit)
|
||||
|
||||
return query
|
||||
|
||||
def _add_action_plans_filters(self, query, filters):
|
||||
if filters is None:
|
||||
filters = []
|
||||
|
||||
if 'uuid' in filters:
|
||||
query = query.filter_by(uuid=filters['uuid'])
|
||||
if 'state' in filters:
|
||||
query = query.filter_by(state=filters['state'])
|
||||
if 'audit_id' in filters:
|
||||
@@ -158,12 +252,20 @@ class Connection(api.BaseConnection):
|
||||
query = query.join(models.Audit,
|
||||
models.ActionPlan.audit_id == models.Audit.id)
|
||||
query = query.filter(models.Audit.uuid == filters['audit_uuid'])
|
||||
|
||||
query = self.__add_soft_delete_mixin_filters(
|
||||
query, filters, models.ActionPlan)
|
||||
query = self.__add_timestamp_mixin_filters(
|
||||
query, filters, models.ActionPlan)
|
||||
|
||||
return query
|
||||
|
||||
def _add_actions_filters(self, query, filters):
|
||||
if filters is None:
|
||||
filters = []
|
||||
|
||||
if 'uuid' in filters:
|
||||
query = query.filter_by(uuid=filters['uuid'])
|
||||
if 'action_plan_id' in filters:
|
||||
query = query.filter_by(action_plan_id=filters['action_plan_id'])
|
||||
if 'action_plan_uuid' in filters:
|
||||
@@ -184,6 +286,11 @@ class Connection(api.BaseConnection):
|
||||
if 'alarm' in filters:
|
||||
query = query.filter_by(alarm=filters['alarm'])
|
||||
|
||||
query = self.__add_soft_delete_mixin_filters(
|
||||
query, filters, models.Action)
|
||||
query = self.__add_timestamp_mixin_filters(
|
||||
query, filters, models.Action)
|
||||
|
||||
return query
|
||||
|
||||
def get_audit_template_list(self, context, filters=None, limit=None,
|
||||
@@ -193,7 +300,6 @@ class Connection(api.BaseConnection):
|
||||
query = self._add_audit_templates_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
|
||||
return _paginate_query(models.AuditTemplate, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
|
||||
@@ -312,7 +418,8 @@ class Connection(api.BaseConnection):
|
||||
query = model_query(models.Audit)
|
||||
query = self._add_audits_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter(~(models.Audit.state == 'DELETED'))
|
||||
query = query.filter(
|
||||
~(models.Audit.state == audit_objects.State.DELETED))
|
||||
|
||||
return _paginate_query(models.Audit, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
@@ -340,7 +447,7 @@ class Connection(api.BaseConnection):
|
||||
try:
|
||||
audit = query.one()
|
||||
if not context.show_deleted:
|
||||
if audit.state == 'DELETED':
|
||||
if audit.state == audit_objects.State.DELETED:
|
||||
raise exception.AuditNotFound(audit=audit_id)
|
||||
return audit
|
||||
except exc.NoResultFound:
|
||||
@@ -353,7 +460,7 @@ class Connection(api.BaseConnection):
|
||||
try:
|
||||
audit = query.one()
|
||||
if not context.show_deleted:
|
||||
if audit.state == 'DELETED':
|
||||
if audit.state == audit_objects.State.DELETED:
|
||||
raise exception.AuditNotFound(audit=audit_uuid)
|
||||
return audit
|
||||
except exc.NoResultFound:
|
||||
@@ -421,7 +528,8 @@ class Connection(api.BaseConnection):
|
||||
query = model_query(models.Action)
|
||||
query = self._add_actions_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter(~(models.Action.state == 'DELETED'))
|
||||
query = query.filter(
|
||||
~(models.Action.state == action_objects.State.DELETED))
|
||||
return _paginate_query(models.Action, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
|
||||
@@ -444,7 +552,7 @@ class Connection(api.BaseConnection):
|
||||
try:
|
||||
action = query.one()
|
||||
if not context.show_deleted:
|
||||
if action.state == 'DELETED':
|
||||
if action.state == action_objects.State.DELETED:
|
||||
raise exception.ActionNotFound(
|
||||
action=action_id)
|
||||
return action
|
||||
@@ -457,7 +565,7 @@ class Connection(api.BaseConnection):
|
||||
try:
|
||||
action = query.one()
|
||||
if not context.show_deleted:
|
||||
if action.state == 'DELETED':
|
||||
if action.state == action_objects.State.DELETED:
|
||||
raise exception.ActionNotFound(
|
||||
action=action_uuid)
|
||||
return action
|
||||
@@ -514,7 +622,8 @@ class Connection(api.BaseConnection):
|
||||
query = model_query(models.ActionPlan)
|
||||
query = self._add_action_plans_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter(~(models.ActionPlan.state == 'DELETED'))
|
||||
query = query.filter(
|
||||
~(models.ActionPlan.state == ap_objects.State.DELETED))
|
||||
|
||||
return _paginate_query(models.ActionPlan, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
@@ -539,7 +648,7 @@ class Connection(api.BaseConnection):
|
||||
try:
|
||||
action_plan = query.one()
|
||||
if not context.show_deleted:
|
||||
if action_plan.state == 'DELETED':
|
||||
if action_plan.state == ap_objects.State.DELETED:
|
||||
raise exception.ActionPlanNotFound(
|
||||
action_plan=action_plan_id)
|
||||
return action_plan
|
||||
@@ -553,7 +662,7 @@ class Connection(api.BaseConnection):
|
||||
try:
|
||||
action_plan = query.one()
|
||||
if not context.show_deleted:
|
||||
if action_plan.state == 'DELETED':
|
||||
if action_plan.state == ap_objects.State.DELETED:
|
||||
raise exception.ActionPlanNotFound(
|
||||
action_plan=action_plan__uuid)
|
||||
return action_plan
|
||||
|
||||
@@ -21,6 +21,7 @@ class ResourceType(Enum):
|
||||
cpu_cores = 'num_cores'
|
||||
memory = 'memory'
|
||||
disk = 'disk'
|
||||
disk_capacity = 'disk_capacity'
|
||||
|
||||
|
||||
class Resource(object):
|
||||
|
||||
@@ -37,6 +37,10 @@ congestion which may decrease the :ref:`SLA <sla_definition>` for
|
||||
It is also important to schedule :ref:`Actions <action_definition>` in order to
|
||||
avoid security issues such as denial of service on core OpenStack services.
|
||||
|
||||
:ref:`Some default implementations are provided <watcher_planners>`, but it is
|
||||
possible to :ref:`develop new implementations <implement_planner_plugin>`
|
||||
which are dynamically loaded by Watcher at launch time.
|
||||
|
||||
See :doc:`../architecture` for more details on this component.
|
||||
"""
|
||||
|
||||
|
||||
@@ -18,10 +18,14 @@
|
||||
from watcher.decision_engine.strategy.strategies import basic_consolidation
|
||||
from watcher.decision_engine.strategy.strategies import dummy_strategy
|
||||
from watcher.decision_engine.strategy.strategies import outlet_temp_control
|
||||
from watcher.decision_engine.strategy.strategies \
|
||||
import vm_workload_consolidation
|
||||
|
||||
BasicConsolidation = basic_consolidation.BasicConsolidation
|
||||
OutletTempControl = outlet_temp_control.OutletTempControl
|
||||
DummyStrategy = dummy_strategy.DummyStrategy
|
||||
VMWorkloadConsolidation = vm_workload_consolidation.VMWorkloadConsolidation
|
||||
|
||||
|
||||
__all__ = (BasicConsolidation, OutletTempControl, DummyStrategy)
|
||||
__all__ = (BasicConsolidation, OutletTempControl, DummyStrategy,
|
||||
VMWorkloadConsolidation)
|
||||
|
||||
@@ -30,6 +30,10 @@ to find an optimal :ref:`Solution <solution_definition>`.
|
||||
When a new :ref:`Goal <goal_definition>` is added to the Watcher configuration,
|
||||
at least one default associated :ref:`Strategy <strategy_definition>` should be
|
||||
provided as well.
|
||||
|
||||
:ref:`Some default implementations are provided <watcher_strategies>`, but it
|
||||
is possible to :ref:`develop new implementations <implement_strategy_plugin>`
|
||||
which are dynamically loaded by Watcher at launch time.
|
||||
"""
|
||||
|
||||
import abc
|
||||
|
||||
@@ -277,25 +277,25 @@ class BasicConsolidation(base.BaseStrategy):
|
||||
:return:
|
||||
"""
|
||||
resource_id = "%s_%s" % (hypervisor.uuid, hypervisor.hostname)
|
||||
vm_avg_cpu_util = self.ceilometer. \
|
||||
host_avg_cpu_util = self.ceilometer. \
|
||||
statistic_aggregation(resource_id=resource_id,
|
||||
meter_name=self.HOST_CPU_USAGE_METRIC_NAME,
|
||||
period="7200",
|
||||
aggregate='avg'
|
||||
)
|
||||
if vm_avg_cpu_util is None:
|
||||
if host_avg_cpu_util is None:
|
||||
LOG.error(
|
||||
_LE("No values returned by %(resource_id)s "
|
||||
"for %(metric_name)s"),
|
||||
resource_id=resource_id,
|
||||
metric_name=self.HOST_CPU_USAGE_METRIC_NAME,
|
||||
)
|
||||
vm_avg_cpu_util = 100
|
||||
host_avg_cpu_util = 100
|
||||
|
||||
cpu_capacity = model.get_resource_from_id(
|
||||
resource.ResourceType.cpu_cores).get_capacity(hypervisor)
|
||||
|
||||
total_cores_used = cpu_capacity * (vm_avg_cpu_util / 100)
|
||||
total_cores_used = cpu_capacity * (host_avg_cpu_util / 100)
|
||||
|
||||
return self.calculate_weight(model, hypervisor, total_cores_used,
|
||||
0,
|
||||
|
||||
@@ -0,0 +1,523 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
#
|
||||
# Authors: Vojtech CIMA <cima@zhaw.ch>
|
||||
# Bruno GRAZIOLI <gaea@zhaw.ch>
|
||||
# Sean MURPHY <murp@zhaw.ch>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from copy import deepcopy
|
||||
|
||||
from oslo_log import log
|
||||
import six
|
||||
|
||||
from watcher._i18n import _LE, _LI
|
||||
from watcher.common import exception
|
||||
from watcher.decision_engine.model import hypervisor_state as hyper_state
|
||||
from watcher.decision_engine.model import resource
|
||||
from watcher.decision_engine.model import vm_state
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
from watcher.metrics_engine.cluster_history import ceilometer \
|
||||
as ceilometer_cluster_history
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class VMWorkloadConsolidation(base.BaseStrategy):
|
||||
"""VM Workload Consolidation Strategy.
|
||||
|
||||
A load consolidation strategy based on heuristic first-fit
|
||||
algorithm which focuses on measured CPU utilization and tries to
|
||||
minimize hosts which have too much or too little load respecting
|
||||
resource capacity constraints.
|
||||
|
||||
This strategy produces a solution resulting in more efficient
|
||||
utilization of cluster resources using following four phases:
|
||||
* Offload phase - handling over-utilized resources
|
||||
* Consolidation phase - handling under-utilized resources
|
||||
* Solution optimization - reducing number of migrations
|
||||
* Deactivation of unused hypervisors
|
||||
|
||||
A capacity coefficients (cc) might be used to adjust optimization
|
||||
thresholds. Different resources may require different coefficient
|
||||
values as well as setting up different coefficient values in both
|
||||
phases may lead to to more efficient consolidation in the end.
|
||||
If the cc equals 1 the full resource capacity may be used, cc
|
||||
values lower than 1 will lead to resource under utilization and
|
||||
values higher than 1 will lead to resource overbooking.
|
||||
e.g. If targeted utilization is 80% of hypervisor capacity,
|
||||
the coefficient in the consolidation phase will be 0.8, but
|
||||
may any lower value in the offloading phase. The lower it gets
|
||||
the cluster will appear more released (distributed) for the
|
||||
following consolidation phase.
|
||||
|
||||
As this strategy laverages VM live migration to move the load
|
||||
from one hypervisor to another, this feature needs to be set up
|
||||
correctly on all hypervisors within the cluster.
|
||||
This strategy assumes it is possible to live migrate any VM from
|
||||
an active hypervisor to any other active hypervisor.
|
||||
"""
|
||||
|
||||
DEFAULT_NAME = 'vm_workload_consolidation'
|
||||
DEFAULT_DESCRIPTION = 'VM Workload Consolidation Strategy'
|
||||
|
||||
def __init__(self, name=DEFAULT_NAME, description=DEFAULT_DESCRIPTION,
|
||||
osc=None):
|
||||
super(VMWorkloadConsolidation, self).__init__(name, description, osc)
|
||||
self._ceilometer = None
|
||||
self.number_of_migrations = 0
|
||||
self.number_of_released_hypervisors = 0
|
||||
self.ceilometer_vm_data_cache = dict()
|
||||
|
||||
@property
|
||||
def ceilometer(self):
|
||||
if self._ceilometer is None:
|
||||
self._ceilometer = (ceilometer_cluster_history.
|
||||
CeilometerClusterHistory(osc=self.osc))
|
||||
return self._ceilometer
|
||||
|
||||
@ceilometer.setter
|
||||
def ceilometer(self, ceilometer):
|
||||
self._ceilometer = ceilometer
|
||||
|
||||
def get_state_str(self, state):
|
||||
"""Get resource state in string format.
|
||||
|
||||
:param state: resource state of unknown type
|
||||
"""
|
||||
if isinstance(state, six.string_types):
|
||||
return state
|
||||
elif (type(state) == hyper_state.HypervisorState or
|
||||
type(state) == vm_state.VMState):
|
||||
return state.value
|
||||
else:
|
||||
LOG.error(_LE('Unexpexted resource state type, '
|
||||
'state=%(state)s, state_type=%(st)s.'),
|
||||
state=state,
|
||||
st=type(state))
|
||||
raise exception.WatcherException
|
||||
|
||||
def add_action_activate_hypervisor(self, hypervisor):
|
||||
"""Add an action for hypervisor activation into the solution.
|
||||
|
||||
:param hypervisor: hypervisor object
|
||||
:return: None
|
||||
"""
|
||||
params = {'state': hyper_state.HypervisorState.ONLINE.value}
|
||||
self.solution.add_action(
|
||||
action_type='change_nova_service_state',
|
||||
resource_id=hypervisor.uuid,
|
||||
input_parameters=params)
|
||||
self.number_of_released_hypervisors -= 1
|
||||
|
||||
def add_action_deactivate_hypervisor(self, hypervisor):
|
||||
"""Add an action for hypervisor deactivation into the solution.
|
||||
|
||||
:param hypervisor: hypervisor object
|
||||
:return: None
|
||||
"""
|
||||
params = {'state': hyper_state.HypervisorState.OFFLINE.value}
|
||||
self.solution.add_action(
|
||||
action_type='change_nova_service_state',
|
||||
resource_id=hypervisor.uuid,
|
||||
input_parameters=params)
|
||||
self.number_of_released_hypervisors += 1
|
||||
|
||||
def add_migration(self, vm_uuid, src_hypervisor,
|
||||
dst_hypervisor, model):
|
||||
"""Add an action for VM migration into the solution.
|
||||
|
||||
:param vm_uuid: vm uuid
|
||||
:param src_hypervisor: hypervisor object
|
||||
:param dst_hypervisor: hypervisor object
|
||||
:param model: model_root object
|
||||
:return: None
|
||||
"""
|
||||
vm = model.get_vm_from_id(vm_uuid)
|
||||
|
||||
vm_state_str = self.get_state_str(vm.state)
|
||||
if vm_state_str != vm_state.VMState.ACTIVE.value:
|
||||
'''
|
||||
Watcher curently only supports live VM migration and block live
|
||||
VM migration which both requires migrated VM to be active.
|
||||
When supported, the cold migration may be used as a fallback
|
||||
migration mechanism to move non active VMs.
|
||||
'''
|
||||
LOG.error(_LE('Cannot live migrate: vm_uuid=%(vm_uuid)s, '
|
||||
'state=%(vm_state)s.'),
|
||||
vm_uuid=vm_uuid,
|
||||
vm_state=vm_state_str)
|
||||
raise exception.WatcherException
|
||||
|
||||
migration_type = 'live'
|
||||
|
||||
dst_hyper_state_str = self.get_state_str(dst_hypervisor.state)
|
||||
if dst_hyper_state_str == hyper_state.HypervisorState.OFFLINE.value:
|
||||
self.add_action_activate_hypervisor(dst_hypervisor)
|
||||
model.get_mapping().unmap(src_hypervisor, vm)
|
||||
model.get_mapping().map(dst_hypervisor, vm)
|
||||
|
||||
params = {'migration_type': migration_type,
|
||||
'src_hypervisor': src_hypervisor.uuid,
|
||||
'dst_hypervisor': dst_hypervisor.uuid}
|
||||
self.solution.add_action(action_type='migrate',
|
||||
resource_id=vm.uuid,
|
||||
input_parameters=params)
|
||||
self.number_of_migrations += 1
|
||||
|
||||
def deactivate_unused_hypervisors(self, model):
|
||||
"""Generate actions for deactivation of unused hypervisors.
|
||||
|
||||
:param model: model_root object
|
||||
:return: None
|
||||
"""
|
||||
for hypervisor in model.get_all_hypervisors().values():
|
||||
if len(model.get_mapping().get_node_vms(hypervisor)) == 0:
|
||||
self.add_action_deactivate_hypervisor(hypervisor)
|
||||
|
||||
def get_prediction_model(self, model):
|
||||
"""Return a deepcopy of a model representing current cluster state.
|
||||
|
||||
:param model: model_root object
|
||||
:return: model_root object
|
||||
"""
|
||||
return deepcopy(model)
|
||||
|
||||
def get_vm_utilization(self, vm_uuid, model, period=3600, aggr='avg'):
|
||||
"""Collect cpu, ram and disk utilization statistics of a VM.
|
||||
|
||||
:param vm_uuid: vm object
|
||||
:param model: model_root object
|
||||
:param period: seconds
|
||||
:param aggr: string
|
||||
:return: dict(cpu(number of vcpus used), ram(MB used), disk(B used))
|
||||
"""
|
||||
if vm_uuid in self.ceilometer_vm_data_cache.keys():
|
||||
return self.ceilometer_vm_data_cache.get(vm_uuid)
|
||||
|
||||
cpu_util_metric = 'cpu_util'
|
||||
ram_util_metric = 'memory.usage'
|
||||
|
||||
ram_alloc_metric = 'memory'
|
||||
disk_alloc_metric = 'disk.root.size'
|
||||
vm_cpu_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=vm_uuid, meter_name=cpu_util_metric,
|
||||
period=period, aggregate=aggr)
|
||||
vm_cpu_cores = model.get_resource_from_id(
|
||||
resource.ResourceType.cpu_cores).get_capacity(
|
||||
model.get_vm_from_id(vm_uuid))
|
||||
|
||||
if vm_cpu_util:
|
||||
total_cpu_utilization = vm_cpu_cores * (vm_cpu_util / 100.0)
|
||||
else:
|
||||
total_cpu_utilization = vm_cpu_cores
|
||||
|
||||
vm_ram_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=vm_uuid, meter_name=ram_util_metric,
|
||||
period=period, aggregate=aggr)
|
||||
|
||||
if not vm_ram_util:
|
||||
vm_ram_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=vm_uuid, meter_name=ram_alloc_metric,
|
||||
period=period, aggregate=aggr)
|
||||
|
||||
vm_disk_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=vm_uuid, meter_name=disk_alloc_metric,
|
||||
period=period, aggregate=aggr)
|
||||
|
||||
if not vm_ram_util or not vm_disk_util:
|
||||
LOG.error(
|
||||
_LE('No values returned by %(resource_id)s '
|
||||
'for memory.usage or disk.root.size'),
|
||||
resource_id=vm_uuid
|
||||
)
|
||||
raise exception.NoDataFound
|
||||
|
||||
self.ceilometer_vm_data_cache[vm_uuid] = dict(
|
||||
cpu=total_cpu_utilization, ram=vm_ram_util, disk=vm_disk_util)
|
||||
return self.ceilometer_vm_data_cache.get(vm_uuid)
|
||||
|
||||
def get_hypervisor_utilization(self, hypervisor, model, period=3600,
|
||||
aggr='avg'):
|
||||
"""Collect cpu, ram and disk utilization statistics of a hypervisor.
|
||||
|
||||
:param hypervisor: hypervisor object
|
||||
:param model: model_root object
|
||||
:param period: seconds
|
||||
:param aggr: string
|
||||
:return: dict(cpu(number of cores used), ram(MB used), disk(B used))
|
||||
"""
|
||||
hypervisor_vms = model.get_mapping().get_node_vms_from_id(
|
||||
hypervisor.uuid)
|
||||
hypervisor_ram_util = 0
|
||||
hypervisor_disk_util = 0
|
||||
hypervisor_cpu_util = 0
|
||||
for vm_uuid in hypervisor_vms:
|
||||
vm_util = self.get_vm_utilization(vm_uuid, model, period, aggr)
|
||||
hypervisor_cpu_util += vm_util['cpu']
|
||||
hypervisor_ram_util += vm_util['ram']
|
||||
hypervisor_disk_util += vm_util['disk']
|
||||
|
||||
return dict(cpu=hypervisor_cpu_util, ram=hypervisor_ram_util,
|
||||
disk=hypervisor_disk_util)
|
||||
|
||||
def get_hypervisor_capacity(self, hypervisor, model):
|
||||
"""Collect cpu, ram and disk capacity of a hypervisor.
|
||||
|
||||
:param hypervisor: hypervisor object
|
||||
:param model: model_root object
|
||||
:return: dict(cpu(cores), ram(MB), disk(B))
|
||||
"""
|
||||
hypervisor_cpu_capacity = model.get_resource_from_id(
|
||||
resource.ResourceType.cpu_cores).get_capacity(hypervisor)
|
||||
|
||||
hypervisor_disk_capacity = model.get_resource_from_id(
|
||||
resource.ResourceType.disk_capacity).get_capacity(hypervisor)
|
||||
|
||||
hypervisor_ram_capacity = model.get_resource_from_id(
|
||||
resource.ResourceType.memory).get_capacity(hypervisor)
|
||||
return dict(cpu=hypervisor_cpu_capacity, ram=hypervisor_ram_capacity,
|
||||
disk=hypervisor_disk_capacity)
|
||||
|
||||
def get_relative_hypervisor_utilization(self, hypervisor, model):
|
||||
"""Return relative hypervisor utilization (rhu).
|
||||
|
||||
:param hypervisor: hypervisor object
|
||||
:param model: model_root object
|
||||
:return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>}
|
||||
"""
|
||||
rhu = {}
|
||||
util = self.get_hypervisor_utilization(hypervisor, model)
|
||||
cap = self.get_hypervisor_capacity(hypervisor, model)
|
||||
for k in util.keys():
|
||||
rhu[k] = float(util[k]) / float(cap[k])
|
||||
return rhu
|
||||
|
||||
def get_relative_cluster_utilization(self, model):
|
||||
"""Calculate relative cluster utilization (rcu).
|
||||
|
||||
RCU is an average of relative utilizations (rhu) of active hypervisors.
|
||||
:param model: model_root object
|
||||
:return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>}
|
||||
"""
|
||||
hypervisors = model.get_all_hypervisors().values()
|
||||
rcu = {}
|
||||
counters = {}
|
||||
for hypervisor in hypervisors:
|
||||
hyper_state_str = self.get_state_str(hypervisor.state)
|
||||
if hyper_state_str == hyper_state.HypervisorState.ONLINE.value:
|
||||
rhu = self.get_relative_hypervisor_utilization(
|
||||
hypervisor, model)
|
||||
for k in rhu.keys():
|
||||
if k not in rcu:
|
||||
rcu[k] = 0
|
||||
if k not in counters:
|
||||
counters[k] = 0
|
||||
rcu[k] += rhu[k]
|
||||
counters[k] += 1
|
||||
for k in rcu.keys():
|
||||
rcu[k] /= counters[k]
|
||||
return rcu
|
||||
|
||||
def is_overloaded(self, hypervisor, model, cc):
|
||||
"""Indicate whether a hypervisor is overloaded.
|
||||
|
||||
This considers provided resource capacity coefficients (cc).
|
||||
:param hypervisor: hypervisor object
|
||||
:param model: model_root object
|
||||
:param cc: dictionary containing resource capacity coefficients
|
||||
:return: [True, False]
|
||||
"""
|
||||
hypervisor_capacity = self.get_hypervisor_capacity(hypervisor, model)
|
||||
hypervisor_utilization = self.get_hypervisor_utilization(
|
||||
hypervisor, model)
|
||||
metrics = ['cpu']
|
||||
for m in metrics:
|
||||
if hypervisor_utilization[m] > hypervisor_capacity[m] * cc[m]:
|
||||
return True
|
||||
return False
|
||||
|
||||
def vm_fits(self, vm_uuid, hypervisor, model, cc):
|
||||
"""Indicate whether is a hypervisor able to accomodate a VM.
|
||||
|
||||
This considers provided resource capacity coefficients (cc).
|
||||
:param vm_uuid: string
|
||||
:param hypervisor: hypervisor object
|
||||
:param model: model_root object
|
||||
:param cc: dictionary containing resource capacity coefficients
|
||||
:return: [True, False]
|
||||
"""
|
||||
hypervisor_capacity = self.get_hypervisor_capacity(hypervisor, model)
|
||||
hypervisor_utilization = self.get_hypervisor_utilization(
|
||||
hypervisor, model)
|
||||
vm_utilization = self.get_vm_utilization(vm_uuid, model)
|
||||
metrics = ['cpu', 'ram', 'disk']
|
||||
for m in metrics:
|
||||
if (vm_utilization[m] + hypervisor_utilization[m] >
|
||||
hypervisor_capacity[m] * cc[m]):
|
||||
return False
|
||||
return True
|
||||
|
||||
def optimize_solution(self, model):
|
||||
"""Optimize solution.
|
||||
|
||||
This is done by eliminating unnecessary or circular set of migrations
|
||||
which can be replaced by a more efficient solution.
|
||||
e.g.:
|
||||
* A->B, B->C => replace migrations A->B, B->C with
|
||||
a single migration A->C as both solution result in
|
||||
VM running on hypervisor C which can be achieved with
|
||||
one migration instead of two.
|
||||
* A->B, B->A => remove A->B and B->A as they do not result
|
||||
in a new VM placement.
|
||||
|
||||
:param model: model_root object
|
||||
"""
|
||||
migrate_actions = (
|
||||
a for a in self.solution.actions if a[
|
||||
'action_type'] == 'migrate')
|
||||
vm_to_be_migrated = (a['input_parameters']['resource_id']
|
||||
for a in migrate_actions)
|
||||
vm_uuids = list(set(vm_to_be_migrated))
|
||||
for vm_uuid in vm_uuids:
|
||||
actions = list(
|
||||
a for a in self.solution.actions if a[
|
||||
'input_parameters'][
|
||||
'resource_id'] == vm_uuid)
|
||||
if len(actions) > 1:
|
||||
src = actions[0]['input_parameters']['src_hypervisor']
|
||||
dst = actions[-1]['input_parameters']['dst_hypervisor']
|
||||
for a in actions:
|
||||
self.solution.actions.remove(a)
|
||||
self.number_of_migrations -= 1
|
||||
if src != dst:
|
||||
self.add_migration(vm_uuid, src, dst, model)
|
||||
|
||||
def offload_phase(self, model, cc):
|
||||
"""Perform offloading phase.
|
||||
|
||||
This considers provided resource capacity coefficients.
|
||||
Offload phase performing first-fit based bin packing to offload
|
||||
overloaded hypervisors. This is done in a fashion of moving
|
||||
the least CPU utilized VM first as live migration these
|
||||
generaly causes less troubles. This phase results in a cluster
|
||||
with no overloaded hypervisors.
|
||||
* This phase is be able to activate turned off hypervisors (if needed
|
||||
and any available) in the case of the resource capacity provided by
|
||||
active hypervisors is not able to accomodate all the load.
|
||||
As the offload phase is later followed by the consolidation phase,
|
||||
the hypervisor activation in this phase doesn't necessarily results
|
||||
in more activated hypervisors in the final solution.
|
||||
|
||||
:param model: model_root object
|
||||
:param cc: dictionary containing resource capacity coefficients
|
||||
"""
|
||||
sorted_hypervisors = sorted(
|
||||
model.get_all_hypervisors().values(),
|
||||
key=lambda x: self.get_hypervisor_utilization(x, model)['cpu'])
|
||||
for hypervisor in reversed(sorted_hypervisors):
|
||||
if self.is_overloaded(hypervisor, model, cc):
|
||||
for vm in sorted(model.get_mapping().get_node_vms(hypervisor),
|
||||
key=lambda x: self.get_vm_utilization(
|
||||
x, model)['cpu']):
|
||||
for dst_hypervisor in reversed(sorted_hypervisors):
|
||||
if self.vm_fits(vm, dst_hypervisor, model, cc):
|
||||
self.add_migration(vm, hypervisor,
|
||||
dst_hypervisor, model)
|
||||
break
|
||||
if not self.is_overloaded(hypervisor, model, cc):
|
||||
break
|
||||
|
||||
def consolidation_phase(self, model, cc):
|
||||
"""Perform consolidation phase.
|
||||
|
||||
This considers provided resource capacity coefficients.
|
||||
Consolidation phase performing first-fit based bin packing.
|
||||
First, hypervisors with the lowest cpu utilization are consolidated
|
||||
by moving their load to hypervisors with the highest cpu utilization
|
||||
which can accomodate the load. In this phase the most cpu utilizied
|
||||
VMs are prioritizied as their load is more difficult to accomodate
|
||||
in the system than less cpu utilizied VMs which can be later used
|
||||
to fill smaller CPU capacity gaps.
|
||||
|
||||
:param model: model_root object
|
||||
:param cc: dictionary containing resource capacity coefficients
|
||||
"""
|
||||
sorted_hypervisors = sorted(
|
||||
model.get_all_hypervisors().values(),
|
||||
key=lambda x: self.get_hypervisor_utilization(x, model)['cpu'])
|
||||
asc = 0
|
||||
for hypervisor in sorted_hypervisors:
|
||||
vms = sorted(model.get_mapping().get_node_vms(hypervisor),
|
||||
key=lambda x: self.get_vm_utilization(x,
|
||||
model)['cpu'])
|
||||
for vm in reversed(vms):
|
||||
dsc = len(sorted_hypervisors) - 1
|
||||
for dst_hypervisor in reversed(sorted_hypervisors):
|
||||
if asc >= dsc:
|
||||
break
|
||||
if self.vm_fits(vm, dst_hypervisor, model, cc):
|
||||
self.add_migration(vm, hypervisor,
|
||||
dst_hypervisor, model)
|
||||
break
|
||||
dsc -= 1
|
||||
asc += 1
|
||||
|
||||
def execute(self, original_model):
|
||||
"""Execute strategy.
|
||||
|
||||
This strategy produces a solution resulting in more
|
||||
efficient utilization of cluster resources using following
|
||||
four phases:
|
||||
* Offload phase - handling over-utilized resources
|
||||
* Consolidation phase - handling under-utilized resources
|
||||
* Solution optimization - reducing number of migrations
|
||||
* Deactivation of unused hypervisors
|
||||
|
||||
:param original_model: root_model object
|
||||
"""
|
||||
LOG.info(_LI('Executing Smart Strategy'))
|
||||
model = self.get_prediction_model(original_model)
|
||||
rcu = self.get_relative_cluster_utilization(model)
|
||||
self.ceilometer_vm_data_cache = dict()
|
||||
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
|
||||
# Offloading phase
|
||||
self.offload_phase(model, cc)
|
||||
|
||||
# Consolidation phase
|
||||
self.consolidation_phase(model, cc)
|
||||
|
||||
# Optimize solution
|
||||
self.optimize_solution(model)
|
||||
|
||||
# Deactivate unused hypervisors
|
||||
self.deactivate_unused_hypervisors(model)
|
||||
|
||||
rcu_after = self.get_relative_cluster_utilization(model)
|
||||
info = {
|
||||
'number_of_migrations': self.number_of_migrations,
|
||||
'number_of_released_hypervisors':
|
||||
self.number_of_released_hypervisors,
|
||||
'relative_cluster_utilization_before': str(rcu),
|
||||
'relative_cluster_utilization_after': str(rcu_after)
|
||||
}
|
||||
|
||||
LOG.debug(info)
|
||||
|
||||
self.solution.model = model
|
||||
self.solution.efficacy = rcu_after['cpu']
|
||||
|
||||
return self.solution
|
||||
@@ -7,16 +7,28 @@
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: python-watcher 0.23.3.dev2\n"
|
||||
"Project-Id-Version: python-watcher 0.25.1.dev3\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2016-02-09 09:07+0100\n"
|
||||
"POT-Creation-Date: 2016-03-30 10:10+0200\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 2.1.1\n"
|
||||
"Generated-By: Babel 2.2.0\n"
|
||||
|
||||
#: watcher/api/controllers/v1/action.py:364
|
||||
msgid "Cannot create an action directly"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/api/controllers/v1/action.py:388
|
||||
msgid "Cannot modify an action directly"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/api/controllers/v1/action.py:424
|
||||
msgid "Cannot delete an action directly"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/api/controllers/v1/action_plan.py:102
|
||||
#, python-format
|
||||
@@ -65,7 +77,12 @@ msgstr ""
|
||||
msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/api/controllers/v1/utils.py:57
|
||||
#: watcher/api/controllers/v1/utils.py:58
|
||||
#, python-format
|
||||
msgid "Invalid filter: %s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/api/controllers/v1/utils.py:65
|
||||
#, python-format
|
||||
msgid "Adding a new attribute (%s) to the root of the resource is not allowed"
|
||||
msgstr ""
|
||||
@@ -84,30 +101,37 @@ msgstr ""
|
||||
msgid "Error parsing HTTP response: %s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/applier/actions/change_nova_service_state.py:69
|
||||
#: watcher/applier/actions/change_nova_service_state.py:90
|
||||
msgid "The target state is not defined"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/applier/actions/migration.py:43
|
||||
#: watcher/applier/actions/migration.py:71
|
||||
msgid "The parameter resource_id is invalid."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/applier/actions/migration.py:86
|
||||
#: watcher/applier/actions/migration.py:124
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Unexpected error occured. Migration failed forinstance %s. Leaving "
|
||||
"instance on previous host."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/applier/actions/migration.py:140
|
||||
#, python-format
|
||||
msgid "Migration of type %(migration_type)s is not supported."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/applier/workflow_engine/default.py:128
|
||||
#: watcher/applier/workflow_engine/default.py:129
|
||||
#, python-format
|
||||
msgid "The WorkFlow Engine has failed to execute the action %s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/applier/workflow_engine/default.py:146
|
||||
#: watcher/applier/workflow_engine/default.py:147
|
||||
#, python-format
|
||||
msgid "Revert action %s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/applier/workflow_engine/default.py:152
|
||||
#: watcher/applier/workflow_engine/default.py:153
|
||||
msgid "Oops! We need disaster recover plan"
|
||||
msgstr ""
|
||||
|
||||
@@ -280,58 +304,72 @@ msgstr ""
|
||||
msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:239
|
||||
#: watcher/common/exception.py:238
|
||||
#, python-format
|
||||
msgid "Workflow execution error: %(error)s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:242
|
||||
msgid "Illegal argument"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:243
|
||||
#: watcher/common/exception.py:246
|
||||
msgid "No such metric"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:247
|
||||
#: watcher/common/exception.py:250
|
||||
msgid "No rows were returned"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:251
|
||||
#: watcher/common/exception.py:254
|
||||
#, python-format
|
||||
msgid "%(client)s connection failed. Reason: %(reason)s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:255
|
||||
#: watcher/common/exception.py:258
|
||||
msgid "'Keystone API endpoint is missing''"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:259
|
||||
#: watcher/common/exception.py:262
|
||||
msgid "The list of hypervisor(s) in the cluster is empty"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:263
|
||||
#: watcher/common/exception.py:266
|
||||
msgid "The metrics resource collector is not defined"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:267
|
||||
#: watcher/common/exception.py:270
|
||||
msgid "the cluster state is not defined"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:273
|
||||
#: watcher/common/exception.py:276
|
||||
#, python-format
|
||||
msgid "The instance '%(name)s' is not found"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:277
|
||||
#: watcher/common/exception.py:280
|
||||
msgid "The hypervisor is not found"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:281
|
||||
#: watcher/common/exception.py:284
|
||||
#, python-format
|
||||
msgid "Error loading plugin '%(name)s'"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:285
|
||||
#: watcher/common/exception.py:288
|
||||
#, python-format
|
||||
msgid "The identifier '%(name)s' is a reserved word"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:292
|
||||
#, python-format
|
||||
msgid "The %(name)s resource %(id)s is not soft deleted"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/exception.py:296
|
||||
msgid "Limit should be positive"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/common/service.py:83
|
||||
#, python-format
|
||||
msgid "Created RPC server for service %(service)s on host %(host)s."
|
||||
@@ -374,25 +412,102 @@ msgstr ""
|
||||
msgid "Messaging configuration error"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/sqlalchemy/api.py:256
|
||||
#: watcher/db/purge.py:50
|
||||
msgid "Audit Templates"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:51
|
||||
msgid "Audits"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:52
|
||||
msgid "Action Plans"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:53
|
||||
msgid "Actions"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:100
|
||||
msgid "Total"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:158
|
||||
msgid "Audit Template"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:206
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Orphans found:\n"
|
||||
"%s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:265
|
||||
#, python-format
|
||||
msgid "There are %(count)d objects set for deletion. Continue? [y/N]"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:272
|
||||
#, python-format
|
||||
msgid ""
|
||||
"The number of objects (%(num)s) to delete from the database exceeds the "
|
||||
"maximum number of objects (%(max_number)s) specified."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:277
|
||||
msgid "Do you want to delete objects up to the specified maximum number? [y/N]"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:340
|
||||
msgid "Deleting..."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:346
|
||||
msgid "Starting purge command"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:356
|
||||
msgid " (orphans excluded)"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:357
|
||||
msgid " (may include orphans)"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:360 watcher/db/purge.py:361
|
||||
#, python-format
|
||||
msgid "Purge results summary%s:"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:364
|
||||
#, python-format
|
||||
msgid "Here below is a table containing the objects that can be purged%s:"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/purge.py:369
|
||||
msgid "Purge process completed"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/sqlalchemy/api.py:362
|
||||
msgid ""
|
||||
"Multiple audit templates exist with the same name. Please use the audit "
|
||||
"template uuid instead"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/sqlalchemy/api.py:278
|
||||
#: watcher/db/sqlalchemy/api.py:384
|
||||
msgid "Cannot overwrite UUID for an existing Audit Template."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/sqlalchemy/api.py:388
|
||||
#: watcher/db/sqlalchemy/api.py:495
|
||||
msgid "Cannot overwrite UUID for an existing Audit."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/sqlalchemy/api.py:480
|
||||
#: watcher/db/sqlalchemy/api.py:588
|
||||
msgid "Cannot overwrite UUID for an existing Action."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/db/sqlalchemy/api.py:590
|
||||
#: watcher/db/sqlalchemy/api.py:699
|
||||
msgid "Cannot overwrite UUID for an existing Action Plan."
|
||||
msgstr ""
|
||||
|
||||
@@ -407,7 +522,7 @@ msgstr ""
|
||||
msgid "'obj' argument type is not valid"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/planner/default.py:72
|
||||
#: watcher/decision_engine/planner/default.py:79
|
||||
msgid "The action plan is empty"
|
||||
msgstr ""
|
||||
|
||||
@@ -416,38 +531,57 @@ msgstr ""
|
||||
msgid "Incorrect mapping: could not find associated strategy for '%s'"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:269
|
||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:316
|
||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:288
|
||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:335
|
||||
#, python-format
|
||||
msgid "No values returned by %(resource_id)s for %(metric_name)s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:426
|
||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:448
|
||||
msgid "Initializing Sercon Consolidation"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:470
|
||||
#: watcher/decision_engine/strategy/strategies/basic_consolidation.py:492
|
||||
msgid "The workloads of the compute nodes of the cluster is zero"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:127
|
||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:147
|
||||
#, python-format
|
||||
msgid "%s: no outlet temp data"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:151
|
||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:172
|
||||
#, python-format
|
||||
msgid "VM not active, skipped: %s"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:208
|
||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:230
|
||||
msgid "No hosts under outlet temp threshold found"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:231
|
||||
#: watcher/decision_engine/strategy/strategies/outlet_temp_control.py:253
|
||||
msgid "No proper target host could be found"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py:104
|
||||
#, python-format
|
||||
msgid "Unexpexted resource state type, state=%(state)s, state_type=%(st)s."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py:156
|
||||
#, python-format
|
||||
msgid "Cannot live migrate: vm_uuid=%(vm_uuid)s, state=%(vm_state)s."
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py:240
|
||||
#, python-format
|
||||
msgid "No values returned by %(resource_id)s for memory.usage or disk.root.size"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py:489
|
||||
msgid "Executing Smart Strategy"
|
||||
msgstr ""
|
||||
|
||||
#: watcher/objects/base.py:70
|
||||
#, python-format
|
||||
msgid "Error setting %(attr)s"
|
||||
|
||||
@@ -22,13 +22,13 @@ from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from watcher.common import ceilometer_helper
|
||||
|
||||
from watcher.metrics_engine.cluster_history import api
|
||||
from watcher.metrics_engine.cluster_history import base
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class CeilometerClusterHistory(api.BaseClusterHistory):
|
||||
class CeilometerClusterHistory(base.BaseClusterHistory):
|
||||
def __init__(self, osc=None):
|
||||
""":param osc: an OpenStackClients instance"""
|
||||
super(CeilometerClusterHistory, self).__init__()
|
||||
|
||||
@@ -23,12 +23,12 @@ from watcher.decision_engine.model import hypervisor as obj_hypervisor
|
||||
from watcher.decision_engine.model import model_root
|
||||
from watcher.decision_engine.model import resource
|
||||
from watcher.decision_engine.model import vm as obj_vm
|
||||
from watcher.metrics_engine.cluster_model_collector import api
|
||||
from watcher.metrics_engine.cluster_model_collector import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class NovaClusterModelCollector(api.BaseClusterModelCollector):
|
||||
class NovaClusterModelCollector(base.BaseClusterModelCollector):
|
||||
def __init__(self, wrapper):
|
||||
super(NovaClusterModelCollector, self).__init__()
|
||||
self.wrapper = wrapper
|
||||
@@ -40,9 +40,11 @@ class NovaClusterModelCollector(api.BaseClusterModelCollector):
|
||||
mem = resource.Resource(resource.ResourceType.memory)
|
||||
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
|
||||
disk = resource.Resource(resource.ResourceType.disk)
|
||||
disk_capacity = resource.Resource(resource.ResourceType.disk_capacity)
|
||||
cluster.create_resource(mem)
|
||||
cluster.create_resource(num_cores)
|
||||
cluster.create_resource(disk)
|
||||
cluster.create_resource(disk_capacity)
|
||||
|
||||
flavor_cache = {}
|
||||
hypervisors = self.wrapper.get_hypervisors_list()
|
||||
@@ -55,6 +57,7 @@ class NovaClusterModelCollector(api.BaseClusterModelCollector):
|
||||
# set capacity
|
||||
mem.set_capacity(hypervisor, h.memory_mb)
|
||||
disk.set_capacity(hypervisor, h.free_disk_gb)
|
||||
disk_capacity.set_capacity(hypervisor, h.local_gb)
|
||||
num_cores.set_capacity(hypervisor, h.vcpus)
|
||||
hypervisor.state = h.state
|
||||
hypervisor.status = h.status
|
||||
|
||||
@@ -47,7 +47,6 @@ contain a list of extra parameters related to the
|
||||
provided as a list of key-value pairs.
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils
|
||||
from watcher.db import api as dbapi
|
||||
@@ -204,9 +203,6 @@ class AuditTemplate(base.WatcherObject):
|
||||
"""
|
||||
|
||||
values = self.obj_get_changes()
|
||||
goal = values['goal']
|
||||
if goal not in cfg.CONF.watcher_goals.goals.keys():
|
||||
raise exception.InvalidGoal(goal=goal)
|
||||
db_audit_template = self.dbapi.create_audit_template(values)
|
||||
self._from_db_object(self, db_audit_template)
|
||||
|
||||
|
||||
@@ -44,7 +44,7 @@ class TestApiUtilsValidScenarios(base.TestCase):
|
||||
cfg.CONF.set_override("max_limit", self.max_limit, group="api",
|
||||
enforce_type=True)
|
||||
actual_limit = v1_utils.validate_limit(self.limit)
|
||||
self.assertEqual(actual_limit, self.expected)
|
||||
self.assertEqual(self.expected, actual_limit)
|
||||
|
||||
|
||||
class TestApiUtilsInvalidScenarios(base.TestCase):
|
||||
|
||||
@@ -14,13 +14,11 @@ import datetime
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import timeutils
|
||||
from wsme import types as wtypes
|
||||
|
||||
from watcher.api.controllers.v1 import action as api_action
|
||||
from watcher.common import utils
|
||||
from watcher.db import api as db_api
|
||||
from watcher import objects
|
||||
from watcher.tests.api import base as api_base
|
||||
from watcher.tests.api import utils as api_utils
|
||||
from watcher.tests import base
|
||||
@@ -310,16 +308,15 @@ class TestListAction(api_base.FunctionalTest):
|
||||
self.assertEqual(len(ap2_action_list), len(response['actions']))
|
||||
|
||||
# We deleted them so that's normal
|
||||
self.assertEqual(
|
||||
[act for act in response['actions']
|
||||
if act['action_plan_uuid'] == action_plan1.uuid],
|
||||
[])
|
||||
self.assertEqual([],
|
||||
[act for act in response['actions']
|
||||
if act['action_plan_uuid'] == action_plan1.uuid])
|
||||
|
||||
# Here are the 2 actions left
|
||||
self.assertEqual(
|
||||
set([act.as_dict()['uuid'] for act in ap2_action_list]),
|
||||
set([act['uuid'] for act in response['actions']
|
||||
if act['action_plan_uuid'] == action_plan2.uuid]),
|
||||
set([act.as_dict()['uuid'] for act in ap2_action_list]))
|
||||
if act['action_plan_uuid'] == action_plan2.uuid]))
|
||||
|
||||
def test_many_with_next_uuid(self):
|
||||
action_list = []
|
||||
@@ -443,7 +440,7 @@ class TestPatch(api_base.FunctionalTest):
|
||||
return action
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_replace_ok(self, mock_utcnow):
|
||||
def test_patch_not_allowed(self, mock_utcnow):
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
@@ -454,104 +451,12 @@ class TestPatch(api_base.FunctionalTest):
|
||||
response = self.patch_json(
|
||||
'/actions/%s' % self.action.uuid,
|
||||
[{'path': '/state', 'value': new_state,
|
||||
'op': 'replace'}])
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
|
||||
response = self.get_json('/actions/%s' % self.action.uuid)
|
||||
self.assertEqual(new_state, response['state'])
|
||||
return_updated_at = timeutils.parse_isotime(
|
||||
response['updated_at']).replace(tzinfo=None)
|
||||
self.assertEqual(test_time, return_updated_at)
|
||||
|
||||
def test_replace_non_existent_action(self):
|
||||
response = self.patch_json('/actions/%s' % utils.generate_uuid(),
|
||||
[{'path': '/state', 'value': 'SUBMITTED',
|
||||
'op': 'replace'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual(404, response.status_int)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_add_ok(self):
|
||||
new_state = 'SUCCEEDED'
|
||||
response = self.patch_json(
|
||||
'/actions/%s' % self.action.uuid,
|
||||
[{'path': '/state', 'value': new_state, 'op': 'add'}])
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_int)
|
||||
|
||||
response = self.get_json('/actions/%s' % self.action.uuid)
|
||||
self.assertEqual(new_state, response['state'])
|
||||
|
||||
def test_add_non_existent_property(self):
|
||||
response = self.patch_json(
|
||||
'/actions/%s' % self.action.uuid,
|
||||
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
|
||||
'op': 'replace'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(400, response.status_int)
|
||||
self.assertEqual(403, response.status_int)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_remove_ok(self):
|
||||
response = self.get_json('/actions/%s' % self.action.uuid)
|
||||
self.assertIsNotNone(response['state'])
|
||||
|
||||
response = self.patch_json('/actions/%s' % self.action.uuid,
|
||||
[{'path': '/state', 'op': 'remove'}])
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
|
||||
response = self.get_json('/actions/%s' % self.action.uuid)
|
||||
self.assertIsNone(response['state'])
|
||||
|
||||
def test_remove_uuid(self):
|
||||
response = self.patch_json('/actions/%s' % self.action.uuid,
|
||||
[{'path': '/uuid', 'op': 'remove'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual(400, response.status_int)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_remove_non_existent_property(self):
|
||||
response = self.patch_json(
|
||||
'/actions/%s' % self.action.uuid,
|
||||
[{'path': '/non-existent', 'op': 'remove'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual(400, response.status_code)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
|
||||
# class TestDelete(api_base.FunctionalTest):
|
||||
|
||||
# def setUp(self):
|
||||
# super(TestDelete, self).setUp()
|
||||
# self.action = obj_utils.create_test_action(self.context, next=None)
|
||||
# p = mock.patch.object(db_api.Connection, 'destroy_action')
|
||||
# self.mock_action_delete = p.start()
|
||||
# self.mock_action_delete.side_effect =
|
||||
# self._simulate_rpc_action_delete
|
||||
# self.addCleanup(p.stop)
|
||||
|
||||
# def _simulate_rpc_action_delete(self, action_uuid):
|
||||
# action = objects.Action.get_by_uuid(self.context, action_uuid)
|
||||
# action.destroy()
|
||||
|
||||
# def test_delete_action(self):
|
||||
# self.delete('/actions/%s' % self.action.uuid)
|
||||
# response = self.get_json('/actions/%s' % self.action.uuid,
|
||||
# expect_errors=True)
|
||||
# self.assertEqual(404, response.status_int)
|
||||
# self.assertEqual('application/json', response.content_type)
|
||||
# self.assertTrue(response.json['error_message'])
|
||||
|
||||
# def test_delete_action_not_found(self):
|
||||
# uuid = utils.generate_uuid()
|
||||
# response = self.delete('/actions/%s' % uuid, expect_errors=True)
|
||||
# self.assertEqual(404, response.status_int)
|
||||
# self.assertEqual('application/json', response.content_type)
|
||||
# self.assertTrue(response.json['error_message'])
|
||||
|
||||
class TestDelete(api_base.FunctionalTest):
|
||||
|
||||
@@ -569,26 +474,11 @@ class TestDelete(api_base.FunctionalTest):
|
||||
return action
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_delete_action(self, mock_utcnow):
|
||||
def test_delete_action_not_allowed(self, mock_utcnow):
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
self.delete('/actions/%s' % self.action.uuid)
|
||||
response = self.get_json('/actions/%s' % self.action.uuid,
|
||||
expect_errors=True)
|
||||
self.assertEqual(404, response.status_int)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
self.context.show_deleted = True
|
||||
action = objects.Action.get_by_uuid(self.context, self.action.uuid)
|
||||
|
||||
return_deleted_at = timeutils.strtime(action['deleted_at'])
|
||||
self.assertEqual(timeutils.strtime(test_time), return_deleted_at)
|
||||
self.assertEqual(action['state'], 'DELETED')
|
||||
|
||||
def test_delete_action_not_found(self):
|
||||
uuid = utils.generate_uuid()
|
||||
response = self.delete('/actions/%s' % uuid, expect_errors=True)
|
||||
self.assertEqual(404, response.status_int)
|
||||
response = self.delete('/actions/%s' % self.action.uuid,
|
||||
expect_errors=True)
|
||||
self.assertEqual(403, response.status_int)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
@@ -334,7 +334,7 @@ class TestDelete(api_base.FunctionalTest):
|
||||
self.assertTrue(ap_response.json['error_message'])
|
||||
|
||||
# Nor does the action
|
||||
self.assertEqual(len(acts_response['actions']), 0)
|
||||
self.assertEqual(0, len(acts_response['actions']))
|
||||
self.assertEqual(404, act_response.status_int)
|
||||
self.assertEqual('application/json', act_response.content_type)
|
||||
self.assertTrue(act_response.json['error_message'])
|
||||
|
||||
@@ -237,6 +237,8 @@ class TestPatch(api_base.FunctionalTest):
|
||||
self.mock_audit_template_update = p.start()
|
||||
self.mock_audit_template_update.side_effect = \
|
||||
self._simulate_rpc_audit_template_update
|
||||
cfg.CONF.set_override('goals', {"DUMMY": "DUMMY", "BASIC": "BASIC"},
|
||||
group='watcher_goals', enforce_type=True)
|
||||
self.addCleanup(p.stop)
|
||||
|
||||
def _simulate_rpc_audit_template_update(self, audit_template):
|
||||
@@ -248,7 +250,7 @@ class TestPatch(api_base.FunctionalTest):
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
new_goal = 'BALANCE_LOAD'
|
||||
new_goal = "BASIC"
|
||||
response = self.get_json(
|
||||
'/audit_templates/%s' % self.audit_template.uuid)
|
||||
self.assertNotEqual(new_goal, response['goal'])
|
||||
@@ -272,7 +274,7 @@ class TestPatch(api_base.FunctionalTest):
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
new_goal = 'BALANCE_LOAD'
|
||||
new_goal = 'BASIC'
|
||||
response = self.get_json(urlparse.quote(
|
||||
'/audit_templates/%s' % self.audit_template.name))
|
||||
self.assertNotEqual(new_goal, response['goal'])
|
||||
@@ -294,15 +296,29 @@ class TestPatch(api_base.FunctionalTest):
|
||||
def test_replace_non_existent_audit_template(self):
|
||||
response = self.patch_json(
|
||||
'/audit_templates/%s' % utils.generate_uuid(),
|
||||
[{'path': '/goal', 'value': 'BALANCE_LOAD',
|
||||
[{'path': '/goal', 'value': 'DUMMY',
|
||||
'op': 'replace'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual(404, response.status_int)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_replace_invalid_goal(self):
|
||||
with mock.patch.object(
|
||||
self.dbapi,
|
||||
'update_audit_template',
|
||||
wraps=self.dbapi.update_audit_template
|
||||
) as cn_mock:
|
||||
response = self.patch_json(
|
||||
'/audit_templates/%s' % self.audit_template.uuid,
|
||||
[{'path': '/goal', 'value': 'INVALID_GOAL',
|
||||
'op': 'replace'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual(400, response.status_int)
|
||||
assert not cn_mock.called
|
||||
|
||||
def test_add_ok(self):
|
||||
new_goal = 'BALANCE_LOAD'
|
||||
new_goal = 'DUMMY'
|
||||
response = self.patch_json(
|
||||
'/audit_templates/%s' % self.audit_template.uuid,
|
||||
[{'path': '/goal', 'value': new_goal, 'op': 'add'}])
|
||||
|
||||
@@ -564,7 +564,7 @@ class TestDelete(api_base.FunctionalTest):
|
||||
|
||||
return_deleted_at = timeutils.strtime(audit['deleted_at'])
|
||||
self.assertEqual(timeutils.strtime(test_time), return_deleted_at)
|
||||
self.assertEqual(audit['state'], 'DELETED')
|
||||
self.assertEqual('DELETED', audit['state'])
|
||||
|
||||
def test_delete_audit_not_found(self):
|
||||
uuid = utils.generate_uuid()
|
||||
|
||||
@@ -152,7 +152,7 @@ class TestMigration(base.TestCase):
|
||||
exc = self.assertRaises(
|
||||
exception.InstanceNotFound, self.action.execute)
|
||||
self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID)
|
||||
self.assertEqual(exc.kwargs["name"], self.INSTANCE_UUID)
|
||||
self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"])
|
||||
|
||||
def test_execute_live_migration(self):
|
||||
self.m_helper.find_instance.return_value = self.INSTANCE_UUID
|
||||
|
||||
@@ -38,4 +38,4 @@ class TestTriggerActionPlan(base.TestCase):
|
||||
action_plan_uuid = utils.generate_uuid()
|
||||
expected_uuid = self.endpoint.launch_action_plan(self.context,
|
||||
action_plan_uuid)
|
||||
self.assertEqual(action_plan_uuid, expected_uuid)
|
||||
self.assertEqual(expected_uuid, action_plan_uuid)
|
||||
|
||||
@@ -35,5 +35,5 @@ class TestApplierManager(base.TestCase):
|
||||
def test_connect(self, m_messaging, m_thread):
|
||||
self.applier.connect()
|
||||
self.applier.join()
|
||||
self.assertEqual(m_messaging.call_count, 2)
|
||||
self.assertEqual(m_thread.call_count, 1)
|
||||
self.assertEqual(2, m_messaging.call_count)
|
||||
self.assertEqual(1, m_thread.call_count)
|
||||
|
||||
@@ -25,6 +25,7 @@ from stevedore import extension
|
||||
|
||||
from watcher.applier.actions import base as abase
|
||||
from watcher.applier.workflow_engine import default as tflow
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils
|
||||
from watcher import objects
|
||||
from watcher.tests.db import base
|
||||
@@ -63,10 +64,15 @@ class TestDefaultWorkFlowEngine(base.DbTestCase):
|
||||
context=self.context,
|
||||
applier_manager=mock.MagicMock())
|
||||
|
||||
def test_execute(self):
|
||||
@mock.patch('taskflow.engines.load')
|
||||
@mock.patch('taskflow.patterns.graph_flow.Flow.link')
|
||||
def test_execute(self, graph_flow, engines):
|
||||
actions = mock.MagicMock()
|
||||
result = self.engine.execute(actions)
|
||||
self.assertEqual(result, True)
|
||||
try:
|
||||
self.engine.execute(actions)
|
||||
self.assertTrue(engines.called)
|
||||
except Exception as exc:
|
||||
self.fail(exc)
|
||||
|
||||
def create_action(self, action_type, parameters, next):
|
||||
action = {
|
||||
@@ -85,70 +91,90 @@ class TestDefaultWorkFlowEngine(base.DbTestCase):
|
||||
|
||||
def check_action_state(self, action, expected_state):
|
||||
to_check = objects.Action.get_by_uuid(self.context, action.uuid)
|
||||
self.assertEqual(to_check.state, expected_state)
|
||||
self.assertEqual(expected_state, to_check.state)
|
||||
|
||||
def check_actions_state(self, actions, expected_state):
|
||||
for a in actions:
|
||||
self.check_action_state(a, expected_state)
|
||||
|
||||
def test_execute_with_no_actions(self):
|
||||
@mock.patch('taskflow.engines.load')
|
||||
@mock.patch('taskflow.patterns.graph_flow.Flow.link')
|
||||
def test_execute_with_no_actions(self, graph_flow, engines):
|
||||
actions = []
|
||||
result = self.engine.execute(actions)
|
||||
self.assertEqual(result, True)
|
||||
try:
|
||||
self.engine.execute(actions)
|
||||
self.assertFalse(graph_flow.called)
|
||||
self.assertTrue(engines.called)
|
||||
except Exception as exc:
|
||||
self.fail(exc)
|
||||
|
||||
def test_execute_with_one_action(self):
|
||||
actions = [self.create_action("nop", {'message': 'test'}, None)]
|
||||
result = self.engine.execute(actions)
|
||||
self.assertEqual(result, True)
|
||||
self.check_actions_state(actions, objects.action.State.SUCCEEDED)
|
||||
try:
|
||||
self.engine.execute(actions)
|
||||
self.check_actions_state(actions, objects.action.State.SUCCEEDED)
|
||||
|
||||
except Exception as exc:
|
||||
self.fail(exc)
|
||||
|
||||
def test_execute_with_two_actions(self):
|
||||
actions = []
|
||||
next = self.create_action("sleep", {'duration': 0.0}, None)
|
||||
first = self.create_action("nop", {'message': 'test'}, next.id)
|
||||
second = self.create_action("sleep", {'duration': 0.0}, None)
|
||||
first = self.create_action("nop", {'message': 'test'}, second.id)
|
||||
|
||||
actions.append(first)
|
||||
actions.append(next)
|
||||
actions.append(second)
|
||||
|
||||
result = self.engine.execute(actions)
|
||||
self.assertEqual(result, True)
|
||||
self.check_actions_state(actions, objects.action.State.SUCCEEDED)
|
||||
try:
|
||||
self.engine.execute(actions)
|
||||
self.check_actions_state(actions, objects.action.State.SUCCEEDED)
|
||||
|
||||
except Exception as exc:
|
||||
self.fail(exc)
|
||||
|
||||
def test_execute_with_three_actions(self):
|
||||
actions = []
|
||||
next2 = self.create_action("nop", {'message': 'next'}, None)
|
||||
next = self.create_action("sleep", {'duration': 0.0}, next2.id)
|
||||
first = self.create_action("nop", {'message': 'hello'}, next.id)
|
||||
|
||||
third = self.create_action("nop", {'message': 'next'}, None)
|
||||
second = self.create_action("sleep", {'duration': 0.0}, third.id)
|
||||
first = self.create_action("nop", {'message': 'hello'}, second.id)
|
||||
|
||||
self.check_action_state(first, objects.action.State.PENDING)
|
||||
self.check_action_state(next, objects.action.State.PENDING)
|
||||
self.check_action_state(next2, objects.action.State.PENDING)
|
||||
self.check_action_state(second, objects.action.State.PENDING)
|
||||
self.check_action_state(third, objects.action.State.PENDING)
|
||||
|
||||
actions.append(first)
|
||||
actions.append(next)
|
||||
actions.append(next2)
|
||||
actions.append(second)
|
||||
actions.append(third)
|
||||
|
||||
result = self.engine.execute(actions)
|
||||
self.assertEqual(result, True)
|
||||
self.check_actions_state(actions, objects.action.State.SUCCEEDED)
|
||||
try:
|
||||
self.engine.execute(actions)
|
||||
self.check_actions_state(actions, objects.action.State.SUCCEEDED)
|
||||
|
||||
except Exception as exc:
|
||||
self.fail(exc)
|
||||
|
||||
def test_execute_with_exception(self):
|
||||
actions = []
|
||||
next2 = self.create_action("no_exist", {'message': 'next'}, None)
|
||||
next = self.create_action("sleep", {'duration': 0.0}, next2.id)
|
||||
first = self.create_action("nop", {'message': 'hello'}, next.id)
|
||||
|
||||
third = self.create_action("no_exist", {'message': 'next'}, None)
|
||||
second = self.create_action("sleep", {'duration': 0.0}, third.id)
|
||||
first = self.create_action("nop", {'message': 'hello'}, second.id)
|
||||
|
||||
self.check_action_state(first, objects.action.State.PENDING)
|
||||
self.check_action_state(next, objects.action.State.PENDING)
|
||||
self.check_action_state(next2, objects.action.State.PENDING)
|
||||
actions.append(first)
|
||||
actions.append(next)
|
||||
actions.append(next2)
|
||||
self.check_action_state(second, objects.action.State.PENDING)
|
||||
self.check_action_state(third, objects.action.State.PENDING)
|
||||
|
||||
actions.append(first)
|
||||
actions.append(second)
|
||||
actions.append(third)
|
||||
|
||||
self.assertRaises(exception.WorkflowExecutionException,
|
||||
self.engine.execute, actions)
|
||||
|
||||
result = self.engine.execute(actions)
|
||||
self.assertEqual(result, False)
|
||||
self.check_action_state(first, objects.action.State.SUCCEEDED)
|
||||
self.check_action_state(next, objects.action.State.SUCCEEDED)
|
||||
self.check_action_state(next2, objects.action.State.FAILED)
|
||||
self.check_action_state(second, objects.action.State.SUCCEEDED)
|
||||
self.check_action_state(third, objects.action.State.FAILED)
|
||||
|
||||
@mock.patch("watcher.common.loader.default.DriverManager")
|
||||
def test_execute_with_action_exception(self, m_driver):
|
||||
@@ -161,6 +187,7 @@ class TestDefaultWorkFlowEngine(base.DbTestCase):
|
||||
obj=None),
|
||||
namespace=FakeAction.namespace())
|
||||
actions = [self.create_action("dontcare", {}, None)]
|
||||
result = self.engine.execute(actions)
|
||||
self.assertEqual(result, False)
|
||||
|
||||
self.assertRaises(exception.WorkflowExecutionException,
|
||||
self.engine.execute, actions)
|
||||
self.check_action_state(actions[0], objects.action.State.FAILED)
|
||||
|
||||
@@ -54,7 +54,7 @@ class TestApi(BaseTestCase):
|
||||
def test_run_api_app(self, m_make, m_make_app):
|
||||
m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG)
|
||||
api.main()
|
||||
self.assertEqual(m_make.call_count, 1)
|
||||
self.assertEqual(1, m_make.call_count)
|
||||
|
||||
@patch("watcher.api.app.pecan.make_app")
|
||||
@patch.object(BaseServer, "serve_forever", Mock())
|
||||
@@ -63,4 +63,4 @@ class TestApi(BaseTestCase):
|
||||
cfg.CONF.set_default("host", "localhost", group="api")
|
||||
m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG)
|
||||
api.main()
|
||||
self.assertEqual(m_make.call_count, 1)
|
||||
self.assertEqual(1, m_make.call_count)
|
||||
|
||||
@@ -47,5 +47,5 @@ class TestApplier(BaseTestCase):
|
||||
@patch.object(ApplierManager, "join")
|
||||
def test_run_applier_app(self, m_connect, m_join):
|
||||
applier.main()
|
||||
self.assertEqual(m_connect.call_count, 1)
|
||||
self.assertEqual(m_join.call_count, 1)
|
||||
self.assertEqual(1, m_connect.call_count)
|
||||
self.assertEqual(1, m_join.call_count)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2015 b<>com
|
||||
# Copyright (c) 2016 b<>com
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,15 +14,18 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from mock import Mock
|
||||
from mock import patch
|
||||
import sys
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from watcher.cmd import dbmanage
|
||||
from watcher.db import migration
|
||||
from watcher.tests.base import TestCase
|
||||
from watcher.db import purge
|
||||
from watcher.tests import base
|
||||
|
||||
|
||||
class TestDBManageRunApp(TestCase):
|
||||
class TestDBManageRunApp(base.TestCase):
|
||||
|
||||
scenarios = (
|
||||
("upgrade", {"command": "upgrade", "expected": "upgrade"}),
|
||||
@@ -32,15 +35,16 @@ class TestDBManageRunApp(TestCase):
|
||||
("version", {"command": "version", "expected": "version"}),
|
||||
("create_schema", {"command": "create_schema",
|
||||
"expected": "create_schema"}),
|
||||
("purge", {"command": "purge", "expected": "purge"}),
|
||||
("no_param", {"command": None, "expected": "upgrade"}),
|
||||
)
|
||||
|
||||
@patch.object(dbmanage, "register_sub_command_opts", Mock())
|
||||
@patch("watcher.cmd.dbmanage.service.prepare_service")
|
||||
@patch("watcher.cmd.dbmanage.sys")
|
||||
@mock.patch.object(dbmanage, "register_sub_command_opts", mock.Mock())
|
||||
@mock.patch("watcher.cmd.dbmanage.service.prepare_service")
|
||||
@mock.patch("watcher.cmd.dbmanage.sys")
|
||||
def test_run_db_manage_app(self, m_sys, m_prepare_service):
|
||||
# Patch command function
|
||||
m_func = Mock()
|
||||
m_func = mock.Mock()
|
||||
cfg.CONF.register_opt(cfg.SubCommandOpt("command"))
|
||||
cfg.CONF.command.func = m_func
|
||||
|
||||
@@ -48,14 +52,14 @@ class TestDBManageRunApp(TestCase):
|
||||
m_sys.argv = list(filter(None, ["watcher-db-manage", self.command]))
|
||||
|
||||
dbmanage.main()
|
||||
self.assertEqual(m_func.call_count, 1)
|
||||
self.assertEqual(1, m_func.call_count)
|
||||
m_prepare_service.assert_called_once_with(
|
||||
["watcher-db-manage", self.expected])
|
||||
|
||||
|
||||
class TestDBManageRunCommand(TestCase):
|
||||
class TestDBManageRunCommand(base.TestCase):
|
||||
|
||||
@patch.object(migration, "upgrade")
|
||||
@mock.patch.object(migration, "upgrade")
|
||||
def test_run_db_upgrade(self, m_upgrade):
|
||||
cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command")
|
||||
cfg.CONF.set_default("revision", "dummy", group="command")
|
||||
@@ -63,7 +67,7 @@ class TestDBManageRunCommand(TestCase):
|
||||
|
||||
m_upgrade.assert_called_once_with("dummy")
|
||||
|
||||
@patch.object(migration, "downgrade")
|
||||
@mock.patch.object(migration, "downgrade")
|
||||
def test_run_db_downgrade(self, m_downgrade):
|
||||
cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command")
|
||||
cfg.CONF.set_default("revision", "dummy", group="command")
|
||||
@@ -71,7 +75,7 @@ class TestDBManageRunCommand(TestCase):
|
||||
|
||||
m_downgrade.assert_called_once_with("dummy")
|
||||
|
||||
@patch.object(migration, "revision")
|
||||
@mock.patch.object(migration, "revision")
|
||||
def test_run_db_revision(self, m_revision):
|
||||
cfg.CONF.register_opt(cfg.StrOpt("message"), group="command")
|
||||
cfg.CONF.register_opt(cfg.StrOpt("autogenerate"), group="command")
|
||||
@@ -87,14 +91,85 @@ class TestDBManageRunCommand(TestCase):
|
||||
"dummy_message", "dummy_autogenerate"
|
||||
)
|
||||
|
||||
@patch.object(migration, "stamp")
|
||||
@mock.patch.object(migration, "stamp")
|
||||
def test_run_db_stamp(self, m_stamp):
|
||||
cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command")
|
||||
cfg.CONF.set_default("revision", "dummy", group="command")
|
||||
dbmanage.DBCommand.stamp()
|
||||
|
||||
@patch.object(migration, "version")
|
||||
@mock.patch.object(migration, "version")
|
||||
def test_run_db_version(self, m_version):
|
||||
dbmanage.DBCommand.version()
|
||||
|
||||
self.assertEqual(m_version.call_count, 1)
|
||||
self.assertEqual(1, m_version.call_count)
|
||||
|
||||
@mock.patch.object(purge, "PurgeCommand")
|
||||
def test_run_db_purge(self, m_purge_cls):
|
||||
m_purge = mock.Mock()
|
||||
m_purge_cls.return_value = m_purge
|
||||
m_purge_cls.get_audit_template_uuid.return_value = 'Some UUID'
|
||||
cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command")
|
||||
cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command")
|
||||
cfg.CONF.register_opt(cfg.StrOpt("audit_template"), group="command")
|
||||
cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command")
|
||||
cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command")
|
||||
cfg.CONF.set_default("age_in_days", None, group="command")
|
||||
cfg.CONF.set_default("max_number", None, group="command")
|
||||
cfg.CONF.set_default("audit_template", None, group="command")
|
||||
cfg.CONF.set_default("exclude_orphans", True, group="command")
|
||||
cfg.CONF.set_default("dry_run", False, group="command")
|
||||
|
||||
dbmanage.DBCommand.purge()
|
||||
|
||||
m_purge_cls.assert_called_once_with(
|
||||
None, None, 'Some UUID', True, False)
|
||||
m_purge.execute.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(sys, "exit")
|
||||
@mock.patch.object(purge, "PurgeCommand")
|
||||
def test_run_db_purge_negative_max_number(self, m_purge_cls, m_exit):
|
||||
m_purge = mock.Mock()
|
||||
m_purge_cls.return_value = m_purge
|
||||
m_purge_cls.get_audit_template_uuid.return_value = 'Some UUID'
|
||||
cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command")
|
||||
cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command")
|
||||
cfg.CONF.register_opt(cfg.StrOpt("audit_template"), group="command")
|
||||
cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command")
|
||||
cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command")
|
||||
cfg.CONF.set_default("age_in_days", None, group="command")
|
||||
cfg.CONF.set_default("max_number", -1, group="command")
|
||||
cfg.CONF.set_default("audit_template", None, group="command")
|
||||
cfg.CONF.set_default("exclude_orphans", True, group="command")
|
||||
cfg.CONF.set_default("dry_run", False, group="command")
|
||||
|
||||
dbmanage.DBCommand.purge()
|
||||
|
||||
self.assertEqual(0, m_purge_cls.call_count)
|
||||
self.assertEqual(0, m_purge.execute.call_count)
|
||||
self.assertEqual(0, m_purge.do_delete.call_count)
|
||||
self.assertEqual(1, m_exit.call_count)
|
||||
|
||||
@mock.patch.object(sys, "exit")
|
||||
@mock.patch.object(purge, "PurgeCommand")
|
||||
def test_run_db_purge_dry_run(self, m_purge_cls, m_exit):
|
||||
m_purge = mock.Mock()
|
||||
m_purge_cls.return_value = m_purge
|
||||
m_purge_cls.get_audit_template_uuid.return_value = 'Some UUID'
|
||||
cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command")
|
||||
cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command")
|
||||
cfg.CONF.register_opt(cfg.StrOpt("audit_template"), group="command")
|
||||
cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command")
|
||||
cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command")
|
||||
cfg.CONF.set_default("age_in_days", None, group="command")
|
||||
cfg.CONF.set_default("max_number", None, group="command")
|
||||
cfg.CONF.set_default("audit_template", None, group="command")
|
||||
cfg.CONF.set_default("exclude_orphans", True, group="command")
|
||||
cfg.CONF.set_default("dry_run", True, group="command")
|
||||
|
||||
dbmanage.DBCommand.purge()
|
||||
|
||||
m_purge_cls.assert_called_once_with(
|
||||
None, None, 'Some UUID', True, True)
|
||||
self.assertEqual(1, m_purge.execute.call_count)
|
||||
self.assertEqual(0, m_purge.do_delete.call_count)
|
||||
self.assertEqual(0, m_exit.call_count)
|
||||
|
||||
@@ -49,5 +49,5 @@ class TestDecisionEngine(BaseTestCase):
|
||||
@patch.object(DecisionEngineManager, "join")
|
||||
def test_run_de_app(self, m_connect, m_join):
|
||||
decisionengine.main()
|
||||
self.assertEqual(m_connect.call_count, 1)
|
||||
self.assertEqual(m_join.call_count, 1)
|
||||
self.assertEqual(1, m_connect.call_count)
|
||||
self.assertEqual(1, m_join.call_count)
|
||||
|
||||
@@ -42,7 +42,7 @@ class TestLoader(BaseTestCase):
|
||||
loader_manager = DefaultLoader(namespace='TESTING')
|
||||
loaded_driver = loader_manager.load(name='fake')
|
||||
|
||||
self.assertEqual(loaded_driver.get_name(), FakeLoadable.get_name())
|
||||
self.assertEqual(FakeLoadable.get_name(), loaded_driver.get_name())
|
||||
|
||||
@mock.patch("watcher.common.loader.default.DriverManager")
|
||||
def test_load_driver_bad_plugin(self, m_driver_manager):
|
||||
|
||||
@@ -32,13 +32,13 @@ class TestMessagingCore(base.TestCase):
|
||||
def test_connect(self, m_handler):
|
||||
messaging = messaging_core.MessagingCore("", "", "")
|
||||
messaging.connect()
|
||||
self.assertEqual(m_handler.call_count, 2)
|
||||
self.assertEqual(2, m_handler.call_count)
|
||||
|
||||
@mock.patch.object(messaging_handler, "MessagingHandler")
|
||||
def test_disconnect(self, m_handler):
|
||||
messaging = messaging_core.MessagingCore("", "", "")
|
||||
messaging.disconnect()
|
||||
self.assertEqual(m_handler.call_count, 2)
|
||||
self.assertEqual(2, m_handler.call_count)
|
||||
|
||||
def test_build_topic_handler(self):
|
||||
topic_name = "MyTopic"
|
||||
@@ -102,9 +102,9 @@ class TestMessagingCore(base.TestCase):
|
||||
topic = messaging.build_topic_handler("test_topic")
|
||||
|
||||
self.assertIsInstance(topic, messaging_handler.MessagingHandler)
|
||||
self.assertEqual(messaging.publisher_id, "pub_id")
|
||||
self.assertEqual(topic.publisher_id, "pub_id")
|
||||
self.assertEqual("pub_id", messaging.publisher_id)
|
||||
self.assertEqual("pub_id", topic.publisher_id)
|
||||
|
||||
self.assertEqual(
|
||||
messaging.conductor_topic_handler.topic_name, "test_topic")
|
||||
self.assertEqual(topic.topic_name, "test_topic")
|
||||
self.assertEqual("test_topic",
|
||||
messaging.conductor_topic_handler.topic_name)
|
||||
self.assertEqual("test_topic", topic.topic_name)
|
||||
|
||||
@@ -70,8 +70,8 @@ class TestMessagingHandler(base.TestCase):
|
||||
serializer=None,
|
||||
)
|
||||
|
||||
self.assertEqual(handler.endpoints, [self.ENDPOINT])
|
||||
self.assertEqual([self.ENDPOINT], handler.endpoints)
|
||||
|
||||
handler.remove_endpoint(self.ENDPOINT)
|
||||
|
||||
self.assertEqual(handler.endpoints, [])
|
||||
self.assertEqual([], handler.endpoints)
|
||||
|
||||
@@ -44,7 +44,7 @@ class TestCeilometerHelper(base.BaseTestCase):
|
||||
user_ids=["user_ids"],
|
||||
tenant_ids=["tenant_ids"],
|
||||
resource_ids=["resource_ids"])
|
||||
self.assertEqual(query, expected)
|
||||
self.assertEqual(expected, query)
|
||||
|
||||
def test_statistic_aggregation(self, mock_ceilometer):
|
||||
cm = ceilometer_helper.CeilometerHelper()
|
||||
@@ -60,7 +60,7 @@ class TestCeilometerHelper(base.BaseTestCase):
|
||||
meter_name="cpu_util",
|
||||
period="7300"
|
||||
)
|
||||
self.assertEqual(val, expected_result)
|
||||
self.assertEqual(expected_result, val)
|
||||
|
||||
def test_get_last_sample(self, mock_ceilometer):
|
||||
ceilometer = mock.MagicMock()
|
||||
@@ -74,7 +74,7 @@ class TestCeilometerHelper(base.BaseTestCase):
|
||||
resource_id="id",
|
||||
meter_name="compute.node.percent"
|
||||
)
|
||||
self.assertEqual(val, expected_result)
|
||||
self.assertEqual(expected_result, val)
|
||||
|
||||
def test_get_last_sample_none(self, mock_ceilometer):
|
||||
ceilometer = mock.MagicMock()
|
||||
@@ -86,7 +86,7 @@ class TestCeilometerHelper(base.BaseTestCase):
|
||||
resource_id="id",
|
||||
meter_name="compute.node.percent"
|
||||
)
|
||||
self.assertEqual(val, expected)
|
||||
self.assertEqual(expected, val)
|
||||
|
||||
def test_statistic_list(self, mock_ceilometer):
|
||||
ceilometer = mock.MagicMock()
|
||||
@@ -95,4 +95,4 @@ class TestCeilometerHelper(base.BaseTestCase):
|
||||
mock_ceilometer.return_value = ceilometer
|
||||
cm = ceilometer_helper.CeilometerHelper()
|
||||
val = cm.statistic_list(meter_name="cpu_util")
|
||||
self.assertEqual(val, expected_value)
|
||||
self.assertEqual(expected_value, val)
|
||||
|
||||
@@ -51,7 +51,7 @@ class TestNovaHelper(base.TestCase):
|
||||
nova_util.nova.servers.list.return_value = [server]
|
||||
|
||||
result = nova_util.stop_instance(instance_id)
|
||||
self.assertEqual(result, True)
|
||||
self.assertEqual(True, result)
|
||||
|
||||
def test_set_host_offline(self, mock_glance, mock_cinder, mock_neutron,
|
||||
mock_nova):
|
||||
@@ -60,7 +60,7 @@ class TestNovaHelper(base.TestCase):
|
||||
nova_util.nova.hosts = mock.MagicMock()
|
||||
nova_util.nova.hosts.get.return_value = host
|
||||
result = nova_util.set_host_offline("rennes")
|
||||
self.assertEqual(result, True)
|
||||
self.assertEqual(True, result)
|
||||
|
||||
@mock.patch.object(time, 'sleep', mock.Mock())
|
||||
def test_live_migrate_instance(self, mock_glance, mock_cinder,
|
||||
@@ -85,7 +85,7 @@ class TestNovaHelper(base.TestCase):
|
||||
self.instance_uuid,
|
||||
self.destination_hypervisor)
|
||||
|
||||
self.assertEqual(is_success, False)
|
||||
self.assertEqual(False, is_success)
|
||||
|
||||
@mock.patch.object(time, 'sleep', mock.Mock())
|
||||
def test_watcher_non_live_migrate_instance_volume(
|
||||
|
||||
@@ -92,6 +92,12 @@ class DbTestCase(base.TestCase):
|
||||
def setUp(self):
|
||||
cfg.CONF.set_override("enable_authentication", False,
|
||||
enforce_type=True)
|
||||
# To use in-memory SQLite DB
|
||||
cfg.CONF.set_override("connection", "sqlite://", group="database",
|
||||
enforce_type=True)
|
||||
cfg.CONF.set_override("sqlite_db", "", group="database",
|
||||
enforce_type=True)
|
||||
|
||||
super(DbTestCase, self).setUp()
|
||||
|
||||
self.dbapi = dbapi.get_instance()
|
||||
|
||||
@@ -15,13 +15,222 @@
|
||||
|
||||
"""Tests for manipulating Action via the DB API"""
|
||||
|
||||
import freezegun
|
||||
import six
|
||||
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils as w_utils
|
||||
from watcher.objects import action as act_objects
|
||||
from watcher.tests.db import base
|
||||
from watcher.tests.db import utils
|
||||
|
||||
|
||||
class TestDbActionFilters(base.DbTestCase):
|
||||
|
||||
FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414'
|
||||
FAKE_OLD_DATE = '2015-01-01T09:52:05.219414'
|
||||
FAKE_TODAY = '2016-02-24T09:52:05.219414'
|
||||
|
||||
def setUp(self):
|
||||
super(TestDbActionFilters, self).setUp()
|
||||
self.context.show_deleted = True
|
||||
self._data_setup()
|
||||
|
||||
def _data_setup(self):
|
||||
self.audit_template_name = "Audit Template"
|
||||
|
||||
self.audit_template = utils.create_test_audit_template(
|
||||
name=self.audit_template_name, id=1, uuid=None)
|
||||
self.audit = utils.create_test_audit(
|
||||
audit_template_id=self.audit_template.id, id=1, uuid=None)
|
||||
self.action_plan = utils.create_test_action_plan(
|
||||
audit_id=self.audit.id, id=1, uuid=None)
|
||||
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.action1 = utils.create_test_action(
|
||||
action_plan_id=self.action_plan.id, id=1, uuid=None)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.action2 = utils.create_test_action(
|
||||
action_plan_id=self.action_plan.id, id=2, uuid=None)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.action3 = utils.create_test_action(
|
||||
action_plan_id=self.action_plan.id, id=3, uuid=None)
|
||||
|
||||
def _soft_delete_actions(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action(self.action1.uuid)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.soft_delete_action(self.action2.uuid)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.soft_delete_action(self.action3.uuid)
|
||||
|
||||
def _update_actions(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.update_action(
|
||||
self.action1.uuid,
|
||||
values={"state": act_objects.State.SUCCEEDED})
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.update_action(
|
||||
self.action2.uuid,
|
||||
values={"state": act_objects.State.SUCCEEDED})
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.update_action(
|
||||
self.action3.uuid,
|
||||
values={"state": act_objects.State.SUCCEEDED})
|
||||
|
||||
def test_get_action_filter_deleted_true(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action(self.action1.uuid)
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'deleted': True})
|
||||
|
||||
self.assertEqual([self.action1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_filter_deleted_false(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action(self.action1.uuid)
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'deleted': False})
|
||||
|
||||
self.assertEqual([self.action2['id'], self.action3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_filter_deleted_at_eq(self):
|
||||
self._soft_delete_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'deleted_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_filter_deleted_at_lt(self):
|
||||
self._soft_delete_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'deleted_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action2['id'], self.action3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_filter_deleted_at_lte(self):
|
||||
self._soft_delete_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action2['id'], self.action3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_filter_deleted_at_gt(self):
|
||||
self._soft_delete_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_filter_deleted_at_gte(self):
|
||||
self._soft_delete_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action1['id'], self.action2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
# created_at #
|
||||
|
||||
def test_get_action_filter_created_at_eq(self):
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'created_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_filter_created_at_lt(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'created_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action2['id'], self.action3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_filter_created_at_lte(self):
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'created_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action2['id'], self.action3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_filter_created_at_gt(self):
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'created_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_filter_created_at_gte(self):
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'created_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action1['id'], self.action2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
# updated_at #
|
||||
|
||||
def test_get_action_filter_updated_at_eq(self):
|
||||
self._update_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'updated_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_filter_updated_at_lt(self):
|
||||
self._update_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'updated_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action2['id'], self.action3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_filter_updated_at_lte(self):
|
||||
self._update_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action2['id'], self.action3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_filter_updated_at_gt(self):
|
||||
self._update_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_filter_updated_at_gte(self):
|
||||
self._update_actions()
|
||||
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action1['id'], self.action2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
|
||||
class DbActionTestCase(base.DbTestCase):
|
||||
|
||||
def _create_test_action(self, **kwargs):
|
||||
@@ -36,7 +245,7 @@ class DbActionTestCase(base.DbTestCase):
|
||||
|
||||
def test_get_action_list(self):
|
||||
uuids = []
|
||||
for i in range(1, 6):
|
||||
for _ in range(1, 6):
|
||||
action = utils.create_test_action(uuid=w_utils.generate_uuid())
|
||||
uuids.append(six.text_type(action['uuid']))
|
||||
res = self.dbapi.get_action_list(self.context)
|
||||
@@ -101,6 +310,14 @@ class DbActionTestCase(base.DbTestCase):
|
||||
for action in res:
|
||||
self.assertEqual(action_plan['id'], action.action_plan_id)
|
||||
|
||||
def test_get_action_list_with_filter_by_uuid(self):
|
||||
action = self._create_test_action()
|
||||
res = self.dbapi.get_action_list(
|
||||
self.context, filters={'uuid': action["uuid"]})
|
||||
|
||||
self.assertEqual(len(res), 1)
|
||||
self.assertEqual(action['uuid'], res[0].uuid)
|
||||
|
||||
def test_get_action_by_id(self):
|
||||
action = self._create_test_action()
|
||||
action = self.dbapi.get_action_by_id(self.context, action['id'])
|
||||
|
||||
@@ -15,13 +15,219 @@
|
||||
|
||||
"""Tests for manipulating ActionPlan via the DB API"""
|
||||
|
||||
import freezegun
|
||||
import six
|
||||
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils as w_utils
|
||||
from watcher.objects import action_plan as ap_objects
|
||||
from watcher.tests.db import base
|
||||
from watcher.tests.db import utils
|
||||
|
||||
|
||||
class TestDbActionPlanFilters(base.DbTestCase):
|
||||
|
||||
FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414'
|
||||
FAKE_OLD_DATE = '2015-01-01T09:52:05.219414'
|
||||
FAKE_TODAY = '2016-02-24T09:52:05.219414'
|
||||
|
||||
def setUp(self):
|
||||
super(TestDbActionPlanFilters, self).setUp()
|
||||
self.context.show_deleted = True
|
||||
self._data_setup()
|
||||
|
||||
def _data_setup(self):
|
||||
self.audit_template_name = "Audit Template"
|
||||
|
||||
self.audit_template = utils.create_test_audit_template(
|
||||
name=self.audit_template_name, id=1, uuid=None)
|
||||
self.audit = utils.create_test_audit(
|
||||
audit_template_id=self.audit_template.id, id=1, uuid=None)
|
||||
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.action_plan1 = utils.create_test_action_plan(
|
||||
audit_id=self.audit.id, id=1, uuid=None)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.action_plan2 = utils.create_test_action_plan(
|
||||
audit_id=self.audit.id, id=2, uuid=None)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.action_plan3 = utils.create_test_action_plan(
|
||||
audit_id=self.audit.id, id=3, uuid=None)
|
||||
|
||||
def _soft_delete_action_plans(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action_plan(self.action_plan1.uuid)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.soft_delete_action_plan(self.action_plan2.uuid)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.soft_delete_action_plan(self.action_plan3.uuid)
|
||||
|
||||
def _update_action_plans(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.update_action_plan(
|
||||
self.action_plan1.uuid,
|
||||
values={"state": ap_objects.State.SUCCEEDED})
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.update_action_plan(
|
||||
self.action_plan2.uuid,
|
||||
values={"state": ap_objects.State.SUCCEEDED})
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.update_action_plan(
|
||||
self.action_plan3.uuid,
|
||||
values={"state": ap_objects.State.SUCCEEDED})
|
||||
|
||||
def test_get_action_plan_list_filter_deleted_true(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action_plan(self.action_plan1.uuid)
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'deleted': True})
|
||||
|
||||
self.assertEqual([self.action_plan1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_deleted_false(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action_plan(self.action_plan1.uuid)
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'deleted': False})
|
||||
|
||||
self.assertEqual([self.action_plan2['id'], self.action_plan3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_deleted_at_eq(self):
|
||||
self._soft_delete_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'deleted_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action_plan1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_deleted_at_lt(self):
|
||||
self._soft_delete_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'deleted_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan2['id'], self.action_plan3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_deleted_at_lte(self):
|
||||
self._soft_delete_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan2['id'], self.action_plan3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_deleted_at_gt(self):
|
||||
self._soft_delete_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action_plan1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_deleted_at_gte(self):
|
||||
self._soft_delete_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan1['id'], self.action_plan2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
# created_at #
|
||||
|
||||
def test_get_action_plan_list_filter_created_at_eq(self):
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'created_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action_plan1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_created_at_lt(self):
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'created_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan2['id'], self.action_plan3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_created_at_lte(self):
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'created_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan2['id'], self.action_plan3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_created_at_gt(self):
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'created_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action_plan1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_created_at_gte(self):
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'created_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan1['id'], self.action_plan2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
# updated_at #
|
||||
|
||||
def test_get_action_plan_list_filter_updated_at_eq(self):
|
||||
self._update_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'updated_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action_plan1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_updated_at_lt(self):
|
||||
self._update_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'updated_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan2['id'], self.action_plan3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_updated_at_lte(self):
|
||||
self._update_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan2['id'], self.action_plan3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_updated_at_gt(self):
|
||||
self._update_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action_plan1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_plan_list_filter_updated_at_gte(self):
|
||||
self._update_action_plans()
|
||||
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.action_plan1['id'], self.action_plan2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
|
||||
class DbActionPlanTestCase(base.DbTestCase):
|
||||
|
||||
def _create_test_audit(self, **kwargs):
|
||||
@@ -80,6 +286,14 @@ class DbActionPlanTestCase(base.DbTestCase):
|
||||
for r in res:
|
||||
self.assertEqual(audit['id'], r.audit_id)
|
||||
|
||||
def test_get_action_plan_list_with_filter_by_uuid(self):
|
||||
action_plan = self._create_test_action_plan()
|
||||
res = self.dbapi.get_action_plan_list(
|
||||
self.context, filters={'uuid': action_plan["uuid"]})
|
||||
|
||||
self.assertEqual(len(res), 1)
|
||||
self.assertEqual(action_plan['uuid'], res[0].uuid)
|
||||
|
||||
def test_get_action_plan_by_id(self):
|
||||
action_plan = self._create_test_action_plan()
|
||||
action_plan = self.dbapi.get_action_plan_by_id(
|
||||
|
||||
@@ -15,13 +15,217 @@
|
||||
|
||||
"""Tests for manipulating Audit via the DB API"""
|
||||
|
||||
import freezegun
|
||||
import six
|
||||
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils as w_utils
|
||||
from watcher.objects import audit as audit_objects
|
||||
from watcher.tests.db import base
|
||||
from watcher.tests.db import utils
|
||||
|
||||
|
||||
class TestDbAuditFilters(base.DbTestCase):
|
||||
|
||||
FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414'
|
||||
FAKE_OLD_DATE = '2015-01-01T09:52:05.219414'
|
||||
FAKE_TODAY = '2016-02-24T09:52:05.219414'
|
||||
|
||||
def setUp(self):
|
||||
super(TestDbAuditFilters, self).setUp()
|
||||
self.context.show_deleted = True
|
||||
self._data_setup()
|
||||
|
||||
def _data_setup(self):
|
||||
self.audit_template_name = "Audit Template"
|
||||
|
||||
self.audit_template = utils.create_test_audit_template(
|
||||
name=self.audit_template_name, id=1, uuid=None)
|
||||
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.audit1 = utils.create_test_audit(
|
||||
audit_template_id=self.audit_template.id, id=1, uuid=None)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.audit2 = utils.create_test_audit(
|
||||
audit_template_id=self.audit_template.id, id=2, uuid=None)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.audit3 = utils.create_test_audit(
|
||||
audit_template_id=self.audit_template.id, id=3, uuid=None)
|
||||
|
||||
def _soft_delete_audits(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_audit(self.audit1.uuid)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.soft_delete_audit(self.audit2.uuid)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.soft_delete_audit(self.audit3.uuid)
|
||||
|
||||
def _update_audits(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.update_audit(
|
||||
self.audit1.uuid,
|
||||
values={"state": audit_objects.State.SUCCEEDED})
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.update_audit(
|
||||
self.audit2.uuid,
|
||||
values={"state": audit_objects.State.SUCCEEDED})
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.update_audit(
|
||||
self.audit3.uuid,
|
||||
values={"state": audit_objects.State.SUCCEEDED})
|
||||
|
||||
def test_get_audit_list_filter_deleted_true(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_audit(self.audit1.uuid)
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'deleted': True})
|
||||
|
||||
self.assertEqual([self.audit1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_deleted_false(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_audit(self.audit1.uuid)
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'deleted': False})
|
||||
|
||||
self.assertEqual([self.audit2['id'], self.audit3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_deleted_at_eq(self):
|
||||
self._soft_delete_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'deleted_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.audit1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_deleted_at_lt(self):
|
||||
self._soft_delete_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'deleted_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit2['id'], self.audit3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_deleted_at_lte(self):
|
||||
self._soft_delete_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit2['id'], self.audit3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_deleted_at_gt(self):
|
||||
self._soft_delete_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.audit1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_deleted_at_gte(self):
|
||||
self._soft_delete_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit1['id'], self.audit2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
# created_at #
|
||||
|
||||
def test_get_audit_list_filter_created_at_eq(self):
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'created_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.audit1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_created_at_lt(self):
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'created_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit2['id'], self.audit3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_created_at_lte(self):
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'created_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit2['id'], self.audit3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_created_at_gt(self):
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'created_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.audit1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_created_at_gte(self):
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'created_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit1['id'], self.audit2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
# updated_at #
|
||||
|
||||
def test_get_audit_list_filter_updated_at_eq(self):
|
||||
self._update_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'updated_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.audit1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_updated_at_lt(self):
|
||||
self._update_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'updated_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit2['id'], self.audit3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_updated_at_lte(self):
|
||||
self._update_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit2['id'], self.audit3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_updated_at_gt(self):
|
||||
self._update_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.audit1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_list_filter_updated_at_gte(self):
|
||||
self._update_audits()
|
||||
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit1['id'], self.audit2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
|
||||
class DbAuditTestCase(base.DbTestCase):
|
||||
|
||||
def _create_test_audit(self, **kwargs):
|
||||
@@ -31,7 +235,7 @@ class DbAuditTestCase(base.DbTestCase):
|
||||
|
||||
def test_get_audit_list(self):
|
||||
uuids = []
|
||||
for i in range(1, 6):
|
||||
for _ in range(1, 6):
|
||||
audit = utils.create_test_audit(uuid=w_utils.generate_uuid())
|
||||
uuids.append(six.text_type(audit['uuid']))
|
||||
res = self.dbapi.get_audit_list(self.context)
|
||||
@@ -70,6 +274,14 @@ class DbAuditTestCase(base.DbTestCase):
|
||||
filters={'state': 'PENDING'})
|
||||
self.assertEqual([audit2['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_list_with_filter_by_uuid(self):
|
||||
audit = self._create_test_audit()
|
||||
res = self.dbapi.get_audit_list(
|
||||
self.context, filters={'uuid': audit["uuid"]})
|
||||
|
||||
self.assertEqual(len(res), 1)
|
||||
self.assertEqual(audit['uuid'], res[0].uuid)
|
||||
|
||||
def test_get_audit_by_id(self):
|
||||
audit = self._create_test_audit()
|
||||
audit = self.dbapi.get_audit_by_id(self.context, audit['id'])
|
||||
|
||||
@@ -15,13 +15,214 @@
|
||||
|
||||
"""Tests for manipulating AuditTemplate via the DB API"""
|
||||
|
||||
import freezegun
|
||||
import six
|
||||
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils as w_utils
|
||||
from watcher.tests.db import base
|
||||
from watcher.tests.db import utils
|
||||
|
||||
|
||||
class TestDbAuditTemplateFilters(base.DbTestCase):
|
||||
|
||||
FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414'
|
||||
FAKE_OLD_DATE = '2015-01-01T09:52:05.219414'
|
||||
FAKE_TODAY = '2016-02-24T09:52:05.219414'
|
||||
|
||||
def setUp(self):
|
||||
super(TestDbAuditTemplateFilters, self).setUp()
|
||||
self.context.show_deleted = True
|
||||
self._data_setup()
|
||||
|
||||
def _data_setup(self):
|
||||
gen_name = lambda: "Audit Template %s" % w_utils.generate_uuid()
|
||||
self.audit_template1_name = gen_name()
|
||||
self.audit_template2_name = gen_name()
|
||||
self.audit_template3_name = gen_name()
|
||||
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.audit_template1 = utils.create_test_audit_template(
|
||||
name=self.audit_template1_name, id=1, uuid=None)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.audit_template2 = utils.create_test_audit_template(
|
||||
name=self.audit_template2_name, id=2, uuid=None)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.audit_template3 = utils.create_test_audit_template(
|
||||
name=self.audit_template3_name, id=3, uuid=None)
|
||||
|
||||
def _soft_delete_audit_templates(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_audit_template(self.audit_template1.uuid)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.soft_delete_audit_template(self.audit_template2.uuid)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.soft_delete_audit_template(self.audit_template3.uuid)
|
||||
|
||||
def _update_audit_templates(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.update_audit_template(
|
||||
self.audit_template1.uuid, values={"name": "audit_template1"})
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.update_audit_template(
|
||||
self.audit_template2.uuid, values={"name": "audit_template2"})
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.update_audit_template(
|
||||
self.audit_template3.uuid, values={"name": "audit_template3"})
|
||||
|
||||
def test_get_audit_template_list_filter_deleted_true(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_audit_template(self.audit_template1.uuid)
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'deleted': True})
|
||||
|
||||
self.assertEqual([self.audit_template1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_deleted_false(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_audit_template(self.audit_template1.uuid)
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'deleted': False})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template2['id'], self.audit_template3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_deleted_at_eq(self):
|
||||
self._soft_delete_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'deleted_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.audit_template1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_deleted_at_lt(self):
|
||||
self._soft_delete_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'deleted_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template2['id'], self.audit_template3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_deleted_at_lte(self):
|
||||
self._soft_delete_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template2['id'], self.audit_template3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_deleted_at_gt(self):
|
||||
self._soft_delete_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.audit_template1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_deleted_at_gte(self):
|
||||
self._soft_delete_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template1['id'], self.audit_template2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
# created_at #
|
||||
|
||||
def test_get_audit_template_list_filter_created_at_eq(self):
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'created_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.audit_template1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_created_at_lt(self):
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'created_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template2['id'], self.audit_template3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_created_at_lte(self):
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'created_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template2['id'], self.audit_template3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_created_at_gt(self):
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'created_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.audit_template1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_created_at_gte(self):
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'created_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template1['id'], self.audit_template2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
# updated_at #
|
||||
|
||||
def test_get_audit_template_list_filter_updated_at_eq(self):
|
||||
self._update_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'updated_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.audit_template1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_updated_at_lt(self):
|
||||
self._update_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'updated_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template2['id'], self.audit_template3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_updated_at_lte(self):
|
||||
self._update_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template2['id'], self.audit_template3['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_updated_at_gt(self):
|
||||
self._update_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.audit_template1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_filter_updated_at_gte(self):
|
||||
self._update_audit_templates()
|
||||
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
[self.audit_template1['id'], self.audit_template2['id']],
|
||||
[r.id for r in res])
|
||||
|
||||
|
||||
class DbAuditTemplateTestCase(base.DbTestCase):
|
||||
|
||||
def _create_test_audit_template(self, **kwargs):
|
||||
@@ -77,6 +278,14 @@ class DbAuditTemplateTestCase(base.DbTestCase):
|
||||
filters={'name': 'My Audit Template 2'})
|
||||
self.assertEqual([audit_template2['id']], [r.id for r in res])
|
||||
|
||||
def test_get_audit_template_list_with_filter_by_uuid(self):
|
||||
audit_template = self._create_test_audit_template()
|
||||
res = self.dbapi.get_audit_template_list(
|
||||
self.context, filters={'uuid': audit_template["uuid"]})
|
||||
|
||||
self.assertEqual(len(res), 1)
|
||||
self.assertEqual(audit_template['uuid'], res[0].uuid)
|
||||
|
||||
def test_get_audit_template_by_id(self):
|
||||
audit_template = self._create_test_audit_template()
|
||||
audit_template = self.dbapi.get_audit_template_by_id(
|
||||
@@ -132,14 +341,6 @@ class DbAuditTemplateTestCase(base.DbTestCase):
|
||||
self.assertRaises(exception.AuditTemplateNotFound,
|
||||
self.dbapi.destroy_audit_template, 1234)
|
||||
|
||||
# def test_destroy_audit_template_that_referenced_by_goals(self):
|
||||
# audit_template = self._create_test_audit_template()
|
||||
# goal = utils.create_test_goal(audit_template=audit_template['uuid'])
|
||||
# self.assertEqual(audit_template['uuid'], goal.audit_template)
|
||||
# self.assertRaises(exception.AuditTemplateReferenced,
|
||||
# self.dbapi.destroy_audit_template,
|
||||
# audit_template['id'])
|
||||
|
||||
def test_create_audit_template_already_exists(self):
|
||||
uuid = w_utils.generate_uuid()
|
||||
self._create_test_audit_template(id=1, uuid=uuid)
|
||||
|
||||
372
watcher/tests/db/test_purge.py
Normal file
372
watcher/tests/db/test_purge.py
Normal file
@@ -0,0 +1,372 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2016 b<>com
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import uuid
|
||||
|
||||
import freezegun
|
||||
import mock
|
||||
|
||||
from watcher.common import context as watcher_context
|
||||
from watcher.db import purge
|
||||
from watcher.db.sqlalchemy import api as dbapi
|
||||
from watcher.tests.db import base
|
||||
from watcher.tests.objects import utils as obj_utils
|
||||
|
||||
|
||||
class TestPurgeCommand(base.DbTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestPurgeCommand, self).setUp()
|
||||
self.cmd = purge.PurgeCommand()
|
||||
token_info = {
|
||||
'token': {
|
||||
'project': {
|
||||
'id': 'fake_project'
|
||||
},
|
||||
'user': {
|
||||
'id': 'fake_user'
|
||||
}
|
||||
}
|
||||
}
|
||||
self.context = watcher_context.RequestContext(
|
||||
auth_token_info=token_info,
|
||||
project_id='fake_project',
|
||||
user_id='fake_user',
|
||||
show_deleted=True,
|
||||
)
|
||||
|
||||
self.fake_today = '2016-02-24T09:52:05.219414+00:00'
|
||||
self.expired_date = '2016-01-24T09:52:05.219414+00:00'
|
||||
|
||||
self.m_input = mock.Mock()
|
||||
p = mock.patch("watcher.db.purge.input", self.m_input)
|
||||
self.m_input.return_value = 'y'
|
||||
p.start()
|
||||
self.addCleanup(p.stop)
|
||||
|
||||
self._id_generator = None
|
||||
self._data_setup()
|
||||
|
||||
def _generate_id(self):
|
||||
if self._id_generator is None:
|
||||
self._id_generator = self._get_id_generator()
|
||||
return next(self._id_generator)
|
||||
|
||||
def _get_id_generator(self):
|
||||
seed = 1
|
||||
while True:
|
||||
yield seed
|
||||
seed += 1
|
||||
|
||||
def _data_setup(self):
|
||||
# All the 1's are soft_deleted and are expired
|
||||
# All the 2's are soft_deleted but are not expired
|
||||
# All the 3's are *not* soft_deleted
|
||||
|
||||
# Number of days we want to keep in DB (no purge for them)
|
||||
self.cmd.age_in_days = 10
|
||||
self.cmd.max_number = None
|
||||
self.cmd.orphans = True
|
||||
gen_name = lambda: "Audit Template %s" % uuid.uuid4()
|
||||
self.audit_template1_name = gen_name()
|
||||
self.audit_template2_name = gen_name()
|
||||
self.audit_template3_name = gen_name()
|
||||
|
||||
with freezegun.freeze_time(self.expired_date):
|
||||
self.audit_template1 = obj_utils.create_test_audit_template(
|
||||
self.context, name=self.audit_template1_name,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.audit_template2 = obj_utils.create_test_audit_template(
|
||||
self.context, name=self.audit_template2_name,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.audit_template3 = obj_utils.create_test_audit_template(
|
||||
self.context, name=self.audit_template3_name,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.audit_template1.soft_delete()
|
||||
|
||||
with freezegun.freeze_time(self.expired_date):
|
||||
self.audit1 = obj_utils.create_test_audit(
|
||||
self.context, audit_template_id=self.audit_template1.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.audit2 = obj_utils.create_test_audit(
|
||||
self.context, audit_template_id=self.audit_template2.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.audit3 = obj_utils.create_test_audit(
|
||||
self.context, audit_template_id=self.audit_template3.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.audit1.soft_delete()
|
||||
|
||||
with freezegun.freeze_time(self.expired_date):
|
||||
self.action_plan1 = obj_utils.create_test_action_plan(
|
||||
self.context, audit_id=self.audit1.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.action_plan2 = obj_utils.create_test_action_plan(
|
||||
self.context, audit_id=self.audit2.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.action_plan3 = obj_utils.create_test_action_plan(
|
||||
self.context, audit_id=self.audit3.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
|
||||
self.action1 = obj_utils.create_test_action(
|
||||
self.context, action_plan_id=self.action_plan1.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.action2 = obj_utils.create_test_action(
|
||||
self.context, action_plan_id=self.action_plan2.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.action3 = obj_utils.create_test_action(
|
||||
self.context, action_plan_id=self.action_plan3.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
self.action_plan1.soft_delete()
|
||||
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action_plan")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit_template")
|
||||
def test_execute_max_number_exceeded(self, m_destroy_audit_template,
|
||||
m_destroy_audit,
|
||||
m_destroy_action_plan,
|
||||
m_destroy_action):
|
||||
self.cmd.age_in_days = None
|
||||
self.cmd.max_number = 5
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
self.audit_template2.soft_delete()
|
||||
self.audit2.soft_delete()
|
||||
self.action_plan2.soft_delete()
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
self.cmd.execute()
|
||||
|
||||
# The 1's and the 2's are purgeable (due to age of day set to 0),
|
||||
# but max_number = 5, and because of no Db integrity violation, we
|
||||
# should be able to purge only 4 objects.
|
||||
self.assertEqual(m_destroy_audit_template.call_count, 1)
|
||||
self.assertEqual(m_destroy_audit.call_count, 1)
|
||||
self.assertEqual(m_destroy_action_plan.call_count, 1)
|
||||
self.assertEqual(m_destroy_action.call_count, 1)
|
||||
|
||||
def test_find_deleted_entries(self):
|
||||
self.cmd.age_in_days = None
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
objects_map = self.cmd.find_objects_to_delete()
|
||||
|
||||
self.assertEqual(len(objects_map.audit_templates), 1)
|
||||
self.assertEqual(len(objects_map.audits), 1)
|
||||
self.assertEqual(len(objects_map.action_plans), 1)
|
||||
self.assertEqual(len(objects_map.actions), 1)
|
||||
|
||||
def test_find_deleted_and_expired_entries(self):
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
self.audit_template2.soft_delete()
|
||||
self.audit2.soft_delete()
|
||||
self.action_plan2.soft_delete()
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
objects_map = self.cmd.find_objects_to_delete()
|
||||
|
||||
# The 1's are purgeable (due to age of day set to 10)
|
||||
self.assertEqual(len(objects_map.audit_templates), 1)
|
||||
self.assertEqual(len(objects_map.audits), 1)
|
||||
self.assertEqual(len(objects_map.action_plans), 1)
|
||||
self.assertEqual(len(objects_map.actions), 1)
|
||||
|
||||
def test_find_deleted_and_nonexpired_related_entries(self):
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
# orphan audit
|
||||
audit4 = obj_utils.create_test_audit(
|
||||
self.context, audit_template_id=404, # Does not exist
|
||||
id=self._generate_id(), uuid=None)
|
||||
action_plan4 = obj_utils.create_test_action_plan(
|
||||
self.context, audit_id=audit4.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
action4 = obj_utils.create_test_action(
|
||||
self.context, action_plan_id=action_plan4.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
|
||||
audit5 = obj_utils.create_test_audit(
|
||||
self.context, audit_template_id=self.audit_template1.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
action_plan5 = obj_utils.create_test_action_plan(
|
||||
self.context, audit_id=audit5.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
action5 = obj_utils.create_test_action(
|
||||
self.context, action_plan_id=action_plan5.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
|
||||
self.audit_template2.soft_delete()
|
||||
self.audit2.soft_delete()
|
||||
self.action_plan2.soft_delete()
|
||||
|
||||
# All the 4's should be purged as well because they are orphans
|
||||
# even though they were not deleted
|
||||
|
||||
# All the 5's should be purged as well even though they are not
|
||||
# expired because their related audit template is itself expired
|
||||
audit5.soft_delete()
|
||||
action_plan5.soft_delete()
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
objects_map = self.cmd.find_objects_to_delete()
|
||||
|
||||
self.assertEqual(len(objects_map.audit_templates), 1)
|
||||
self.assertEqual(len(objects_map.audits), 3)
|
||||
self.assertEqual(len(objects_map.action_plans), 3)
|
||||
self.assertEqual(len(objects_map.actions), 3)
|
||||
self.assertEqual(
|
||||
set([self.action1.id, action4.id, action5.id]),
|
||||
set([entry.id for entry in objects_map.actions]))
|
||||
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action_plan")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit_template")
|
||||
def test_purge_command(self, m_destroy_audit_template,
|
||||
m_destroy_audit, m_destroy_action_plan,
|
||||
m_destroy_action):
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
self.cmd.execute()
|
||||
|
||||
m_destroy_audit_template.assert_called_once_with(
|
||||
self.audit_template1.uuid)
|
||||
m_destroy_audit.assert_called_once_with(
|
||||
self.audit1.uuid)
|
||||
m_destroy_action_plan.assert_called_once_with(
|
||||
self.action_plan1.uuid)
|
||||
m_destroy_action.assert_called_once_with(
|
||||
self.action1.uuid)
|
||||
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action_plan")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit_template")
|
||||
def test_purge_command_with_nonexpired_related_entries(
|
||||
self, m_destroy_audit_template, m_destroy_audit,
|
||||
m_destroy_action_plan, m_destroy_action):
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
# orphan audit
|
||||
audit4 = obj_utils.create_test_audit(
|
||||
self.context, audit_template_id=404, # Does not exist
|
||||
id=self._generate_id(), uuid=None)
|
||||
action_plan4 = obj_utils.create_test_action_plan(
|
||||
self.context, audit_id=audit4.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
action4 = obj_utils.create_test_action(
|
||||
self.context, action_plan_id=action_plan4.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
|
||||
audit5 = obj_utils.create_test_audit(
|
||||
self.context, audit_template_id=self.audit_template1.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
action_plan5 = obj_utils.create_test_action_plan(
|
||||
self.context, audit_id=audit5.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
action5 = obj_utils.create_test_action(
|
||||
self.context, action_plan_id=action_plan5.id,
|
||||
id=self._generate_id(), uuid=None)
|
||||
|
||||
self.audit_template2.soft_delete()
|
||||
self.audit2.soft_delete()
|
||||
self.action_plan2.soft_delete()
|
||||
|
||||
# All the 4's should be purged as well because they are orphans
|
||||
# even though they were not deleted
|
||||
|
||||
# All the 5's should be purged as well even though they are not
|
||||
# expired because their related audit template is itself expired
|
||||
audit5.soft_delete()
|
||||
action_plan5.soft_delete()
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
self.cmd.execute()
|
||||
|
||||
self.assertEqual(m_destroy_audit_template.call_count, 1)
|
||||
self.assertEqual(m_destroy_audit.call_count, 3)
|
||||
self.assertEqual(m_destroy_action_plan.call_count, 3)
|
||||
self.assertEqual(m_destroy_action.call_count, 3)
|
||||
|
||||
m_destroy_audit_template.assert_any_call(self.audit_template1.uuid)
|
||||
m_destroy_audit.assert_any_call(self.audit1.uuid)
|
||||
m_destroy_audit.assert_any_call(audit4.uuid)
|
||||
m_destroy_action_plan.assert_any_call(self.action_plan1.uuid)
|
||||
m_destroy_action_plan.assert_any_call(action_plan4.uuid)
|
||||
m_destroy_action_plan.assert_any_call(action_plan5.uuid)
|
||||
m_destroy_action.assert_any_call(self.action1.uuid)
|
||||
m_destroy_action.assert_any_call(action4.uuid)
|
||||
m_destroy_action.assert_any_call(action5.uuid)
|
||||
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action_plan")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit_template")
|
||||
def test_purge_command_with_audit_template_ok(
|
||||
self, m_destroy_audit_template, m_destroy_audit,
|
||||
m_destroy_action_plan, m_destroy_action):
|
||||
self.cmd.orphans = False
|
||||
self.cmd.uuid = self.audit_template1.uuid
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
self.cmd.execute()
|
||||
|
||||
self.assertEqual(m_destroy_audit_template.call_count, 1)
|
||||
self.assertEqual(m_destroy_audit.call_count, 1)
|
||||
self.assertEqual(m_destroy_action_plan.call_count, 1)
|
||||
self.assertEqual(m_destroy_action.call_count, 1)
|
||||
|
||||
m_destroy_audit_template.assert_called_once_with(
|
||||
self.audit_template1.uuid)
|
||||
m_destroy_audit.assert_called_once_with(
|
||||
self.audit1.uuid)
|
||||
m_destroy_action_plan.assert_called_once_with(
|
||||
self.action_plan1.uuid)
|
||||
m_destroy_action.assert_called_once_with(
|
||||
self.action1.uuid)
|
||||
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action_plan")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit_template")
|
||||
def test_purge_command_with_audit_template_not_expired(
|
||||
self, m_destroy_audit_template, m_destroy_audit,
|
||||
m_destroy_action_plan, m_destroy_action):
|
||||
self.cmd.orphans = False
|
||||
self.cmd.uuid = self.audit_template2.uuid
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
self.cmd.execute()
|
||||
|
||||
self.assertEqual(m_destroy_audit_template.call_count, 0)
|
||||
self.assertEqual(m_destroy_audit.call_count, 0)
|
||||
self.assertEqual(m_destroy_action_plan.call_count, 0)
|
||||
self.assertEqual(m_destroy_action.call_count, 0)
|
||||
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_action_plan")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit")
|
||||
@mock.patch.object(dbapi.Connection, "destroy_audit_template")
|
||||
def test_purge_command_with_audit_template_not_soft_deleted(
|
||||
self, m_destroy_audit_template, m_destroy_audit,
|
||||
m_destroy_action_plan, m_destroy_action):
|
||||
self.cmd.orphans = False
|
||||
self.cmd.uuid = self.audit_template3.uuid
|
||||
|
||||
with freezegun.freeze_time(self.fake_today):
|
||||
self.cmd.execute()
|
||||
|
||||
self.assertEqual(m_destroy_audit_template.call_count, 0)
|
||||
self.assertEqual(m_destroy_audit.call_count, 0)
|
||||
self.assertEqual(m_destroy_action_plan.call_count, 0)
|
||||
self.assertEqual(m_destroy_action.call_count, 0)
|
||||
@@ -25,9 +25,9 @@ class TestDiskInfo(base.BaseTestCase):
|
||||
def test_all(self):
|
||||
disk_information = DiskInfo()
|
||||
disk_information.set_size(1024)
|
||||
self.assertEqual(disk_information.get_size(), 1024)
|
||||
self.assertEqual(1024, disk_information.get_size())
|
||||
|
||||
disk_information.set_scheduler = "scheduler_qcq"
|
||||
|
||||
disk_information.set_device_name("nom_qcq")
|
||||
self.assertEqual(disk_information.get_device_name(), "nom_qcq")
|
||||
self.assertEqual("nom_qcq", disk_information.get_device_name())
|
||||
|
||||
@@ -40,35 +40,35 @@ class TestMapping(base.BaseTestCase):
|
||||
if vm.uuid != self.VM1_UUID:
|
||||
vm = vms[keys[1]]
|
||||
node = model.mapping.get_node_from_vm(vm)
|
||||
self.assertEqual(node.uuid, 'Node_0')
|
||||
self.assertEqual('Node_0', node.uuid)
|
||||
|
||||
def test_get_node_from_vm_id(self):
|
||||
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||
model = fake_cluster.generate_scenario_3_with_2_hypervisors()
|
||||
|
||||
hyps = model.mapping.get_node_vms_from_id("BLABLABLA")
|
||||
self.assertEqual(hyps.__len__(), 0)
|
||||
self.assertEqual(0, hyps.__len__())
|
||||
|
||||
def test_get_all_vms(self):
|
||||
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||
model = fake_cluster.generate_scenario_3_with_2_hypervisors()
|
||||
|
||||
vms = model.get_all_vms()
|
||||
self.assertEqual(vms.__len__(), 2)
|
||||
self.assertEqual(vms[self.VM1_UUID].state,
|
||||
vm_state.VMState.ACTIVE.value)
|
||||
self.assertEqual(vms[self.VM1_UUID].uuid, self.VM1_UUID)
|
||||
self.assertEqual(vms[self.VM2_UUID].state,
|
||||
vm_state.VMState.ACTIVE.value)
|
||||
self.assertEqual(vms[self.VM2_UUID].uuid, self.VM2_UUID)
|
||||
self.assertEqual(2, vms.__len__())
|
||||
self.assertEqual(vm_state.VMState.ACTIVE.value,
|
||||
vms[self.VM1_UUID].state)
|
||||
self.assertEqual(self.VM1_UUID, vms[self.VM1_UUID].uuid)
|
||||
self.assertEqual(vm_state.VMState.ACTIVE.value,
|
||||
vms[self.VM2_UUID].state)
|
||||
self.assertEqual(self.VM2_UUID, vms[self.VM2_UUID].uuid)
|
||||
|
||||
def test_get_mapping(self):
|
||||
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||
model = fake_cluster.generate_scenario_3_with_2_hypervisors()
|
||||
mapping_vm = model.mapping.get_mapping_vm()
|
||||
self.assertEqual(mapping_vm.__len__(), 2)
|
||||
self.assertEqual(mapping_vm[self.VM1_UUID], 'Node_0')
|
||||
self.assertEqual(mapping_vm[self.VM2_UUID], 'Node_1')
|
||||
self.assertEqual(2, mapping_vm.__len__())
|
||||
self.assertEqual('Node_0', mapping_vm[self.VM1_UUID])
|
||||
self.assertEqual('Node_1', mapping_vm[self.VM2_UUID])
|
||||
|
||||
def test_migrate_vm(self):
|
||||
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||
@@ -80,10 +80,10 @@ class TestMapping(base.BaseTestCase):
|
||||
vm1 = vms[keys[1]]
|
||||
hyp1 = model.mapping.get_node_from_vm_id(vm1.uuid)
|
||||
|
||||
self.assertEqual(model.mapping.migrate_vm(vm1, hyp1, hyp1), False)
|
||||
self.assertEqual(model.mapping.migrate_vm(vm1, hyp0, hyp0), False)
|
||||
self.assertEqual(model.mapping.migrate_vm(vm1, hyp1, hyp0), True)
|
||||
self.assertEqual(model.mapping.migrate_vm(vm1, hyp0, hyp1), True)
|
||||
self.assertEqual(False, model.mapping.migrate_vm(vm1, hyp1, hyp1))
|
||||
self.assertEqual(False, model.mapping.migrate_vm(vm1, hyp0, hyp0))
|
||||
self.assertEqual(True, model.mapping.migrate_vm(vm1, hyp1, hyp0))
|
||||
self.assertEqual(True, model.mapping.migrate_vm(vm1, hyp0, hyp1))
|
||||
|
||||
def test_unmap_from_id_log_warning(self):
|
||||
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||
@@ -108,5 +108,5 @@ class TestMapping(base.BaseTestCase):
|
||||
hyp0 = model.mapping.get_node_from_vm_id(vm0.uuid)
|
||||
|
||||
model.mapping.unmap_from_id(hyp0.uuid, vm0.uuid)
|
||||
self.assertEqual(len(model.mapping.get_node_vms_from_id(
|
||||
hyp0.uuid)), 0)
|
||||
self.assertEqual(0, len(model.mapping.get_node_vms_from_id(
|
||||
hyp0.uuid)))
|
||||
|
||||
@@ -33,9 +33,9 @@ class TestModel(base.BaseTestCase):
|
||||
fake_cluster = FakerModelCollector()
|
||||
model = fake_cluster.generate_scenario_1()
|
||||
|
||||
self.assertEqual(len(model._hypervisors), 5)
|
||||
self.assertEqual(len(model._vms), 35)
|
||||
self.assertEqual(len(model.get_mapping().get_mapping()), 5)
|
||||
self.assertEqual(5, len(model._hypervisors))
|
||||
self.assertEqual(35, len(model._vms))
|
||||
self.assertEqual(5, len(model.get_mapping().get_mapping()))
|
||||
|
||||
def test_add_hypervisor(self):
|
||||
model = ModelRoot()
|
||||
@@ -43,7 +43,7 @@ class TestModel(base.BaseTestCase):
|
||||
hypervisor = Hypervisor()
|
||||
hypervisor.uuid = id
|
||||
model.add_hypervisor(hypervisor)
|
||||
self.assertEqual(model.get_hypervisor_from_id(id), hypervisor)
|
||||
self.assertEqual(hypervisor, model.get_hypervisor_from_id(id))
|
||||
|
||||
def test_delete_hypervisor(self):
|
||||
model = ModelRoot()
|
||||
@@ -51,7 +51,7 @@ class TestModel(base.BaseTestCase):
|
||||
hypervisor = Hypervisor()
|
||||
hypervisor.uuid = id
|
||||
model.add_hypervisor(hypervisor)
|
||||
self.assertEqual(model.get_hypervisor_from_id(id), hypervisor)
|
||||
self.assertEqual(hypervisor, model.get_hypervisor_from_id(id))
|
||||
model.remove_hypervisor(hypervisor)
|
||||
self.assertRaises(exception.HypervisorNotFound,
|
||||
model.get_hypervisor_from_id, id)
|
||||
|
||||
@@ -24,9 +24,9 @@ class TestNamedElement(base.BaseTestCase):
|
||||
def test_namedelement(self):
|
||||
id = ComputeResource()
|
||||
id.uuid = "BLABLABLA"
|
||||
self.assertEqual(id.uuid, "BLABLABLA")
|
||||
self.assertEqual("BLABLABLA", id.uuid)
|
||||
|
||||
def test_set_get_human_id(self):
|
||||
id = ComputeResource()
|
||||
id.human_id = "BLABLABLA"
|
||||
self.assertEqual(id.human_id, "BLABLABLA")
|
||||
self.assertEqual("BLABLABLA", id.human_id)
|
||||
|
||||
@@ -25,6 +25,6 @@ class TestVm(base.BaseTestCase):
|
||||
def test_namedelement(self):
|
||||
vm = VM()
|
||||
vm.state = VMState.ACTIVE
|
||||
self.assertEqual(vm.state, VMState.ACTIVE)
|
||||
self.assertEqual(VMState.ACTIVE, vm.state)
|
||||
vm.human_id = "human_05"
|
||||
self.assertEqual(vm.human_id, "human_05")
|
||||
self.assertEqual("human_05", vm.human_id)
|
||||
|
||||
@@ -79,10 +79,10 @@ class TestActionScheduling(base.DbTestCase):
|
||||
)
|
||||
|
||||
self.assertIsNotNone(action_plan.uuid)
|
||||
self.assertEqual(m_create_action.call_count, 1)
|
||||
self.assertEqual(1, m_create_action.call_count)
|
||||
filters = {'action_plan_id': action_plan.id}
|
||||
actions = objects.Action.dbapi.get_action_list(self.context, filters)
|
||||
self.assertEqual(actions[0].action_type, "migrate")
|
||||
self.assertEqual("migrate", actions[0].action_type)
|
||||
|
||||
def test_schedule_two_actions(self):
|
||||
default_planner = pbase.DefaultPlanner()
|
||||
@@ -108,12 +108,12 @@ class TestActionScheduling(base.DbTestCase):
|
||||
self.context, audit.id, solution
|
||||
)
|
||||
self.assertIsNotNone(action_plan.uuid)
|
||||
self.assertEqual(m_create_action.call_count, 2)
|
||||
self.assertEqual(2, m_create_action.call_count)
|
||||
# check order
|
||||
filters = {'action_plan_id': action_plan.id}
|
||||
actions = objects.Action.dbapi.get_action_list(self.context, filters)
|
||||
self.assertEqual(actions[0].action_type, "nop")
|
||||
self.assertEqual(actions[1].action_type, "migrate")
|
||||
self.assertEqual("nop", actions[0].action_type)
|
||||
self.assertEqual("migrate", actions[1].action_type)
|
||||
|
||||
|
||||
class TestDefaultPlanner(base.DbTestCase):
|
||||
|
||||
@@ -28,14 +28,14 @@ class TestDefaultSolution(base.BaseTestCase):
|
||||
solution.add_action(action_type="nop",
|
||||
resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36",
|
||||
input_parameters=parameters)
|
||||
self.assertEqual(len(solution.actions), 1)
|
||||
self.assertEqual(1, len(solution.actions))
|
||||
expected_action_type = "nop"
|
||||
expected_parameters = {
|
||||
"src_uuid_hypervisor": "server1",
|
||||
"dst_uuid_hypervisor": "server2",
|
||||
"resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36"
|
||||
}
|
||||
self.assertEqual(solution.actions[0].get('action_type'),
|
||||
expected_action_type)
|
||||
self.assertEqual(solution.actions[0].get('input_parameters'),
|
||||
expected_parameters)
|
||||
self.assertEqual(expected_action_type,
|
||||
solution.actions[0].get('action_type'))
|
||||
self.assertEqual(expected_parameters,
|
||||
solution.actions[0].get('input_parameters'))
|
||||
|
||||
@@ -46,4 +46,4 @@ class TestStrategySelector(TestCase):
|
||||
self.assertRaises(WatcherException,
|
||||
self.strategy_selector.define_from_goal,
|
||||
"DUMMY")
|
||||
self.assertEqual(mock_call.call_count, 0)
|
||||
self.assertEqual(0, mock_call.call_count)
|
||||
|
||||
@@ -0,0 +1,264 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
#
|
||||
# Authors: Vojtech CIMA <cima@zhaw.ch>
|
||||
# Bruno GRAZIOLI <gaea@zhaw.ch>
|
||||
# Sean MURPHY <murp@zhaw.ch>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from watcher.decision_engine.model import hypervisor
|
||||
from watcher.decision_engine.model import model_root as modelroot
|
||||
from watcher.decision_engine.model import resource
|
||||
from watcher.decision_engine.model import vm as modelvm
|
||||
from watcher.decision_engine.model import vm_state
|
||||
from watcher.metrics_engine.cluster_model_collector import base
|
||||
|
||||
|
||||
class FakerModelCollector(base.BaseClusterModelCollector):
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def get_latest_cluster_data_model(self):
|
||||
return self.generate_scenario_1()
|
||||
|
||||
def generate_scenario_1(self):
|
||||
"""Simulates cluster with 2 hypervisors and 2 VMs using 1:1 mapping"""
|
||||
|
||||
current_state_cluster = modelroot.ModelRoot()
|
||||
count_node = 2
|
||||
count_vm = 2
|
||||
|
||||
mem = resource.Resource(resource.ResourceType.memory)
|
||||
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
|
||||
disk = resource.Resource(resource.ResourceType.disk)
|
||||
disk_capacity =\
|
||||
resource.Resource(resource.ResourceType.disk_capacity)
|
||||
|
||||
current_state_cluster.create_resource(mem)
|
||||
current_state_cluster.create_resource(num_cores)
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = hypervisor.Hypervisor()
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.state = 'up'
|
||||
|
||||
mem.set_capacity(node, 64)
|
||||
disk_capacity.set_capacity(node, 250)
|
||||
num_cores.set_capacity(node, 40)
|
||||
current_state_cluster.add_hypervisor(node)
|
||||
|
||||
for i in range(0, count_vm):
|
||||
vm_uuid = "VM_{0}".format(i)
|
||||
vm = modelvm.VM()
|
||||
vm.uuid = vm_uuid
|
||||
vm.state = vm_state.VMState.ACTIVE
|
||||
mem.set_capacity(vm, 2)
|
||||
disk.set_capacity(vm, 20)
|
||||
num_cores.set_capacity(vm, 10)
|
||||
current_state_cluster.add_vm(vm)
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_hypervisor_from_id("Node_0"),
|
||||
current_state_cluster.get_vm_from_id("VM_0"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_hypervisor_from_id("Node_1"),
|
||||
current_state_cluster.get_vm_from_id("VM_1"))
|
||||
|
||||
return current_state_cluster
|
||||
|
||||
def generate_scenario_2(self):
|
||||
"""Simulates a cluster
|
||||
|
||||
With 4 hypervisors and 6 VMs all mapped to one hypervisor
|
||||
"""
|
||||
|
||||
current_state_cluster = modelroot.ModelRoot()
|
||||
count_node = 4
|
||||
count_vm = 6
|
||||
|
||||
mem = resource.Resource(resource.ResourceType.memory)
|
||||
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
|
||||
disk = resource.Resource(resource.ResourceType.disk)
|
||||
disk_capacity =\
|
||||
resource.Resource(resource.ResourceType.disk_capacity)
|
||||
|
||||
current_state_cluster.create_resource(mem)
|
||||
current_state_cluster.create_resource(num_cores)
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = hypervisor.Hypervisor()
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.state = 'up'
|
||||
|
||||
mem.set_capacity(node, 64)
|
||||
disk_capacity.set_capacity(node, 250)
|
||||
num_cores.set_capacity(node, 16)
|
||||
current_state_cluster.add_hypervisor(node)
|
||||
|
||||
for i in range(0, count_vm):
|
||||
vm_uuid = "VM_{0}".format(i)
|
||||
vm = modelvm.VM()
|
||||
vm.uuid = vm_uuid
|
||||
vm.state = vm_state.VMState.ACTIVE
|
||||
mem.set_capacity(vm, 2)
|
||||
disk.set_capacity(vm, 20)
|
||||
num_cores.set_capacity(vm, 10)
|
||||
current_state_cluster.add_vm(vm)
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_hypervisor_from_id("Node_0"),
|
||||
current_state_cluster.get_vm_from_id("VM_%s" % str(i)))
|
||||
|
||||
return current_state_cluster
|
||||
|
||||
def generate_scenario_3(self):
|
||||
"""Simulates a cluster
|
||||
|
||||
With 4 hypervisors and 6 VMs all mapped to one hypervisor
|
||||
"""
|
||||
|
||||
current_state_cluster = modelroot.ModelRoot()
|
||||
count_node = 2
|
||||
count_vm = 4
|
||||
|
||||
mem = resource.Resource(resource.ResourceType.memory)
|
||||
num_cores = resource.Resource(resource.ResourceType.cpu_cores)
|
||||
disk = resource.Resource(resource.ResourceType.disk)
|
||||
disk_capacity =\
|
||||
resource.Resource(resource.ResourceType.disk_capacity)
|
||||
|
||||
current_state_cluster.create_resource(mem)
|
||||
current_state_cluster.create_resource(num_cores)
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = hypervisor.Hypervisor()
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.state = 'up'
|
||||
|
||||
mem.set_capacity(node, 64)
|
||||
disk_capacity.set_capacity(node, 250)
|
||||
num_cores.set_capacity(node, 10)
|
||||
current_state_cluster.add_hypervisor(node)
|
||||
|
||||
for i in range(6, 6 + count_vm):
|
||||
vm_uuid = "VM_{0}".format(i)
|
||||
vm = modelvm.VM()
|
||||
vm.uuid = vm_uuid
|
||||
vm.state = vm_state.VMState.ACTIVE
|
||||
mem.set_capacity(vm, 2)
|
||||
disk.set_capacity(vm, 20)
|
||||
num_cores.set_capacity(vm, 2 ** (i-6))
|
||||
current_state_cluster.add_vm(vm)
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_hypervisor_from_id("Node_0"),
|
||||
current_state_cluster.get_vm_from_id("VM_%s" % str(i)))
|
||||
|
||||
return current_state_cluster
|
||||
|
||||
|
||||
class FakeCeilometerMetrics(object):
|
||||
def __init__(self, model):
|
||||
self.model = model
|
||||
|
||||
def mock_get_statistics(self, resource_id, meter_name, period=3600,
|
||||
aggregate='avg'):
|
||||
if meter_name == "compute.node.cpu.percent":
|
||||
return self.get_hypervisor_cpu_util(resource_id)
|
||||
elif meter_name == "cpu_util":
|
||||
return self.get_vm_cpu_util(resource_id)
|
||||
elif meter_name == "memory.usage":
|
||||
return self.get_vm_ram_util(resource_id)
|
||||
elif meter_name == "disk.root.size":
|
||||
return self.get_vm_disk_root_size(resource_id)
|
||||
|
||||
def get_hypervisor_cpu_util(self, r_id):
|
||||
"""Calculates hypervisor utilization dynamicaly.
|
||||
|
||||
Hypervisor CPU utilization should consider
|
||||
and corelate with actual VM-hypervisor mappings
|
||||
provided within a cluster model.
|
||||
Returns relative hypervisor CPU utilization <0, 100>.
|
||||
:param r_id: resource id
|
||||
"""
|
||||
|
||||
id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
|
||||
vms = self.model.get_mapping().get_node_vms_from_id(id)
|
||||
util_sum = 0.0
|
||||
hypervisor_cpu_cores = self.model.get_resource_from_id(
|
||||
resource.ResourceType.cpu_cores).get_capacity_from_id(id)
|
||||
for vm_uuid in vms:
|
||||
vm_cpu_cores = self.model.get_resource_from_id(
|
||||
resource.ResourceType.cpu_cores).\
|
||||
get_capacity(self.model.get_vm_from_id(vm_uuid))
|
||||
total_cpu_util = vm_cpu_cores * self.get_vm_cpu_util(vm_uuid)
|
||||
util_sum += total_cpu_util / 100.0
|
||||
util_sum /= hypervisor_cpu_cores
|
||||
return util_sum * 100.0
|
||||
|
||||
def get_vm_cpu_util(self, r_id):
|
||||
vm_cpu_util = dict()
|
||||
vm_cpu_util['VM_0'] = 10
|
||||
vm_cpu_util['VM_1'] = 30
|
||||
vm_cpu_util['VM_2'] = 60
|
||||
vm_cpu_util['VM_3'] = 20
|
||||
vm_cpu_util['VM_4'] = 40
|
||||
vm_cpu_util['VM_5'] = 50
|
||||
vm_cpu_util['VM_6'] = 100
|
||||
vm_cpu_util['VM_7'] = 100
|
||||
vm_cpu_util['VM_8'] = 100
|
||||
vm_cpu_util['VM_9'] = 100
|
||||
return vm_cpu_util[str(r_id)]
|
||||
|
||||
def get_vm_ram_util(self, r_id):
|
||||
vm_ram_util = dict()
|
||||
vm_ram_util['VM_0'] = 1
|
||||
vm_ram_util['VM_1'] = 2
|
||||
vm_ram_util['VM_2'] = 4
|
||||
vm_ram_util['VM_3'] = 8
|
||||
vm_ram_util['VM_4'] = 3
|
||||
vm_ram_util['VM_5'] = 2
|
||||
vm_ram_util['VM_6'] = 1
|
||||
vm_ram_util['VM_7'] = 2
|
||||
vm_ram_util['VM_8'] = 4
|
||||
vm_ram_util['VM_9'] = 8
|
||||
return vm_ram_util[str(r_id)]
|
||||
|
||||
def get_vm_disk_root_size(self, r_id):
|
||||
vm_disk_util = dict()
|
||||
vm_disk_util['VM_0'] = 10
|
||||
vm_disk_util['VM_1'] = 15
|
||||
vm_disk_util['VM_2'] = 30
|
||||
vm_disk_util['VM_3'] = 35
|
||||
vm_disk_util['VM_4'] = 20
|
||||
vm_disk_util['VM_5'] = 25
|
||||
vm_disk_util['VM_6'] = 25
|
||||
vm_disk_util['VM_7'] = 25
|
||||
vm_disk_util['VM_8'] = 25
|
||||
vm_disk_util['VM_9'] = 25
|
||||
return vm_disk_util[str(r_id)]
|
||||
@@ -20,10 +20,10 @@ from watcher.decision_engine.model import hypervisor
|
||||
from watcher.decision_engine.model import model_root as modelroot
|
||||
from watcher.decision_engine.model import resource
|
||||
from watcher.decision_engine.model import vm as modelvm
|
||||
from watcher.metrics_engine.cluster_model_collector import api
|
||||
from watcher.metrics_engine.cluster_model_collector import base
|
||||
|
||||
|
||||
class FakerModelCollector(api.BaseClusterModelCollector):
|
||||
class FakerModelCollector(base.BaseClusterModelCollector):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ class TestBasicConsolidation(base.BaseTestCase):
|
||||
size_cluster = len(
|
||||
self.fake_cluster.generate_scenario_1().get_all_hypervisors())
|
||||
size_cluster_assert = 5
|
||||
self.assertEqual(size_cluster, size_cluster_assert)
|
||||
self.assertEqual(size_cluster_assert, size_cluster)
|
||||
|
||||
def test_basic_consolidation_score_hypervisor(self):
|
||||
cluster = self.fake_cluster.generate_scenario_1()
|
||||
@@ -50,20 +50,17 @@ class TestBasicConsolidation(base.BaseTestCase):
|
||||
statistic_aggregation=self.fake_metrics.mock_get_statistics)
|
||||
|
||||
node_1_score = 0.023333333333333317
|
||||
self.assertEqual(
|
||||
sercon.calculate_score_node(
|
||||
cluster.get_hypervisor_from_id("Node_1"),
|
||||
cluster), node_1_score)
|
||||
self.assertEqual(node_1_score, sercon.calculate_score_node(
|
||||
cluster.get_hypervisor_from_id("Node_1"),
|
||||
cluster))
|
||||
node_2_score = 0.26666666666666666
|
||||
self.assertEqual(
|
||||
sercon.calculate_score_node(
|
||||
cluster.get_hypervisor_from_id("Node_2"),
|
||||
cluster), node_2_score)
|
||||
self.assertEqual(node_2_score, sercon.calculate_score_node(
|
||||
cluster.get_hypervisor_from_id("Node_2"),
|
||||
cluster))
|
||||
node_0_score = 0.023333333333333317
|
||||
self.assertEqual(
|
||||
sercon.calculate_score_node(
|
||||
cluster.get_hypervisor_from_id("Node_0"),
|
||||
cluster), node_0_score)
|
||||
self.assertEqual(node_0_score, sercon.calculate_score_node(
|
||||
cluster.get_hypervisor_from_id("Node_0"),
|
||||
cluster))
|
||||
|
||||
def test_basic_consolidation_score_vm(self):
|
||||
cluster = self.fake_cluster.generate_scenario_1()
|
||||
@@ -72,21 +69,20 @@ class TestBasicConsolidation(base.BaseTestCase):
|
||||
statistic_aggregation=self.fake_metrics.mock_get_statistics)
|
||||
vm_0 = cluster.get_vm_from_id("VM_0")
|
||||
vm_0_score = 0.023333333333333317
|
||||
self.assertEqual(sercon.calculate_score_vm(vm_0, cluster), vm_0_score)
|
||||
self.assertEqual(vm_0_score, sercon.calculate_score_vm(vm_0, cluster))
|
||||
|
||||
vm_1 = cluster.get_vm_from_id("VM_1")
|
||||
vm_1_score = 0.023333333333333317
|
||||
self.assertEqual(sercon.calculate_score_vm(vm_1, cluster),
|
||||
vm_1_score)
|
||||
self.assertEqual(vm_1_score, sercon.calculate_score_vm(vm_1, cluster))
|
||||
vm_2 = cluster.get_vm_from_id("VM_2")
|
||||
vm_2_score = 0.033333333333333326
|
||||
self.assertEqual(sercon.calculate_score_vm(vm_2, cluster), vm_2_score)
|
||||
self.assertEqual(vm_2_score, sercon.calculate_score_vm(vm_2, cluster))
|
||||
vm_6 = cluster.get_vm_from_id("VM_6")
|
||||
vm_6_score = 0.02666666666666669
|
||||
self.assertEqual(sercon.calculate_score_vm(vm_6, cluster), vm_6_score)
|
||||
self.assertEqual(vm_6_score, sercon.calculate_score_vm(vm_6, cluster))
|
||||
vm_7 = cluster.get_vm_from_id("VM_7")
|
||||
vm_7_score = 0.013333333333333345
|
||||
self.assertEqual(sercon.calculate_score_vm(vm_7, cluster), vm_7_score)
|
||||
self.assertEqual(vm_7_score, sercon.calculate_score_vm(vm_7, cluster))
|
||||
|
||||
def test_basic_consolidation_score_vm_disk(self):
|
||||
cluster = self.fake_cluster.generate_scenario_5_with_vm_disk_0()
|
||||
@@ -95,7 +91,7 @@ class TestBasicConsolidation(base.BaseTestCase):
|
||||
statistic_aggregation=self.fake_metrics.mock_get_statistics)
|
||||
vm_0 = cluster.get_vm_from_id("VM_0")
|
||||
vm_0_score = 0.023333333333333355
|
||||
self.assertEqual(sercon.calculate_score_vm(vm_0, cluster), vm_0_score)
|
||||
self.assertEqual(vm_0_score, sercon.calculate_score_vm(vm_0, cluster))
|
||||
|
||||
def test_basic_consolidation_weight(self):
|
||||
cluster = self.fake_cluster.generate_scenario_1()
|
||||
@@ -109,9 +105,9 @@ class TestBasicConsolidation(base.BaseTestCase):
|
||||
# mem 8 Go
|
||||
mem = 8
|
||||
vm_0_weight_assert = 3.1999999999999997
|
||||
self.assertEqual(sercon.calculate_weight(cluster, vm_0, cores, disk,
|
||||
mem),
|
||||
vm_0_weight_assert)
|
||||
self.assertEqual(vm_0_weight_assert,
|
||||
sercon.calculate_weight(cluster, vm_0, cores, disk,
|
||||
mem))
|
||||
|
||||
def test_calculate_migration_efficacy(self):
|
||||
sercon = strategies.BasicConsolidation()
|
||||
@@ -162,7 +158,7 @@ class TestBasicConsolidation(base.BaseTestCase):
|
||||
|
||||
threshold_cores = sercon.get_threshold_cores()
|
||||
sercon.set_threshold_cores(threshold_cores + 1)
|
||||
self.assertEqual(sercon.get_threshold_cores(), threshold_cores + 1)
|
||||
self.assertEqual(threshold_cores + 1, sercon.get_threshold_cores())
|
||||
|
||||
def test_number_of(self):
|
||||
sercon = strategies.BasicConsolidation()
|
||||
@@ -186,8 +182,8 @@ class TestBasicConsolidation(base.BaseTestCase):
|
||||
num_migrations = actions_counter.get("migrate", 0)
|
||||
num_hypervisor_state_change = actions_counter.get(
|
||||
"change_hypervisor_state", 0)
|
||||
self.assertEqual(num_migrations, expected_num_migrations)
|
||||
self.assertEqual(num_hypervisor_state_change, expected_power_state)
|
||||
self.assertEqual(expected_num_migrations, num_migrations)
|
||||
self.assertEqual(expected_power_state, num_hypervisor_state_change)
|
||||
|
||||
# calculate_weight
|
||||
def test_execute_no_workload(self):
|
||||
@@ -204,7 +200,7 @@ class TestBasicConsolidation(base.BaseTestCase):
|
||||
as mock_score_call:
|
||||
mock_score_call.return_value = 0
|
||||
solution = sercon.execute(model)
|
||||
self.assertEqual(solution.efficacy, 100)
|
||||
self.assertEqual(100, solution.efficacy)
|
||||
|
||||
def test_check_parameters(self):
|
||||
sercon = strategies.BasicConsolidation()
|
||||
|
||||
@@ -51,7 +51,7 @@ class TestOutletTempControl(base.BaseTestCase):
|
||||
cap_mem,
|
||||
cap_disk)
|
||||
|
||||
self.assertEqual((cores_used, mem_used, disk_used), (10, 2, 20))
|
||||
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
|
||||
|
||||
def test_group_hosts_by_outlet_temp(self):
|
||||
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
|
||||
@@ -59,8 +59,8 @@ class TestOutletTempControl(base.BaseTestCase):
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=self.fake_metrics.mock_get_statistics)
|
||||
h1, h2 = strategy.group_hosts_by_outlet_temp(model)
|
||||
self.assertEqual(h1[0]['hv'].uuid, 'Node_1')
|
||||
self.assertEqual(h2[0]['hv'].uuid, 'Node_0')
|
||||
self.assertEqual('Node_1', h1[0]['hv'].uuid)
|
||||
self.assertEqual('Node_0', h2[0]['hv'].uuid)
|
||||
|
||||
def test_choose_vm_to_migrate(self):
|
||||
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
|
||||
@@ -69,9 +69,9 @@ class TestOutletTempControl(base.BaseTestCase):
|
||||
statistic_aggregation=self.fake_metrics.mock_get_statistics)
|
||||
h1, h2 = strategy.group_hosts_by_outlet_temp(model)
|
||||
vm_to_mig = strategy.choose_vm_to_migrate(model, h1)
|
||||
self.assertEqual(vm_to_mig[0].uuid, 'Node_1')
|
||||
self.assertEqual(vm_to_mig[1].uuid,
|
||||
"a4cab39b-9828-413a-bf88-f76921bf1517")
|
||||
self.assertEqual('Node_1', vm_to_mig[0].uuid)
|
||||
self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517',
|
||||
vm_to_mig[1].uuid)
|
||||
|
||||
def test_filter_dest_servers(self):
|
||||
model = self.fake_cluster.generate_scenario_3_with_2_hypervisors()
|
||||
@@ -81,8 +81,8 @@ class TestOutletTempControl(base.BaseTestCase):
|
||||
h1, h2 = strategy.group_hosts_by_outlet_temp(model)
|
||||
vm_to_mig = strategy.choose_vm_to_migrate(model, h1)
|
||||
dest_hosts = strategy.filter_dest_servers(model, h2, vm_to_mig[1])
|
||||
self.assertEqual(len(dest_hosts), 1)
|
||||
self.assertEqual(dest_hosts[0]['hv'].uuid, 'Node_0')
|
||||
self.assertEqual(1, len(dest_hosts))
|
||||
self.assertEqual('Node_0', dest_hosts[0]['hv'].uuid)
|
||||
|
||||
def test_exception_model(self):
|
||||
strategy = strategies.OutletTempControl()
|
||||
@@ -111,7 +111,7 @@ class TestOutletTempControl(base.BaseTestCase):
|
||||
generate_scenario_4_with_1_hypervisor_no_vm()
|
||||
|
||||
solution = strategy.execute(model)
|
||||
self.assertEqual(solution.actions, [])
|
||||
self.assertEqual([], solution.actions)
|
||||
|
||||
def test_execute(self):
|
||||
strategy = strategies.OutletTempControl()
|
||||
@@ -123,7 +123,7 @@ class TestOutletTempControl(base.BaseTestCase):
|
||||
[action.get('action_type') for action in solution.actions])
|
||||
|
||||
num_migrations = actions_counter.get("migrate", 0)
|
||||
self.assertEqual(num_migrations, 1)
|
||||
self.assertEqual(1, num_migrations)
|
||||
|
||||
def test_check_parameters(self):
|
||||
outlet = strategies.OutletTempControl()
|
||||
|
||||
@@ -0,0 +1,277 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
#
|
||||
# Authors: Vojtech CIMA <cima@zhaw.ch>
|
||||
# Bruno GRAZIOLI <gaea@zhaw.ch>
|
||||
# Sean MURPHY <murp@zhaw.ch>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import mock
|
||||
|
||||
from watcher.decision_engine.strategy import strategies
|
||||
from watcher.tests import base
|
||||
from watcher.tests.decision_engine.strategy.strategies \
|
||||
import faker_cluster_and_metrics
|
||||
|
||||
|
||||
class TestSmartConsolidation(base.BaseTestCase):
|
||||
fake_cluster = faker_cluster_and_metrics.FakerModelCollector()
|
||||
|
||||
def test_get_vm_utilization(self):
|
||||
cluster = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(cluster)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
vm_0 = cluster.get_vm_from_id("VM_0")
|
||||
vm_util = dict(cpu=1.0, ram=1, disk=10)
|
||||
self.assertEqual(vm_util,
|
||||
strategy.get_vm_utilization(vm_0.uuid, cluster))
|
||||
|
||||
def test_get_hypervisor_utilization(self):
|
||||
cluster = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(cluster)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
node_0 = cluster.get_hypervisor_from_id("Node_0")
|
||||
node_util = dict(cpu=1.0, ram=1, disk=10)
|
||||
self.assertEqual(node_util,
|
||||
strategy.get_hypervisor_utilization(node_0, cluster))
|
||||
|
||||
def test_get_hypervisor_capacity(self):
|
||||
cluster = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(cluster)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
node_0 = cluster.get_hypervisor_from_id("Node_0")
|
||||
node_util = dict(cpu=40, ram=64, disk=250)
|
||||
self.assertEqual(node_util,
|
||||
strategy.get_hypervisor_capacity(node_0, cluster))
|
||||
|
||||
def test_get_relative_hypervisor_utilization(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
hypervisor = model.get_hypervisor_from_id('Node_0')
|
||||
rhu = strategy.get_relative_hypervisor_utilization(hypervisor, model)
|
||||
expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025}
|
||||
self.assertEqual(expected_rhu, rhu)
|
||||
|
||||
def test_get_relative_cluster_utilization(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
cru = strategy.get_relative_cluster_utilization(model)
|
||||
expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375}
|
||||
self.assertEqual(expected_cru, cru)
|
||||
|
||||
def test_add_migration(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h1 = model.get_hypervisor_from_id('Node_0')
|
||||
h2 = model.get_hypervisor_from_id('Node_1')
|
||||
vm_uuid = 'VM_0'
|
||||
strategy.add_migration(vm_uuid, h1, h2, model)
|
||||
self.assertEqual(1, len(strategy.solution.actions))
|
||||
expected = {'action_type': 'migrate',
|
||||
'input_parameters': {'dst_hypervisor': h2.uuid,
|
||||
'src_hypervisor': h1.uuid,
|
||||
'migration_type': 'live',
|
||||
'resource_id': vm_uuid}}
|
||||
self.assertEqual(expected, strategy.solution.actions[0])
|
||||
|
||||
def test_is_overloaded(self):
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h1 = model.get_hypervisor_from_id('Node_0')
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
res = strategy.is_overloaded(h1, model, cc)
|
||||
self.assertEqual(False, res)
|
||||
|
||||
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
|
||||
res = strategy.is_overloaded(h1, model, cc)
|
||||
self.assertEqual(False, res)
|
||||
|
||||
cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0}
|
||||
res = strategy.is_overloaded(h1, model, cc)
|
||||
self.assertEqual(True, res)
|
||||
|
||||
def test_vm_fits(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h = model.get_hypervisor_from_id('Node_1')
|
||||
vm_uuid = 'VM_0'
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
res = strategy.vm_fits(vm_uuid, h, model, cc)
|
||||
self.assertEqual(True, res)
|
||||
|
||||
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
|
||||
res = strategy.vm_fits(vm_uuid, h, model, cc)
|
||||
self.assertEqual(False, res)
|
||||
|
||||
def test_add_action_activate_hypervisor(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h = model.get_hypervisor_from_id('Node_0')
|
||||
strategy.add_action_activate_hypervisor(h)
|
||||
expected = [{'action_type': 'change_nova_service_state',
|
||||
'input_parameters': {'state': 'up',
|
||||
'resource_id': 'Node_0'}}]
|
||||
self.assertEqual(expected, strategy.solution.actions)
|
||||
|
||||
def test_add_action_deactivate_hypervisor(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h = model.get_hypervisor_from_id('Node_0')
|
||||
strategy.add_action_deactivate_hypervisor(h)
|
||||
expected = [{'action_type': 'change_nova_service_state',
|
||||
'input_parameters': {'state': 'down',
|
||||
'resource_id': 'Node_0'}}]
|
||||
self.assertEqual(expected, strategy.solution.actions)
|
||||
|
||||
def test_deactivate_unused_hypervisors(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h1 = model.get_hypervisor_from_id('Node_0')
|
||||
h2 = model.get_hypervisor_from_id('Node_1')
|
||||
vm_uuid = 'VM_0'
|
||||
strategy.deactivate_unused_hypervisors(model)
|
||||
self.assertEqual(0, len(strategy.solution.actions))
|
||||
|
||||
# Migrate VM to free the hypervisor
|
||||
strategy.add_migration(vm_uuid, h1, h2, model)
|
||||
|
||||
strategy.deactivate_unused_hypervisors(model)
|
||||
expected = {'action_type': 'change_nova_service_state',
|
||||
'input_parameters': {'state': 'down',
|
||||
'resource_id': 'Node_0'}}
|
||||
self.assertEqual(2, len(strategy.solution.actions))
|
||||
self.assertEqual(expected, strategy.solution.actions[1])
|
||||
|
||||
def test_offload_phase(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
strategy.offload_phase(model, cc)
|
||||
expected = []
|
||||
self.assertEqual(expected, strategy.solution.actions)
|
||||
|
||||
def test_consolidation_phase(self):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h1 = model.get_hypervisor_from_id('Node_0')
|
||||
h2 = model.get_hypervisor_from_id('Node_1')
|
||||
vm_uuid = 'VM_0'
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
strategy.consolidation_phase(model, cc)
|
||||
expected = [{'action_type': 'migrate',
|
||||
'input_parameters': {'dst_hypervisor': h2.uuid,
|
||||
'src_hypervisor': h1.uuid,
|
||||
'migration_type': 'live',
|
||||
'resource_id': vm_uuid}}]
|
||||
self.assertEqual(expected, strategy.solution.actions)
|
||||
|
||||
def test_strategy(self):
|
||||
model = self.fake_cluster.generate_scenario_2()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h1 = model.get_hypervisor_from_id('Node_0')
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
strategy.offload_phase(model, cc)
|
||||
strategy.consolidation_phase(model, cc)
|
||||
strategy.optimize_solution(model)
|
||||
h2 = strategy.solution.actions[0][
|
||||
'input_parameters']['dst_hypervisor']
|
||||
expected = [{'action_type': 'migrate',
|
||||
'input_parameters': {'dst_hypervisor': h2,
|
||||
'src_hypervisor': h1.uuid,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'VM_3'}},
|
||||
{'action_type': 'migrate',
|
||||
'input_parameters': {'dst_hypervisor': h2,
|
||||
'src_hypervisor': h1.uuid,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'VM_1'}}]
|
||||
|
||||
self.assertEqual(expected, strategy.solution.actions)
|
||||
|
||||
def test_strategy2(self):
|
||||
model = self.fake_cluster.generate_scenario_3()
|
||||
fake_metrics = faker_cluster_and_metrics.FakeCeilometerMetrics(model)
|
||||
strategy = strategies.VMWorkloadConsolidation()
|
||||
strategy.ceilometer = mock.MagicMock(
|
||||
statistic_aggregation=fake_metrics.mock_get_statistics)
|
||||
h1 = model.get_hypervisor_from_id('Node_0')
|
||||
h2 = model.get_hypervisor_from_id('Node_1')
|
||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||
strategy.offload_phase(model, cc)
|
||||
expected = [{'action_type': 'migrate',
|
||||
'input_parameters': {'dst_hypervisor': h2.uuid,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'VM_6',
|
||||
'src_hypervisor': h1.uuid}},
|
||||
{'action_type': 'migrate',
|
||||
'input_parameters': {'dst_hypervisor': h2.uuid,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'VM_7',
|
||||
'src_hypervisor': h1.uuid}},
|
||||
{'action_type': 'migrate',
|
||||
'input_parameters': {'dst_hypervisor': h2.uuid,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'VM_8',
|
||||
'src_hypervisor': h1.uuid}}]
|
||||
self.assertEqual(expected, strategy.solution.actions)
|
||||
strategy.consolidation_phase(model, cc)
|
||||
expected.append({'action_type': 'migrate',
|
||||
'input_parameters': {'dst_hypervisor': h1.uuid,
|
||||
'migration_type': 'live',
|
||||
'resource_id': 'VM_7',
|
||||
'src_hypervisor': h2.uuid}})
|
||||
self.assertEqual(expected, strategy.solution.actions)
|
||||
strategy.optimize_solution(model)
|
||||
del expected[3]
|
||||
del expected[1]
|
||||
self.assertEqual(expected, strategy.solution.actions)
|
||||
@@ -57,7 +57,7 @@ class TestActionObject(base.DbTestCase):
|
||||
autospec=True) as mock_get_list:
|
||||
mock_get_list.return_value = [self.fake_action]
|
||||
actions = objects.Action.list(self.context)
|
||||
self.assertEqual(mock_get_list.call_count, 1)
|
||||
self.assertEqual(1, mock_get_list.call_count)
|
||||
self.assertThat(actions, HasLength(1))
|
||||
self.assertIsInstance(actions[0], objects.Action)
|
||||
self.assertEqual(self.context, actions[0]._context)
|
||||
|
||||
@@ -57,7 +57,7 @@ class TestActionPlanObject(base.DbTestCase):
|
||||
autospec=True) as mock_get_list:
|
||||
mock_get_list.return_value = [self.fake_action_plan]
|
||||
action_plans = objects.ActionPlan.list(self.context)
|
||||
self.assertEqual(mock_get_list.call_count, 1)
|
||||
self.assertEqual(1, mock_get_list.call_count)
|
||||
self.assertThat(action_plans, HasLength(1))
|
||||
self.assertIsInstance(action_plans[0], objects.ActionPlan)
|
||||
self.assertEqual(self.context, action_plans[0]._context)
|
||||
|
||||
@@ -57,7 +57,7 @@ class TestAuditObject(base.DbTestCase):
|
||||
autospec=True) as mock_get_list:
|
||||
mock_get_list.return_value = [self.fake_audit]
|
||||
audits = objects.Audit.list(self.context)
|
||||
self.assertEqual(mock_get_list.call_count, 1)
|
||||
self.assertEqual(1, mock_get_list.call_count, 1)
|
||||
self.assertThat(audits, HasLength(1))
|
||||
self.assertIsInstance(audits[0], objects.Audit)
|
||||
self.assertEqual(self.context, audits[0]._context)
|
||||
|
||||
@@ -70,7 +70,7 @@ class TestAuditTemplateObject(base.DbTestCase):
|
||||
autospec=True) as mock_get_list:
|
||||
mock_get_list.return_value = [self.fake_audit_template]
|
||||
audit_templates = objects.AuditTemplate.list(self.context)
|
||||
self.assertEqual(mock_get_list.call_count, 1)
|
||||
self.assertEqual(1, mock_get_list.call_count)
|
||||
self.assertThat(audit_templates, HasLength(1))
|
||||
self.assertIsInstance(audit_templates[0], objects.AuditTemplate)
|
||||
self.assertEqual(self.context, audit_templates[0]._context)
|
||||
|
||||
@@ -118,39 +118,39 @@ class TestUtils(test_base.TestCase):
|
||||
def test_datetime_or_none(self):
|
||||
naive_dt = datetime.datetime.now()
|
||||
dt = timeutils.parse_isotime(timeutils.isotime(naive_dt))
|
||||
self.assertEqual(utils.datetime_or_none(dt), dt)
|
||||
self.assertEqual(utils.datetime_or_none(dt),
|
||||
naive_dt.replace(tzinfo=iso8601.iso8601.Utc(),
|
||||
microsecond=0))
|
||||
self.assertEqual(dt, utils.datetime_or_none(dt))
|
||||
self.assertEqual(naive_dt.replace(tzinfo=iso8601.iso8601.Utc(),
|
||||
microsecond=0),
|
||||
utils.datetime_or_none(dt))
|
||||
self.assertIsNone(utils.datetime_or_none(None))
|
||||
self.assertRaises(ValueError, utils.datetime_or_none, 'foo')
|
||||
|
||||
def test_datetime_or_str_or_none(self):
|
||||
dts = timeutils.isotime()
|
||||
dt = timeutils.parse_isotime(dts)
|
||||
self.assertEqual(utils.datetime_or_str_or_none(dt), dt)
|
||||
self.assertEqual(dt, utils.datetime_or_str_or_none(dt))
|
||||
self.assertIsNone(utils.datetime_or_str_or_none(None))
|
||||
self.assertEqual(utils.datetime_or_str_or_none(dts), dt)
|
||||
self.assertEqual(dt, utils.datetime_or_str_or_none(dts))
|
||||
self.assertRaises(ValueError, utils.datetime_or_str_or_none, 'foo')
|
||||
|
||||
def test_int_or_none(self):
|
||||
self.assertEqual(utils.int_or_none(1), 1)
|
||||
self.assertEqual(utils.int_or_none('1'), 1)
|
||||
self.assertEqual(1, utils.int_or_none(1))
|
||||
self.assertEqual(1, utils.int_or_none('1'))
|
||||
self.assertIsNone(utils.int_or_none(None))
|
||||
self.assertRaises(ValueError, utils.int_or_none, 'foo')
|
||||
|
||||
def test_str_or_none(self):
|
||||
class Obj(object):
|
||||
pass
|
||||
self.assertEqual(utils.str_or_none('foo'), 'foo')
|
||||
self.assertEqual(utils.str_or_none(1), '1')
|
||||
self.assertEqual('foo', utils.str_or_none('foo'))
|
||||
self.assertEqual('1', utils.str_or_none(1))
|
||||
self.assertIsNone(utils.str_or_none(None))
|
||||
|
||||
def test_ip_or_none(self):
|
||||
ip4 = netaddr.IPAddress('1.2.3.4', 4)
|
||||
ip6 = netaddr.IPAddress('1::2', 6)
|
||||
self.assertEqual(utils.ip_or_none(4)('1.2.3.4'), ip4)
|
||||
self.assertEqual(utils.ip_or_none(6)('1::2'), ip6)
|
||||
self.assertEqual(ip4, utils.ip_or_none(4)('1.2.3.4'))
|
||||
self.assertEqual(ip6, utils.ip_or_none(6)('1::2'))
|
||||
self.assertIsNone(utils.ip_or_none(4)(None))
|
||||
self.assertIsNone(utils.ip_or_none(6)(None))
|
||||
self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(4), 'foo')
|
||||
@@ -170,7 +170,7 @@ class TestUtils(test_base.TestCase):
|
||||
|
||||
def test_dt_deserializer(self):
|
||||
dt = timeutils.parse_isotime('1955-11-05T00:00:00Z')
|
||||
self.assertEqual(utils.dt_deserializer(timeutils.isotime(dt)), dt)
|
||||
self.assertEqual(dt, utils.dt_deserializer(timeutils.isotime(dt)))
|
||||
self.assertIsNone(utils.dt_deserializer(None))
|
||||
self.assertRaises(ValueError, utils.dt_deserializer, 'foo')
|
||||
|
||||
|
||||
@@ -47,9 +47,10 @@ class TestShowListAction(base.BaseInfraOptimTest):
|
||||
_, action = self.client.show_action(
|
||||
self.action_plan["first_action_uuid"])
|
||||
|
||||
self.assertEqual(action['uuid'], self.action_plan["first_action_uuid"])
|
||||
self.assertEqual(action['action_type'], "nop")
|
||||
self.assertEqual(action['state'], "PENDING")
|
||||
self.assertEqual(self.action_plan["first_action_uuid"],
|
||||
action['uuid'])
|
||||
self.assertEqual("nop", action['action_type'])
|
||||
self.assertEqual("PENDING", action['state'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
def test_show_action_with_links(self):
|
||||
@@ -81,9 +82,9 @@ class TestShowListAction(base.BaseInfraOptimTest):
|
||||
act['action_type'] for act in body['actions'])
|
||||
|
||||
# A dummy strategy generates 2 "nop" actions and 1 "sleep" action
|
||||
self.assertEqual(len(body['actions']), 3)
|
||||
self.assertEqual(action_counter.get("nop"), 2)
|
||||
self.assertEqual(action_counter.get("sleep"), 1)
|
||||
self.assertEqual(3, len(body['actions']))
|
||||
self.assertEqual(2, action_counter.get("nop"))
|
||||
self.assertEqual(1, action_counter.get("sleep"))
|
||||
|
||||
@test.attr(type="smoke")
|
||||
def test_list_actions_by_audit(self):
|
||||
@@ -97,6 +98,6 @@ class TestShowListAction(base.BaseInfraOptimTest):
|
||||
act['action_type'] for act in body['actions'])
|
||||
|
||||
# A dummy strategy generates 2 "nop" actions and 1 "sleep" action
|
||||
self.assertEqual(len(body['actions']), 3)
|
||||
self.assertEqual(action_counter.get("nop"), 2)
|
||||
self.assertEqual(action_counter.get("sleep"), 1)
|
||||
self.assertEqual(3, len(body['actions']))
|
||||
self.assertEqual(2, action_counter.get("nop"))
|
||||
self.assertEqual(1, action_counter.get("sleep"))
|
||||
|
||||
@@ -43,8 +43,8 @@ class TestCreateDeleteExecuteActionPlan(base.BaseInfraOptimTest):
|
||||
|
||||
_, action_plan = self.client.show_action_plan(action_plan['uuid'])
|
||||
|
||||
self.assertEqual(action_plan['audit_uuid'], audit['uuid'])
|
||||
self.assertEqual(action_plan['state'], 'RECOMMENDED')
|
||||
self.assertEqual(audit['uuid'], action_plan['audit_uuid'])
|
||||
self.assertEqual('RECOMMENDED', action_plan['state'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
def test_delete_action_plan(self):
|
||||
@@ -98,7 +98,7 @@ class TestCreateDeleteExecuteActionPlan(base.BaseInfraOptimTest):
|
||||
_, finished_ap = self.client.show_action_plan(action_plan['uuid'])
|
||||
|
||||
self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
|
||||
self.assertEqual(finished_ap['state'], 'SUCCEEDED')
|
||||
self.assertEqual('SUCCEEDED', finished_ap['state'])
|
||||
|
||||
|
||||
class TestShowListActionPlan(base.BaseInfraOptimTest):
|
||||
@@ -164,5 +164,5 @@ class TestShowListActionPlan(base.BaseInfraOptimTest):
|
||||
|
||||
next_marker = body['action_plans'][-1]['uuid']
|
||||
|
||||
self.assertEqual(len(body['action_plans']), 3)
|
||||
self.assertEqual(3, len(body['action_plans']))
|
||||
self.assertIn(next_marker, body['next'])
|
||||
|
||||
@@ -35,7 +35,7 @@ class TestApiDiscovery(base.BaseInfraOptimTest):
|
||||
def test_default_version(self):
|
||||
_, descr = self.client.get_api_description()
|
||||
default_version = descr['default_version']
|
||||
self.assertEqual(default_version['id'], 'v1')
|
||||
self.assertEqual('v1', default_version['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
def test_version_1_resources(self):
|
||||
|
||||
@@ -101,7 +101,7 @@ class TestCreateUpdateDeleteAudit(base.BaseInfraOptimTest):
|
||||
_, audit = self.client.show_audit(body['uuid'])
|
||||
|
||||
initial_audit_state = audit.pop('state')
|
||||
self.assertEqual(initial_audit_state, 'PENDING')
|
||||
self.assertEqual('PENDING', initial_audit_state)
|
||||
|
||||
self.assert_expected(audit, body)
|
||||
|
||||
@@ -176,7 +176,7 @@ class TestShowListAudit(base.BaseInfraOptimTest):
|
||||
_, body = self.client.list_audits(limit=3)
|
||||
|
||||
next_marker = body['audits'][-1]['uuid']
|
||||
self.assertEqual(len(body['audits']), 3)
|
||||
self.assertEqual(3, len(body['audits']))
|
||||
self.assertIn(next_marker, body['next'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
|
||||
@@ -121,7 +121,7 @@ class TestAuditTemplate(base.BaseInfraOptimTest):
|
||||
_, body = self.client.list_audit_templates(limit=3)
|
||||
|
||||
next_marker = body['audit_templates'][-1]['uuid']
|
||||
self.assertEqual(len(body['audit_templates']), 3)
|
||||
self.assertEqual(3, len(body['audit_templates']))
|
||||
self.assertIn(next_marker, body['next'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@@ -137,7 +137,7 @@ class TestAuditTemplate(base.BaseInfraOptimTest):
|
||||
new_name = 'my at new name %s' % uuid.uuid4()
|
||||
new_description = 'my new at description'
|
||||
new_host_aggregate = 10
|
||||
new_goal = 'A NEW GOAL'
|
||||
new_goal = 'BASIC_CONSOLIDATION'
|
||||
new_extra = {'key1': 'new-value1', 'key2': 'new-value2'}
|
||||
|
||||
patch = [{'path': '/name',
|
||||
|
||||
@@ -39,8 +39,8 @@ class TestShowListGoal(base.BaseInfraOptimTest):
|
||||
def test_show_goal(self):
|
||||
_, goal = self.client.show_goal(self.DUMMY_GOAL)
|
||||
|
||||
self.assertEqual(goal['name'], self.DUMMY_GOAL)
|
||||
self.assertEqual(goal['strategy'], "dummy")
|
||||
self.assertEqual(self.DUMMY_GOAL, goal['name'])
|
||||
self.assertEqual("dummy", goal['strategy'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
def test_show_goal_with_links(self):
|
||||
|
||||
@@ -140,7 +140,7 @@ class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest):
|
||||
action_plan_uuid=finished_ap["uuid"])
|
||||
|
||||
self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
|
||||
self.assertEqual(finished_ap['state'], 'SUCCEEDED')
|
||||
self.assertEqual('SUCCEEDED', finished_ap['state'])
|
||||
|
||||
for action in action_list['actions']:
|
||||
self.assertEqual(action.get('state'), 'SUCCEEDED')
|
||||
self.assertEqual('SUCCEEDED', action.get('state'))
|
||||
|
||||
@@ -71,9 +71,9 @@ class TestExecuteDummyStrategy(base.BaseInfraOptimScenarioTest):
|
||||
act['action_type'] for act in action_list['actions'])
|
||||
|
||||
self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
|
||||
self.assertEqual(finished_ap['state'], 'SUCCEEDED')
|
||||
self.assertEqual('SUCCEEDED', finished_ap['state'])
|
||||
|
||||
# A dummy strategy generates 2 "nop" actions and 1 "sleep" action
|
||||
self.assertEqual(len(action_list['actions']), 3)
|
||||
self.assertEqual(action_counter.get("nop"), 2)
|
||||
self.assertEqual(action_counter.get("sleep"), 1)
|
||||
self.assertEqual(3, len(action_list['actions']))
|
||||
self.assertEqual(2, action_counter.get("nop"))
|
||||
self.assertEqual(1, action_counter.get("sleep"))
|
||||
|
||||
Reference in New Issue
Block a user