Compare commits
74 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
16113e255b | ||
|
|
30d6f07ceb | ||
|
|
343a65952a | ||
|
|
9af6886b0e | ||
|
|
b0ef77f5d1 | ||
|
|
f5157f2894 | ||
|
|
13331935df | ||
|
|
8d61c1a2b4 | ||
|
|
6b4b5c2fe5 | ||
|
|
62623a7f77 | ||
|
|
9d2f8d11ec | ||
|
|
f1d064c759 | ||
|
|
6cb02c18a7 | ||
|
|
37fc37e138 | ||
|
|
b68685741e | ||
|
|
6721977f74 | ||
|
|
c303ad4cdc | ||
|
|
51c9db2936 | ||
|
|
1e003d4153 | ||
|
|
bab89fd769 | ||
|
|
e1e17ab0b9 | ||
|
|
3d542472f6 | ||
|
|
eaa09a4cfc | ||
|
|
e78f2d073f | ||
|
|
47004b7c67 | ||
|
|
9ecd22f4c8 | ||
|
|
daee2336a4 | ||
|
|
893b730a44 | ||
|
|
d5b6e0a54f | ||
|
|
13b89c8dd2 | ||
|
|
7a300832b2 | ||
|
|
d218e6f107 | ||
|
|
d2f70f9d6f | ||
|
|
4951854f76 | ||
|
|
ffbd263888 | ||
|
|
985c6c49f9 | ||
|
|
adac2c0c16 | ||
|
|
f700ca4e0f | ||
|
|
5b741b2a4d | ||
|
|
382f641b22 | ||
|
|
5da5db8b56 | ||
|
|
c4888fee63 | ||
|
|
76f85591ea | ||
|
|
b006cadd22 | ||
|
|
1fd2053001 | ||
|
|
6a920fd307 | ||
|
|
514eeb75ef | ||
|
|
b43633fa6d | ||
|
|
d5a7d7674c | ||
|
|
bce87b3d05 | ||
|
|
783627626c | ||
|
|
3043e57066 | ||
|
|
be8b163a62 | ||
|
|
4f38595e4e | ||
|
|
30def6f35b | ||
|
|
0b31828a01 | ||
|
|
b5ac97bc2d | ||
|
|
398974a7b0 | ||
|
|
3a29b4e710 | ||
|
|
8024dbf913 | ||
|
|
529b0d34ee | ||
|
|
dac0924194 | ||
|
|
3bb66b645c | ||
|
|
63cebc0bfa | ||
|
|
5a28ac772a | ||
|
|
fe7ad9e42b | ||
|
|
711de94855 | ||
|
|
a24b7f0b61 | ||
|
|
c03668cb02 | ||
|
|
aab18245eb | ||
|
|
c12178920b | ||
|
|
f733fbeecd | ||
|
|
bff76de6f1 | ||
|
|
22ee0aa8f7 |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -72,3 +72,6 @@ releasenotes/build
|
||||
|
||||
# Desktop Service Store
|
||||
*.DS_Store
|
||||
|
||||
# Autogenerated sample config file
|
||||
etc/watcher/watcher.conf.sample
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
[gerrit]
|
||||
host=review.openstack.org
|
||||
host=review.opendev.org
|
||||
port=29418
|
||||
project=openstack/watcher.git
|
||||
defaultbranch=stable/pike
|
||||
|
||||
9
.zuul.yaml
Normal file
9
.zuul.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
- project:
|
||||
templates:
|
||||
- openstack-python-jobs
|
||||
- openstack-python35-jobs
|
||||
- publish-openstack-sphinx-docs
|
||||
- check-requirements
|
||||
- release-notes-jobs
|
||||
gate:
|
||||
queue: watcher
|
||||
@@ -35,7 +35,7 @@ VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP
|
||||
NOVA_INSTANCES_PATH=/opt/stack/data/instances
|
||||
|
||||
# Enable the Ceilometer plugin for the compute agent
|
||||
enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
|
||||
enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
|
||||
disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api
|
||||
|
||||
LOGFILE=$DEST/logs/stack.sh.log
|
||||
|
||||
@@ -32,13 +32,13 @@ ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3,neutron
|
||||
enable_service n-cauth
|
||||
|
||||
# Enable the Watcher Dashboard plugin
|
||||
enable_plugin watcher-dashboard git://git.openstack.org/openstack/watcher-dashboard
|
||||
enable_plugin watcher-dashboard https://git.openstack.org/openstack/watcher-dashboard
|
||||
|
||||
# Enable the Watcher plugin
|
||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
||||
enable_plugin watcher https://git.openstack.org/openstack/watcher
|
||||
|
||||
# Enable the Ceilometer plugin
|
||||
enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
|
||||
enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
|
||||
|
||||
# This is the controller node, so disable the ceilometer compute agent
|
||||
disable_service ceilometer-acompute
|
||||
@@ -46,7 +46,7 @@ disable_service ceilometer-acompute
|
||||
enable_service ceilometer-api
|
||||
|
||||
# Enable the Gnocchi plugin
|
||||
enable_plugin gnocchi https://git.openstack.org/openstack/gnocchi
|
||||
enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi
|
||||
|
||||
LOGFILE=$DEST/logs/stack.sh.log
|
||||
LOGDAYS=2
|
||||
|
||||
@@ -165,7 +165,7 @@ You can easily generate and update a sample configuration file
|
||||
named :ref:`watcher.conf.sample <watcher_sample_configuration_files>` by using
|
||||
these following commands::
|
||||
|
||||
$ git clone git://git.openstack.org/openstack/watcher
|
||||
$ git clone https://git.openstack.org/openstack/watcher
|
||||
$ cd watcher/
|
||||
$ tox -e genconfig
|
||||
$ vi etc/watcher/watcher.conf.sample
|
||||
@@ -430,6 +430,26 @@ to Watcher receives Nova notifications in ``watcher_notifications`` as well.
|
||||
* Restart the Nova services.
|
||||
|
||||
|
||||
Configure Cinder Notifications
|
||||
==============================
|
||||
|
||||
Watcher can also consume notifications generated by the Cinder services, in
|
||||
order to build or update, in real time, its cluster data model related to
|
||||
storage resources. To do so, you have to update the Cinder configuration
|
||||
file on controller and volume nodes, in order to let Watcher receive Cinder
|
||||
notifications in a dedicated ``watcher_notifications`` channel.
|
||||
|
||||
* In the file ``/etc/cinder/cinder.conf``, update the section
|
||||
``[oslo_messaging_notifications]``, by redefining the list of topics
|
||||
into which Cinder services will publish events ::
|
||||
|
||||
[oslo_messaging_notifications]
|
||||
driver = messagingv2
|
||||
topics = notifications,watcher_notifications
|
||||
|
||||
* Restart the Cinder services.
|
||||
|
||||
|
||||
Workers
|
||||
=======
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ model. To enable the Watcher plugin with DevStack, add the following to the
|
||||
`[[local|localrc]]` section of your controller's `local.conf` to enable the
|
||||
Watcher plugin::
|
||||
|
||||
enable_plugin watcher git://git.openstack.org/openstack/watcher
|
||||
enable_plugin watcher https://git.openstack.org/openstack/watcher
|
||||
|
||||
For more detailed instructions, see `Detailed DevStack Instructions`_. Check
|
||||
out the `DevStack documentation`_ for more information regarding DevStack.
|
||||
|
||||
@@ -25,7 +25,7 @@ Prerequisites
|
||||
This document assumes you are using Ubuntu or Fedora, and that you have the
|
||||
following tools available on your system:
|
||||
|
||||
- Python_ 2.7 and 3.4
|
||||
- Python_ 2.7 and 3.5
|
||||
- git_
|
||||
- setuptools_
|
||||
- pip_
|
||||
@@ -77,13 +77,13 @@ extension, PyPi) cannot satisfy. These dependencies should be installed
|
||||
prior to using `pip`, and the installation method may vary depending on
|
||||
your platform.
|
||||
|
||||
* Ubuntu 14.04::
|
||||
* Ubuntu 16.04::
|
||||
|
||||
$ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev
|
||||
|
||||
* Fedora 19+::
|
||||
* Fedora 24+::
|
||||
|
||||
$ sudo yum install openssl-devel libffi-devel mysql-devel
|
||||
$ sudo dnf install redhat-rpm-config gcc python-devel libxml2-devel
|
||||
|
||||
* CentOS 7::
|
||||
|
||||
|
||||
@@ -178,7 +178,7 @@ Here below is how you would proceed to register ``DummyAction`` using pbr_:
|
||||
watcher_actions =
|
||||
dummy = thirdparty.dummy:DummyAction
|
||||
|
||||
.. _pbr: http://docs.openstack.org/developer/pbr/
|
||||
.. _pbr: https://docs.openstack.org/pbr/latest
|
||||
|
||||
|
||||
Using action plugins
|
||||
@@ -217,3 +217,11 @@ which is only able to process the Watcher built-in actions. Therefore, you will
|
||||
either have to use an existing third-party planner or :ref:`implement another
|
||||
planner <implement_planner_plugin>` that will be able to take into account your
|
||||
new action plugin.
|
||||
|
||||
|
||||
Test your new action
|
||||
====================
|
||||
|
||||
In order to test your new action via a manual test or a Tempest test, you can
|
||||
use the :py:class:`~.Actuator` strategy and pass it one or more actions to
|
||||
execute. This way, you can isolate your action to see if it works as expected.
|
||||
|
||||
@@ -198,7 +198,7 @@ Here below is how to register ``DummyClusterDataModelCollector`` using pbr_:
|
||||
watcher_cluster_data_model_collectors =
|
||||
dummy = thirdparty.dummy:DummyClusterDataModelCollector
|
||||
|
||||
.. _pbr: http://docs.openstack.org/developer/pbr/
|
||||
.. _pbr: http://docs.openstack.org/pbr/latest
|
||||
|
||||
|
||||
Add new notification endpoints
|
||||
|
||||
@@ -7,7 +7,9 @@ ONGOING --> FAILED: Something failed while executing\nthe Action Plan in the Wat
|
||||
ONGOING --> SUCCEEDED: The Watcher Applier executed\nthe Action Plan successfully
|
||||
FAILED --> DELETED : Administrator removes\nAction Plan
|
||||
SUCCEEDED --> DELETED : Administrator removes\nAction Plan
|
||||
ONGOING --> CANCELLED : Administrator cancels\nAction Plan
|
||||
ONGOING --> CANCELLING : Administrator cancels\nAction Plan
|
||||
CANCELLING --> CANCELLED : The Watcher Applier cancelled\nthe Action Plan successfully
|
||||
CANCELLING --> FAILED : Something failed while cancelling\nthe Action Plan in the Watcher Applier
|
||||
RECOMMENDED --> CANCELLED : Administrator cancels\nAction Plan
|
||||
RECOMMENDED --> SUPERSEDED : The Watcher Decision Engine supersedes\nAction Plan
|
||||
PENDING --> CANCELLED : Administrator cancels\nAction Plan
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 48 KiB After Width: | Height: | Size: 76 KiB |
@@ -339,6 +339,34 @@
|
||||
style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
|
||||
transform="matrix(-0.8,0,0,-0.8,4.8,0)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="EmptyTriangleInL"
|
||||
orient="auto"
|
||||
refY="0"
|
||||
refX="0"
|
||||
id="EmptyTriangleInL-6"
|
||||
style="overflow:visible">
|
||||
<path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path7091-2"
|
||||
d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
|
||||
style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
|
||||
transform="matrix(-0.8,0,0,-0.8,4.8,0)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="EmptyTriangleInL"
|
||||
orient="auto"
|
||||
refY="0"
|
||||
refX="0"
|
||||
id="EmptyTriangleInL-12"
|
||||
style="overflow:visible">
|
||||
<path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path7091-70"
|
||||
d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
|
||||
style="fill:#ffffff;fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
|
||||
transform="matrix(-0.8,0,0,-0.8,4.8,0)" />
|
||||
</marker>
|
||||
</defs>
|
||||
<sodipodi:namedview
|
||||
inkscape:document-units="mm"
|
||||
@@ -348,13 +376,13 @@
|
||||
inkscape:pageopacity="0.0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:zoom="1.4142136"
|
||||
inkscape:cx="261.24633"
|
||||
inkscape:cx="665.19215"
|
||||
inkscape:cy="108.90512"
|
||||
inkscape:current-layer="g5356"
|
||||
inkscape:current-layer="g4866-2-3"
|
||||
id="namedview4950"
|
||||
showgrid="true"
|
||||
inkscape:window-width="1215"
|
||||
inkscape:window-height="776"
|
||||
inkscape:window-width="1211"
|
||||
inkscape:window-height="698"
|
||||
inkscape:window-x="65"
|
||||
inkscape:window-y="24"
|
||||
inkscape:window-maximized="1">
|
||||
@@ -381,6 +409,12 @@
|
||||
<g
|
||||
id="g5356"
|
||||
transform="translate(-15.096057,-107.16694)">
|
||||
<path
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path3284-4-2-3-77-5-9"
|
||||
d="m 813.66791,753.1462 0,-92.21768"
|
||||
style="display:inline;fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#EmptyTriangleInL-6)" />
|
||||
<rect
|
||||
y="377.8927"
|
||||
x="96.920677"
|
||||
@@ -875,8 +909,8 @@
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path5110-9"
|
||||
d="m 472.18905,726.66568 221.85496,0"
|
||||
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;display:inline" />
|
||||
d="m 472.18905,726.66568 331.45651,0"
|
||||
style="display:inline;fill:none;stroke:#000000;stroke-width:1.22230256px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
|
||||
<path
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
@@ -919,8 +953,8 @@
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path3284-4-2-3-4-6"
|
||||
d="m 540.57926,651.7922 179.16488,0"
|
||||
style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:1.99999999, 1.99999999;stroke-dashoffset:0;marker-start:url(#TriangleInL);display:inline" />
|
||||
d="m 543.75943,651.7922 280.63651,0"
|
||||
style="display:inline;fill:none;stroke:#000000;stroke-width:1.25154257;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.50308524, 2.50308524;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#TriangleInL)" />
|
||||
<rect
|
||||
y="262.01205"
|
||||
x="451.89563"
|
||||
@@ -1402,6 +1436,48 @@
|
||||
id="path5110-9-6"
|
||||
d="m 192.18905,726.66568 221.85496,0"
|
||||
style="display:inline;fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
|
||||
<g
|
||||
id="g4866-2-3"
|
||||
style="display:inline"
|
||||
transform="matrix(1.7775787,0,0,1.7775787,991.15946,596.08131)">
|
||||
<rect
|
||||
style="display:inline;fill:#ffffff;stroke:#000000;stroke-width:0.562563;stroke-opacity:1"
|
||||
id="rect4267-4-7-7-6"
|
||||
width="49.81258"
|
||||
height="24.243191"
|
||||
x="-116.67716"
|
||||
y="88.977051" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-weight:normal;font-size:11.73851585px;line-height:125%;font-family:Sans;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none"
|
||||
x="-91.899979"
|
||||
y="104.01585"
|
||||
id="text5037-4-6-9-7"
|
||||
sodipodi:linespacing="125%"><tspan
|
||||
sodipodi:role="line"
|
||||
x="-91.899979"
|
||||
y="104.01585"
|
||||
style="font-size:11.2512598px;text-align:center;text-anchor:middle"
|
||||
id="tspan5184-3-5-5">cinder</tspan></text>
|
||||
</g>
|
||||
<path
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path3284-4-2-3-4-9-3"
|
||||
d="m 824.37881,651.58554 0,102.98987"
|
||||
style="display:inline;fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1.99999999, 1.99999999;stroke-dashoffset:0;stroke-opacity:1;marker-start:none" />
|
||||
<circle
|
||||
r="2.6672709"
|
||||
cy="693.98395"
|
||||
cx="823.72699"
|
||||
id="path13407-89-5"
|
||||
style="color:#000000;display:inline;overflow:visible;visibility:visible;fill:#ececec;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;enable-background:accumulate" />
|
||||
<path
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path3284-4-2-3-7-9"
|
||||
d="m 804.16781,752.35205 0,-26.2061"
|
||||
style="display:inline;fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#EmptyTriangleInL-12)" />
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 60 KiB After Width: | Height: | Size: 64 KiB |
@@ -68,4 +68,4 @@
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
su -s /bin/sh -c "watcher-db-manage" watcher
|
||||
su -s /bin/sh -c "watcher-db-manage --config-file /etc/watcher/watcher.conf create_schema"
|
||||
|
||||
@@ -22,7 +22,7 @@ The *vm_workload_consolidation* strategy requires the following metrics:
|
||||
============================ ============ ======= =======
|
||||
metric service name plugins comment
|
||||
============================ ============ ======= =======
|
||||
``memory`` ceilometer_ none
|
||||
``memory`` ceilometer_ none
|
||||
``disk.root.size`` ceilometer_ none
|
||||
============================ ============ ======= =======
|
||||
|
||||
@@ -32,7 +32,7 @@ the strategy if available:
|
||||
============================ ============ ======= =======
|
||||
metric service name plugins comment
|
||||
============================ ============ ======= =======
|
||||
``memory.usage`` ceilometer_ none
|
||||
``memory.resident`` ceilometer_ none
|
||||
``cpu_util`` ceilometer_ none
|
||||
============================ ============ ======= =======
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ The *workload_balance* strategy requires the following metrics:
|
||||
metric service name plugins comment
|
||||
======================= ============ ======= =======
|
||||
``cpu_util`` ceilometer_ none
|
||||
``memory.resident`` ceilometer_ none
|
||||
======================= ============ ======= =======
|
||||
|
||||
.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute
|
||||
@@ -66,6 +67,9 @@ Strategy parameters are:
|
||||
============== ====== ============= ====================================
|
||||
parameter type default Value description
|
||||
============== ====== ============= ====================================
|
||||
``metrics`` String 'cpu_util' Workload balance base on cpu or ram
|
||||
utilization. choice: ['cpu_util',
|
||||
'memory.resident']
|
||||
``threshold`` Number 25.0 Workload threshold for migration
|
||||
``period`` Number 300 Aggregate time period of ceilometer
|
||||
============== ====== ============= ====================================
|
||||
@@ -90,7 +94,7 @@ How to use it ?
|
||||
at1 workload_balancing --strategy workload_balance
|
||||
|
||||
$ openstack optimize audit create -a at1 -p threshold=26.0 \
|
||||
-p period=310
|
||||
-p period=310 -p metrics=cpu_util
|
||||
|
||||
External Links
|
||||
--------------
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
features:
|
||||
- Add notifications related to Action object.
|
||||
@@ -0,0 +1,6 @@
|
||||
---
|
||||
features:
|
||||
- Added the functionality to filter out instances which have metadata field
|
||||
'optimize' set to False. For now, this is only available for the
|
||||
basic_consolidation strategy (if "check_optimize_metadata" configuration
|
||||
option is enabled).
|
||||
@@ -0,0 +1,4 @@
|
||||
---
|
||||
features:
|
||||
- Added binding between apscheduler job and Watcher decision engine service.
|
||||
It will allow to provide HA support in the future.
|
||||
@@ -0,0 +1,8 @@
|
||||
---
|
||||
features:
|
||||
- Enhancement of vm_workload_consolidation strategy
|
||||
by using 'memory.resident' metric in place of
|
||||
'memory.usage', as memory.usage shows the memory
|
||||
usage inside guest-os and memory.resident
|
||||
represents volume of RAM used by instance
|
||||
on host machine.
|
||||
@@ -0,0 +1,7 @@
|
||||
---
|
||||
features:
|
||||
- There is new ability to create Watcher continuous audits with cron
|
||||
interval. It means you may use, for example, optional argument
|
||||
'--interval "\*/5 \* \* \* \*"' to launch audit every 5 minutes.
|
||||
These jobs are executed on a best effort basis and therefore, we
|
||||
recommend you to use a minimal cron interval of at least one minute.
|
||||
@@ -0,0 +1,4 @@
|
||||
---
|
||||
features:
|
||||
- Add description property for dynamic action. Admin can see detail information
|
||||
of any specify action.
|
||||
4
releasenotes/notes/gnocchi-watcher-43c25d391fbd3e9c.yaml
Normal file
4
releasenotes/notes/gnocchi-watcher-43c25d391fbd3e9c.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
---
|
||||
features:
|
||||
- Added gnocchi support as data source for metrics. Administrator can change
|
||||
data source for each strategy using config file.
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
features:
|
||||
- Added using of JSONSchema instead of voluptuous to validate Actions.
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
features:
|
||||
- Added strategy to identify and migrate a Noisy Neighbor - a low priority VM
|
||||
that negatively affects peformance of a high priority VM by over utilizing
|
||||
Last Level Cache.
|
||||
@@ -0,0 +1,3 @@
|
||||
---
|
||||
features:
|
||||
- Add notifications related to Service object.
|
||||
@@ -0,0 +1,4 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Added volume migrate action
|
||||
@@ -0,0 +1,7 @@
|
||||
---
|
||||
features:
|
||||
- Existing workload_balance strategy based on
|
||||
the VM workloads of CPU. This feature improves
|
||||
the strategy. By the input parameter "metrics",
|
||||
it makes decision to migrate a VM base on CPU
|
||||
or memory utilization.
|
||||
@@ -5,7 +5,7 @@
|
||||
apscheduler # MIT License
|
||||
enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
|
||||
jsonpatch>=1.1 # BSD
|
||||
keystoneauth1>=3.0.1 # Apache-2.0
|
||||
keystoneauth1>=3.1.0 # Apache-2.0
|
||||
jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
|
||||
keystonemiddleware>=4.12.0 # Apache-2.0
|
||||
lxml!=3.7.0,>=2.3 # BSD
|
||||
@@ -31,13 +31,13 @@ PrettyTable<0.8,>=0.7.1 # BSD
|
||||
voluptuous>=0.8.9 # BSD License
|
||||
gnocchiclient>=2.7.0 # Apache-2.0
|
||||
python-ceilometerclient>=2.5.0 # Apache-2.0
|
||||
python-cinderclient>=3.0.0 # Apache-2.0
|
||||
python-glanceclient>=2.7.0 # Apache-2.0
|
||||
python-cinderclient>=3.1.0 # Apache-2.0
|
||||
python-glanceclient>=2.8.0 # Apache-2.0
|
||||
python-keystoneclient>=3.8.0 # Apache-2.0
|
||||
python-monascaclient>=1.1.0 # Apache-2.0
|
||||
python-monascaclient>=1.7.0 # Apache-2.0
|
||||
python-neutronclient>=6.3.0 # Apache-2.0
|
||||
python-novaclient>=9.0.0 # Apache-2.0
|
||||
python-openstackclient!=3.10.0,>=3.3.0 # Apache-2.0
|
||||
python-openstackclient>=3.11.0 # Apache-2.0
|
||||
python-ironicclient>=1.14.0 # Apache-2.0
|
||||
six>=1.9.0 # MIT
|
||||
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
|
||||
@@ -45,5 +45,5 @@ stevedore>=1.20.0 # Apache-2.0
|
||||
taskflow>=2.7.0 # Apache-2.0
|
||||
WebOb>=1.7.1 # MIT
|
||||
WSME>=0.8 # MIT
|
||||
networkx>=1.10 # BSD
|
||||
networkx<2.0,>=1.10 # BSD
|
||||
|
||||
|
||||
@@ -54,6 +54,7 @@ watcher_goals =
|
||||
workload_balancing = watcher.decision_engine.goal.goals:WorkloadBalancing
|
||||
airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization
|
||||
noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization
|
||||
saving_energy = watcher.decision_engine.goal.goals:SavingEnergy
|
||||
|
||||
watcher_scoring_engines =
|
||||
dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer
|
||||
@@ -65,8 +66,10 @@ watcher_strategies =
|
||||
dummy = watcher.decision_engine.strategy.strategies.dummy_strategy:DummyStrategy
|
||||
dummy_with_scorer = watcher.decision_engine.strategy.strategies.dummy_with_scorer:DummyWithScorer
|
||||
dummy_with_resize = watcher.decision_engine.strategy.strategies.dummy_with_resize:DummyWithResize
|
||||
actuator = watcher.decision_engine.strategy.strategies.actuation:Actuator
|
||||
basic = watcher.decision_engine.strategy.strategies.basic_consolidation:BasicConsolidation
|
||||
outlet_temperature = watcher.decision_engine.strategy.strategies.outlet_temp_control:OutletTempControl
|
||||
saving_energy = watcher.decision_engine.strategy.strategies.saving_energy:SavingEnergy
|
||||
vm_workload_consolidation = watcher.decision_engine.strategy.strategies.vm_workload_consolidation:VMWorkloadConsolidation
|
||||
workload_stabilization = watcher.decision_engine.strategy.strategies.workload_stabilization:WorkloadStabilization
|
||||
workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance
|
||||
@@ -80,6 +83,7 @@ watcher_actions =
|
||||
change_nova_service_state = watcher.applier.actions.change_nova_service_state:ChangeNovaServiceState
|
||||
resize = watcher.applier.actions.resize:Resize
|
||||
change_node_power_state = watcher.applier.actions.change_node_power_state:ChangeNodePowerState
|
||||
volume_migrate = watcher.applier.actions.volume_migration:VolumeMigrate
|
||||
|
||||
watcher_workflow_engines =
|
||||
taskflow = watcher.applier.workflow_engine.default:DefaultWorkFlowEngine
|
||||
@@ -94,7 +98,6 @@ watcher_cluster_data_model_collectors =
|
||||
|
||||
|
||||
[pbr]
|
||||
warnerrors = true
|
||||
autodoc_index_modules = true
|
||||
autodoc_exclude_modules =
|
||||
watcher.db.sqlalchemy.alembic.env
|
||||
|
||||
@@ -15,7 +15,7 @@ testscenarios>=0.4 # Apache-2.0/BSD
|
||||
testtools>=1.4.0 # MIT
|
||||
|
||||
# Doc requirements
|
||||
openstackdocstheme>=1.11.0 # Apache-2.0
|
||||
openstackdocstheme>=1.16.0 # Apache-2.0
|
||||
sphinx>=1.6.2 # BSD
|
||||
sphinxcontrib-pecanwsme>=0.8 # Apache-2.0
|
||||
|
||||
|
||||
2
tox.ini
2
tox.ini
@@ -7,7 +7,7 @@ skipsdist = True
|
||||
usedevelop = True
|
||||
whitelist_externals = find
|
||||
rm
|
||||
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
|
||||
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/pike} {opts} {packages}
|
||||
setenv =
|
||||
VIRTUAL_ENV={envdir}
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
|
||||
@@ -118,6 +118,9 @@ class Action(base.APIBase):
|
||||
action_type = wtypes.text
|
||||
"""Action type"""
|
||||
|
||||
description = wtypes.text
|
||||
"""Action description"""
|
||||
|
||||
input_parameters = types.jsontype
|
||||
"""One or more key/value pairs """
|
||||
|
||||
@@ -141,6 +144,7 @@ class Action(base.APIBase):
|
||||
setattr(self, field, kwargs.get(field, wtypes.Unset))
|
||||
|
||||
self.fields.append('action_plan_id')
|
||||
self.fields.append('description')
|
||||
setattr(self, 'action_plan_uuid', kwargs.get('action_plan_id',
|
||||
wtypes.Unset))
|
||||
|
||||
@@ -162,6 +166,14 @@ class Action(base.APIBase):
|
||||
@classmethod
|
||||
def convert_with_links(cls, action, expand=True):
|
||||
action = Action(**action.as_dict())
|
||||
try:
|
||||
obj_action_desc = objects.ActionDescription.get_by_type(
|
||||
pecan.request.context, action.action_type)
|
||||
description = obj_action_desc.description
|
||||
except exception.ActionDescriptionNotFound:
|
||||
description = ""
|
||||
setattr(action, 'description', description)
|
||||
|
||||
return cls._convert_with_links(action, pecan.request.host_url, expand)
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -52,8 +52,8 @@ class AuthTokenMiddleware(auth_token.AuthProtocol):
|
||||
# The information whether the API call is being performed against the
|
||||
# public API is required for some other components. Saving it to the
|
||||
# WSGI environment is reasonable thereby.
|
||||
env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path),
|
||||
self.public_api_routes))
|
||||
env['is_public_api'] = any(re.match(pattern, path)
|
||||
for pattern in self.public_api_routes)
|
||||
|
||||
if env['is_public_api']:
|
||||
return self._app(env, start_response)
|
||||
|
||||
252
watcher/applier/actions/volume_migration.py
Normal file
252
watcher/applier/actions/volume_migration.py
Normal file
@@ -0,0 +1,252 @@
|
||||
# Copyright 2017 NEC Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import jsonschema
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from cinderclient import client as cinder_client
|
||||
from watcher._i18n import _
|
||||
from watcher.applier.actions import base
|
||||
from watcher.common import cinder_helper
|
||||
from watcher.common import exception
|
||||
from watcher.common import keystone_helper
|
||||
from watcher.common import nova_helper
|
||||
from watcher.common import utils
|
||||
from watcher import conf
|
||||
|
||||
CONF = conf.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class VolumeMigrate(base.BaseAction):
|
||||
"""Migrates a volume to destination node or type
|
||||
|
||||
By using this action, you will be able to migrate cinder volume.
|
||||
Migration type 'swap' can only be used for migrating attached volume.
|
||||
Migration type 'cold' can only be used for migrating detached volume.
|
||||
|
||||
The action schema is::
|
||||
|
||||
schema = Schema({
|
||||
'resource_id': str, # should be a UUID
|
||||
'migration_type': str, # choices -> "swap", "cold"
|
||||
'destination_node': str,
|
||||
'destination_type': str,
|
||||
)}
|
||||
|
||||
The `resource_id` is the UUID of cinder volume to migrate.
|
||||
The `destination_node` is the destination block storage pool name.
|
||||
(list of available pools are returned by this command: ``cinder
|
||||
get-pools``) which is mandatory for migrating detached volume
|
||||
to the one with same volume type.
|
||||
The `destination_type` is the destination block storage type name.
|
||||
(list of available types are returned by this command: ``cinder
|
||||
type-list``) which is mandatory for migrating detached volume or
|
||||
swapping attached volume to the one with different volume type.
|
||||
"""
|
||||
|
||||
MIGRATION_TYPE = 'migration_type'
|
||||
SWAP = 'swap'
|
||||
COLD = 'cold'
|
||||
DESTINATION_NODE = "destination_node"
|
||||
DESTINATION_TYPE = "destination_type"
|
||||
|
||||
def __init__(self, config, osc=None):
|
||||
super(VolumeMigrate, self).__init__(config)
|
||||
self.temp_username = utils.random_string(10)
|
||||
self.temp_password = utils.random_string(10)
|
||||
self.cinder_util = cinder_helper.CinderHelper(osc=self.osc)
|
||||
self.nova_util = nova_helper.NovaHelper(osc=self.osc)
|
||||
|
||||
@property
|
||||
def schema(self):
|
||||
return {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'resource_id': {
|
||||
'type': 'string',
|
||||
"minlength": 1,
|
||||
"pattern": ("^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-"
|
||||
"([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-"
|
||||
"([a-fA-F0-9]){12}$")
|
||||
},
|
||||
'migration_type': {
|
||||
'type': 'string',
|
||||
"enum": ["swap", "cold"]
|
||||
},
|
||||
'destination_node': {
|
||||
"anyof": [
|
||||
{'type': 'string', "minLength": 1},
|
||||
{'type': 'None'}
|
||||
]
|
||||
},
|
||||
'destination_type': {
|
||||
"anyof": [
|
||||
{'type': 'string', "minLength": 1},
|
||||
{'type': 'None'}
|
||||
]
|
||||
}
|
||||
},
|
||||
'required': ['resource_id', 'migration_type'],
|
||||
'additionalProperties': False,
|
||||
}
|
||||
|
||||
def validate_parameters(self):
|
||||
try:
|
||||
jsonschema.validate(self.input_parameters, self.schema)
|
||||
return True
|
||||
except jsonschema.ValidationError as e:
|
||||
raise e
|
||||
|
||||
@property
|
||||
def volume_id(self):
|
||||
return self.input_parameters.get(self.RESOURCE_ID)
|
||||
|
||||
@property
|
||||
def migration_type(self):
|
||||
return self.input_parameters.get(self.MIGRATION_TYPE)
|
||||
|
||||
@property
|
||||
def destination_node(self):
|
||||
return self.input_parameters.get(self.DESTINATION_NODE)
|
||||
|
||||
@property
|
||||
def destination_type(self):
|
||||
return self.input_parameters.get(self.DESTINATION_TYPE)
|
||||
|
||||
def _cold_migrate(self, volume, dest_node, dest_type):
|
||||
if not self.cinder_util.can_cold(volume, dest_node):
|
||||
raise exception.Invalid(
|
||||
message=(_("Invalid state for cold migration")))
|
||||
|
||||
if dest_node:
|
||||
return self.cinder_util.migrate(volume, dest_node)
|
||||
elif dest_type:
|
||||
return self.cinder_util.retype(volume, dest_type)
|
||||
else:
|
||||
raise exception.Invalid(
|
||||
message=(_("destination host or destination type is "
|
||||
"required when migration type is cold")))
|
||||
|
||||
def _can_swap(self, volume):
    """Return True when *volume* is in a state that allows a swap.

    The volume must be attached ('in-use') and the server holding the
    first attachment must be ACTIVE, PAUSED or RESIZED.
    """
    attachments = volume.attachments
    if not attachments:
        return False

    server = self.nova_util.find_instance(attachments[0]['server_id'])
    return (volume.status == 'in-use'
            and server.status in ('ACTIVE', 'PAUSED', 'RESIZED'))
|
||||
|
||||
def _create_user(self, volume, user):
    """Create a temporary keystone user in the volume's project.

    :param volume: cinder volume whose owning project the user joins
    :param user: dict with at least 'name' and 'password'; 'project',
        'domain' and 'roles' (admin) are filled in here, mutating the
        caller's dict in place
    :returns: the created keystone user
    """
    keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
    project_id = getattr(volume, 'os-vol-tenant-attr:tenant_id')
    user['project'] = project_id
    user['domain'] = keystone_util.get_project(project_id).domain_id
    user['roles'] = ['admin']
    return keystone_util.create_user(user)
|
||||
|
||||
def _get_cinder_client(self, session):
    """Build a cinder client bound to *session*.

    Used to act on behalf of the temporary user created for the swap,
    with version and endpoint type taken from [cinder_client] config.
    """
    return cinder_client.Client(
        CONF.cinder_client.api_version,
        session=session,
        endpoint_type=CONF.cinder_client.endpoint_type)
|
||||
|
||||
def _swap_volume(self, volume, dest_type):
    """Swap *volume* for a newly created volume of *dest_type*.

    Creates a temporary keystone user in the volume's project, builds
    a cinder client for it, creates the replacement volume, asks nova
    to swap the attachment, then deletes the old volume.

    Limitation note: only for compute libvirt driver.

    :returns: True when every step completed
    :raises exception.Invalid: when dest_type is missing or the volume
        is not in a swappable state
    """
    if not dest_type:
        raise exception.Invalid(
            message=(_("destination type is required when "
                       "migration type is swap")))

    if not self._can_swap(volume):
        raise exception.Invalid(
            message=(_("Invalid state for swapping volume")))

    user_info = {
        'name': self.temp_username,
        'password': self.temp_password}
    user = self._create_user(volume, user_info)
    keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
    try:
        session = keystone_util.create_session(
            user.id, self.temp_password)
        temp_cinder = self._get_cinder_client(session)

        # swap volume
        new_volume = self.cinder_util.create_volume(
            temp_cinder, volume, dest_type)
        self.nova_util.swap_volume(volume, new_volume)

        # delete old volume
        self.cinder_util.delete_volume(volume)

    finally:
        # Always remove the temporary user, even when a step failed.
        keystone_util.delete_user(user)

    return True
|
||||
|
||||
def _migrate(self, volume_id, dest_node, dest_type):
    """Dispatch the migration according to ``self.migration_type``.

    :param volume_id: id of the volume to move
    :param dest_node: destination host (cold only; logged and ignored
        for swap)
    :param dest_type: destination volume type
    :returns: True on success, False on any failure -- errors are
        logged rather than propagated so the action reports a clean
        boolean result
    """
    try:
        volume = self.cinder_util.get_volume(volume_id)
        if self.migration_type == self.COLD:
            return self._cold_migrate(volume, dest_node, dest_type)
        elif self.migration_type == self.SWAP:
            if dest_node:
                LOG.warning("dest_node is ignored")
            return self._swap_volume(volume, dest_type)
        else:
            raise exception.Invalid(
                message=(_("Migration of type '%(migration_type)s' is not "
                           "supported.") %
                         {'migration_type': self.migration_type}))
    except exception.Invalid as ei:
        LOG.exception(ei)
        return False
    except Exception as e:
        # Broad catch is deliberate: callers expect a boolean result,
        # not an exception.
        LOG.critical("Unexpected exception occurred.")
        LOG.exception(e)
        return False
|
||||
|
||||
def execute(self):
    """Run the volume migration described by the input parameters."""
    return self._migrate(self.volume_id,
                         self.destination_node,
                         self.destination_type)
|
||||
|
||||
def revert(self):
    """Reverting a volume migration is not supported; only logs."""
    LOG.warning("revert not supported")
|
||||
|
||||
def abort(self):
    """No-op: this action cannot be aborted mid-flight."""
    pass
|
||||
|
||||
def pre_condition(self):
    """No-op: no pre-conditions are checked for this action."""
    pass
|
||||
|
||||
def post_condition(self):
    """No-op: no post-conditions are checked for this action."""
    pass
|
||||
|
||||
def get_description(self):
    """Return a short human-readable description of this action."""
    return "Moving a volume to destination_node or destination_type"
|
||||
44
watcher/applier/sync.py
Normal file
44
watcher/applier/sync.py
Normal file
@@ -0,0 +1,44 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 ZTE
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from watcher.applier.loading import default
|
||||
from watcher.common import context
|
||||
from watcher.common import exception
|
||||
from watcher import objects
|
||||
|
||||
|
||||
class Syncer(object):
    """Syncs all available actions with the Watcher DB"""

    def sync(self):
        """Upsert an ActionDescription row per loadable action type."""
        ctx = context.make_context()
        loader = default.DefaultActionLoader()
        for action_type in loader.list_available():
            description = loader.load(action_type).get_description()
            try:
                stored = objects.ActionDescription.get_by_type(
                    ctx, action_type)
            except exception.ActionDescriptionNotFound:
                # First time this action type is seen: create its row.
                new_desc = objects.ActionDescription(ctx)
                new_desc.action_type = action_type
                new_desc.description = description
                new_desc.create()
            else:
                # Row exists: refresh the description if it changed.
                if stored.description != description:
                    stored.description = description
                    stored.save()
|
||||
@@ -90,6 +90,7 @@ class BaseWorkFlowEngine(loadable.Loadable):
|
||||
eager=True)
|
||||
db_action.state = state
|
||||
db_action.save()
|
||||
return db_action
|
||||
|
||||
@abc.abstractmethod
|
||||
def execute(self, actions):
|
||||
@@ -149,9 +150,9 @@ class BaseTaskFlowActionContainer(flow_task.Task):
|
||||
self.engine.context, self._db_action.action_plan_id)
|
||||
if action_plan.state in CANCEL_STATE:
|
||||
raise exception.ActionPlanCancelled(uuid=action_plan.uuid)
|
||||
self.do_pre_execute()
|
||||
db_action = self.do_pre_execute()
|
||||
notifications.action.send_execution_notification(
|
||||
self.engine.context, self._db_action,
|
||||
self.engine.context, db_action,
|
||||
fields.NotificationAction.EXECUTION,
|
||||
fields.NotificationPhase.START)
|
||||
except exception.ActionPlanCancelled as e:
|
||||
@@ -159,9 +160,10 @@ class BaseTaskFlowActionContainer(flow_task.Task):
|
||||
raise
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
self.engine.notify(self._db_action, objects.action.State.FAILED)
|
||||
db_action = self.engine.notify(self._db_action,
|
||||
objects.action.State.FAILED)
|
||||
notifications.action.send_execution_notification(
|
||||
self.engine.context, self._db_action,
|
||||
self.engine.context, db_action,
|
||||
fields.NotificationAction.EXECUTION,
|
||||
fields.NotificationPhase.ERROR,
|
||||
priority=fields.NotificationPriority.ERROR)
|
||||
@@ -169,19 +171,19 @@ class BaseTaskFlowActionContainer(flow_task.Task):
|
||||
def execute(self, *args, **kwargs):
|
||||
def _do_execute_action(*args, **kwargs):
|
||||
try:
|
||||
self.do_execute(*args, **kwargs)
|
||||
db_action = self.do_execute(*args, **kwargs)
|
||||
notifications.action.send_execution_notification(
|
||||
self.engine.context, self._db_action,
|
||||
self.engine.context, db_action,
|
||||
fields.NotificationAction.EXECUTION,
|
||||
fields.NotificationPhase.END)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
LOG.error('The workflow engine has failed'
|
||||
'to execute the action: %s', self.name)
|
||||
self.engine.notify(self._db_action,
|
||||
objects.action.State.FAILED)
|
||||
db_action = self.engine.notify(self._db_action,
|
||||
objects.action.State.FAILED)
|
||||
notifications.action.send_execution_notification(
|
||||
self.engine.context, self._db_action,
|
||||
self.engine.context, db_action,
|
||||
fields.NotificationAction.EXECUTION,
|
||||
fields.NotificationPhase.ERROR,
|
||||
priority=fields.NotificationPriority.ERROR)
|
||||
@@ -227,9 +229,10 @@ class BaseTaskFlowActionContainer(flow_task.Task):
|
||||
self.do_post_execute()
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
self.engine.notify(self._db_action, objects.action.State.FAILED)
|
||||
db_action = self.engine.notify(self._db_action,
|
||||
objects.action.State.FAILED)
|
||||
notifications.action.send_execution_notification(
|
||||
self.engine.context, self._db_action,
|
||||
self.engine.context, db_action,
|
||||
fields.NotificationAction.EXECUTION,
|
||||
fields.NotificationPhase.ERROR,
|
||||
priority=fields.NotificationPriority.ERROR)
|
||||
|
||||
@@ -111,21 +111,24 @@ class TaskFlowActionContainer(base.BaseTaskFlowActionContainer):
|
||||
super(TaskFlowActionContainer, self).__init__(name, db_action, engine)
|
||||
|
||||
def do_pre_execute(self):
|
||||
self.engine.notify(self._db_action, objects.action.State.ONGOING)
|
||||
db_action = self.engine.notify(self._db_action,
|
||||
objects.action.State.ONGOING)
|
||||
LOG.debug("Pre-condition action: %s", self.name)
|
||||
self.action.pre_condition()
|
||||
return db_action
|
||||
|
||||
def do_execute(self, *args, **kwargs):
|
||||
LOG.debug("Running action: %s", self.name)
|
||||
|
||||
# NOTE: For result is False, set action state fail
|
||||
# NOTE:Some actions(such as migrate) will return None when exception
|
||||
# Only when True is returned, the action state is set to SUCCEEDED
|
||||
result = self.action.execute()
|
||||
if result is False:
|
||||
self.engine.notify(self._db_action,
|
||||
objects.action.State.FAILED)
|
||||
if result is True:
|
||||
return self.engine.notify(self._db_action,
|
||||
objects.action.State.SUCCEEDED)
|
||||
else:
|
||||
self.engine.notify(self._db_action,
|
||||
objects.action.State.SUCCEEDED)
|
||||
return self.engine.notify(self._db_action,
|
||||
objects.action.State.FAILED)
|
||||
|
||||
def do_post_execute(self):
|
||||
LOG.debug("Post-condition action: %s", self.name)
|
||||
@@ -146,14 +149,15 @@ class TaskFlowActionContainer(base.BaseTaskFlowActionContainer):
|
||||
result = self.action.abort()
|
||||
if result:
|
||||
# Aborted the action.
|
||||
self.engine.notify(self._db_action,
|
||||
objects.action.State.CANCELLED)
|
||||
return self.engine.notify(self._db_action,
|
||||
objects.action.State.CANCELLED)
|
||||
else:
|
||||
self.engine.notify(self._db_action,
|
||||
objects.action.State.SUCCEEDED)
|
||||
return self.engine.notify(self._db_action,
|
||||
objects.action.State.SUCCEEDED)
|
||||
except Exception as e:
|
||||
self.engine.notify(self._db_action, objects.action.State.FAILED)
|
||||
LOG.exception(e)
|
||||
return self.engine.notify(self._db_action,
|
||||
objects.action.State.FAILED)
|
||||
|
||||
|
||||
class TaskFlowNop(flow_task.Task):
|
||||
|
||||
@@ -23,6 +23,7 @@ import sys
|
||||
from oslo_log import log as logging
|
||||
|
||||
from watcher.applier import manager
|
||||
from watcher.applier import sync
|
||||
from watcher.common import service as watcher_service
|
||||
from watcher import conf
|
||||
|
||||
@@ -37,6 +38,9 @@ def main():
|
||||
|
||||
applier_service = watcher_service.Service(manager.ApplierManager)
|
||||
|
||||
syncer = sync.Syncer()
|
||||
syncer.sync()
|
||||
|
||||
# Only 1 process
|
||||
launcher = watcher_service.launch(CONF, applier_service)
|
||||
launcher.wait()
|
||||
|
||||
@@ -12,12 +12,18 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import time
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from cinderclient import exceptions as cinder_exception
|
||||
from cinderclient.v2.volumes import Volume
|
||||
from watcher._i18n import _
|
||||
from watcher.common import clients
|
||||
from watcher.common import exception
|
||||
from watcher import conf
|
||||
|
||||
CONF = conf.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -34,9 +40,8 @@ class CinderHelper(object):
|
||||
def get_storage_node_by_name(self, name):
|
||||
"""Get storage node by name(host@backendname)"""
|
||||
try:
|
||||
storages = list(filter(lambda storage:
|
||||
storage.host == name,
|
||||
self.get_storage_node_list()))
|
||||
storages = [storage for storage in self.get_storage_node_list()
|
||||
if storage.host == name]
|
||||
if len(storages) != 1:
|
||||
raise exception.StorageNodeNotFound(name=name)
|
||||
return storages[0]
|
||||
@@ -50,9 +55,8 @@ class CinderHelper(object):
|
||||
def get_storage_pool_by_name(self, name):
|
||||
"""Get pool by name(host@backend#poolname)"""
|
||||
try:
|
||||
pools = list(filter(lambda pool:
|
||||
pool.name == name,
|
||||
self.get_storage_pool_list()))
|
||||
pools = [pool for pool in self.get_storage_pool_list()
|
||||
if pool.name == name]
|
||||
if len(pools) != 1:
|
||||
raise exception.PoolNotFound(name=name)
|
||||
return pools[0]
|
||||
@@ -69,11 +73,197 @@ class CinderHelper(object):
|
||||
def get_volume_type_by_backendname(self, backendname):
|
||||
volume_type_list = self.get_volume_type_list()
|
||||
|
||||
volume_type = list(filter(
|
||||
lambda volume_type:
|
||||
volume_type.extra_specs.get(
|
||||
'volume_backend_name') == backendname, volume_type_list))
|
||||
volume_type = [volume_type for volume_type in volume_type_list
|
||||
if volume_type.extra_specs.get(
|
||||
'volume_backend_name') == backendname]
|
||||
if volume_type:
|
||||
return volume_type[0].name
|
||||
else:
|
||||
return ""
|
||||
|
||||
def get_volume(self, volume):
    """Resolve *volume* (object or id/name string) to a volume object.

    :param volume: a Volume object (its id is used) or an id string
    :returns: the volume fetched by id, falling back to a lookup by
        name when no volume with that id exists
    """
    if isinstance(volume, Volume):
        volume = volume.id

    try:
        volume = self.cinder.volumes.get(volume)
        return volume
    except cinder_exception.NotFound:
        return self.cinder.volumes.find(name=volume)
|
||||
|
||||
def backendname_from_poolname(self, poolname):
    """Extract the backend name from a pool name.

    Pool names are formatted as ``host@backend#pool`` since ocata
    (the ``#pool`` part may be absent; older names may be only
    ``host``).

    :param poolname: pool name string
    :returns: the backend component, or "" when *poolname* has no
        ``@backend`` part
    """
    # Drop the optional '#pool' suffix first.
    parts = poolname.split('#')[0].split('@')
    # Explicit length check instead of the previous
    # try/except IndexError around an indexing expression.
    return parts[1] if len(parts) > 1 else ""
|
||||
|
||||
def _has_snapshot(self, volume):
    """Return True when the volume's ``snapshot_id`` is set."""
    return bool(self.get_volume(volume).snapshot_id)
|
||||
|
||||
def can_cold(self, volume, host=None):
    """Return True when *volume* can be cold-migrated.

    A volume is cold-migratable when it is 'available', has no
    snapshot_id set, and is not already located on *host*.

    :param volume: volume object or id
    :param host: optional destination host; when it equals the
        volume's current host the migration is refused
    """
    # Bug fix: the previous revision read 'os-vol-host-attr:host'
    # from the raw argument, which may be an id string rather than a
    # volume object; resolve it once up front and reuse it.
    volume = self.get_volume(volume)

    if host and getattr(volume, 'os-vol-host-attr:host') == host:
        return False

    return (volume.status == 'available'
            and not self._has_snapshot(volume))
|
||||
|
||||
def get_deleting_volume(self, volume):
    """Find the source volume left behind by a migration of *volume*.

    Scans all volumes for one whose
    'os-vol-mig-status-attr:name_id' equals this volume's id.

    :returns: the matching volume object, or False when none exists
    """
    volume = self.get_volume(volume)
    all_volume = self.get_volume_list()
    for _volume in all_volume:
        if getattr(_volume, 'os-vol-mig-status-attr:name_id') == volume.id:
            return _volume
    return False
|
||||
|
||||
def _can_get_volume(self, volume_id):
    """Return True if a volume with *volume_id* can still be fetched.

    Used while polling for deletion: once the volume is gone, cinder
    raises NotFound and this returns False.
    """
    try:
        volume = self.get_volume(volume_id)
        if not volume:
            # NOTE(review): a falsy volume raises a bare Exception
            # that is NOT caught below and will propagate to the
            # caller -- presumably unreachable; confirm.
            raise Exception
    except cinder_exception.NotFound:
        return False
    else:
        return True
|
||||
|
||||
def check_volume_deleted(self, volume, retry=120, retry_interval=10):
    """Poll until *volume* is deleted or the retry budget is spent.

    :param volume: volume object or id
    :param retry: maximum number of polls
    :param retry_interval: seconds to sleep between polls
    :returns: True when the volume disappeared, False on timeout
    """
    volume = self.get_volume(volume)
    while self._can_get_volume(volume.id) and retry:
        volume = self.get_volume(volume.id)
        time.sleep(retry_interval)
        retry -= 1
        LOG.debug("retry count: %s" % retry)
        LOG.debug("Waiting to complete deletion of volume %s" % volume.id)
    if self._can_get_volume(volume.id):
        LOG.error("Volume deletion error: %s" % volume.id)
        return False

    LOG.debug("Volume %s was deleted successfully." % volume.id)
    return True
|
||||
|
||||
def check_migrated(self, volume, retry_interval=10):
    """Wait for a volume migration to finish and report the outcome.

    Polls while migration_status is 'migrating'; bails out early when
    it turns to 'error'.  On 'success', additionally waits for the
    left-behind source volume (if any) to be deleted.

    :param volume: volume object or id
    :param retry_interval: seconds between polls (no retry cap here;
        the loop runs until the status leaves 'migrating')
    :returns: True on confirmed success, False otherwise
    """
    volume = self.get_volume(volume)
    while getattr(volume, 'migration_status') == 'migrating':
        volume = self.get_volume(volume.id)
        LOG.debug('Waiting the migration of {0}'.format(volume))
        time.sleep(retry_interval)
        if getattr(volume, 'migration_status') == 'error':
            host_name = getattr(volume, 'os-vol-host-attr:host')
            error_msg = (("Volume migration error : "
                          "volume %(volume)s is now on host '%(host)s'.") %
                         {'volume': volume.id, 'host': host_name})
            LOG.error(error_msg)
            return False

    host_name = getattr(volume, 'os-vol-host-attr:host')
    if getattr(volume, 'migration_status') == 'success':
        # check original volume deleted
        deleting_volume = self.get_deleting_volume(volume)
        if deleting_volume:
            delete_id = getattr(deleting_volume, 'id')
            if not self.check_volume_deleted(delete_id):
                return False
    else:
        # Any terminal status other than 'success' is a failure.
        host_name = getattr(volume, 'os-vol-host-attr:host')
        error_msg = (("Volume migration error : "
                      "volume %(volume)s is now on host '%(host)s'.") %
                     {'volume': volume.id, 'host': host_name})
        LOG.error(error_msg)
        return False
    LOG.debug(
        "Volume migration succeeded : "
        "volume %s is now on host '%s'." % (
            volume.id, host_name))
    return True
|
||||
|
||||
def migrate(self, volume, dest_node):
    """Cold-migrate *volume* to *dest_node* and wait for completion.

    :param volume: volume object or id
    :param dest_node: destination host (host@backend#pool form)
    :returns: True when check_migrated confirms success
    :raises exception.Invalid: when the destination backend's volume
        type differs from the volume's current type
    """
    volume = self.get_volume(volume)
    dest_backend = self.backendname_from_poolname(dest_node)
    dest_type = self.get_volume_type_by_backendname(dest_backend)
    if volume.volume_type != dest_type:
        raise exception.Invalid(
            message=(_("Volume type must be same for migrating")))

    source_node = getattr(volume, 'os-vol-host-attr:host')
    LOG.debug("Volume %s found on host '%s'."
              % (volume.id, source_node))

    # force_host_copy=False, lock_volume=True
    self.cinder.volumes.migrate_volume(
        volume, dest_node, False, True)

    return self.check_migrated(volume)
|
||||
|
||||
def retype(self, volume, dest_type):
    """Retype *volume* to *dest_type* with the on-demand policy.

    :param volume: volume object or id
    :param dest_type: target volume type; must differ from the current
        type
    :returns: True when check_migrated confirms success
    :raises exception.Invalid: when the volume already has dest_type
    """
    volume = self.get_volume(volume)
    if volume.volume_type == dest_type:
        raise exception.Invalid(
            message=(_("Volume type must be different for retyping")))

    source_node = getattr(volume, 'os-vol-host-attr:host')
    LOG.debug(
        "Volume %s found on host '%s'." % (
            volume.id, source_node))

    self.cinder.volumes.retype(
        volume, dest_type, "on-demand")

    return self.check_migrated(volume)
|
||||
|
||||
def create_volume(self, cinder, volume,
                  dest_type, retry=120, retry_interval=10):
    """Create a copy of *volume* with *dest_type* via *cinder*.

    The new volume reuses the source's size, name and availability
    zone, then is polled until it reaches 'available'.

    :param cinder: cinder client to create with (may belong to a
        temporary user)
    :param volume: source volume object or id
    :param dest_type: volume type for the new volume
    :param retry: maximum number of polls
    :param retry_interval: seconds between polls
    :returns: the new volume object
    :raises Exception: when the volume never reaches 'available'
    """
    volume = self.get_volume(volume)
    LOG.debug("start creating new volume")
    new_volume = cinder.volumes.create(
        getattr(volume, 'size'),
        name=getattr(volume, 'name'),
        volume_type=dest_type,
        availability_zone=getattr(volume, 'availability_zone'))
    while getattr(new_volume, 'status') != 'available' and retry:
        new_volume = cinder.volumes.get(new_volume.id)
        LOG.debug('Waiting volume creation of {0}'.format(new_volume))
        time.sleep(retry_interval)
        retry -= 1
        LOG.debug("retry count: %s" % retry)

    if getattr(new_volume, 'status') != 'available':
        error_msg = (_("Failed to create volume '%(volume)s. ") %
                     {'volume': new_volume.id})
        raise Exception(error_msg)

    LOG.debug("Volume %s was created successfully." % new_volume)
    return new_volume
|
||||
|
||||
def delete_volume(self, volume):
    """Delete *volume* and wait until the deletion completes.

    :param volume: volume object or id
    :raises Exception: when the volume still exists after
        check_volume_deleted gives up polling
    """
    volume = self.get_volume(volume)
    self.cinder.volumes.delete(volume)
    result = self.check_volume_deleted(volume)
    if not result:
        error_msg = (_("Failed to delete volume '%(volume)s. ") %
                     {'volume': volume.id})
        raise Exception(error_msg)
|
||||
|
||||
@@ -110,8 +110,12 @@ class OpenStackClients(object):
|
||||
'api_version')
|
||||
gnocchiclient_interface = self._get_client_option('gnocchi',
|
||||
'endpoint_type')
|
||||
adapter_options = {
|
||||
"interface": gnocchiclient_interface
|
||||
}
|
||||
|
||||
self._gnocchi = gnclient.Client(gnocchiclient_version,
|
||||
interface=gnocchiclient_interface,
|
||||
adapter_options=adapter_options,
|
||||
session=self.session)
|
||||
return self._gnocchi
|
||||
|
||||
@@ -199,6 +203,6 @@ class OpenStackClients(object):
|
||||
ironicclient_version = self._get_client_option('ironic', 'api_version')
|
||||
endpoint_type = self._get_client_option('ironic', 'endpoint_type')
|
||||
self._ironic = irclient.get_client(ironicclient_version,
|
||||
ironic_url=endpoint_type,
|
||||
os_endpoint_type=endpoint_type,
|
||||
session=self.session)
|
||||
return self._ironic
|
||||
|
||||
@@ -426,6 +426,15 @@ class CronFormatIsInvalid(WatcherException):
|
||||
msg_fmt = _("Provided cron is invalid: %(message)s")
|
||||
|
||||
|
||||
class ActionDescriptionAlreadyExists(Conflict):
|
||||
msg_fmt = _("An action description with type %(action_type)s is "
|
||||
"already exist.")
|
||||
|
||||
|
||||
class ActionDescriptionNotFound(ResourceNotFound):
|
||||
msg_fmt = _("The action description %(action_id)s cannot be found.")
|
||||
|
||||
|
||||
# Model
|
||||
|
||||
class ComputeResourceNotFound(WatcherException):
|
||||
|
||||
124
watcher/common/keystone_helper.py
Normal file
124
watcher/common/keystone_helper.py
Normal file
@@ -0,0 +1,124 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from keystoneauth1.exceptions import http as ks_exceptions
|
||||
from keystoneauth1 import loading
|
||||
from keystoneauth1 import session
|
||||
from watcher._i18n import _
|
||||
from watcher.common import clients
|
||||
from watcher.common import exception
|
||||
from watcher import conf
|
||||
|
||||
CONF = conf.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class KeystoneHelper(object):
    """Thin wrapper around the keystone client.

    Resolves roles/users/projects/domains by id or (unique) name, and
    manages the short-lived users Watcher creates for volume swaps.

    Bug fix: ``get_project`` passed ``messsage=`` (typo) to
    exception.Invalid, so the "Project name seems ambiguous" message
    was never attached to the raised exception.
    """

    def __init__(self, osc=None):
        """:param osc: an OpenStackClients instance"""
        self.osc = osc if osc else clients.OpenStackClients()
        self.keystone = self.osc.keystone()

    def get_role(self, name_or_id):
        """Return the role matching *name_or_id*.

        Tries a direct get by id first, then a unique lookup by name.

        :raises exception.Invalid: when no role or several roles match
        """
        try:
            role = self.keystone.roles.get(name_or_id)
            return role
        except ks_exceptions.NotFound:
            roles = self.keystone.roles.list(name=name_or_id)
            if len(roles) == 0:
                raise exception.Invalid(
                    message=(_("Role not Found: %s") % name_or_id))
            if len(roles) > 1:
                raise exception.Invalid(
                    message=(_("Role name seems ambiguous: %s") % name_or_id))
            return roles[0]

    def get_user(self, name_or_id):
        """Return the user matching *name_or_id* (id first, then name).

        :raises exception.Invalid: when no user or several users match
        """
        try:
            user = self.keystone.users.get(name_or_id)
            return user
        except ks_exceptions.NotFound:
            users = self.keystone.users.list(name=name_or_id)
            if len(users) == 0:
                raise exception.Invalid(
                    message=(_("User not Found: %s") % name_or_id))
            if len(users) > 1:
                raise exception.Invalid(
                    message=(_("User name seems ambiguous: %s") % name_or_id))
            return users[0]

    def get_project(self, name_or_id):
        """Return the project matching *name_or_id* (id first, then name).

        :raises exception.Invalid: when no project or several match
        """
        try:
            project = self.keystone.projects.get(name_or_id)
            return project
        except ks_exceptions.NotFound:
            projects = self.keystone.projects.list(name=name_or_id)
            if len(projects) == 0:
                raise exception.Invalid(
                    message=(_("Project not Found: %s") % name_or_id))
            if len(projects) > 1:
                # Fixed: was ``messsage=`` (typo), which silently
                # dropped the error message.
                raise exception.Invalid(
                    message=(_("Project name seems ambiguous: %s") %
                             name_or_id))
            return projects[0]

    def get_domain(self, name_or_id):
        """Return the domain matching *name_or_id* (id first, then name).

        :raises exception.Invalid: when no domain or several match
        """
        try:
            domain = self.keystone.domains.get(name_or_id)
            return domain
        except ks_exceptions.NotFound:
            domains = self.keystone.domains.list(name=name_or_id)
            if len(domains) == 0:
                raise exception.Invalid(
                    message=(_("Domain not Found: %s") % name_or_id))
            if len(domains) > 1:
                raise exception.Invalid(
                    message=(_("Domain name seems ambiguous: %s") %
                             name_or_id))
            return domains[0]

    def create_session(self, user_id, password):
        """Build a keystone session authenticated as *user_id*.

        The user's default project is used as the session scope.
        """
        user = self.get_user(user_id)
        loader = loading.get_plugin_loader('password')
        auth = loader.load_from_options(
            auth_url=CONF.watcher_clients_auth.auth_url,
            password=password,
            user_id=user_id,
            project_id=user.default_project_id)
        return session.Session(auth=auth)

    def create_user(self, user):
        """Create a keystone user from a spec dict.

        :param user: dict with 'name', 'password', 'project', 'domain'
            and 'roles' (role names/ids granted on the project)
        :returns: the created user
        """
        project = self.get_project(user['project'])
        domain = self.get_domain(user['domain'])
        _user = self.keystone.users.create(
            user['name'],
            password=user['password'],
            domain=domain,
            project=project,
        )
        for role in user['roles']:
            role = self.get_role(role)
            self.keystone.roles.grant(
                role.id, user=_user.id, project=project.id)
        return _user

    def delete_user(self, user):
        """Delete *user* (object, id or name); missing users are ignored."""
        try:
            user = self.get_user(user)
            self.keystone.users.delete(user)
        except exception.Invalid:
            pass
|
||||
@@ -82,6 +82,9 @@ class NovaHelper(object):
|
||||
def get_availability_zone_list(self):
|
||||
return self.nova.availability_zones.list(detailed=True)
|
||||
|
||||
def get_service_list(self):
|
||||
return self.nova.services.list(binary='nova-compute')
|
||||
|
||||
def find_instance(self, instance_id):
|
||||
return self.nova.servers.get(instance_id)
|
||||
|
||||
@@ -787,6 +790,9 @@ class NovaHelper(object):
|
||||
net_obj = {"net-id": nic_id}
|
||||
net_list.append(net_obj)
|
||||
|
||||
# get availability zone of destination host
|
||||
azone = self.nova.services.list(host=node_id,
|
||||
binary='nova-compute')[0].zone
|
||||
instance = self.nova.servers.create(
|
||||
inst_name, image,
|
||||
flavor=flavor,
|
||||
@@ -794,7 +800,7 @@ class NovaHelper(object):
|
||||
security_groups=sec_group_list,
|
||||
nics=net_list,
|
||||
block_device_mapping_v2=block_device_mapping_v2,
|
||||
availability_zone="nova:%s" % node_id)
|
||||
availability_zone="%s:%s" % (azone, node_id))
|
||||
|
||||
# Poll at 5 second intervals, until the status is no longer 'BUILD'
|
||||
if instance:
|
||||
@@ -864,3 +870,27 @@ class NovaHelper(object):
|
||||
|
||||
def get_running_migration(self, instance_id):
|
||||
return self.nova.server_migrations.list(server=instance_id)
|
||||
|
||||
def swap_volume(self, old_volume, new_volume,
                retry=120, retry_interval=10):
    """Swap *old_volume* for *new_volume* on the attached instance.

    Calls nova's update_server_volume against the server holding
    old_volume's first attachment, then polls new_volume until it
    reaches 'in-use' or the retry budget runs out.

    :param old_volume: currently attached volume (must have at least
        one attachment)
    :param new_volume: replacement volume
    :param retry: maximum number of polls
    :param retry_interval: seconds between polls
    :returns: True when new_volume ends up 'in-use', else False
    """
    attachments = old_volume.attachments
    instance_id = attachments[0]['server_id']
    # do volume update
    self.nova.volumes.update_server_volume(
        instance_id, old_volume.id, new_volume.id)
    while getattr(new_volume, 'status') != 'in-use' and retry:
        # NOTE(review): polls through self.cinder -- assumes this
        # nova helper also holds a cinder client; confirm.
        new_volume = self.cinder.volumes.get(new_volume.id)
        LOG.debug('Waiting volume update to {0}'.format(new_volume))
        time.sleep(retry_interval)
        retry -= 1
        LOG.debug("retry count: %s" % retry)
    if getattr(new_volume, 'status') != "in-use":
        LOG.error("Volume update retry timeout or error")
        return False

    host_name = getattr(new_volume, "os-vol-host-attr:host")
    LOG.debug(
        "Volume update succeeded : "
        "Volume %s is now on host '%s'." % (new_volume.id, host_name))
    return True
|
||||
|
||||
@@ -17,7 +17,9 @@
|
||||
"""Utilities and helper functions."""
|
||||
|
||||
import datetime
|
||||
import random
|
||||
import re
|
||||
import string
|
||||
|
||||
from croniter import croniter
|
||||
|
||||
@@ -158,3 +160,8 @@ StrictDefaultValidatingDraft4Validator = extend_with_default(
|
||||
extend_with_strict_schema(validators.Draft4Validator))
|
||||
|
||||
Draft4Validator = validators.Draft4Validator
|
||||
|
||||
|
||||
def random_string(n):
    """Return a string of *n* random ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(n))
|
||||
|
||||
@@ -24,6 +24,7 @@ from watcher.conf import applier
|
||||
from watcher.conf import ceilometer_client
|
||||
from watcher.conf import cinder_client
|
||||
from watcher.conf import clients_auth
|
||||
from watcher.conf import collector
|
||||
from watcher.conf import db
|
||||
from watcher.conf import decision_engine
|
||||
from watcher.conf import exception
|
||||
@@ -58,3 +59,4 @@ ceilometer_client.register_opts(CONF)
|
||||
neutron_client.register_opts(CONF)
|
||||
clients_auth.register_opts(CONF)
|
||||
ironic_client.register_opts(CONF)
|
||||
collector.register_opts(CONF)
|
||||
|
||||
37
watcher/conf/collector.py
Normal file
37
watcher/conf/collector.py
Normal file
@@ -0,0 +1,37 @@
|
||||
# Copyright (c) 2017 NEC Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
|
||||
# Option group for the cluster data model collector plugins.
collector = cfg.OptGroup(name='collector',
                         title='Defines the parameters of '
                               'the module model collectors')

COLLECTOR_OPTS = [
    cfg.ListOpt('collector_plugins',
                default=['compute'],
                help='The cluster data model plugin names'),
]


def register_opts(conf):
    """Register the collector option group and its options on *conf*."""
    conf.register_group(collector)
    conf.register_opts(COLLECTOR_OPTS,
                       group=collector)


def list_opts():
    """Return (group, opts) pairs for oslo config generator discovery."""
    return [('collector', COLLECTOR_OPTS)]
|
||||
@@ -24,6 +24,7 @@ from oslo_log import log
|
||||
|
||||
from watcher.common import clients
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils as common_utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
@@ -72,6 +73,17 @@ class GnocchiHelper(object):
|
||||
raise exception.InvalidParameter(parameter='stop_time',
|
||||
parameter_type=datetime)
|
||||
|
||||
if not common_utils.is_uuid_like(resource_id):
|
||||
kwargs = dict(query={"=": {"original_resource_id": resource_id}},
|
||||
limit=1)
|
||||
resources = self.query_retry(
|
||||
f=self.gnocchi.resource.search, **kwargs)
|
||||
|
||||
if not resources:
|
||||
raise exception.ResourceNotFound(name=resource_id)
|
||||
|
||||
resource_id = resources[0]['id']
|
||||
|
||||
raw_kwargs = dict(
|
||||
metric=metric,
|
||||
start=start_time,
|
||||
|
||||
@@ -33,7 +33,7 @@ class MonascaHelper(object):
|
||||
def query_retry(self, f, *args, **kwargs):
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except exc.HTTPUnauthorized:
|
||||
except exc.Unauthorized:
|
||||
self.osc.reset_clients()
|
||||
self.monasca = self.osc.monasca()
|
||||
return f(*args, **kwargs)
|
||||
|
||||
@@ -20,7 +20,7 @@ You can upgrade to the latest database version via::
|
||||
|
||||
To check the current database version::
|
||||
|
||||
$ watcher-db-manage --config-file /path/to/watcher.conf current
|
||||
$ watcher-db-manage --config-file /path/to/watcher.conf version
|
||||
|
||||
|
||||
To create a script to run the migration offline::
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
"""add action description table
|
||||
|
||||
Revision ID: d09a5945e4a0
|
||||
Revises: d098df6021e2
|
||||
Create Date: 2017-07-13 20:33:01.473711
|
||||
|
||||
"""
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = 'd09a5945e4a0'
|
||||
down_revision = 'd098df6021e2'
|
||||
|
||||
from alembic import op
|
||||
import oslo_db
|
||||
import sqlalchemy as sa
|
||||
|
||||
def upgrade():
|
||||
op.create_table('action_descriptions',
|
||||
sa.Column('created_at', sa.DateTime(), nullable=True),
|
||||
sa.Column('updated_at', sa.DateTime(), nullable=True),
|
||||
sa.Column('deleted_at', sa.DateTime(), nullable=True),
|
||||
sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True),
|
||||
sa.Column('id', sa.Integer(), nullable=False),
|
||||
sa.Column('action_type', sa.String(length=255), nullable=False),
|
||||
sa.Column('description', sa.String(length=255), nullable=False),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
sa.UniqueConstraint('action_type', name='uniq_action_description0action_type')
|
||||
)
|
||||
|
||||
|
||||
def downgrade():
|
||||
op.drop_table('action_descriptions')
|
||||
@@ -1127,3 +1127,74 @@ class Connection(api.BaseConnection):
|
||||
return self._soft_delete(models.Service, service_id)
|
||||
except exception.ResourceNotFound:
|
||||
raise exception.ServiceNotFound(service=service_id)
|
||||
|
||||
# ### ACTION_DESCRIPTIONS ### #
|
||||
|
||||
def _add_action_descriptions_filters(self, query, filters):
|
||||
if not filters:
|
||||
filters = {}
|
||||
|
||||
plain_fields = ['id', 'action_type']
|
||||
|
||||
return self._add_filters(
|
||||
query=query, model=models.ActionDescription, filters=filters,
|
||||
plain_fields=plain_fields)
|
||||
|
||||
def get_action_description_list(self, context, filters=None, limit=None,
|
||||
marker=None, sort_key=None,
|
||||
sort_dir=None, eager=False):
|
||||
query = model_query(models.ActionDescription)
|
||||
if eager:
|
||||
query = self._set_eager_options(models.ActionDescription, query)
|
||||
query = self._add_action_descriptions_filters(query, filters)
|
||||
if not context.show_deleted:
|
||||
query = query.filter_by(deleted_at=None)
|
||||
return _paginate_query(models.ActionDescription, limit, marker,
|
||||
sort_key, sort_dir, query)
|
||||
|
||||
def create_action_description(self, values):
|
||||
try:
|
||||
action_description = self._create(models.ActionDescription, values)
|
||||
except db_exc.DBDuplicateEntry:
|
||||
raise exception.ActionDescriptionAlreadyExists(
|
||||
action_type=values['action_type'])
|
||||
return action_description
|
||||
|
||||
def _get_action_description(self, context, fieldname, value, eager):
|
||||
try:
|
||||
return self._get(context, model=models.ActionDescription,
|
||||
fieldname=fieldname, value=value, eager=eager)
|
||||
except exception.ResourceNotFound:
|
||||
raise exception.ActionDescriptionNotFound(action_id=value)
|
||||
|
||||
def get_action_description_by_id(self, context,
|
||||
action_id, eager=False):
|
||||
return self._get_action_description(
|
||||
context, fieldname="id", value=action_id, eager=eager)
|
||||
|
||||
def get_action_description_by_type(self, context,
|
||||
action_type, eager=False):
|
||||
return self._get_action_description(
|
||||
context, fieldname="action_type", value=action_type, eager=eager)
|
||||
|
||||
def destroy_action_description(self, action_id):
|
||||
try:
|
||||
return self._destroy(models.ActionDescription, action_id)
|
||||
except exception.ResourceNotFound:
|
||||
raise exception.ActionDescriptionNotFound(
|
||||
action_id=action_id)
|
||||
|
||||
def update_action_description(self, action_id, values):
|
||||
try:
|
||||
return self._update(models.ActionDescription,
|
||||
action_id, values)
|
||||
except exception.ResourceNotFound:
|
||||
raise exception.ActionDescriptionNotFound(
|
||||
action_id=action_id)
|
||||
|
||||
def soft_delete_action_description(self, action_id):
|
||||
try:
|
||||
return self._soft_delete(models.ActionDescription, action_id)
|
||||
except exception.ResourceNotFound:
|
||||
raise exception.ActionDescriptionNotFound(
|
||||
action_id=action_id)
|
||||
|
||||
@@ -278,3 +278,17 @@ class Service(Base):
|
||||
name = Column(String(255), nullable=False)
|
||||
host = Column(String(255), nullable=False)
|
||||
last_seen_up = Column(DateTime, nullable=True)
|
||||
|
||||
|
||||
class ActionDescription(Base):
|
||||
"""Represents a action description"""
|
||||
|
||||
__tablename__ = 'action_descriptions'
|
||||
__table_args__ = (
|
||||
UniqueConstraint('action_type',
|
||||
name="uniq_action_description0action_type"),
|
||||
table_args()
|
||||
)
|
||||
id = Column(Integer, primary_key=True)
|
||||
action_type = Column(String(255), nullable=False)
|
||||
description = Column(String(255), nullable=False)
|
||||
|
||||
@@ -62,9 +62,11 @@ class ContinuousAuditHandler(base.AuditHandler):
|
||||
if objects.audit.AuditStateTransitionManager().is_inactive(audit):
|
||||
# if audit isn't in active states, audit's job must be removed to
|
||||
# prevent using of inactive audit in future.
|
||||
[job for job in self.scheduler.get_jobs()
|
||||
if job.name == 'execute_audit' and
|
||||
job.args[0].uuid == audit.uuid][0].remove()
|
||||
jobs = [job for job in self.scheduler.get_jobs()
|
||||
if job.name == 'execute_audit' and
|
||||
job.args[0].uuid == audit.uuid]
|
||||
if jobs:
|
||||
jobs[0].remove()
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
@@ -22,7 +22,8 @@ ThermalOptimization = goals.ThermalOptimization
|
||||
Unclassified = goals.Unclassified
|
||||
WorkloadBalancing = goals.WorkloadBalancing
|
||||
NoisyNeighbor = goals.NoisyNeighborOptimization
|
||||
SavingEnergy = goals.SavingEnergy
|
||||
|
||||
__all__ = ("Dummy", "ServerConsolidation", "ThermalOptimization",
|
||||
"Unclassified", "WorkloadBalancing",
|
||||
"NoisyNeighborOptimization",)
|
||||
"NoisyNeighborOptimization", "SavingEnergy")
|
||||
|
||||
@@ -192,3 +192,27 @@ class NoisyNeighborOptimization(base.Goal):
|
||||
def get_efficacy_specification(cls):
|
||||
"""The efficacy spec for the current goal"""
|
||||
return specs.Unclassified()
|
||||
|
||||
|
||||
class SavingEnergy(base.Goal):
|
||||
"""SavingEnergy
|
||||
|
||||
This goal is used to reduce power consumption within a data center.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "saving_energy"
|
||||
|
||||
@classmethod
|
||||
def get_display_name(cls):
|
||||
return _("Saving Energy")
|
||||
|
||||
@classmethod
|
||||
def get_translatable_display_name(cls):
|
||||
return "Saving Energy"
|
||||
|
||||
@classmethod
|
||||
def get_efficacy_specification(cls):
|
||||
"""The efficacy spec for the current goal"""
|
||||
return specs.Unclassified()
|
||||
|
||||
@@ -17,6 +17,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from watcher.common import utils
|
||||
from watcher.decision_engine.loading import default
|
||||
|
||||
@@ -31,8 +33,8 @@ class CollectorManager(object):
|
||||
def get_collectors(self):
|
||||
if self._collectors is None:
|
||||
collectors = utils.Struct()
|
||||
available_collectors = self.collector_loader.list_available()
|
||||
for collector_name in available_collectors:
|
||||
collector_plugins = cfg.CONF.collector.collector_plugins
|
||||
for collector_name in collector_plugins:
|
||||
collector = self.collector_loader.load(collector_name)
|
||||
collectors[collector_name] = collector
|
||||
self._collectors = collectors
|
||||
|
||||
@@ -46,7 +46,8 @@ class WeightPlanner(base.BasePlanner):
|
||||
super(WeightPlanner, self).__init__(config)
|
||||
|
||||
action_weights = {
|
||||
'nop': 60,
|
||||
'nop': 70,
|
||||
'volume_migrate': 60,
|
||||
'change_nova_service_state': 50,
|
||||
'sleep': 40,
|
||||
'migrate': 30,
|
||||
@@ -63,6 +64,7 @@ class WeightPlanner(base.BasePlanner):
|
||||
'change_nova_service_state': 1,
|
||||
'nop': 1,
|
||||
'change_node_power_state': 2,
|
||||
'volume_migrate': 2
|
||||
}
|
||||
|
||||
@classmethod
|
||||
@@ -85,6 +87,7 @@ class WeightPlanner(base.BasePlanner):
|
||||
@staticmethod
|
||||
def chunkify(lst, n):
|
||||
"""Yield successive n-sized chunks from lst."""
|
||||
n = int(n)
|
||||
if n < 1:
|
||||
# Just to make sure the number is valid
|
||||
n = 1
|
||||
|
||||
@@ -145,7 +145,7 @@ class DefaultScope(base.BaseScope):
|
||||
compute_nodes.extend(detailed_aggregate.hosts)
|
||||
|
||||
def _collect_zones(self, availability_zones, allowed_nodes):
|
||||
zone_list = self.wrapper.get_availability_zone_list()
|
||||
service_list = self.wrapper.get_service_list()
|
||||
zone_names = [zone['name'] for zone
|
||||
in availability_zones]
|
||||
include_all_nodes = False
|
||||
@@ -155,9 +155,9 @@ class DefaultScope(base.BaseScope):
|
||||
else:
|
||||
raise exception.WildcardCharacterIsUsed(
|
||||
resource="availability zones")
|
||||
for zone in zone_list:
|
||||
if zone.zoneName in zone_names or include_all_nodes:
|
||||
allowed_nodes.extend(zone.hosts.keys())
|
||||
for service in service_list:
|
||||
if service.zone in zone_names or include_all_nodes:
|
||||
allowed_nodes.extend(service.host)
|
||||
|
||||
def exclude_resources(self, resources, **kwargs):
|
||||
instances_to_exclude = kwargs.get('instances')
|
||||
|
||||
@@ -14,27 +14,32 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from watcher.decision_engine.strategy.strategies import actuation
|
||||
from watcher.decision_engine.strategy.strategies import basic_consolidation
|
||||
from watcher.decision_engine.strategy.strategies import dummy_strategy
|
||||
from watcher.decision_engine.strategy.strategies import dummy_with_scorer
|
||||
from watcher.decision_engine.strategy.strategies import noisy_neighbor
|
||||
from watcher.decision_engine.strategy.strategies import outlet_temp_control
|
||||
from watcher.decision_engine.strategy.strategies import saving_energy
|
||||
from watcher.decision_engine.strategy.strategies import uniform_airflow
|
||||
from watcher.decision_engine.strategy.strategies import \
|
||||
vm_workload_consolidation
|
||||
from watcher.decision_engine.strategy.strategies import workload_balance
|
||||
from watcher.decision_engine.strategy.strategies import workload_stabilization
|
||||
|
||||
Actuator = actuation.Actuator
|
||||
BasicConsolidation = basic_consolidation.BasicConsolidation
|
||||
OutletTempControl = outlet_temp_control.OutletTempControl
|
||||
DummyStrategy = dummy_strategy.DummyStrategy
|
||||
DummyWithScorer = dummy_with_scorer.DummyWithScorer
|
||||
SavingEnergy = saving_energy.SavingEnergy
|
||||
VMWorkloadConsolidation = vm_workload_consolidation.VMWorkloadConsolidation
|
||||
WorkloadBalance = workload_balance.WorkloadBalance
|
||||
WorkloadStabilization = workload_stabilization.WorkloadStabilization
|
||||
UniformAirflow = uniform_airflow.UniformAirflow
|
||||
NoisyNeighbor = noisy_neighbor.NoisyNeighbor
|
||||
|
||||
__all__ = ("BasicConsolidation", "OutletTempControl", "DummyStrategy",
|
||||
"DummyWithScorer", "VMWorkloadConsolidation", "WorkloadBalance",
|
||||
"WorkloadStabilization", "UniformAirflow", "NoisyNeighbor")
|
||||
__all__ = ("Actuator", "BasicConsolidation", "OutletTempControl",
|
||||
"DummyStrategy", "DummyWithScorer", "VMWorkloadConsolidation",
|
||||
"WorkloadBalance", "WorkloadStabilization", "UniformAirflow",
|
||||
"NoisyNeighbor", "SavingEnergy")
|
||||
|
||||
99
watcher/decision_engine/strategy/strategies/actuation.py
Normal file
99
watcher/decision_engine/strategy/strategies/actuation.py
Normal file
@@ -0,0 +1,99 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 b<>com
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*Actuator*
|
||||
|
||||
This strategy allows anyone to create an action plan with a predefined set of
|
||||
actions. This strategy can be used for 2 different purposes:
|
||||
|
||||
- Test actions
|
||||
- Use this strategy based on an event trigger to perform some explicit task
|
||||
|
||||
"""
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class Actuator(base.UnclassifiedStrategy):
|
||||
"""Actuator that simply executes the actions given as parameter"""
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "actuator"
|
||||
|
||||
@classmethod
|
||||
def get_display_name(cls):
|
||||
return _("Actuator")
|
||||
|
||||
@classmethod
|
||||
def get_translatable_display_name(cls):
|
||||
return "Actuator"
|
||||
|
||||
@classmethod
|
||||
def get_schema(cls):
|
||||
# Mandatory default setting for each element
|
||||
return {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"actions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action_type": {
|
||||
"type": "string"
|
||||
},
|
||||
"resource_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"input_parameters": {
|
||||
"type": "object",
|
||||
"properties": {},
|
||||
"additionalProperties": True
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action_type", "input_parameters"
|
||||
],
|
||||
"additionalProperties": True,
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"actions"
|
||||
]
|
||||
}
|
||||
|
||||
@property
|
||||
def actions(self):
|
||||
return self.input_parameters.get('actions', [])
|
||||
|
||||
def pre_execute(self):
|
||||
LOG.info("Preparing Actuator strategy...")
|
||||
|
||||
def do_execute(self):
|
||||
for action in self.actions:
|
||||
self.solution.add_action(**action)
|
||||
|
||||
def post_execute(self):
|
||||
pass
|
||||
@@ -358,3 +358,11 @@ class NoisyNeighborBaseStrategy(BaseStrategy):
|
||||
@classmethod
|
||||
def get_goal_name(cls):
|
||||
return "noisy_neighbor"
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class SavingEnergyBaseStrategy(BaseStrategy):
|
||||
|
||||
@classmethod
|
||||
def get_goal_name(cls):
|
||||
return "saving_energy"
|
||||
|
||||
@@ -30,6 +30,7 @@ telemetries to measure thermal/workload status of server.
|
||||
|
||||
import datetime
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
from watcher._i18n import _
|
||||
@@ -160,6 +161,16 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
||||
def granularity(self):
|
||||
return self.input_parameters.get('granularity', 300)
|
||||
|
||||
@classmethod
|
||||
def get_config_opts(cls):
|
||||
return [
|
||||
cfg.StrOpt(
|
||||
"datasource",
|
||||
help="Data source to use in order to query the needed metrics",
|
||||
default="ceilometer",
|
||||
choices=["ceilometer", "gnocchi"])
|
||||
]
|
||||
|
||||
def calc_used_resource(self, node):
|
||||
"""Calculate the used vcpus, memory and disk based on VM flavors"""
|
||||
instances = self.compute_model.get_node_instances(node)
|
||||
|
||||
201
watcher/decision_engine/strategy/strategies/saving_energy.py
Normal file
201
watcher/decision_engine/strategy/strategies/saving_energy.py
Normal file
@@ -0,0 +1,201 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 ZTE Corporation
|
||||
#
|
||||
# Authors: licanwei <li.canwei2@zte.com.cn>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import random
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.common import exception as wexc
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class SavingEnergy(base.SavingEnergyBaseStrategy):
|
||||
|
||||
def __init__(self, config, osc=None):
|
||||
|
||||
super(SavingEnergy, self).__init__(config, osc)
|
||||
self._ironic_client = None
|
||||
self._nova_client = None
|
||||
|
||||
self.with_vms_node_pool = []
|
||||
self.free_poweron_node_pool = []
|
||||
self.free_poweroff_node_pool = []
|
||||
self.free_used_percent = 0
|
||||
self.min_free_hosts_num = 1
|
||||
|
||||
@property
|
||||
def ironic_client(self):
|
||||
if not self._ironic_client:
|
||||
self._ironic_client = self.osc.ironic()
|
||||
return self._ironic_client
|
||||
|
||||
@property
|
||||
def nova_client(self):
|
||||
if not self._nova_client:
|
||||
self._nova_client = self.osc.nova()
|
||||
return self._nova_client
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "saving_energy"
|
||||
|
||||
@classmethod
|
||||
def get_display_name(cls):
|
||||
return _("Saving Energy Strategy")
|
||||
|
||||
@classmethod
|
||||
def get_translatable_display_name(cls):
|
||||
return "Saving Energy Strategy"
|
||||
|
||||
@classmethod
|
||||
def get_schema(cls):
|
||||
"""return a schema of two input parameters
|
||||
|
||||
The standby nodes refer to those nodes unused
|
||||
but still poweredon to deal with boom of new instances.
|
||||
"""
|
||||
return {
|
||||
"properties": {
|
||||
"free_used_percent": {
|
||||
"description": ("a rational number, which describes the"
|
||||
"quotient of"
|
||||
" min_free_hosts_num/nodes_with_VMs_num"
|
||||
"where nodes_with_VMs_num is the number"
|
||||
"of nodes with VMs"),
|
||||
"type": "number",
|
||||
"default": 10.0
|
||||
},
|
||||
"min_free_hosts_num": {
|
||||
"description": ("minimum number of hosts without VMs"
|
||||
"but still powered on"),
|
||||
"type": "number",
|
||||
"default": 1
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
def add_action_poweronoff_node(self, node_uuid, state):
|
||||
"""Add an action for node disability into the solution.
|
||||
|
||||
:param node: node uuid
|
||||
:param state: node power state, power on or power off
|
||||
:return: None
|
||||
"""
|
||||
params = {'state': state}
|
||||
self.solution.add_action(
|
||||
action_type='change_node_power_state',
|
||||
resource_id=node_uuid,
|
||||
input_parameters=params)
|
||||
|
||||
def get_hosts_pool(self):
|
||||
"""Get three pools, with_vms_node_pool, free_poweron_node_pool,
|
||||
|
||||
free_poweroff_node_pool.
|
||||
|
||||
"""
|
||||
|
||||
node_list = self.ironic_client.node.list()
|
||||
for node in node_list:
|
||||
node_uuid = (node.to_dict())['uuid']
|
||||
node_info = self.ironic_client.node.get(node_uuid).to_dict()
|
||||
hypervisor_id = node_info['extra'].get('compute_node_id', None)
|
||||
if hypervisor_id is None:
|
||||
LOG.warning(('Cannot find compute_node_id in extra '
|
||||
'of ironic node %s'), node_uuid)
|
||||
continue
|
||||
hypervisor_node = self.nova_client.hypervisors.get(hypervisor_id)
|
||||
if hypervisor_node is None:
|
||||
LOG.warning(('Cannot find hypervisor %s'), hypervisor_id)
|
||||
continue
|
||||
hypervisor_node = hypervisor_node.to_dict()
|
||||
compute_service = hypervisor_node.get('service', None)
|
||||
host_uuid = compute_service.get('host')
|
||||
try:
|
||||
self.compute_model.get_node_by_uuid(host_uuid)
|
||||
except wexc.ComputeNodeNotFound:
|
||||
continue
|
||||
|
||||
if not (hypervisor_node.get('state') == 'up'):
|
||||
"""filter nodes that are not in 'up' state"""
|
||||
continue
|
||||
else:
|
||||
if (hypervisor_node['running_vms'] == 0):
|
||||
if (node_info['power_state'] == 'power on'):
|
||||
self.free_poweron_node_pool.append(node_uuid)
|
||||
elif (node_info['power_state'] == 'power off'):
|
||||
self.free_poweroff_node_pool.append(node_uuid)
|
||||
else:
|
||||
self.with_vms_node_pool.append(node_uuid)
|
||||
|
||||
def save_energy(self):
|
||||
|
||||
need_poweron = max(
|
||||
(len(self.with_vms_node_pool) * self.free_used_percent / 100), (
|
||||
self.min_free_hosts_num))
|
||||
len_poweron = len(self.free_poweron_node_pool)
|
||||
len_poweroff = len(self.free_poweroff_node_pool)
|
||||
if len_poweron > need_poweron:
|
||||
for node in random.sample(self.free_poweron_node_pool,
|
||||
(len_poweron - need_poweron)):
|
||||
self.add_action_poweronoff_node(node, 'off')
|
||||
LOG.debug("power off %s", node)
|
||||
elif len_poweron < need_poweron:
|
||||
diff = need_poweron - len_poweron
|
||||
for node in random.sample(self.free_poweroff_node_pool,
|
||||
min(len_poweroff, diff)):
|
||||
self.add_action_poweronoff_node(node, 'on')
|
||||
LOG.debug("power on %s", node)
|
||||
|
||||
def pre_execute(self):
|
||||
"""Pre-execution phase
|
||||
|
||||
This can be used to fetch some pre-requisites or data.
|
||||
"""
|
||||
LOG.info("Initializing Saving Energy Strategy")
|
||||
|
||||
if not self.compute_model:
|
||||
raise wexc.ClusterStateNotDefined()
|
||||
|
||||
if self.compute_model.stale:
|
||||
raise wexc.ClusterStateStale()
|
||||
|
||||
LOG.debug(self.compute_model.to_string())
|
||||
|
||||
def do_execute(self):
|
||||
"""Strategy execution phase
|
||||
|
||||
This phase is where you should put the main logic of your strategy.
|
||||
"""
|
||||
self.free_used_percent = self.input_parameters.free_used_percent
|
||||
self.min_free_hosts_num = self.input_parameters.min_free_hosts_num
|
||||
|
||||
self.get_hosts_pool()
|
||||
self.save_energy()
|
||||
|
||||
def post_execute(self):
|
||||
"""Post-execution phase
|
||||
|
||||
This can be used to compute the global efficacy
|
||||
"""
|
||||
self.solution.model = self.compute_model
|
||||
|
||||
LOG.debug(self.compute_model.to_string())
|
||||
6
watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py
Executable file → Normal file
6
watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py
Executable file → Normal file
@@ -77,12 +77,12 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
METRIC_NAMES = dict(
|
||||
ceilometer=dict(
|
||||
cpu_util_metric='cpu_util',
|
||||
ram_util_metric='memory.usage',
|
||||
ram_util_metric='memory.resident',
|
||||
ram_alloc_metric='memory',
|
||||
disk_alloc_metric='disk.root.size'),
|
||||
gnocchi=dict(
|
||||
cpu_util_metric='cpu_util',
|
||||
ram_util_metric='memory.usage',
|
||||
ram_util_metric='memory.resident',
|
||||
ram_alloc_metric='memory',
|
||||
disk_alloc_metric='disk.root.size'),
|
||||
)
|
||||
@@ -361,7 +361,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
||||
|
||||
if not instance_ram_util:
|
||||
instance_ram_util = instance.memory
|
||||
LOG.warning('No values returned by %s for memory.usage, '
|
||||
LOG.warning('No values returned by %s for memory.resident, '
|
||||
'use instance flavor ram value', instance.uuid)
|
||||
|
||||
if not instance_disk_util:
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
*Description*
|
||||
|
||||
This strategy migrates a VM based on the VM workload of the hosts.
|
||||
It makes decision to migrate a workload whenever a host's CPU
|
||||
It makes decision to migrate a workload whenever a host's CPU or RAM
|
||||
utilization % is higher than the specified threshold. The VM to
|
||||
be moved should make the host close to average workload of all
|
||||
hosts nodes.
|
||||
@@ -32,7 +32,7 @@ hosts nodes.
|
||||
* Hardware: compute node should use the same physical CPUs
|
||||
* Software: Ceilometer component ceilometer-agent-compute
|
||||
running in each compute node, and Ceilometer API can
|
||||
report such telemetry "cpu_util" successfully.
|
||||
report such telemetry "cpu_util" and "memory.resident" successfully.
|
||||
* You must have at least 2 physical compute nodes to run
|
||||
this strategy.
|
||||
|
||||
@@ -69,16 +69,16 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
|
||||
It is a migration strategy based on the VM workload of physical
|
||||
servers. It generates solutions to move a workload whenever a server's
|
||||
CPU utilization % is higher than the specified threshold.
|
||||
CPU or RAM utilization % is higher than the specified threshold.
|
||||
The VM to be moved should make the host close to average workload
|
||||
of all compute nodes.
|
||||
|
||||
*Requirements*
|
||||
|
||||
* Hardware: compute node should use the same physical CPUs
|
||||
* Hardware: compute node should use the same physical CPUs/RAMs
|
||||
* Software: Ceilometer component ceilometer-agent-compute running
|
||||
in each compute node, and Ceilometer API can report such telemetry
|
||||
"cpu_util" successfully.
|
||||
"cpu_util" and "memory.resident" successfully.
|
||||
* You must have at least 2 physical compute nodes to run this strategy
|
||||
|
||||
*Limitations*
|
||||
@@ -91,8 +91,12 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
"""
|
||||
|
||||
# The meter to report CPU utilization % of VM in ceilometer
|
||||
METER_NAME = "cpu_util"
|
||||
# Unit: %, value range is [0 , 100]
|
||||
CPU_METER_NAME = "cpu_util"
|
||||
|
||||
# The meter to report memory resident of VM in ceilometer
|
||||
# Unit: MB
|
||||
MEM_METER_NAME = "memory.resident"
|
||||
|
||||
MIGRATION = "migrate"
|
||||
|
||||
@@ -104,9 +108,9 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
:param osc: :py:class:`~.OpenStackClients` instance
|
||||
"""
|
||||
super(WorkloadBalance, self).__init__(config, osc)
|
||||
# the migration plan will be triggered when the CPU utilization %
|
||||
# reaches threshold
|
||||
self._meter = self.METER_NAME
|
||||
# the migration plan will be triggered when the CPU or RAM
|
||||
# utilization % reaches threshold
|
||||
self._meter = None
|
||||
self._ceilometer = None
|
||||
self._gnocchi = None
|
||||
|
||||
@@ -151,6 +155,13 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
# Mandatory default setting for each element
|
||||
return {
|
||||
"properties": {
|
||||
"metrics": {
|
||||
"description": "Workload balance based on metrics: "
|
||||
"cpu or ram utilization",
|
||||
"type": "string",
|
||||
"choice": ["cpu_util", "memory.resident"],
|
||||
"default": "cpu_util"
|
||||
},
|
||||
"threshold": {
|
||||
"description": "workload threshold for migration",
|
||||
"type": "number",
|
||||
@@ -251,18 +262,21 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
cores_available = host.vcpus - cores_used
|
||||
disk_available = host.disk - disk_used
|
||||
mem_available = host.memory - mem_used
|
||||
if (
|
||||
cores_available >= required_cores and
|
||||
disk_available >= required_disk and
|
||||
if (cores_available >= required_cores and
|
||||
mem_available >= required_mem and
|
||||
disk_available >= required_disk):
|
||||
if (self._meter == self.CPU_METER_NAME and
|
||||
((src_instance_workload + workload) <
|
||||
self.threshold / 100 * host.vcpus)
|
||||
):
|
||||
destination_hosts.append(instance_data)
|
||||
self.threshold / 100 * host.vcpus)):
|
||||
destination_hosts.append(instance_data)
|
||||
if (self._meter == self.MEM_METER_NAME and
|
||||
((src_instance_workload + workload) <
|
||||
self.threshold / 100 * host.memory)):
|
||||
destination_hosts.append(instance_data)
|
||||
|
||||
return destination_hosts
|
||||
|
||||
def group_hosts_by_cpu_util(self):
|
||||
def group_hosts_by_cpu_or_ram_util(self):
|
||||
"""Calculate the workloads of each node
|
||||
|
||||
try to find out the nodes which have reached threshold
|
||||
@@ -286,10 +300,10 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
instances = self.compute_model.get_node_instances(node)
|
||||
node_workload = 0.0
|
||||
for instance in instances:
|
||||
cpu_util = None
|
||||
instance_util = None
|
||||
try:
|
||||
if self.config.datasource == "ceilometer":
|
||||
cpu_util = self.ceilometer.statistic_aggregation(
|
||||
instance_util = self.ceilometer.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
meter_name=self._meter,
|
||||
period=self._period,
|
||||
@@ -298,7 +312,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
stop_time = datetime.datetime.utcnow()
|
||||
start_time = stop_time - datetime.timedelta(
|
||||
seconds=int(self._period))
|
||||
cpu_util = self.gnocchi.statistic_aggregation(
|
||||
instance_util = self.gnocchi.statistic_aggregation(
|
||||
resource_id=instance.uuid,
|
||||
metric=self._meter,
|
||||
granularity=self.granularity,
|
||||
@@ -308,23 +322,32 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
)
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
LOG.error("Can not get cpu_util from %s",
|
||||
LOG.error("Can not get %s from %s", self._meter,
|
||||
self.config.datasource)
|
||||
continue
|
||||
if cpu_util is None:
|
||||
LOG.debug("Instance (%s): cpu_util is None", instance.uuid)
|
||||
if instance_util is None:
|
||||
LOG.debug("Instance (%s): %s is None",
|
||||
instance.uuid, self._meter)
|
||||
continue
|
||||
workload_cache[instance.uuid] = cpu_util * instance.vcpus / 100
|
||||
if self._meter == self.CPU_METER_NAME:
|
||||
workload_cache[instance.uuid] = (instance_util *
|
||||
instance.vcpus / 100)
|
||||
else:
|
||||
workload_cache[instance.uuid] = instance_util
|
||||
node_workload += workload_cache[instance.uuid]
|
||||
LOG.debug("VM (%s): cpu_util %f", instance.uuid, cpu_util)
|
||||
node_cpu_util = node_workload / node.vcpus * 100
|
||||
LOG.debug("VM (%s): %s %f", instance.uuid, self._meter,
|
||||
instance_util)
|
||||
|
||||
cluster_workload += node_workload
|
||||
if self._meter == self.CPU_METER_NAME:
|
||||
node_util = node_workload / node.vcpus * 100
|
||||
else:
|
||||
node_util = node_workload / node.memory * 100
|
||||
|
||||
instance_data = {
|
||||
'node': node, "cpu_util": node_cpu_util,
|
||||
'node': node, self._meter: node_util,
|
||||
'workload': node_workload}
|
||||
if node_cpu_util >= self.threshold:
|
||||
if node_util >= self.threshold:
|
||||
# mark the node to release resources
|
||||
overload_hosts.append(instance_data)
|
||||
else:
|
||||
@@ -356,8 +379,9 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
"""
|
||||
self.threshold = self.input_parameters.threshold
|
||||
self._period = self.input_parameters.period
|
||||
self._meter = self.input_parameters.metrics
|
||||
source_nodes, target_nodes, avg_workload, workload_cache = (
|
||||
self.group_hosts_by_cpu_util())
|
||||
self.group_hosts_by_cpu_or_ram_util())
|
||||
|
||||
if not source_nodes:
|
||||
LOG.debug("No hosts require optimization")
|
||||
@@ -373,7 +397,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
# choose the server with largest cpu_util
|
||||
source_nodes = sorted(source_nodes,
|
||||
reverse=True,
|
||||
key=lambda x: (x[self.METER_NAME]))
|
||||
key=lambda x: (x[self._meter]))
|
||||
|
||||
instance_to_migrate = self.choose_instance_to_migrate(
|
||||
source_nodes, avg_workload, workload_cache)
|
||||
@@ -391,7 +415,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
||||
"be because of there's no enough CPU/Memory/DISK")
|
||||
return self.solution
|
||||
destination_hosts = sorted(destination_hosts,
|
||||
key=lambda x: (x["cpu_util"]))
|
||||
key=lambda x: (x[self._meter]))
|
||||
# always use the host with lowerest CPU utilization
|
||||
mig_destination_node = destination_hosts[0]['node']
|
||||
# generate solution to migrate the instance to the dest server,
|
||||
|
||||
@@ -252,7 +252,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
"No values returned by %(resource_id)s "
|
||||
"for %(metric_name)s" % dict(
|
||||
resource_id=instance.uuid, metric_name=meter))
|
||||
avg_meter = 0
|
||||
return
|
||||
if meter == 'cpu_util':
|
||||
avg_meter /= float(100)
|
||||
instance_load[meter] = avg_meter
|
||||
@@ -308,12 +308,10 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
)
|
||||
|
||||
if avg_meter is None:
|
||||
if meter_name == 'hardware.memory.used':
|
||||
avg_meter = node.memory
|
||||
if meter_name == 'compute.node.cpu.percent':
|
||||
avg_meter = 1
|
||||
LOG.warning('No values returned by node %s for %s',
|
||||
node_id, meter_name)
|
||||
del hosts_load[node_id]
|
||||
break
|
||||
else:
|
||||
if meter_name == 'hardware.memory.used':
|
||||
avg_meter /= oslo_utils.units.Ki
|
||||
@@ -362,10 +360,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
migration_case = []
|
||||
new_hosts = copy.deepcopy(hosts)
|
||||
instance_load = self.get_instance_load(instance)
|
||||
if not instance_load:
|
||||
return
|
||||
s_host_vcpus = new_hosts[src_node.uuid]['vcpus']
|
||||
d_host_vcpus = new_hosts[dst_node.uuid]['vcpus']
|
||||
for metric in self.metrics:
|
||||
if metric is 'cpu_util':
|
||||
if metric == 'cpu_util':
|
||||
new_hosts[src_node.uuid][metric] -= (
|
||||
self.transform_instance_cpu(instance_load, s_host_vcpus))
|
||||
new_hosts[dst_node.uuid][metric] += (
|
||||
@@ -408,6 +408,8 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
dst_node = self.compute_model.get_node_by_uuid(dst_host)
|
||||
sd_case = self.calculate_migration_case(
|
||||
hosts, instance, src_node, dst_node)
|
||||
if sd_case is None:
|
||||
break
|
||||
|
||||
weighted_sd = self.calculate_weighted_sd(sd_case[:-1])
|
||||
|
||||
@@ -416,6 +418,8 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
'host': dst_node.uuid, 'value': weighted_sd,
|
||||
's_host': src_node.uuid, 'instance': instance.uuid}
|
||||
instance_host_map.append(min_sd_case)
|
||||
if sd_case is None:
|
||||
continue
|
||||
return sorted(instance_host_map, key=lambda x: x['value'])
|
||||
|
||||
def check_threshold(self):
|
||||
@@ -424,7 +428,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
||||
normalized_load = self.normalize_hosts_load(hosts_load)
|
||||
for metric in self.metrics:
|
||||
metric_sd = self.get_sd(normalized_load, metric)
|
||||
LOG.info("Standard deviation for %s is %s."
|
||||
% (metric, metric_sd))
|
||||
if metric_sd > float(self.thresholds[metric]):
|
||||
LOG.info("Standard deviation of %s exceeds"
|
||||
" appropriate threshold %s."
|
||||
% (metric, metric_sd))
|
||||
return self.simulate_migrations(hosts_load)
|
||||
|
||||
def add_migration(self,
|
||||
|
||||
@@ -78,6 +78,14 @@ class Syncer(object):
|
||||
"""Strategies loaded from DB"""
|
||||
if self._available_strategies is None:
|
||||
self._available_strategies = objects.Strategy.list(self.ctx)
|
||||
goal_ids = [g.id for g in self.available_goals]
|
||||
stale_strategies = [s for s in self._available_strategies
|
||||
if s.goal_id not in goal_ids]
|
||||
for s in stale_strategies:
|
||||
LOG.info("Can't find Goal id %d of strategy %s",
|
||||
s.goal_id, s.name)
|
||||
s.soft_delete()
|
||||
self._available_strategies.remove(s)
|
||||
return self._available_strategies
|
||||
|
||||
@property
|
||||
|
||||
@@ -33,3 +33,4 @@ def register_all():
|
||||
__import__('watcher.objects.efficacy_indicator')
|
||||
__import__('watcher.objects.scoring_engine')
|
||||
__import__('watcher.objects.service')
|
||||
__import__('watcher.objects.action_description')
|
||||
|
||||
141
watcher/objects/action_description.py
Normal file
141
watcher/objects/action_description.py
Normal file
@@ -0,0 +1,141 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 ZTE
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from watcher.common import exception
|
||||
from watcher.common import utils
|
||||
from watcher.db import api as db_api
|
||||
from watcher.objects import base
|
||||
from watcher.objects import fields as wfields
|
||||
|
||||
|
||||
@base.WatcherObjectRegistry.register
|
||||
class ActionDescription(base.WatcherPersistentObject, base.WatcherObject,
|
||||
base.WatcherObjectDictCompat):
|
||||
|
||||
# Version 1.0: Initial version
|
||||
VERSION = '1.0'
|
||||
|
||||
dbapi = db_api.get_instance()
|
||||
|
||||
fields = {
|
||||
'id': wfields.IntegerField(),
|
||||
'action_type': wfields.StringField(),
|
||||
'description': wfields.StringField(),
|
||||
}
|
||||
|
||||
@base.remotable_classmethod
|
||||
def get(cls, context, action_id):
|
||||
"""Find a action description based on its id
|
||||
|
||||
:param context: Security context. NOTE: This should only
|
||||
be used internally by the indirection_api.
|
||||
Unfortunately, RPC requires context as the first
|
||||
argument, even though we don't use it.
|
||||
A context should be set when instantiating the
|
||||
object
|
||||
:param action_id: the id of a action description.
|
||||
:returns: a :class:`ActionDescription` object.
|
||||
"""
|
||||
if utils.is_int_like(action_id):
|
||||
db_action = cls.dbapi.get_action_description_by_id(
|
||||
context, action_id)
|
||||
action = ActionDescription._from_db_object(cls(context), db_action)
|
||||
return action
|
||||
else:
|
||||
raise exception.InvalidIdentity(identity=action_id)
|
||||
|
||||
@base.remotable_classmethod
|
||||
def get_by_type(cls, context, action_type):
|
||||
"""Find a action description based on action type
|
||||
|
||||
:param action_type: the action type of a action description.
|
||||
:param context: Security context
|
||||
:returns: a :class:`ActionDescription` object.
|
||||
"""
|
||||
|
||||
db_action = cls.dbapi.get_action_description_by_type(
|
||||
context, action_type)
|
||||
action = cls._from_db_object(cls(context), db_action)
|
||||
return action
|
||||
|
||||
@base.remotable_classmethod
|
||||
def list(cls, context, limit=None, marker=None, filters=None,
|
||||
sort_key=None, sort_dir=None):
|
||||
"""Return a list of :class:`ActionDescription` objects.
|
||||
|
||||
:param context: Security context. NOTE: This should only
|
||||
be used internally by the indirection_api.
|
||||
Unfortunately, RPC requires context as the first
|
||||
argument, even though we don't use it.
|
||||
A context should be set when instantiating the
|
||||
object, e.g.: ActionDescription(context)
|
||||
:param filters: dict mapping the filter key to a value.
|
||||
:param limit: maximum number of resources to return in a single result.
|
||||
:param marker: pagination marker for large data sets.
|
||||
:param sort_key: column to sort results by.
|
||||
:param sort_dir: direction to sort. "asc" or "desc".
|
||||
:returns: a list of :class:`ActionDescription` object.
|
||||
"""
|
||||
db_actions = cls.dbapi.get_action_description_list(
|
||||
context,
|
||||
filters=filters,
|
||||
limit=limit,
|
||||
marker=marker,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir)
|
||||
|
||||
return [cls._from_db_object(cls(context), obj) for obj in db_actions]
|
||||
|
||||
@base.remotable
|
||||
def create(self):
|
||||
"""Create a :class:`ActionDescription` record in the DB."""
|
||||
values = self.obj_get_changes()
|
||||
db_action = self.dbapi.create_action_description(values)
|
||||
self._from_db_object(self, db_action)
|
||||
|
||||
@base.remotable
|
||||
def save(self):
|
||||
"""Save updates to this :class:`ActionDescription`.
|
||||
|
||||
Updates will be made column by column based on the result
|
||||
of self.what_changed().
|
||||
"""
|
||||
updates = self.obj_get_changes()
|
||||
db_obj = self.dbapi.update_action_description(self.id, updates)
|
||||
obj = self._from_db_object(self, db_obj, eager=False)
|
||||
self.obj_refresh(obj)
|
||||
self.obj_reset_changes()
|
||||
|
||||
def refresh(self):
|
||||
"""Loads updates for this :class:`ActionDescription`.
|
||||
|
||||
Loads a action description with the same id from the database and
|
||||
checks for updated attributes. Updates are applied from
|
||||
the loaded action description column by column, if there
|
||||
are any updates.
|
||||
"""
|
||||
current = self.get(self._context, action_id=self.id)
|
||||
for field in self.fields:
|
||||
if (hasattr(self, base.get_attrname(field)) and
|
||||
self[field] != current[field]):
|
||||
self[field] = current[field]
|
||||
|
||||
def soft_delete(self):
|
||||
"""Soft Delete the :class:`ActionDescription` from the DB."""
|
||||
db_obj = self.dbapi.soft_delete_action_description(self.id)
|
||||
obj = self._from_db_object(
|
||||
self.__class__(self._context), db_obj, eager=False)
|
||||
self.obj_refresh(obj)
|
||||
249
watcher/tests/applier/actions/test_volume_migration.py
Normal file
249
watcher/tests/applier/actions/test_volume_migration.py
Normal file
@@ -0,0 +1,249 @@
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import jsonschema
|
||||
import mock
|
||||
|
||||
from watcher.applier.actions import base as baction
|
||||
from watcher.applier.actions import volume_migration
|
||||
from watcher.common import cinder_helper
|
||||
from watcher.common import clients
|
||||
from watcher.common import keystone_helper
|
||||
from watcher.common import nova_helper
|
||||
from watcher.common import utils as w_utils
|
||||
from watcher.tests import base
|
||||
|
||||
|
||||
class TestMigration(base.TestCase):
|
||||
|
||||
VOLUME_UUID = "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba"
|
||||
INSTANCE_UUID = "45a37aec-85ab-4dda-a303-7d9f62c2f5bb"
|
||||
|
||||
def setUp(self):
|
||||
super(TestMigration, self).setUp()
|
||||
|
||||
self.m_osc_cls = mock.Mock()
|
||||
self.m_osc = mock.Mock(spec=clients.OpenStackClients)
|
||||
self.m_osc_cls.return_value = self.m_osc
|
||||
|
||||
self.m_n_helper_cls = mock.Mock()
|
||||
self.m_n_helper = mock.Mock(spec=nova_helper.NovaHelper)
|
||||
self.m_n_helper_cls.return_value = self.m_n_helper
|
||||
|
||||
self.m_c_helper_cls = mock.Mock()
|
||||
self.m_c_helper = mock.Mock(spec=cinder_helper.CinderHelper)
|
||||
self.m_c_helper_cls.return_value = self.m_c_helper
|
||||
|
||||
self.m_k_helper_cls = mock.Mock()
|
||||
self.m_k_helper = mock.Mock(spec=keystone_helper.KeystoneHelper)
|
||||
self.m_k_helper_cls.return_value = self.m_k_helper
|
||||
|
||||
m_openstack_clients = mock.patch.object(
|
||||
clients, "OpenStackClients", self.m_osc_cls)
|
||||
m_nova_helper = mock.patch.object(
|
||||
nova_helper, "NovaHelper", self.m_n_helper_cls)
|
||||
|
||||
m_cinder_helper = mock.patch.object(
|
||||
cinder_helper, "CinderHelper", self.m_c_helper_cls)
|
||||
|
||||
m_keystone_helper = mock.patch.object(
|
||||
keystone_helper, "KeystoneHelper", self.m_k_helper_cls)
|
||||
|
||||
m_openstack_clients.start()
|
||||
m_nova_helper.start()
|
||||
m_cinder_helper.start()
|
||||
m_keystone_helper.start()
|
||||
|
||||
self.addCleanup(m_keystone_helper.stop)
|
||||
self.addCleanup(m_cinder_helper.stop)
|
||||
self.addCleanup(m_nova_helper.stop)
|
||||
self.addCleanup(m_openstack_clients.stop)
|
||||
|
||||
self.action = volume_migration.VolumeMigrate(mock.Mock())
|
||||
|
||||
self.input_parameters_swap = {
|
||||
"migration_type": "swap",
|
||||
"destination_node": "storage1-poolname",
|
||||
"destination_type": "storage1-typename",
|
||||
baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID,
|
||||
}
|
||||
self.action_swap = volume_migration.VolumeMigrate(mock.Mock())
|
||||
self.action_swap.input_parameters = self.input_parameters_swap
|
||||
|
||||
self.input_parameters_migrate = {
|
||||
"migration_type": "cold",
|
||||
"destination_node": "storage1-poolname",
|
||||
"destination_type": "",
|
||||
baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID,
|
||||
}
|
||||
self.action_migrate = volume_migration.VolumeMigrate(mock.Mock())
|
||||
self.action_migrate.input_parameters = self.input_parameters_migrate
|
||||
|
||||
self.input_parameters_retype = {
|
||||
"migration_type": "cold",
|
||||
"destination_node": "",
|
||||
"destination_type": "storage1-typename",
|
||||
baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID,
|
||||
}
|
||||
self.action_retype = volume_migration.VolumeMigrate(mock.Mock())
|
||||
self.action_retype.input_parameters = self.input_parameters_retype
|
||||
|
||||
@staticmethod
|
||||
def fake_volume(**kwargs):
|
||||
volume = mock.MagicMock()
|
||||
volume.id = kwargs.get('id', TestMigration.VOLUME_UUID)
|
||||
volume.size = kwargs.get('size', '1')
|
||||
volume.status = kwargs.get('status', 'available')
|
||||
volume.snapshot_id = kwargs.get('snapshot_id', None)
|
||||
volume.availability_zone = kwargs.get('availability_zone', 'nova')
|
||||
return volume
|
||||
|
||||
@staticmethod
|
||||
def fake_instance(**kwargs):
|
||||
instance = mock.MagicMock()
|
||||
instance.id = kwargs.get('id', TestMigration.INSTANCE_UUID)
|
||||
instance.status = kwargs.get('status', 'ACTIVE')
|
||||
return instance
|
||||
|
||||
def test_parameters_swap(self):
|
||||
params = {baction.BaseAction.RESOURCE_ID:
|
||||
self.VOLUME_UUID,
|
||||
self.action.MIGRATION_TYPE: 'swap',
|
||||
self.action.DESTINATION_NODE: None,
|
||||
self.action.DESTINATION_TYPE: 'type-1'}
|
||||
self.action_swap.input_parameters = params
|
||||
self.assertTrue(self.action_swap.validate_parameters)
|
||||
|
||||
def test_parameters_migrate(self):
|
||||
params = {baction.BaseAction.RESOURCE_ID:
|
||||
self.VOLUME_UUID,
|
||||
self.action.MIGRATION_TYPE: 'cold',
|
||||
self.action.DESTINATION_NODE: 'node-1',
|
||||
self.action.DESTINATION_TYPE: None}
|
||||
self.action_migrate.input_parameters = params
|
||||
self.assertTrue(self.action_migrate.validate_parameters)
|
||||
|
||||
def test_parameters_retype(self):
|
||||
params = {baction.BaseAction.RESOURCE_ID:
|
||||
self.VOLUME_UUID,
|
||||
self.action.MIGRATION_TYPE: 'cold',
|
||||
self.action.DESTINATION_NODE: None,
|
||||
self.action.DESTINATION_TYPE: 'type-1'}
|
||||
self.action_retype.input_parameters = params
|
||||
self.assertTrue(self.action_retype.validate_parameters)
|
||||
|
||||
def test_parameters_exception_resource_id(self):
|
||||
params = {baction.BaseAction.RESOURCE_ID: "EFEF",
|
||||
self.action.MIGRATION_TYPE: 'swap',
|
||||
self.action.DESTINATION_NODE: None,
|
||||
self.action.DESTINATION_TYPE: 'type-1'}
|
||||
self.action_swap.input_parameters = params
|
||||
self.assertRaises(jsonschema.ValidationError,
|
||||
self.action_swap.validate_parameters)
|
||||
|
||||
def test_migrate_success(self):
|
||||
volume = self.fake_volume()
|
||||
|
||||
self.m_c_helper.can_cold.return_value = True
|
||||
self.m_c_helper.get_volume.return_value = volume
|
||||
result = self.action_migrate.execute()
|
||||
self.assertTrue(result)
|
||||
self.m_c_helper.migrate.assert_called_once_with(
|
||||
volume,
|
||||
"storage1-poolname"
|
||||
)
|
||||
|
||||
def test_migrate_fail(self):
|
||||
self.m_c_helper.can_cold.return_value = False
|
||||
result = self.action_migrate.execute()
|
||||
self.assertFalse(result)
|
||||
self.m_c_helper.migrate.assert_not_called()
|
||||
|
||||
def test_retype_success(self):
|
||||
volume = self.fake_volume()
|
||||
|
||||
self.m_c_helper.can_cold.return_value = True
|
||||
self.m_c_helper.get_volume.return_value = volume
|
||||
result = self.action_retype.execute()
|
||||
self.assertTrue(result)
|
||||
self.m_c_helper.retype.assert_called_once_with(
|
||||
volume,
|
||||
"storage1-typename",
|
||||
)
|
||||
|
||||
def test_retype_fail(self):
|
||||
self.m_c_helper.can_cold.return_value = False
|
||||
result = self.action_migrate.execute()
|
||||
self.assertFalse(result)
|
||||
self.m_c_helper.migrate.assert_not_called()
|
||||
|
||||
def test_swap_success(self):
|
||||
volume = self.fake_volume(
|
||||
status='in-use', attachments=[{'server_id': 'server_id'}])
|
||||
self.m_n_helper.find_instance.return_value = self.fake_instance()
|
||||
|
||||
new_volume = self.fake_volume(id=w_utils.generate_uuid())
|
||||
user = mock.Mock()
|
||||
session = mock.MagicMock()
|
||||
self.m_k_helper.create_user.return_value = user
|
||||
self.m_k_helper.create_session.return_value = session
|
||||
self.m_c_helper.get_volume.return_value = volume
|
||||
self.m_c_helper.create_volume.return_value = new_volume
|
||||
|
||||
result = self.action_swap.execute()
|
||||
self.assertTrue(result)
|
||||
|
||||
self.m_n_helper.swap_volume.assert_called_once_with(
|
||||
volume,
|
||||
new_volume
|
||||
)
|
||||
self.m_k_helper.delete_user.assert_called_once_with(user)
|
||||
|
||||
def test_swap_fail(self):
|
||||
# _can_swap fail
|
||||
instance = self.fake_instance(status='STOPPED')
|
||||
self.m_n_helper.find_instance.return_value = instance
|
||||
|
||||
result = self.action_swap.execute()
|
||||
self.assertFalse(result)
|
||||
|
||||
def test_can_swap_success(self):
|
||||
volume = self.fake_volume(
|
||||
status='in-use', attachments=[{'server_id': 'server_id'}])
|
||||
instance = self.fake_instance()
|
||||
|
||||
self.m_n_helper.find_instance.return_value = instance
|
||||
result = self.action_swap._can_swap(volume)
|
||||
self.assertTrue(result)
|
||||
|
||||
instance = self.fake_instance(status='PAUSED')
|
||||
self.m_n_helper.find_instance.return_value = instance
|
||||
result = self.action_swap._can_swap(volume)
|
||||
self.assertTrue(result)
|
||||
|
||||
instance = self.fake_instance(status='RESIZED')
|
||||
self.m_n_helper.find_instance.return_value = instance
|
||||
result = self.action_swap._can_swap(volume)
|
||||
self.assertTrue(result)
|
||||
|
||||
def test_can_swap_fail(self):
|
||||
|
||||
volume = self.fake_volume(
|
||||
status='in-use', attachments=[{'server_id': 'server_id'}])
|
||||
instance = self.fake_instance(status='STOPPED')
|
||||
self.m_n_helper.find_instance.return_value = instance
|
||||
result = self.action_swap._can_swap(volume)
|
||||
self.assertFalse(result)
|
||||
@@ -20,6 +20,8 @@ import eventlet
|
||||
import mock
|
||||
|
||||
from watcher.applier.workflow_engine import default as tflow
|
||||
from watcher.common import clients
|
||||
from watcher.common import nova_helper
|
||||
from watcher import objects
|
||||
from watcher.tests.db import base
|
||||
from watcher.tests.objects import utils as obj_utils
|
||||
@@ -55,6 +57,32 @@ class TestTaskFlowActionContainer(base.DbTestCase):
|
||||
|
||||
self.assertTrue(action.state, objects.action.State.SUCCEEDED)
|
||||
|
||||
@mock.patch.object(clients.OpenStackClients, 'nova', mock.Mock())
|
||||
def test_execute_with_failed(self):
|
||||
nova_util = nova_helper.NovaHelper()
|
||||
instance = "31b9dd5c-b1fd-4f61-9b68-a47096326dac"
|
||||
nova_util.nova.servers.get.return_value = instance
|
||||
action_plan = obj_utils.create_test_action_plan(
|
||||
self.context, audit_id=self.audit.id,
|
||||
strategy_id=self.strategy.id,
|
||||
state=objects.action.State.ONGOING)
|
||||
|
||||
action = obj_utils.create_test_action(
|
||||
self.context, action_plan_id=action_plan.id,
|
||||
state=objects.action.State.ONGOING,
|
||||
action_type='migrate',
|
||||
input_parameters={"resource_id":
|
||||
instance,
|
||||
"migration_type": "live",
|
||||
"destination_node": "host2",
|
||||
"source_node": "host1"})
|
||||
action_container = tflow.TaskFlowActionContainer(
|
||||
db_action=action,
|
||||
engine=self.engine)
|
||||
action_container.execute()
|
||||
|
||||
self.assertTrue(action.state, objects.action.State.FAILED)
|
||||
|
||||
@mock.patch('eventlet.spawn')
|
||||
def test_execute_with_cancel_action_plan(self, mock_eventlet_spawn):
|
||||
action_plan = obj_utils.create_test_action_plan(
|
||||
|
||||
@@ -22,6 +22,7 @@ import types
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_service import service
|
||||
from watcher.applier import sync
|
||||
from watcher.common import service as watcher_service
|
||||
|
||||
from watcher.cmd import applier
|
||||
@@ -49,6 +50,7 @@ class TestApplier(base.BaseTestCase):
|
||||
super(TestApplier, self).tearDown()
|
||||
self.conf._parse_cli_opts = self._parse_cli_opts
|
||||
|
||||
@mock.patch.object(sync.Syncer, "sync", mock.Mock())
|
||||
@mock.patch.object(service, "launch")
|
||||
def test_run_applier_app(self, m_launch):
|
||||
applier.main()
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
@@ -13,6 +14,7 @@
|
||||
#
|
||||
|
||||
import mock
|
||||
import time
|
||||
|
||||
from watcher.common import cinder_helper
|
||||
from watcher.common import clients
|
||||
@@ -124,3 +126,120 @@ class TestCinderHelper(base.TestCase):
|
||||
'nobackend')
|
||||
|
||||
self.assertEqual("", volume_type_name)
|
||||
|
||||
@staticmethod
|
||||
def fake_volume(**kwargs):
|
||||
volume = mock.MagicMock()
|
||||
volume.id = kwargs.get('id', '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba')
|
||||
volume.name = kwargs.get('name', 'fakename')
|
||||
volume.size = kwargs.get('size', '1')
|
||||
volume.status = kwargs.get('status', 'available')
|
||||
volume.snapshot_id = kwargs.get('snapshot_id', None)
|
||||
volume.availability_zone = kwargs.get('availability_zone', 'nova')
|
||||
volume.volume_type = kwargs.get('volume_type', 'fake_type')
|
||||
return volume
|
||||
|
||||
def test_can_cold_success(self, mock_cinder):
|
||||
cinder_util = cinder_helper.CinderHelper()
|
||||
|
||||
volume = self.fake_volume()
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
result = cinder_util.can_cold(volume)
|
||||
self.assertTrue(result)
|
||||
|
||||
def test_can_cold_fail(self, mock_cinder):
|
||||
cinder_util = cinder_helper.CinderHelper()
|
||||
|
||||
volume = self.fake_volume(status='in-use')
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
result = cinder_util.can_cold(volume)
|
||||
self.assertFalse(result)
|
||||
|
||||
volume = self.fake_volume(snapshot_id='snapshot_id')
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
result = cinder_util.can_cold(volume)
|
||||
self.assertFalse(result)
|
||||
|
||||
volume = self.fake_volume()
|
||||
setattr(volume, 'os-vol-host-attr:host', 'host@backend#pool')
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
result = cinder_util.can_cold(volume, 'host@backend#pool')
|
||||
self.assertFalse(result)
|
||||
|
||||
@mock.patch.object(time, 'sleep', mock.Mock())
|
||||
def test_migrate_success(self, mock_cinder):
|
||||
|
||||
cinder_util = cinder_helper.CinderHelper()
|
||||
|
||||
volume = self.fake_volume()
|
||||
setattr(volume, 'os-vol-host-attr:host', 'source_node')
|
||||
setattr(volume, 'migration_status', 'success')
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
|
||||
volume_type = self.fake_volume_type()
|
||||
cinder_util.cinder.volume_types.list.return_value = [volume_type]
|
||||
|
||||
result = cinder_util.migrate(volume, 'host@backend#pool')
|
||||
self.assertTrue(result)
|
||||
|
||||
@mock.patch.object(time, 'sleep', mock.Mock())
|
||||
def test_migrate_fail(self, mock_cinder):
|
||||
|
||||
cinder_util = cinder_helper.CinderHelper()
|
||||
|
||||
volume = self.fake_volume()
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
|
||||
volume_type = self.fake_volume_type()
|
||||
volume_type.name = 'notbackend'
|
||||
cinder_util.cinder.volume_types.list.return_value = [volume_type]
|
||||
|
||||
self.assertRaisesRegex(
|
||||
exception.Invalid,
|
||||
"Volume type must be same for migrating",
|
||||
cinder_util.migrate, volume, 'host@backend#pool')
|
||||
|
||||
volume = self.fake_volume()
|
||||
setattr(volume, 'os-vol-host-attr:host', 'source_node')
|
||||
setattr(volume, 'migration_status', 'error')
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
|
||||
volume_type = self.fake_volume_type()
|
||||
cinder_util.cinder.volume_types.list.return_value = [volume_type]
|
||||
|
||||
result = cinder_util.migrate(volume, 'host@backend#pool')
|
||||
self.assertFalse(result)
|
||||
|
||||
@mock.patch.object(time, 'sleep', mock.Mock())
|
||||
def test_retype_success(self, mock_cinder):
|
||||
cinder_util = cinder_helper.CinderHelper()
|
||||
|
||||
volume = self.fake_volume()
|
||||
setattr(volume, 'os-vol-host-attr:host', 'source_node')
|
||||
setattr(volume, 'migration_status', 'success')
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
|
||||
result = cinder_util.retype(volume, 'notfake_type')
|
||||
self.assertTrue(result)
|
||||
|
||||
@mock.patch.object(time, 'sleep', mock.Mock())
|
||||
def test_retype_fail(self, mock_cinder):
|
||||
cinder_util = cinder_helper.CinderHelper()
|
||||
|
||||
volume = self.fake_volume()
|
||||
setattr(volume, 'os-vol-host-attr:host', 'source_node')
|
||||
setattr(volume, 'migration_status', 'success')
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
|
||||
self.assertRaisesRegex(
|
||||
exception.Invalid,
|
||||
"Volume type must be different for retyping",
|
||||
cinder_util.retype, volume, 'fake_type')
|
||||
|
||||
volume = self.fake_volume()
|
||||
setattr(volume, 'os-vol-host-attr:host', 'source_node')
|
||||
setattr(volume, 'migration_status', 'error')
|
||||
cinder_util.cinder.volumes.get.return_value = volume
|
||||
|
||||
result = cinder_util.retype(volume, 'notfake_type')
|
||||
self.assertFalse(result)
|
||||
|
||||
@@ -190,7 +190,8 @@ class TestClients(base.TestCase):
|
||||
osc.gnocchi()
|
||||
mock_call.assert_called_once_with(
|
||||
CONF.gnocchi_client.api_version,
|
||||
interface=CONF.gnocchi_client.endpoint_type,
|
||||
adapter_options={
|
||||
"interface": CONF.gnocchi_client.endpoint_type},
|
||||
session=mock_session)
|
||||
|
||||
@mock.patch.object(clients.OpenStackClients, 'session')
|
||||
@@ -395,12 +396,14 @@ class TestClients(base.TestCase):
|
||||
@mock.patch.object(irclient, 'Client')
|
||||
@mock.patch.object(clients.OpenStackClients, 'session')
|
||||
def test_clients_ironic(self, mock_session, mock_call):
|
||||
ironic_url = 'http://localhost:6385/'
|
||||
mock_session.get_endpoint.return_value = ironic_url
|
||||
osc = clients.OpenStackClients()
|
||||
osc._ironic = None
|
||||
osc.ironic()
|
||||
mock_call.assert_called_once_with(
|
||||
CONF.ironic_client.api_version,
|
||||
CONF.ironic_client.endpoint_type,
|
||||
ironic_url,
|
||||
max_retries=None,
|
||||
os_ironic_api_version=None,
|
||||
retry_interval=None,
|
||||
@@ -408,6 +411,8 @@ class TestClients(base.TestCase):
|
||||
|
||||
@mock.patch.object(clients.OpenStackClients, 'session')
|
||||
def test_clients_ironic_diff_vers(self, mock_session):
|
||||
ironic_url = 'http://localhost:6385/'
|
||||
mock_session.get_endpoint.return_value = ironic_url
|
||||
CONF.set_override('api_version', '1', group='ironic_client')
|
||||
osc = clients.OpenStackClients()
|
||||
osc._ironic = None
|
||||
@@ -416,15 +421,29 @@ class TestClients(base.TestCase):
|
||||
|
||||
@mock.patch.object(clients.OpenStackClients, 'session')
|
||||
def test_clients_ironic_diff_endpoint(self, mock_session):
|
||||
CONF.set_override('endpoint_type', 'internalURL',
|
||||
group='ironic_client')
|
||||
ironic_url = 'http://localhost:6385/'
|
||||
mock_session.get_endpoint.return_value = ironic_url
|
||||
osc = clients.OpenStackClients()
|
||||
osc._ironic = None
|
||||
osc.ironic()
|
||||
self.assertEqual('internalURL', osc.ironic().http_client.endpoint)
|
||||
mock_session.get_endpoint.assert_called_once_with(
|
||||
interface='publicURL',
|
||||
region_name=None,
|
||||
service_type='baremetal')
|
||||
|
||||
CONF.set_override('endpoint_type', 'internalURL',
|
||||
group='ironic_client')
|
||||
osc._ironic = None
|
||||
osc.ironic()
|
||||
mock_session.get_endpoint.assert_called_with(
|
||||
interface='internalURL',
|
||||
region_name=None,
|
||||
service_type='baremetal')
|
||||
|
||||
@mock.patch.object(clients.OpenStackClients, 'session')
|
||||
def test_clients_ironic_cached(self, mock_session):
|
||||
ironic_url = 'http://localhost:6385/'
|
||||
mock_session.get_endpoint.return_value = ironic_url
|
||||
osc = clients.OpenStackClients()
|
||||
osc._ironic = None
|
||||
ironic = osc.ironic()
|
||||
|
||||
@@ -363,3 +363,28 @@ class TestNovaHelper(base.TestCase):
|
||||
|
||||
nova_util.get_flavor_instance(instance, cache)
|
||||
self.assertEqual(instance.flavor['name'], cache['name'])
|
||||
|
||||
@staticmethod
|
||||
def fake_volume(**kwargs):
|
||||
volume = mock.MagicMock()
|
||||
volume.id = kwargs.get('id', '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba')
|
||||
volume.size = kwargs.get('size', '1')
|
||||
volume.status = kwargs.get('status', 'available')
|
||||
volume.snapshot_id = kwargs.get('snapshot_id', None)
|
||||
volume.availability_zone = kwargs.get('availability_zone', 'nova')
|
||||
return volume
|
||||
|
||||
@mock.patch.object(time, 'sleep', mock.Mock())
|
||||
def test_swap_volume(self, mock_glance, mock_cinder,
|
||||
mock_neutron, mock_nova):
|
||||
nova_util = nova_helper.NovaHelper()
|
||||
server = self.fake_server(self.instance_uuid)
|
||||
self.fake_nova_find_list(nova_util, find=server, list=server)
|
||||
|
||||
old_volume = self.fake_volume(
|
||||
status='in-use', attachments=[{'server_id': self.instance_uuid}])
|
||||
new_volume = self.fake_volume(
|
||||
id=utils.generate_uuid(), status='in-use')
|
||||
|
||||
result = nova_util.swap_volume(old_volume, new_volume)
|
||||
self.assertTrue(result)
|
||||
|
||||
@@ -32,7 +32,7 @@ class TestListOpts(base.TestCase):
|
||||
'watcher_applier', 'watcher_planner', 'nova_client',
|
||||
'glance_client', 'gnocchi_client', 'cinder_client',
|
||||
'ceilometer_client', 'monasca_client', 'ironic_client',
|
||||
'neutron_client', 'watcher_clients_auth']
|
||||
'neutron_client', 'watcher_clients_auth', 'collector']
|
||||
self.opt_sections = list(dict(opts.list_opts()).keys())
|
||||
|
||||
def test_run_list_opts(self):
|
||||
|
||||
293
watcher/tests/db/test_action_description.py
Normal file
293
watcher/tests/db/test_action_description.py
Normal file
@@ -0,0 +1,293 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 ZTE
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Tests for manipulating ActionDescription via the DB API"""
|
||||
|
||||
import freezegun
|
||||
|
||||
from watcher.common import exception
|
||||
from watcher.tests.db import base
|
||||
from watcher.tests.db import utils
|
||||
|
||||
|
||||
class TestDbActionDescriptionFilters(base.DbTestCase):
|
||||
|
||||
FAKE_OLDER_DATE = '2015-01-01T09:52:05.219414'
|
||||
FAKE_OLD_DATE = '2016-01-01T09:52:05.219414'
|
||||
FAKE_TODAY = '2017-02-24T09:52:05.219414'
|
||||
|
||||
def setUp(self):
|
||||
super(TestDbActionDescriptionFilters, self).setUp()
|
||||
self.context.show_deleted = True
|
||||
self._data_setup()
|
||||
|
||||
def _data_setup(self):
|
||||
action_desc1_type = "nop"
|
||||
action_desc2_type = "sleep"
|
||||
action_desc3_type = "resize"
|
||||
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.action_desc1 = utils.create_test_action_desc(
|
||||
id=1, action_type=action_desc1_type,
|
||||
description="description")
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.action_desc2 = utils.create_test_action_desc(
|
||||
id=2, action_type=action_desc2_type,
|
||||
description="description")
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.action_desc3 = utils.create_test_action_desc(
|
||||
id=3, action_type=action_desc3_type,
|
||||
description="description")
|
||||
|
||||
def _soft_delete_action_descs(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action_description(self.action_desc1.id)
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.soft_delete_action_description(self.action_desc2.id)
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.soft_delete_action_description(self.action_desc3.id)
|
||||
|
||||
def _update_action_descs(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.update_action_description(
|
||||
self.action_desc1.id, values={"description":
|
||||
"nop description"})
|
||||
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||
self.dbapi.update_action_description(
|
||||
self.action_desc2.id, values={"description":
|
||||
"sleep description"})
|
||||
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||
self.dbapi.update_action_description(
|
||||
self.action_desc3.id, values={"description":
|
||||
"resize description"})
|
||||
|
||||
def test_get_action_desc_list_filter_deleted_true(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action_description(self.action_desc1.id)
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'deleted': True})
|
||||
|
||||
self.assertEqual([self.action_desc1['action_type']],
|
||||
[r.action_type for r in res])
|
||||
|
||||
def test_get_action_desc_list_filter_deleted_false(self):
|
||||
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||
self.dbapi.soft_delete_action_description(self.action_desc1.id)
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'deleted': False})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc2['action_type'],
|
||||
self.action_desc3['action_type']]),
|
||||
set([r.action_type for r in res]))
|
||||
|
||||
def test_get_action_desc_list_filter_deleted_at_eq(self):
|
||||
self._soft_delete_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'deleted_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action_desc1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_desc_list_filter_deleted_at_lt(self):
|
||||
self._soft_delete_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'deleted_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc2['id'], self.action_desc3['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
def test_get_action_desc_list_filter_deleted_at_lte(self):
|
||||
self._soft_delete_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc2['id'], self.action_desc3['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
def test_get_action_desc_list_filter_deleted_at_gt(self):
|
||||
self._soft_delete_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action_desc1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_desc_list_filter_deleted_at_gte(self):
|
||||
self._soft_delete_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc1['id'], self.action_desc2['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
# created_at #
|
||||
|
||||
def test_get_action_desc_list_filter_created_at_eq(self):
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'created_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action_desc1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_desc_list_filter_created_at_lt(self):
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'created_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc2['id'], self.action_desc3['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
def test_get_action_desc_list_filter_created_at_lte(self):
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'created_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc2['id'], self.action_desc3['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
def test_get_action_desc_list_filter_created_at_gt(self):
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'created_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action_desc1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_desc_list_filter_created_at_gte(self):
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'created_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc1['id'], self.action_desc2['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
# updated_at #
|
||||
|
||||
def test_get_action_desc_list_filter_updated_at_eq(self):
|
||||
self._update_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'updated_at__eq': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual([self.action_desc1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_desc_list_filter_updated_at_lt(self):
|
||||
self._update_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'updated_at__lt': self.FAKE_TODAY})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc2['id'], self.action_desc3['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
def test_get_action_desc_list_filter_updated_at_lte(self):
|
||||
self._update_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc2['id'], self.action_desc3['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
def test_get_action_desc_list_filter_updated_at_gt(self):
|
||||
self._update_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual([self.action_desc1['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_desc_list_filter_updated_at_gte(self):
|
||||
self._update_action_descs()
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE})
|
||||
|
||||
self.assertEqual(
|
||||
set([self.action_desc1['id'], self.action_desc2['id']]),
|
||||
set([r.id for r in res]))
|
||||
|
||||
|
||||
class DbActionDescriptionTestCase(base.DbTestCase):
|
||||
|
||||
def _create_test_action_desc(self, **kwargs):
|
||||
action_desc = utils.get_test_action_desc(**kwargs)
|
||||
self.dbapi.create_action_description(action_desc)
|
||||
return action_desc
|
||||
|
||||
def test_get_action_desc_list(self):
|
||||
ids = []
|
||||
for i in range(1, 4):
|
||||
action_desc = utils.create_test_action_desc(
|
||||
id=i,
|
||||
action_type="action_%s" % i,
|
||||
description="description_{0}".format(i))
|
||||
ids.append(action_desc['id'])
|
||||
action_descs = self.dbapi.get_action_description_list(self.context)
|
||||
action_desc_ids = [s.id for s in action_descs]
|
||||
self.assertEqual(sorted(ids), sorted(action_desc_ids))
|
||||
|
||||
def test_get_action_desc_list_with_filters(self):
|
||||
action_desc1 = self._create_test_action_desc(
|
||||
id=1,
|
||||
action_type="action_1",
|
||||
description="description_1",
|
||||
)
|
||||
action_desc2 = self._create_test_action_desc(
|
||||
id=2,
|
||||
action_type="action_2",
|
||||
description="description_2",
|
||||
)
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'action_type': 'action_1'})
|
||||
self.assertEqual([action_desc1['id']], [r.id for r in res])
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context, filters={'action_type': 'action_3'})
|
||||
self.assertEqual([], [r.id for r in res])
|
||||
|
||||
res = self.dbapi.get_action_description_list(
|
||||
self.context,
|
||||
filters={'action_type': 'action_2'})
|
||||
self.assertEqual([action_desc2['id']], [r.id for r in res])
|
||||
|
||||
def test_get_action_desc_by_type(self):
|
||||
created_action_desc = self._create_test_action_desc()
|
||||
action_desc = self.dbapi.get_action_description_by_type(
|
||||
self.context, created_action_desc['action_type'])
|
||||
self.assertEqual(action_desc.action_type,
|
||||
created_action_desc['action_type'])
|
||||
|
||||
def test_get_action_desc_that_does_not_exist(self):
|
||||
self.assertRaises(exception.ActionDescriptionNotFound,
|
||||
self.dbapi.get_action_description_by_id,
|
||||
self.context, 404)
|
||||
|
||||
def test_update_action_desc(self):
|
||||
action_desc = self._create_test_action_desc()
|
||||
res = self.dbapi.update_action_description(
|
||||
action_desc['id'], {'description': 'description_test'})
|
||||
self.assertEqual('description_test', res.description)
|
||||
@@ -331,3 +331,26 @@ def create_test_efficacy_indicator(**kwargs):
|
||||
del efficacy_indicator['id']
|
||||
dbapi = db_api.get_instance()
|
||||
return dbapi.create_efficacy_indicator(efficacy_indicator)
|
||||
|
||||
|
||||
def get_test_action_desc(**kwargs):
|
||||
return {
|
||||
'id': kwargs.get('id', 1),
|
||||
'action_type': kwargs.get('action_type', 'nop'),
|
||||
'description': kwargs.get('description', 'Logging a NOP message'),
|
||||
'created_at': kwargs.get('created_at'),
|
||||
'updated_at': kwargs.get('updated_at'),
|
||||
'deleted_at': kwargs.get('deleted_at'),
|
||||
}
|
||||
|
||||
|
||||
def create_test_action_desc(**kwargs):
|
||||
"""Create test action description entry in DB and return ActionDescription.
|
||||
|
||||
Function to be used to create test ActionDescription objects in the DB.
|
||||
:param kwargs: kwargs with overriding values for service's attributes.
|
||||
:returns: Test ActionDescription DB object.
|
||||
"""
|
||||
action_desc = get_test_action_desc(**kwargs)
|
||||
dbapi = db_api.get_instance()
|
||||
return dbapi.create_action_description(action_desc)
|
||||
|
||||
@@ -361,3 +361,45 @@ class TestContinuousAuditHandler(base.DbTestCase):
|
||||
self.assertTrue(is_inactive)
|
||||
is_inactive = audit_handler._is_audit_inactive(self.audits[0])
|
||||
self.assertTrue(is_inactive)
|
||||
|
||||
@mock.patch.object(objects.service.Service, 'list')
|
||||
@mock.patch.object(sq_api, 'get_engine')
|
||||
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
|
||||
@mock.patch.object(objects.audit.AuditStateTransitionManager,
|
||||
'is_inactive')
|
||||
@mock.patch.object(continuous.ContinuousAuditHandler, 'execute')
|
||||
def test_execute_audit_with_interval_no_job(
|
||||
self,
|
||||
m_execute,
|
||||
m_is_inactive,
|
||||
m_get_jobs,
|
||||
m_get_engine,
|
||||
m_service):
|
||||
audit_handler = continuous.ContinuousAuditHandler()
|
||||
self.audits[0].next_run_time = (datetime.datetime.now() -
|
||||
datetime.timedelta(seconds=1800))
|
||||
m_is_inactive.return_value = True
|
||||
m_get_jobs.return_value = []
|
||||
|
||||
audit_handler.execute_audit(self.audits[0], self.context)
|
||||
self.assertIsNotNone(self.audits[0].next_run_time)
|
||||
|
||||
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
|
||||
def test_is_audit_inactive(self, mock_jobs):
|
||||
audit_handler = continuous.ContinuousAuditHandler()
|
||||
mock_jobs.return_value = mock.MagicMock()
|
||||
audit_handler._scheduler = mock.MagicMock()
|
||||
|
||||
ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit',
|
||||
func=audit_handler.execute_audit,
|
||||
args=(self.audits[0], mock.MagicMock()),
|
||||
kwargs={}),
|
||||
]
|
||||
|
||||
audit_handler.update_audit_state(self.audits[1],
|
||||
objects.audit.State.CANCELLED)
|
||||
mock_jobs.return_value = ap_jobs
|
||||
is_inactive = audit_handler._is_audit_inactive(self.audits[1])
|
||||
self.assertTrue(is_inactive)
|
||||
is_inactive = audit_handler._is_audit_inactive(self.audits[0])
|
||||
self.assertFalse(is_inactive)
|
||||
|
||||
@@ -54,6 +54,8 @@ class FakeCeilometerMetrics(object):
|
||||
result = 0.0
|
||||
if meter_name == "cpu_util":
|
||||
result = self.get_average_usage_instance_cpu_wb(resource_id)
|
||||
elif meter_name == "memory.resident":
|
||||
result = self.get_average_usage_instance_memory_wb(resource_id)
|
||||
return result
|
||||
|
||||
def mock_get_statistics_nn(self, resource_id, meter_name, period,
|
||||
@@ -174,6 +176,8 @@ class FakeCeilometerMetrics(object):
|
||||
|
||||
# node 3
|
||||
mock['Node_6_hostname_6'] = 8
|
||||
# This node doesn't send metrics
|
||||
mock['LOST_NODE_hostname_7'] = None
|
||||
mock['Node_19_hostname_19'] = 10
|
||||
# node 4
|
||||
mock['INSTANCE_7_hostname_7'] = 4
|
||||
@@ -188,7 +192,10 @@ class FakeCeilometerMetrics(object):
|
||||
# mock[uuid] = random.randint(1, 4)
|
||||
mock[uuid] = 8
|
||||
|
||||
return float(mock[str(uuid)])
|
||||
if mock[str(uuid)] is not None:
|
||||
return float(mock[str(uuid)])
|
||||
else:
|
||||
return mock[str(uuid)]
|
||||
|
||||
@staticmethod
|
||||
def get_average_usage_instance_cpu_wb(uuid):
|
||||
@@ -211,6 +218,20 @@ class FakeCeilometerMetrics(object):
|
||||
mock['INSTANCE_4'] = 10
|
||||
return float(mock[str(uuid)])
|
||||
|
||||
@staticmethod
|
||||
def get_average_usage_instance_memory_wb(uuid):
|
||||
mock = {}
|
||||
# node 0
|
||||
mock['INSTANCE_1'] = 30
|
||||
# node 1
|
||||
mock['INSTANCE_3'] = 12
|
||||
mock['INSTANCE_4'] = 12
|
||||
if uuid not in mock.keys():
|
||||
# mock[uuid] = random.randint(1, 4)
|
||||
mock[uuid] = 12
|
||||
|
||||
return mock[str(uuid)]
|
||||
|
||||
@staticmethod
|
||||
def get_average_usage_instance_cpu(uuid):
|
||||
"""The last VM CPU usage values to average
|
||||
@@ -239,6 +260,8 @@ class FakeCeilometerMetrics(object):
|
||||
|
||||
# node 4
|
||||
mock['INSTANCE_7'] = 4
|
||||
|
||||
mock['LOST_INSTANCE'] = None
|
||||
if uuid not in mock.keys():
|
||||
# mock[uuid] = random.randint(1, 4)
|
||||
mock[uuid] = 8
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
<ModelRoot>
|
||||
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_0" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_1" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ComputeNode>
|
||||
<ComputeNode human_id="" uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_2" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ComputeNode>
|
||||
<ComputeNode human_id="" uuid="Node_2" status="enabled" state="up" id="2" hostname="hostname_2" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_3" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_4" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_5" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ComputeNode>
|
||||
<ComputeNode human_id="" uuid="Node_3" status="enabled" state="up" id="3" hostname="hostname_3" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_6" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ComputeNode>
|
||||
<ComputeNode human_id="" uuid="Node_4" status="enabled" state="up" id="4" hostname="hostname_4" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_7" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ComputeNode>
|
||||
<ComputeNode human_id="" uuid="LOST_NODE" status="enabled" state="up" id="1" hostname="hostname_7" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance state="active" human_id="" uuid="LOST_INSTANCE" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ComputeNode>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_10" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_11" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_12" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_13" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_14" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_15" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_16" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_17" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_18" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_19" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_20" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_21" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_22" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_23" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_24" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_25" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_26" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_27" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_28" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_29" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_30" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_31" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_32" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_33" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_34" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_8" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_9" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ModelRoot>
|
||||
@@ -1,10 +1,10 @@
|
||||
<ModelRoot>
|
||||
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance state="active" human_id="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_1" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" vcpus="10" disk="20" disk_capacity="20" memory="32" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_1" vcpus="10" disk="20" disk_capacity="20" memory="32" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ComputeNode>
|
||||
<ComputeNode human_id="" uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" vcpus="40" disk="250" disk_capacity="250" memory="132">
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_3" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_4" vcpus="10" disk="20" disk_capacity="20" memory="2" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_3" vcpus="10" disk="20" disk_capacity="20" memory="32" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
<Instance state="active" human_id="" uuid="INSTANCE_4" vcpus="10" disk="20" disk_capacity="20" memory="32" metadata='{"optimize": true,"top": "floor", "nested": {"x": "y"}}'/>
|
||||
</ComputeNode>
|
||||
</ModelRoot>
|
||||
|
||||
@@ -87,7 +87,7 @@ class FakeCeilometerMetrics(object):
|
||||
return self.get_node_cpu_util(resource_id)
|
||||
elif meter_name == "cpu_util":
|
||||
return self.get_instance_cpu_util(resource_id)
|
||||
elif meter_name == "memory.usage":
|
||||
elif meter_name == "memory.resident":
|
||||
return self.get_instance_ram_util(resource_id)
|
||||
elif meter_name == "disk.root.size":
|
||||
return self.get_instance_disk_root_size(resource_id)
|
||||
@@ -169,7 +169,7 @@ class FakeGnocchiMetrics(object):
|
||||
return self.get_node_cpu_util(resource_id)
|
||||
elif metric == "cpu_util":
|
||||
return self.get_instance_cpu_util(resource_id)
|
||||
elif metric == "memory.usage":
|
||||
elif metric == "memory.resident":
|
||||
return self.get_instance_ram_util(resource_id)
|
||||
elif metric == "disk.root.size":
|
||||
return self.get_instance_disk_root_size(resource_id)
|
||||
|
||||
@@ -114,6 +114,9 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
def generate_scenario_1(self):
|
||||
return self.load_model('scenario_1.xml')
|
||||
|
||||
def generate_scenario_1_with_1_node_unavailable(self):
|
||||
return self.load_model('scenario_1_with_1_node_unavailable.xml')
|
||||
|
||||
def generate_scenario_3_with_2_nodes(self):
|
||||
return self.load_model('scenario_3_with_2_nodes.xml')
|
||||
|
||||
|
||||
@@ -50,6 +50,8 @@ class FakeGnocchiMetrics(object):
|
||||
result = 0.0
|
||||
if metric == "cpu_util":
|
||||
result = self.get_average_usage_instance_cpu_wb(resource_id)
|
||||
elif metric == "memory.resident":
|
||||
result = self.get_average_usage_instance_memory_wb(resource_id)
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
@@ -130,6 +132,8 @@ class FakeGnocchiMetrics(object):
|
||||
|
||||
# node 3
|
||||
mock['Node_6_hostname_6'] = 8
|
||||
# This node doesn't send metrics
|
||||
mock['LOST_NODE_hostname_7'] = None
|
||||
mock['Node_19_hostname_19'] = 10
|
||||
# node 4
|
||||
mock['INSTANCE_7_hostname_7'] = 4
|
||||
@@ -143,7 +147,10 @@ class FakeGnocchiMetrics(object):
|
||||
if uuid not in mock.keys():
|
||||
mock[uuid] = 8
|
||||
|
||||
return float(mock[str(uuid)])
|
||||
if mock[str(uuid)] is not None:
|
||||
return float(mock[str(uuid)])
|
||||
else:
|
||||
return mock[str(uuid)]
|
||||
|
||||
@staticmethod
|
||||
def get_average_usage_instance_cpu(uuid):
|
||||
@@ -170,6 +177,8 @@ class FakeGnocchiMetrics(object):
|
||||
|
||||
# node 4
|
||||
mock['INSTANCE_7'] = 4
|
||||
|
||||
mock['LOST_INSTANCE'] = None
|
||||
if uuid not in mock.keys():
|
||||
mock[uuid] = 8
|
||||
|
||||
@@ -242,3 +251,17 @@ class FakeGnocchiMetrics(object):
|
||||
mock['INSTANCE_3'] = 20
|
||||
mock['INSTANCE_4'] = 10
|
||||
return float(mock[str(uuid)])
|
||||
|
||||
@staticmethod
|
||||
def get_average_usage_instance_memory_wb(uuid):
|
||||
mock = {}
|
||||
# node 0
|
||||
mock['INSTANCE_1'] = 30
|
||||
# node 1
|
||||
mock['INSTANCE_3'] = 12
|
||||
mock['INSTANCE_4'] = 12
|
||||
if uuid not in mock.keys():
|
||||
# mock[uuid] = random.randint(1, 4)
|
||||
mock[uuid] = 12
|
||||
|
||||
return mock[str(uuid)]
|
||||
|
||||
@@ -32,20 +32,20 @@ class TestDefaultScope(base.TestCase):
|
||||
super(TestDefaultScope, self).setUp()
|
||||
self.fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list')
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_service_list')
|
||||
def test_get_scoped_model_with_zones_and_instances(self, mock_zone_list):
|
||||
cluster = self.fake_cluster.generate_scenario_1()
|
||||
audit_scope = fake_scopes.fake_scope_1
|
||||
mock_zone_list.return_value = [
|
||||
mock.Mock(zoneName='AZ{0}'.format(i),
|
||||
hosts={'Node_{0}'.format(i): {}})
|
||||
mock.Mock(zone='AZ{0}'.format(i),
|
||||
host={'Node_{0}'.format(i): {}})
|
||||
for i in range(2)]
|
||||
model = default.DefaultScope(audit_scope, mock.Mock(),
|
||||
osc=mock.Mock()).get_scoped_model(cluster)
|
||||
expected_edges = [('INSTANCE_2', 'Node_1')]
|
||||
self.assertEqual(sorted(expected_edges), sorted(model.edges()))
|
||||
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list')
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_service_list')
|
||||
def test_get_scoped_model_without_scope(self, mock_zone_list):
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
default.DefaultScope([], mock.Mock(),
|
||||
@@ -125,26 +125,26 @@ class TestDefaultScope(base.TestCase):
|
||||
[{'name': 'HA_1'}, {'id': 0}], allowed_nodes)
|
||||
self.assertEqual(['Node_0', 'Node_1'], allowed_nodes)
|
||||
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list')
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_service_list')
|
||||
def test_collect_zones(self, mock_zone_list):
|
||||
allowed_nodes = []
|
||||
mock_zone_list.return_value = [
|
||||
mock.Mock(zoneName="AZ{0}".format(i + 1),
|
||||
hosts={'Node_{0}'.format(2 * i): 1,
|
||||
'Node_{0}'.format(2 * i + 1): 2})
|
||||
mock.Mock(zone="AZ{0}".format(i + 1),
|
||||
host={'Node_{0}'.format(2 * i): 1,
|
||||
'Node_{0}'.format(2 * i + 1): 2})
|
||||
for i in range(2)]
|
||||
default.DefaultScope([{'availability_zones': [{'name': "AZ1"}]}],
|
||||
mock.Mock(), osc=mock.Mock())._collect_zones(
|
||||
[{'name': "AZ1"}], allowed_nodes)
|
||||
self.assertEqual(['Node_0', 'Node_1'], sorted(allowed_nodes))
|
||||
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list')
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_service_list')
|
||||
def test_zones_wildcard_is_used(self, mock_zone_list):
|
||||
allowed_nodes = []
|
||||
mock_zone_list.return_value = [
|
||||
mock.Mock(zoneName="AZ{0}".format(i + 1),
|
||||
hosts={'Node_{0}'.format(2 * i): 1,
|
||||
'Node_{0}'.format(2 * i + 1): 2})
|
||||
mock.Mock(zone="AZ{0}".format(i + 1),
|
||||
host={'Node_{0}'.format(2 * i): 1,
|
||||
'Node_{0}'.format(2 * i + 1): 2})
|
||||
for i in range(2)]
|
||||
default.DefaultScope([{'availability_zones': [{'name': "*"}]}],
|
||||
mock.Mock(), osc=mock.Mock())._collect_zones(
|
||||
@@ -152,13 +152,13 @@ class TestDefaultScope(base.TestCase):
|
||||
self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'],
|
||||
sorted(allowed_nodes))
|
||||
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list')
|
||||
@mock.patch.object(nova_helper.NovaHelper, 'get_service_list')
|
||||
def test_zones_wildcard_with_other_ids(self, mock_zone_list):
|
||||
allowed_nodes = []
|
||||
mock_zone_list.return_value = [
|
||||
mock.Mock(zoneName="AZ{0}".format(i + 1),
|
||||
hosts={'Node_{0}'.format(2 * i): 1,
|
||||
'Node_{0}'.format(2 * i + 1): 2})
|
||||
mock.Mock(zone="AZ{0}".format(i + 1),
|
||||
host={'Node_{0}'.format(2 * i): 1,
|
||||
'Node_{0}'.format(2 * i + 1): 2})
|
||||
for i in range(2)]
|
||||
scope_handler = default.DefaultScope(
|
||||
[{'availability_zones': [{'name': "*"}, {'name': 'AZ1'}]}],
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 b<>com
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from watcher.common import utils
|
||||
from watcher.decision_engine.strategy import strategies
|
||||
from watcher.tests import base
|
||||
|
||||
|
||||
class TestActuator(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestActuator, self).setUp()
|
||||
# fake cluster
|
||||
self.strategy = strategies.Actuator(config=mock.Mock())
|
||||
|
||||
def test_actuator_strategy(self):
|
||||
fake_action = {"action_type": "TEST", "input_parameters": {"a": "b"}}
|
||||
self.strategy.input_parameters = utils.Struct(
|
||||
{"actions": [fake_action]})
|
||||
solution = self.strategy.execute()
|
||||
self.assertEqual(1, len(solution.actions))
|
||||
self.assertEqual([fake_action], solution.actions)
|
||||
@@ -0,0 +1,237 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 ZTE
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import mock
|
||||
|
||||
from watcher.common import clients
|
||||
from watcher.common import utils
|
||||
from watcher.decision_engine.strategy import strategies
|
||||
from watcher.tests import base
|
||||
from watcher.tests.decision_engine.model import faker_cluster_and_metrics
|
||||
|
||||
|
||||
class TestSavingEnergy(base.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestSavingEnergy, self).setUp()
|
||||
|
||||
mock_node1 = mock.Mock()
|
||||
mock_node2 = mock.Mock()
|
||||
mock_node1.to_dict.return_value = {
|
||||
'uuid': '922d4762-0bc5-4b30-9cb9-48ab644dd861'}
|
||||
mock_node2.to_dict.return_value = {
|
||||
'uuid': '922d4762-0bc5-4b30-9cb9-48ab644dd862'}
|
||||
self.fake_nodes = [mock_node1, mock_node2]
|
||||
|
||||
# fake cluster
|
||||
self.fake_cluster = faker_cluster_and_metrics.FakerModelCollector()
|
||||
|
||||
p_model = mock.patch.object(
|
||||
strategies.SavingEnergy, "compute_model",
|
||||
new_callable=mock.PropertyMock)
|
||||
self.m_model = p_model.start()
|
||||
self.addCleanup(p_model.stop)
|
||||
|
||||
p_ironic = mock.patch.object(
|
||||
clients.OpenStackClients, 'ironic')
|
||||
self.m_ironic = p_ironic.start()
|
||||
self.addCleanup(p_ironic.stop)
|
||||
|
||||
p_nova = mock.patch.object(
|
||||
clients.OpenStackClients, 'nova')
|
||||
self.m_nova = p_nova.start()
|
||||
self.addCleanup(p_nova.stop)
|
||||
|
||||
p_model = mock.patch.object(
|
||||
strategies.SavingEnergy, "compute_model",
|
||||
new_callable=mock.PropertyMock)
|
||||
self.m_model = p_model.start()
|
||||
self.addCleanup(p_model.stop)
|
||||
|
||||
p_audit_scope = mock.patch.object(
|
||||
strategies.SavingEnergy, "audit_scope",
|
||||
new_callable=mock.PropertyMock
|
||||
)
|
||||
self.m_audit_scope = p_audit_scope.start()
|
||||
self.addCleanup(p_audit_scope.stop)
|
||||
|
||||
self.m_audit_scope.return_value = mock.Mock()
|
||||
self.m_ironic.node.list.return_value = self.fake_nodes
|
||||
|
||||
self.strategy = strategies.SavingEnergy(
|
||||
config=mock.Mock())
|
||||
self.strategy.input_parameters = utils.Struct()
|
||||
self.strategy.input_parameters.update(
|
||||
{'free_used_percent': 10.0,
|
||||
'min_free_hosts_num': 1})
|
||||
self.strategy.free_used_percent = 10.0
|
||||
self.strategy.min_free_hosts_num = 1
|
||||
self.strategy._ironic_client = self.m_ironic
|
||||
self.strategy._nova_client = self.m_nova
|
||||
|
||||
def test_get_hosts_pool_with_vms_node_pool(self):
|
||||
mock_node1 = mock.Mock()
|
||||
mock_node2 = mock.Mock()
|
||||
mock_node1.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 1},
|
||||
'power_state': 'power on'}
|
||||
mock_node2.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 2},
|
||||
'power_state': 'power off'}
|
||||
self.m_ironic.node.get.side_effect = [mock_node1, mock_node2]
|
||||
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 2, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 2, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
self.m_model.return_value = model
|
||||
self.strategy.get_hosts_pool()
|
||||
|
||||
self.assertEqual(len(self.strategy.with_vms_node_pool), 2)
|
||||
self.assertEqual(len(self.strategy.free_poweron_node_pool), 0)
|
||||
self.assertEqual(len(self.strategy.free_poweroff_node_pool), 0)
|
||||
|
||||
def test_get_hosts_pool_free_poweron_node_pool(self):
|
||||
mock_node1 = mock.Mock()
|
||||
mock_node2 = mock.Mock()
|
||||
mock_node1.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 1},
|
||||
'power_state': 'power on'}
|
||||
mock_node2.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 2},
|
||||
'power_state': 'power on'}
|
||||
self.m_ironic.node.get.side_effect = [mock_node1, mock_node2]
|
||||
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
self.m_model.return_value = model
|
||||
self.strategy.get_hosts_pool()
|
||||
|
||||
self.assertEqual(len(self.strategy.with_vms_node_pool), 0)
|
||||
self.assertEqual(len(self.strategy.free_poweron_node_pool), 2)
|
||||
self.assertEqual(len(self.strategy.free_poweroff_node_pool), 0)
|
||||
|
||||
def test_get_hosts_pool_free_poweroff_node_pool(self):
|
||||
mock_node1 = mock.Mock()
|
||||
mock_node2 = mock.Mock()
|
||||
mock_node1.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 1},
|
||||
'power_state': 'power off'}
|
||||
mock_node2.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 2},
|
||||
'power_state': 'power off'}
|
||||
self.m_ironic.node.get.side_effect = [mock_node1, mock_node2]
|
||||
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
self.m_model.return_value = model
|
||||
self.strategy.get_hosts_pool()
|
||||
|
||||
self.assertEqual(len(self.strategy.with_vms_node_pool), 0)
|
||||
self.assertEqual(len(self.strategy.free_poweron_node_pool), 0)
|
||||
self.assertEqual(len(self.strategy.free_poweroff_node_pool), 2)
|
||||
|
||||
def test_get_hosts_pool_with_node_out_model(self):
|
||||
mock_node1 = mock.Mock()
|
||||
mock_node2 = mock.Mock()
|
||||
mock_node1.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 1},
|
||||
'power_state': 'power off'}
|
||||
mock_node2.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 2},
|
||||
'power_state': 'power off'}
|
||||
self.m_ironic.node.get.side_effect = [mock_node1, mock_node2]
|
||||
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_10'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
self.m_model.return_value = model
|
||||
self.strategy.get_hosts_pool()
|
||||
|
||||
self.assertEqual(len(self.strategy.with_vms_node_pool), 0)
|
||||
self.assertEqual(len(self.strategy.free_poweron_node_pool), 0)
|
||||
self.assertEqual(len(self.strategy.free_poweroff_node_pool), 1)
|
||||
|
||||
def test_save_energy_poweron(self):
|
||||
self.strategy.free_poweroff_node_pool = [
|
||||
'922d4762-0bc5-4b30-9cb9-48ab644dd861',
|
||||
'922d4762-0bc5-4b30-9cb9-48ab644dd862'
|
||||
]
|
||||
self.strategy.save_energy()
|
||||
self.assertEqual(len(self.strategy.solution.actions), 1)
|
||||
action = self.strategy.solution.actions[0]
|
||||
self.assertEqual(action.get('input_parameters').get('state'), 'on')
|
||||
|
||||
def test_save_energy_poweroff(self):
|
||||
self.strategy.free_poweron_node_pool = [
|
||||
'922d4762-0bc5-4b30-9cb9-48ab644dd861',
|
||||
'922d4762-0bc5-4b30-9cb9-48ab644dd862'
|
||||
]
|
||||
self.strategy.save_energy()
|
||||
self.assertEqual(len(self.strategy.solution.actions), 1)
|
||||
action = self.strategy.solution.actions[0]
|
||||
self.assertEqual(action.get('input_parameters').get('state'), 'off')
|
||||
|
||||
def test_execute(self):
|
||||
mock_node1 = mock.Mock()
|
||||
mock_node2 = mock.Mock()
|
||||
mock_node1.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 1},
|
||||
'power_state': 'power on'}
|
||||
mock_node2.to_dict.return_value = {
|
||||
'extra': {'compute_node_id': 2},
|
||||
'power_state': 'power on'}
|
||||
self.m_ironic.node.get.side_effect = [mock_node1, mock_node2]
|
||||
|
||||
mock_hyper1 = mock.Mock()
|
||||
mock_hyper2 = mock.Mock()
|
||||
mock_hyper1.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_0'}, 'state': 'up'}
|
||||
mock_hyper2.to_dict.return_value = {
|
||||
'running_vms': 0, 'service': {'host': 'Node_1'}, 'state': 'up'}
|
||||
self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2]
|
||||
|
||||
model = self.fake_cluster.generate_scenario_1()
|
||||
self.m_model.return_value = model
|
||||
|
||||
solution = self.strategy.execute()
|
||||
self.assertEqual(len(solution.actions), 1)
|
||||
@@ -74,10 +74,12 @@ class TestWorkloadBalance(base.TestCase):
|
||||
self.strategy = strategies.WorkloadBalance(
|
||||
config=mock.Mock(datasource=self.datasource))
|
||||
self.strategy.input_parameters = utils.Struct()
|
||||
self.strategy.input_parameters.update({'threshold': 25.0,
|
||||
self.strategy.input_parameters.update({'metrics': 'cpu_util',
|
||||
'threshold': 25.0,
|
||||
'period': 300})
|
||||
self.strategy.threshold = 25.0
|
||||
self.strategy._period = 300
|
||||
self.strategy._meter = "cpu_util"
|
||||
|
||||
def test_calc_used_resource(self):
|
||||
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
|
||||
@@ -86,21 +88,31 @@ class TestWorkloadBalance(base.TestCase):
|
||||
cores_used, mem_used, disk_used = (
|
||||
self.strategy.calculate_used_resource(node))
|
||||
|
||||
self.assertEqual((cores_used, mem_used, disk_used), (20, 4, 40))
|
||||
self.assertEqual((cores_used, mem_used, disk_used), (20, 64, 40))
|
||||
|
||||
def test_group_hosts_by_cpu_util(self):
|
||||
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
|
||||
self.m_model.return_value = model
|
||||
self.strategy.threshold = 30
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
|
||||
self.assertEqual(n1[0]['node'].uuid, 'Node_0')
|
||||
self.assertEqual(n2[0]['node'].uuid, 'Node_1')
|
||||
self.assertEqual(avg, 8.0)
|
||||
|
||||
def test_group_hosts_by_ram_util(self):
|
||||
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
|
||||
self.m_model.return_value = model
|
||||
self.strategy._meter = "memory.resident"
|
||||
self.strategy.threshold = 30
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
|
||||
self.assertEqual(n1[0]['node'].uuid, 'Node_0')
|
||||
self.assertEqual(n2[0]['node'].uuid, 'Node_1')
|
||||
self.assertEqual(avg, 33.0)
|
||||
|
||||
def test_choose_instance_to_migrate(self):
|
||||
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
|
||||
self.m_model.return_value = model
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
|
||||
instance_to_mig = self.strategy.choose_instance_to_migrate(
|
||||
n1, avg, w_map)
|
||||
self.assertEqual(instance_to_mig[0].uuid, 'Node_0')
|
||||
@@ -110,7 +122,7 @@ class TestWorkloadBalance(base.TestCase):
|
||||
def test_choose_instance_notfound(self):
|
||||
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
|
||||
self.m_model.return_value = model
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
|
||||
instances = model.get_all_instances()
|
||||
[model.remove_instance(inst) for inst in instances.values()]
|
||||
instance_to_mig = self.strategy.choose_instance_to_migrate(
|
||||
@@ -122,7 +134,7 @@ class TestWorkloadBalance(base.TestCase):
|
||||
self.m_model.return_value = model
|
||||
self.strategy.datasource = mock.MagicMock(
|
||||
statistic_aggregation=self.fake_metrics.mock_get_statistics_wb)
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util()
|
||||
n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util()
|
||||
instance_to_mig = self.strategy.choose_instance_to_migrate(
|
||||
n1, avg, w_map)
|
||||
dest_hosts = self.strategy.filter_destination_hosts(
|
||||
@@ -202,7 +214,7 @@ class TestWorkloadBalance(base.TestCase):
|
||||
m_gnocchi.statistic_aggregation = mock.Mock(
|
||||
side_effect=self.fake_metrics.mock_get_statistics_wb)
|
||||
instance0 = model.get_instance_by_uuid("INSTANCE_0")
|
||||
self.strategy.group_hosts_by_cpu_util()
|
||||
self.strategy.group_hosts_by_cpu_or_ram_util()
|
||||
if self.strategy.config.datasource == "ceilometer":
|
||||
m_ceilometer.statistic_aggregation.assert_any_call(
|
||||
aggregate='avg', meter_name='cpu_util',
|
||||
|
||||
@@ -172,6 +172,12 @@ class TestWorkloadStabilization(base.TestCase):
|
||||
granularity=300, start_time=start_time, stop_time=stop_time,
|
||||
aggregation='mean')
|
||||
|
||||
def test_get_instance_load_with_no_metrics(self):
|
||||
model = self.fake_cluster.generate_scenario_1_with_1_node_unavailable()
|
||||
self.m_model.return_value = model
|
||||
lost_instance = model.get_instance_by_uuid("LOST_INSTANCE")
|
||||
self.assertIsNone(self.strategy.get_instance_load(lost_instance))
|
||||
|
||||
def test_normalize_hosts_load(self):
|
||||
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
|
||||
fake_hosts = {'Node_0': {'cpu_util': 0.07, 'memory.resident': 7},
|
||||
@@ -196,6 +202,12 @@ class TestWorkloadStabilization(base.TestCase):
|
||||
self.assertEqual(self.strategy.get_hosts_load(),
|
||||
self.hosts_load_assert)
|
||||
|
||||
def test_get_hosts_load_with_node_missing(self):
|
||||
self.m_model.return_value = \
|
||||
self.fake_cluster.generate_scenario_1_with_1_node_unavailable()
|
||||
self.assertEqual(self.hosts_load_assert,
|
||||
self.strategy.get_hosts_load())
|
||||
|
||||
def test_get_sd(self):
|
||||
test_cpu_sd = 0.296
|
||||
test_ram_sd = 9.3
|
||||
|
||||
@@ -659,3 +659,56 @@ class TestSyncer(base.DbTestCase):
|
||||
all(ap.state == objects.action_plan.State.CANCELLED
|
||||
for ap in modified_action_plans.values()))
|
||||
self.assertEqual(set([action_plan1.id]), set(unmodified_action_plans))
|
||||
|
||||
def test_sync_strategies_with_removed_goal(self):
|
||||
# ### Setup ### #
|
||||
|
||||
goal1 = objects.Goal(
|
||||
self.ctx, id=1, uuid=utils.generate_uuid(),
|
||||
name="dummy_1", display_name="Dummy 1",
|
||||
efficacy_specification=self.goal1_spec.serialize_indicators_specs()
|
||||
)
|
||||
goal2 = objects.Goal(
|
||||
self.ctx, id=2, uuid=utils.generate_uuid(),
|
||||
name="dummy_2", display_name="Dummy 2",
|
||||
efficacy_specification=self.goal2_spec.serialize_indicators_specs()
|
||||
)
|
||||
goal1.create()
|
||||
goal2.create()
|
||||
|
||||
strategy1 = objects.Strategy(
|
||||
self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(),
|
||||
display_name="Strategy 1", goal_id=goal1.id)
|
||||
strategy2 = objects.Strategy(
|
||||
self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(),
|
||||
display_name="Strategy 2", goal_id=goal2.id)
|
||||
strategy1.create()
|
||||
strategy2.create()
|
||||
# to be removed by some reasons
|
||||
goal2.soft_delete()
|
||||
|
||||
before_goals = objects.Goal.list(self.ctx)
|
||||
before_strategies = objects.Strategy.list(self.ctx)
|
||||
|
||||
# ### Action under test ### #
|
||||
|
||||
try:
|
||||
self.syncer.sync()
|
||||
except Exception as exc:
|
||||
self.fail(exc)
|
||||
|
||||
# ### Assertions ### #
|
||||
|
||||
after_goals = objects.Goal.list(self.ctx)
|
||||
after_strategies = objects.Strategy.list(self.ctx)
|
||||
|
||||
self.assertEqual(1, len(before_goals))
|
||||
self.assertEqual(2, len(before_strategies))
|
||||
self.assertEqual(2, len(after_goals))
|
||||
self.assertEqual(4, len(after_strategies))
|
||||
self.assertEqual(
|
||||
{"dummy_1", "dummy_2"},
|
||||
set([g.name for g in after_goals]))
|
||||
self.assertEqual(
|
||||
{"strategy_1", "strategy_2", "strategy_3", "strategy_4"},
|
||||
set([s.name for s in after_strategies]))
|
||||
|
||||
120
watcher/tests/objects/test_action_description.py
Normal file
120
watcher/tests/objects/test_action_description.py
Normal file
@@ -0,0 +1,120 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright 2017 ZTE
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
|
||||
import iso8601
|
||||
import mock
|
||||
|
||||
from watcher.db.sqlalchemy import api as db_api
|
||||
from watcher import objects
|
||||
from watcher.tests.db import base
|
||||
from watcher.tests.db import utils
|
||||
|
||||
|
||||
class TestActionDescriptionObject(base.DbTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestActionDescriptionObject, self).setUp()
|
||||
self.fake_action_desc = utils.get_test_action_desc(
|
||||
created_at=datetime.datetime.utcnow())
|
||||
|
||||
@mock.patch.object(db_api.Connection, 'get_action_description_by_id')
|
||||
def test_get_by_id(self, mock_get_action_desc):
|
||||
action_desc_id = self.fake_action_desc['id']
|
||||
mock_get_action_desc.return_value = self.fake_action_desc
|
||||
action_desc = objects.ActionDescription.get(
|
||||
self.context, action_desc_id)
|
||||
mock_get_action_desc.assert_called_once_with(
|
||||
self.context, action_desc_id)
|
||||
self.assertEqual(self.context, action_desc._context)
|
||||
|
||||
@mock.patch.object(db_api.Connection, 'get_action_description_list')
|
||||
def test_list(self, mock_get_list):
|
||||
mock_get_list.return_value = [self.fake_action_desc]
|
||||
action_desc = objects.ActionDescription.list(self.context)
|
||||
self.assertEqual(1, mock_get_list.call_count)
|
||||
self.assertEqual(1, len(action_desc))
|
||||
self.assertIsInstance(action_desc[0], objects.ActionDescription)
|
||||
self.assertEqual(self.context, action_desc[0]._context)
|
||||
|
||||
@mock.patch.object(db_api.Connection, 'create_action_description')
|
||||
def test_create(self, mock_create_action_desc):
|
||||
mock_create_action_desc.return_value = self.fake_action_desc
|
||||
action_desc = objects.ActionDescription(
|
||||
self.context, **self.fake_action_desc)
|
||||
|
||||
action_desc.create()
|
||||
expected_action_desc = self.fake_action_desc.copy()
|
||||
expected_action_desc['created_at'] = expected_action_desc[
|
||||
'created_at'].replace(tzinfo=iso8601.iso8601.Utc())
|
||||
|
||||
mock_create_action_desc.assert_called_once_with(expected_action_desc)
|
||||
self.assertEqual(self.context, action_desc._context)
|
||||
|
||||
@mock.patch.object(db_api.Connection, 'update_action_description')
|
||||
@mock.patch.object(db_api.Connection, 'get_action_description_by_id')
|
||||
def test_save(self, mock_get_action_desc, mock_update_action_desc):
|
||||
mock_get_action_desc.return_value = self.fake_action_desc
|
||||
fake_saved_action_desc = self.fake_action_desc.copy()
|
||||
fake_saved_action_desc['updated_at'] = datetime.datetime.utcnow()
|
||||
mock_update_action_desc.return_value = fake_saved_action_desc
|
||||
_id = self.fake_action_desc['id']
|
||||
action_desc = objects.ActionDescription.get(self.context, _id)
|
||||
action_desc.description = 'This is a test'
|
||||
action_desc.save()
|
||||
|
||||
mock_get_action_desc.assert_called_once_with(self.context, _id)
|
||||
mock_update_action_desc.assert_called_once_with(
|
||||
_id, {'description': 'This is a test'})
|
||||
self.assertEqual(self.context, action_desc._context)
|
||||
|
||||
@mock.patch.object(db_api.Connection, 'get_action_description_by_id')
|
||||
def test_refresh(self, mock_get_action_desc):
|
||||
returns = [dict(self.fake_action_desc, description="Test message1"),
|
||||
dict(self.fake_action_desc, description="Test message2")]
|
||||
mock_get_action_desc.side_effect = returns
|
||||
_id = self.fake_action_desc['id']
|
||||
expected = [mock.call(self.context, _id),
|
||||
mock.call(self.context, _id)]
|
||||
action_desc = objects.ActionDescription.get(self.context, _id)
|
||||
self.assertEqual("Test message1", action_desc.description)
|
||||
action_desc.refresh()
|
||||
self.assertEqual("Test message2", action_desc.description)
|
||||
self.assertEqual(expected, mock_get_action_desc.call_args_list)
|
||||
self.assertEqual(self.context, action_desc._context)
|
||||
|
||||
@mock.patch.object(db_api.Connection, 'soft_delete_action_description')
|
||||
@mock.patch.object(db_api.Connection, 'get_action_description_by_id')
|
||||
def test_soft_delete(self, mock_get_action_desc, mock_soft_delete):
|
||||
mock_get_action_desc.return_value = self.fake_action_desc
|
||||
fake_deleted_action_desc = self.fake_action_desc.copy()
|
||||
fake_deleted_action_desc['deleted_at'] = datetime.datetime.utcnow()
|
||||
mock_soft_delete.return_value = fake_deleted_action_desc
|
||||
|
||||
expected_action_desc = fake_deleted_action_desc.copy()
|
||||
expected_action_desc['created_at'] = expected_action_desc[
|
||||
'created_at'].replace(tzinfo=iso8601.iso8601.Utc())
|
||||
expected_action_desc['deleted_at'] = expected_action_desc[
|
||||
'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc())
|
||||
|
||||
_id = self.fake_action_desc['id']
|
||||
action_desc = objects.ActionDescription.get(self.context, _id)
|
||||
action_desc.soft_delete()
|
||||
mock_get_action_desc.assert_called_once_with(self.context, _id)
|
||||
mock_soft_delete.assert_called_once_with(_id)
|
||||
self.assertEqual(self.context, action_desc._context)
|
||||
self.assertEqual(expected_action_desc, action_desc.as_dict())
|
||||
@@ -419,6 +419,7 @@ expected_object_fingerprints = {
|
||||
'ScoringEngine': '1.0-4abbe833544000728e17bd9e83f97576',
|
||||
'Service': '1.0-4b35b99ada9677a882c9de2b30212f35',
|
||||
'MyObj': '1.5-23c516d1e842f365f694e688d34e47c3',
|
||||
'ActionDescription': '1.0-5761a3d16651046e7a0c357b57a6583e'
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -165,18 +165,19 @@ class BaseInfraOptimTest(test.BaseTestCase):
|
||||
|
||||
@classmethod
|
||||
def create_audit(cls, audit_template_uuid, audit_type='ONESHOT',
|
||||
state=None, interval=None):
|
||||
state=None, interval=None, parameters=None):
|
||||
"""Wrapper utility for creating a test audit
|
||||
|
||||
:param audit_template_uuid: Audit Template UUID this audit will use
|
||||
:param audit_type: Audit type (either ONESHOT or CONTINUOUS)
|
||||
:param state: Audit state (str)
|
||||
:param interval: Audit interval in seconds or cron syntax (str)
|
||||
:param parameters: list of execution parameters
|
||||
:return: A tuple with The HTTP response and its body
|
||||
"""
|
||||
resp, body = cls.client.create_audit(
|
||||
audit_template_uuid=audit_template_uuid, audit_type=audit_type,
|
||||
state=state, interval=interval)
|
||||
state=state, interval=interval, parameters=parameters)
|
||||
|
||||
cls.created_audits.add(body['uuid'])
|
||||
cls.created_action_plans_audit_uuids.add(body['uuid'])
|
||||
@@ -251,11 +252,6 @@ class BaseInfraOptimTest(test.BaseTestCase):
|
||||
|
||||
return resp
|
||||
|
||||
@classmethod
|
||||
def has_action_plan_finished(cls, action_plan_uuid):
|
||||
_, action_plan = cls.client.show_action_plan(action_plan_uuid)
|
||||
return action_plan.get('state') in cls.FINISHED_STATES
|
||||
|
||||
@classmethod
|
||||
def is_action_plan_idle(cls, action_plan_uuid):
|
||||
"""This guard makes sure your action plan is not running"""
|
||||
|
||||
@@ -70,42 +70,6 @@ class TestCreateDeleteExecuteActionPlan(base.BaseInfraOptimTest):
|
||||
self.assertRaises(exceptions.NotFound, self.client.show_action_plan,
|
||||
action_plan['uuid'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
def test_execute_dummy_action_plan(self):
|
||||
_, goal = self.client.show_goal("dummy")
|
||||
_, audit_template = self.create_audit_template(goal['uuid'])
|
||||
_, audit = self.create_audit(audit_template['uuid'])
|
||||
|
||||
self.assertTrue(test_utils.call_until_true(
|
||||
func=functools.partial(self.has_audit_finished, audit['uuid']),
|
||||
duration=30,
|
||||
sleep_for=.5
|
||||
))
|
||||
_, action_plans = self.client.list_action_plans(
|
||||
audit_uuid=audit['uuid'])
|
||||
action_plan = action_plans['action_plans'][0]
|
||||
|
||||
_, action_plan = self.client.show_action_plan(action_plan['uuid'])
|
||||
|
||||
if action_plan['state'] in ['SUPERSEDED', 'SUCCEEDED']:
|
||||
# This means the action plan is superseded so we cannot trigger it,
|
||||
# or it is empty.
|
||||
return
|
||||
|
||||
# Execute the action by changing its state to PENDING
|
||||
_, updated_ap = self.client.start_action_plan(action_plan['uuid'])
|
||||
|
||||
self.assertTrue(test_utils.call_until_true(
|
||||
func=functools.partial(
|
||||
self.has_action_plan_finished, action_plan['uuid']),
|
||||
duration=30,
|
||||
sleep_for=.5
|
||||
))
|
||||
_, finished_ap = self.client.show_action_plan(action_plan['uuid'])
|
||||
|
||||
self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
|
||||
self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED'))
|
||||
|
||||
|
||||
class TestShowListActionPlan(base.BaseInfraOptimTest):
|
||||
"""Tests for action_plan."""
|
||||
|
||||
@@ -24,6 +24,7 @@ from oslo_log import log
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
|
||||
from watcher_tempest_plugin import infra_optim_clients as clients
|
||||
from watcher_tempest_plugin.tests.scenario import manager
|
||||
@@ -75,6 +76,19 @@ class BaseInfraOptimScenarioTest(manager.ScenarioTest):
|
||||
LOG.error(msg)
|
||||
raise exceptions.InvalidConfiguration(msg)
|
||||
|
||||
@classmethod
|
||||
def _are_all_action_plans_finished(cls):
|
||||
_, action_plans = cls.client.list_action_plans()
|
||||
return all([ap['state'] in cls.FINISHED_STATES
|
||||
for ap in action_plans['action_plans']])
|
||||
|
||||
def wait_for_all_action_plans_to_finish(self):
|
||||
assert test_utils.call_until_true(
|
||||
func=self._are_all_action_plans_finished,
|
||||
duration=300,
|
||||
sleep_for=5
|
||||
)
|
||||
|
||||
# ### AUDIT TEMPLATES ### #
|
||||
|
||||
def create_audit_template(self, goal, name=None, description=None,
|
||||
@@ -111,18 +125,19 @@ class BaseInfraOptimScenarioTest(manager.ScenarioTest):
|
||||
# ### AUDITS ### #
|
||||
|
||||
def create_audit(self, audit_template_uuid, audit_type='ONESHOT',
|
||||
state=None, parameters=None):
|
||||
state=None, interval=None, parameters=None):
|
||||
"""Wrapper utility for creating a test audit
|
||||
|
||||
:param audit_template_uuid: Audit Template UUID this audit will use
|
||||
:param audit_type: Audit type (either ONESHOT or CONTINUOUS)
|
||||
:param state: Audit state
|
||||
:param parameters: Input parameters of the audit
|
||||
:param type: Audit type (either ONESHOT or CONTINUOUS)
|
||||
:param state: Audit state (str)
|
||||
:param interval: Audit interval in seconds (int)
|
||||
:param parameters: list of execution parameters
|
||||
:return: A tuple with The HTTP response and its body
|
||||
"""
|
||||
resp, body = self.client.create_audit(
|
||||
audit_template_uuid=audit_template_uuid, audit_type=audit_type,
|
||||
state=state, parameters=parameters)
|
||||
state=state, interval=interval, parameters=parameters)
|
||||
|
||||
self.addCleanup(self.delete_audit, audit_uuid=body["uuid"])
|
||||
return resp, body
|
||||
|
||||
340
watcher_tempest_plugin/tests/scenario/test_execute_actuator.py
Normal file
340
watcher_tempest_plugin/tests/scenario/test_execute_actuator.py
Normal file
@@ -0,0 +1,340 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2016 b<>com
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import collections
|
||||
import functools
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import test_utils
|
||||
|
||||
from watcher_tempest_plugin.tests.scenario import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class TestExecuteActionsViaActuator(base.BaseInfraOptimScenarioTest):
    """Scenario tests driving individual actions through the actuator strategy."""

    # Each (name, {"actions": [...]}) pair describes one action batch.  An
    # action may carry a "filling_function": the name of a method on this
    # class that computes its prerequisite input parameters at run time.
    scenarios = [
        ("nop", {"actions": [
            {"action_type": "nop",
             "input_parameters": {
                 "message": "hello World"}}]}),
        ("sleep", {"actions": [
            {"action_type": "sleep",
             "input_parameters": {
                 "duration": 1.0}}]}),
        ("change_nova_service_state", {"actions": [
            {"action_type": "change_nova_service_state",
             "input_parameters": {
                 "state": "enabled"},
             "filling_function":
                 "_prerequisite_param_for_"
                 "change_nova_service_state_action"}]}),
        ("resize", {"actions": [
            {"action_type": "resize",
             "filling_function": "_prerequisite_param_for_resize_action"}]}),
        ("migrate", {"actions": [
            {"action_type": "migrate",
             "input_parameters": {
                 "migration_type": "live"},
             "filling_function": "_prerequisite_param_for_migrate_action"},
            {"action_type": "migrate",
             "filling_function": "_prerequisite_param_for_migrate_action"}]})
    ]
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TestExecuteActionsViaActuator, cls).resource_setup()
|
||||
if CONF.compute.min_compute_nodes < 2:
|
||||
raise cls.skipException(
|
||||
"Less than 2 compute nodes, skipping multinode tests.")
|
||||
if not CONF.compute_feature_enabled.live_migration:
|
||||
raise cls.skipException("Live migration is not enabled")
|
||||
|
||||
cls.initial_compute_nodes_setup = cls.get_compute_nodes_setup()
|
||||
enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup
|
||||
if cn.get('status') == 'enabled']
|
||||
|
||||
cls.wait_for_compute_node_setup()
|
||||
|
||||
if len(enabled_compute_nodes) < 2:
|
||||
raise cls.skipException(
|
||||
"Less than 2 compute nodes are enabled, "
|
||||
"skipping multinode tests.")
|
||||
|
||||
@classmethod
|
||||
def get_compute_nodes_setup(cls):
|
||||
services_client = cls.mgr.services_client
|
||||
available_services = services_client.list_services()['services']
|
||||
|
||||
return [srv for srv in available_services
|
||||
if srv.get('binary') == 'nova-compute']
|
||||
|
||||
@classmethod
|
||||
def wait_for_compute_node_setup(cls):
|
||||
|
||||
def _are_compute_nodes_setup():
|
||||
try:
|
||||
hypervisors_client = cls.mgr.hypervisor_client
|
||||
hypervisors = hypervisors_client.list_hypervisors(
|
||||
detail=True)['hypervisors']
|
||||
available_hypervisors = set(
|
||||
hyp['hypervisor_hostname'] for hyp in hypervisors)
|
||||
available_services = set(
|
||||
service['host']
|
||||
for service in cls.get_compute_nodes_setup())
|
||||
|
||||
return (
|
||||
available_hypervisors == available_services and
|
||||
len(hypervisors) >= 2)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
assert test_utils.call_until_true(
|
||||
func=_are_compute_nodes_setup,
|
||||
duration=600,
|
||||
sleep_for=2
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def rollback_compute_nodes_status(cls):
|
||||
current_compute_nodes_setup = cls.get_compute_nodes_setup()
|
||||
for cn_setup in current_compute_nodes_setup:
|
||||
cn_hostname = cn_setup.get('host')
|
||||
matching_cns = [
|
||||
cns for cns in cls.initial_compute_nodes_setup
|
||||
if cns.get('host') == cn_hostname
|
||||
]
|
||||
initial_cn_setup = matching_cns[0] # Should return a single result
|
||||
if cn_setup.get('status') != initial_cn_setup.get('status'):
|
||||
if initial_cn_setup.get('status') == 'enabled':
|
||||
rollback_func = cls.mgr.services_client.enable_service
|
||||
else:
|
||||
rollback_func = cls.mgr.services_client.disable_service
|
||||
rollback_func(binary='nova-compute', host=cn_hostname)
|
||||
|
||||
def _create_one_instance_per_host(self):
|
||||
"""Create 1 instance per compute node
|
||||
|
||||
This goes up to the min_compute_nodes threshold so that things don't
|
||||
get crazy if you have 1000 compute nodes but set min to 3.
|
||||
"""
|
||||
host_client = self.mgr.hosts_client
|
||||
all_hosts = host_client.list_hosts()['hosts']
|
||||
compute_nodes = [x for x in all_hosts if x['service'] == 'compute']
|
||||
|
||||
created_servers = []
|
||||
for _ in compute_nodes[:CONF.compute.min_compute_nodes]:
|
||||
# by getting to active state here, this means this has
|
||||
# landed on the host in question.
|
||||
created_servers.append(
|
||||
self.create_server(image_id=CONF.compute.image_ref,
|
||||
wait_until='ACTIVE',
|
||||
clients=self.mgr))
|
||||
|
||||
return created_servers
|
||||
|
||||
def _get_flavors(self):
|
||||
return self.mgr.flavors_client.list_flavors()['flavors']
|
||||
|
||||
def _prerequisite_param_for_migrate_action(self):
|
||||
created_instances = self._create_one_instance_per_host()
|
||||
instance = created_instances[0]
|
||||
source_node = created_instances[0]["OS-EXT-SRV-ATTR:host"]
|
||||
destination_node = created_instances[-1]["OS-EXT-SRV-ATTR:host"]
|
||||
|
||||
parameters = {
|
||||
"resource_id": instance['id'],
|
||||
"migration_type": "live",
|
||||
"source_node": source_node,
|
||||
"destination_node": destination_node
|
||||
}
|
||||
|
||||
return parameters
|
||||
|
||||
def _prerequisite_param_for_resize_action(self):
|
||||
created_instances = self._create_one_instance_per_host()
|
||||
instance = created_instances[0]
|
||||
current_flavor_id = instance['flavor']['id']
|
||||
|
||||
flavors = self._get_flavors()
|
||||
new_flavors = [f for f in flavors if f['id'] != current_flavor_id]
|
||||
new_flavor = new_flavors[0]
|
||||
|
||||
parameters = {
|
||||
"resource_id": instance['id'],
|
||||
"flavor": new_flavor['name']
|
||||
}
|
||||
|
||||
return parameters
|
||||
|
||||
def _prerequisite_param_for_change_nova_service_state_action(self):
|
||||
enabled_compute_nodes = [cn for cn in
|
||||
self.initial_compute_nodes_setup
|
||||
if cn.get('status') == 'enabled']
|
||||
enabled_compute_node = enabled_compute_nodes[0]
|
||||
|
||||
parameters = {
|
||||
"resource_id": enabled_compute_node['host'],
|
||||
"state": "enabled"
|
||||
}
|
||||
|
||||
return parameters
|
||||
|
||||
def _fill_actions(self, actions):
|
||||
for action in actions:
|
||||
filling_function_name = action.pop('filling_function', None)
|
||||
|
||||
if filling_function_name is not None:
|
||||
filling_function = getattr(self, filling_function_name, None)
|
||||
|
||||
if filling_function is not None:
|
||||
parameters = filling_function()
|
||||
|
||||
resource_id = parameters.pop('resource_id', None)
|
||||
|
||||
if resource_id is not None:
|
||||
action['resource_id'] = resource_id
|
||||
|
||||
input_parameters = action.get('input_parameters', None)
|
||||
|
||||
if input_parameters is not None:
|
||||
parameters.update(input_parameters)
|
||||
input_parameters.update(parameters)
|
||||
else:
|
||||
action['input_parameters'] = parameters
|
||||
|
||||
def _execute_actions(self, actions):
|
||||
self.wait_for_all_action_plans_to_finish()
|
||||
|
||||
_, goal = self.client.show_goal("unclassified")
|
||||
_, strategy = self.client.show_strategy("actuator")
|
||||
_, audit_template = self.create_audit_template(
|
||||
goal['uuid'], strategy=strategy['uuid'])
|
||||
_, audit = self.create_audit(
|
||||
audit_template['uuid'], parameters={"actions": actions})
|
||||
|
||||
self.assertTrue(test_utils.call_until_true(
|
||||
func=functools.partial(self.has_audit_succeeded, audit['uuid']),
|
||||
duration=30,
|
||||
sleep_for=.5
|
||||
))
|
||||
_, action_plans = self.client.list_action_plans(
|
||||
audit_uuid=audit['uuid'])
|
||||
action_plan = action_plans['action_plans'][0]
|
||||
|
||||
_, action_plan = self.client.show_action_plan(action_plan['uuid'])
|
||||
|
||||
# Execute the action plan
|
||||
_, updated_ap = self.client.start_action_plan(action_plan['uuid'])
|
||||
|
||||
self.assertTrue(test_utils.call_until_true(
|
||||
func=functools.partial(
|
||||
self.has_action_plan_finished, action_plan['uuid']),
|
||||
duration=300,
|
||||
sleep_for=1
|
||||
))
|
||||
_, finished_ap = self.client.show_action_plan(action_plan['uuid'])
|
||||
_, action_list = self.client.list_actions(
|
||||
action_plan_uuid=finished_ap["uuid"])
|
||||
|
||||
self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
|
||||
self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED'))
|
||||
|
||||
expected_action_counter = collections.Counter(
|
||||
act['action_type'] for act in actions)
|
||||
action_counter = collections.Counter(
|
||||
act['action_type'] for act in action_list['actions'])
|
||||
|
||||
self.assertEqual(expected_action_counter, action_counter)
|
||||
|
||||
def test_execute_nop(self):
|
||||
self.addCleanup(self.rollback_compute_nodes_status)
|
||||
|
||||
actions = [{
|
||||
"action_type": "nop",
|
||||
"input_parameters": {"message": "hello World"}}]
|
||||
self._execute_actions(actions)
|
||||
|
||||
def test_execute_sleep(self):
|
||||
self.addCleanup(self.rollback_compute_nodes_status)
|
||||
|
||||
actions = [
|
||||
{"action_type": "sleep",
|
||||
"input_parameters": {"duration": 1.0}}
|
||||
]
|
||||
self._execute_actions(actions)
|
||||
|
||||
def test_execute_change_nova_service_state(self):
|
||||
self.addCleanup(self.rollback_compute_nodes_status)
|
||||
|
||||
enabled_compute_nodes = [
|
||||
cn for cn in self.initial_compute_nodes_setup
|
||||
if cn.get('status') == 'enabled']
|
||||
|
||||
enabled_compute_node = enabled_compute_nodes[0]
|
||||
actions = [
|
||||
{"action_type": "change_nova_service_state",
|
||||
"resource_id": enabled_compute_node['host'],
|
||||
"input_parameters": {"state": "enabled"}}
|
||||
]
|
||||
self._execute_actions(actions)
|
||||
|
||||
def test_execute_resize(self):
|
||||
self.addCleanup(self.rollback_compute_nodes_status)
|
||||
|
||||
created_instances = self._create_one_instance_per_host()
|
||||
instance = created_instances[0]
|
||||
current_flavor_id = instance['flavor']['id']
|
||||
|
||||
flavors = self._get_flavors()
|
||||
new_flavors = [f for f in flavors if f['id'] != current_flavor_id]
|
||||
new_flavor = new_flavors[0]
|
||||
|
||||
actions = [
|
||||
{"action_type": "resize",
|
||||
"resource_id": instance['id'],
|
||||
"input_parameters": {"flavor": new_flavor['name']}}
|
||||
]
|
||||
self._execute_actions(actions)
|
||||
|
||||
def test_execute_migrate(self):
|
||||
self.addCleanup(self.rollback_compute_nodes_status)
|
||||
|
||||
created_instances = self._create_one_instance_per_host()
|
||||
instance = created_instances[0]
|
||||
source_node = created_instances[0]["OS-EXT-SRV-ATTR:host"]
|
||||
destination_node = created_instances[-1]["OS-EXT-SRV-ATTR:host"]
|
||||
actions = [
|
||||
{"action_type": "migrate",
|
||||
"resource_id": instance['id'],
|
||||
"input_parameters": {
|
||||
"migration_type": "live",
|
||||
"source_node": source_node,
|
||||
"destination_node": destination_node}}
|
||||
]
|
||||
self._execute_actions(actions)
|
||||
|
||||
def test_execute_scenarios(self):
|
||||
self.addCleanup(self.rollback_compute_nodes_status)
|
||||
|
||||
for _, scenario in self.scenarios:
|
||||
actions = scenario['actions']
|
||||
self._fill_actions(actions)
|
||||
self._execute_actions(actions)
|
||||
Reference in New Issue
Block a user