Compare commits
34 Commits
0.29.0
...
newton-eol
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1c38637dff | ||
|
|
1eb2b517ef | ||
|
|
3e030618fa | ||
|
|
f823345424 | ||
|
|
19fdd1557e | ||
|
|
641989b424 | ||
|
|
8814c09087 | ||
|
|
eb4f46b703 | ||
|
|
2f33dd10c0 | ||
|
|
1a197ab801 | ||
|
|
eeb2788355 | ||
|
|
af7871831a | ||
|
|
fbd9411fd9 | ||
|
|
0a0f482f2d | ||
|
|
0873c26b17 | ||
|
|
af99b3f4eb | ||
|
|
4b20e991a1 | ||
|
|
e907cec90a | ||
|
|
9f814e6c15 | ||
|
|
5ac51efa69 | ||
|
|
0baec1cfc2 | ||
|
|
72e6564549 | ||
|
|
23092b6f84 | ||
|
|
0b276fb602 | ||
|
|
a684e70b61 | ||
|
|
414685ab53 | ||
|
|
2d8650f87a | ||
|
|
dd924dd9d5 | ||
|
|
dd5b5428db | ||
|
|
74989fe94e | ||
|
|
3b673fe9bd | ||
|
|
c1cbd9ebf4 | ||
|
|
add49f3fcb | ||
|
|
1f4a46ea5d |
@@ -2,3 +2,4 @@
|
|||||||
host=review.openstack.org
|
host=review.openstack.org
|
||||||
port=29418
|
port=29418
|
||||||
project=openstack/watcher.git
|
project=openstack/watcher.git
|
||||||
|
defaultbranch=stable/newton
|
||||||
|
|||||||
@@ -10,10 +10,8 @@ Watcher
|
|||||||
|
|
||||||
OpenStack Watcher provides a flexible and scalable resource optimization
|
OpenStack Watcher provides a flexible and scalable resource optimization
|
||||||
service for multi-tenant OpenStack-based clouds.
|
service for multi-tenant OpenStack-based clouds.
|
||||||
Watcher provides a complete optimization loop-including everything from a
|
Watcher provides a robust framework to realize a wide range of cloud
|
||||||
metrics receiver, complex event processor and profiler, optimization processor
|
optimization goals, including the reduction of data center
|
||||||
and an action plan applier. This provides a robust framework to realize a wide
|
|
||||||
range of cloud optimization goals, including the reduction of data center
|
|
||||||
operating costs, increased system performance via intelligent virtual machine
|
operating costs, increased system performance via intelligent virtual machine
|
||||||
migration, increased energy efficiency-and more!
|
migration, increased energy efficiency-and more!
|
||||||
|
|
||||||
|
|||||||
@@ -171,12 +171,12 @@ This component is responsible for computing a set of potential optimization
|
|||||||
:ref:`Actions <action_definition>` in order to fulfill
|
:ref:`Actions <action_definition>` in order to fulfill
|
||||||
the :ref:`Goal <goal_definition>` of an :ref:`Audit <audit_definition>`.
|
the :ref:`Goal <goal_definition>` of an :ref:`Audit <audit_definition>`.
|
||||||
|
|
||||||
It first reads the parameters of the :ref:`Audit <audit_definition>` from the
|
It first reads the parameters of the :ref:`Audit <audit_definition>` to know
|
||||||
associated :ref:`Audit Template <audit_template_definition>` and knows the
|
the :ref:`Goal <goal_definition>` to achieve.
|
||||||
:ref:`Goal <goal_definition>` to achieve.
|
|
||||||
|
|
||||||
It then selects the most appropriate :ref:`Strategy <strategy_definition>`
|
Unless specified, it then selects the most appropriate :ref:`strategy
|
||||||
from the list of available strategies achieving this goal.
|
<strategy_definition>` from the list of available strategies achieving this
|
||||||
|
goal.
|
||||||
|
|
||||||
The :ref:`Strategy <strategy_definition>` is then dynamically loaded (via
|
The :ref:`Strategy <strategy_definition>` is then dynamically loaded (via
|
||||||
`stevedore <http://docs.openstack.org/developer/stevedore/>`_). The
|
`stevedore <http://docs.openstack.org/developer/stevedore/>`_). The
|
||||||
@@ -290,7 +290,7 @@ the Audit parameters from the
|
|||||||
:ref:`Watcher Database <watcher_database_definition>`. It instantiates the
|
:ref:`Watcher Database <watcher_database_definition>`. It instantiates the
|
||||||
appropriate :ref:`strategy <strategy_definition>` (using entry points)
|
appropriate :ref:`strategy <strategy_definition>` (using entry points)
|
||||||
given both the :ref:`goal <goal_definition>` and the strategy associated to the
|
given both the :ref:`goal <goal_definition>` and the strategy associated to the
|
||||||
parent :ref:`audit template <audit_template_definition>` of the :ref:`Audit
|
parent :ref:`audit template <audit_template_definition>` of the :ref:`audit
|
||||||
<audit_definition>`. If no strategy is associated to the audit template, the
|
<audit_definition>`. If no strategy is associated to the audit template, the
|
||||||
strategy is dynamically selected by the Decision Engine.
|
strategy is dynamically selected by the Decision Engine.
|
||||||
|
|
||||||
|
|||||||
210
doc/source/dev/plugin/scoring-engine-plugin.rst
Normal file
210
doc/source/dev/plugin/scoring-engine-plugin.rst
Normal file
@@ -0,0 +1,210 @@
|
|||||||
|
..
|
||||||
|
Except where otherwise noted, this document is licensed under Creative
|
||||||
|
Commons Attribution 3.0 License. You can view the license at:
|
||||||
|
|
||||||
|
https://creativecommons.org/licenses/by/3.0/
|
||||||
|
|
||||||
|
.. _implement_scoring_engine_plugin:
|
||||||
|
|
||||||
|
==========================
|
||||||
|
Build a new scoring engine
|
||||||
|
==========================
|
||||||
|
|
||||||
|
Watcher Decision Engine has an external :ref:`scoring engine
|
||||||
|
<scoring_engine_definition>` plugin interface which gives anyone the ability
|
||||||
|
to integrate an external scoring engine in order to make use of it in a
|
||||||
|
:ref:`strategy <strategy_definition>`.
|
||||||
|
|
||||||
|
This section gives some guidelines on how to implement and integrate custom
|
||||||
|
scoring engines with Watcher. If you wish to create a third-party package for
|
||||||
|
your plugin, you can refer to our :ref:`documentation for third-party package
|
||||||
|
creation <plugin-base_setup>`.
|
||||||
|
|
||||||
|
|
||||||
|
Pre-requisites
|
||||||
|
==============
|
||||||
|
|
||||||
|
Because scoring engines execute a purely mathematical tasks, they typically do
|
||||||
|
not have any additional dependencies. Additional requirements might be defined
|
||||||
|
by specific scoring engine implementations. For example, some scoring engines
|
||||||
|
might require to prepare learning data, which has to be loaded during the
|
||||||
|
scoring engine startup. Some other might require some external services to be
|
||||||
|
available (e.g. if the scoring infrastructure is running in the cloud).
|
||||||
|
|
||||||
|
|
||||||
|
Create a new scoring engine plugin
|
||||||
|
==================================
|
||||||
|
|
||||||
|
In order to create a new scoring engine you have to:
|
||||||
|
|
||||||
|
- Extend the :py:class:`~.ScoringEngine` class
|
||||||
|
- Implement its :py:meth:`~.ScoringEngine.get_name` method to return the
|
||||||
|
**unique** ID of the new scoring engine you want to create. This unique ID
|
||||||
|
should be the same as the name of :ref:`the entry point we will declare later
|
||||||
|
on <scoring_engine_plugin_add_entrypoint>`.
|
||||||
|
- Implement its :py:meth:`~.ScoringEngine.get_description` method to return the
|
||||||
|
user-friendly description of the implemented scoring engine. It might contain
|
||||||
|
information about algorithm used, learning data etc.
|
||||||
|
- Implement its :py:meth:`~.ScoringEngine.get_metainfo` method to return the
|
||||||
|
machine-friendly metadata about this scoring engine. For example, it could be
|
||||||
|
a JSON formatted text with information about the data model used, its input
|
||||||
|
and output data format, column names, etc.
|
||||||
|
- Implement its :py:meth:`~.ScoringEngine.calculate_score` method to return the
|
||||||
|
result calculated by this scoring engine.
|
||||||
|
|
||||||
|
Here is an example showing how you can write a plugin called ``NewScorer``:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
# filepath: thirdparty/new.py
|
||||||
|
# import path: thirdparty.new
|
||||||
|
from watcher.decision_engine.scoring import base
|
||||||
|
|
||||||
|
|
||||||
|
class NewScorer(base.ScoringEngine):
|
||||||
|
|
||||||
|
def get_name(self):
|
||||||
|
return 'new_scorer'
|
||||||
|
|
||||||
|
def get_description(self):
|
||||||
|
return ''
|
||||||
|
|
||||||
|
def get_metainfo(self):
|
||||||
|
return """{
|
||||||
|
"feature_columns": [
|
||||||
|
"column1",
|
||||||
|
"column2",
|
||||||
|
"column3"],
|
||||||
|
"result_columns": [
|
||||||
|
"value",
|
||||||
|
"probability"]
|
||||||
|
}"""
|
||||||
|
|
||||||
|
def calculate_score(self, features):
|
||||||
|
return '[12, 0.83]'
|
||||||
|
|
||||||
|
As you can see in the above example, the
|
||||||
|
:py:meth:`~.ScoringEngine.calculate_score` method returns a string. Both this
|
||||||
|
class and the client (caller) should perform all the necessary serialization
|
||||||
|
or deserialization.
|
||||||
|
|
||||||
|
|
||||||
|
(Optional) Create a new scoring engine container plugin
|
||||||
|
=======================================================
|
||||||
|
|
||||||
|
Optionally, it's possible to implement a container plugin, which can return a
|
||||||
|
list of scoring engines. This list can be re-evaluated multiple times during
|
||||||
|
the lifecycle of :ref:`Watcher Decision Engine
|
||||||
|
<watcher_decision_engine_definition>` and synchronized with :ref:`Watcher
|
||||||
|
Database <watcher_database_definition>` using the ``watcher-sync`` command line
|
||||||
|
tool.
|
||||||
|
|
||||||
|
Below is an example of a container using some scoring engine implementation
|
||||||
|
that is simply made of a client responsible for communicating with a real
|
||||||
|
scoring engine deployed as a web service on external servers:
|
||||||
|
|
||||||
|
.. code-block:: python
|
||||||
|
|
||||||
|
class NewScoringContainer(base.ScoringEngineContainer):
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_scoring_engine_list(self):
|
||||||
|
return [
|
||||||
|
RemoteScoringEngine(
|
||||||
|
name='scoring_engine1',
|
||||||
|
description='Some remote Scoring Engine 1',
|
||||||
|
remote_url='http://engine1.example.com/score'),
|
||||||
|
RemoteScoringEngine(
|
||||||
|
name='scoring_engine2',
|
||||||
|
description='Some remote Scoring Engine 2',
|
||||||
|
remote_url='http://engine2.example.com/score'),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
Abstract Plugin Class
|
||||||
|
=====================
|
||||||
|
|
||||||
|
Here below is the abstract :py:class:`~.ScoringEngine` class:
|
||||||
|
|
||||||
|
.. autoclass:: watcher.decision_engine.scoring.base.ScoringEngine
|
||||||
|
:members:
|
||||||
|
:special-members: __init__
|
||||||
|
:noindex:
|
||||||
|
|
||||||
|
|
||||||
|
Abstract Plugin Container Class
|
||||||
|
===============================
|
||||||
|
|
||||||
|
Here below is the abstract :py:class:`~.ScoringContainer` class:
|
||||||
|
|
||||||
|
.. autoclass:: watcher.decision_engine.scoring.base.ScoringEngineContainer
|
||||||
|
:members:
|
||||||
|
:special-members: __init__
|
||||||
|
:noindex:
|
||||||
|
|
||||||
|
|
||||||
|
.. _scoring_engine_plugin_add_entrypoint:
|
||||||
|
|
||||||
|
Add a new entry point
|
||||||
|
=====================
|
||||||
|
|
||||||
|
In order for the Watcher Decision Engine to load your new scoring engine, it
|
||||||
|
must be registered as a named entry point under the ``watcher_scoring_engines``
|
||||||
|
entry point of your ``setup.py`` file. If you are using pbr_, this entry point
|
||||||
|
should be placed in your ``setup.cfg`` file.
|
||||||
|
|
||||||
|
The name you give to your entry point has to be unique and should be the same
|
||||||
|
as the value returned by the :py:meth:`~.ScoringEngine.get_name` method of your
|
||||||
|
strategy.
|
||||||
|
|
||||||
|
Here below is how you would proceed to register ``NewScorer`` using pbr_:
|
||||||
|
|
||||||
|
.. code-block:: ini
|
||||||
|
|
||||||
|
[entry_points]
|
||||||
|
watcher_scoring_engines =
|
||||||
|
new_scorer = thirdparty.new:NewScorer
|
||||||
|
|
||||||
|
|
||||||
|
To get a better understanding on how to implement a more advanced scoring
|
||||||
|
engine, have a look at the :py:class:`~.DummyScorer` class. This implementation
|
||||||
|
is not really using machine learning, but other than that it contains all the
|
||||||
|
pieces which the "real" implementation would have.
|
||||||
|
|
||||||
|
In addition, for some use cases there is a need to register a list (possibly
|
||||||
|
dynamic, depending on the implementation and configuration) of scoring engines
|
||||||
|
in a single plugin, so there is no need to restart :ref:`Watcher Decision
|
||||||
|
Engine <watcher_decision_engine_definition>` every time such list changes. For
|
||||||
|
these cases, an additional ``watcher_scoring_engine_containers`` entry point
|
||||||
|
can be used.
|
||||||
|
|
||||||
|
For the example how to use scoring engine containers, please have a look at
|
||||||
|
the :py:class:`~.DummyScoringContainer` and the way it is configured in
|
||||||
|
``setup.cfg``. For new containers it could be done like this:
|
||||||
|
|
||||||
|
.. code-block:: ini
|
||||||
|
|
||||||
|
[entry_points]
|
||||||
|
watcher_scoring_engine_containers =
|
||||||
|
new_scoring_container = thirdparty.new:NewContainer
|
||||||
|
|
||||||
|
.. _pbr: http://docs.openstack.org/developer/pbr/
|
||||||
|
|
||||||
|
|
||||||
|
Using scoring engine plugins
|
||||||
|
============================
|
||||||
|
|
||||||
|
The Watcher Decision Engine service will automatically discover any installed
|
||||||
|
plugins when it is restarted. If a Python package containing a custom plugin is
|
||||||
|
installed within the same environment as Watcher, Watcher will automatically
|
||||||
|
make that plugin available for use.
|
||||||
|
|
||||||
|
At this point, Watcher will scan and register inside the :ref:`Watcher Database
|
||||||
|
<watcher_database_definition>` all the scoring engines you implemented upon
|
||||||
|
restarting the :ref:`Watcher Decision Engine
|
||||||
|
<watcher_decision_engine_definition>`.
|
||||||
|
|
||||||
|
In addition, ``watcher-sync`` tool can be used to trigger :ref:`Watcher
|
||||||
|
Database <watcher_database_definition>` synchronization. This might be used for
|
||||||
|
"dynamic" scoring containers, which can return different scoring engines based
|
||||||
|
on some external configuration (if they support that).
|
||||||
@@ -21,6 +21,22 @@ Goals
|
|||||||
.. list-plugins:: watcher_goals
|
.. list-plugins:: watcher_goals
|
||||||
:detailed:
|
:detailed:
|
||||||
|
|
||||||
|
.. _watcher_scoring_engines:
|
||||||
|
|
||||||
|
Scoring Engines
|
||||||
|
===============
|
||||||
|
|
||||||
|
.. list-plugins:: watcher_scoring_engines
|
||||||
|
:detailed:
|
||||||
|
|
||||||
|
.. _watcher_scoring_engine_containers:
|
||||||
|
|
||||||
|
Scoring Engine Containers
|
||||||
|
=========================
|
||||||
|
|
||||||
|
.. list-plugins:: watcher_scoring_engine_containers
|
||||||
|
:detailed:
|
||||||
|
|
||||||
.. _watcher_strategies:
|
.. _watcher_strategies:
|
||||||
|
|
||||||
Strategies
|
Strategies
|
||||||
|
|||||||
1
doc/source/dev/rally_link.rst
Normal file
1
doc/source/dev/rally_link.rst
Normal file
@@ -0,0 +1 @@
|
|||||||
|
.. include:: ../../../rally-jobs/README.rst
|
||||||
@@ -280,6 +280,12 @@ specific domain.
|
|||||||
Please, read `the official OpenStack definition of a Project
|
Please, read `the official OpenStack definition of a Project
|
||||||
<http://docs.openstack.org/glossary/content/glossary.html>`_.
|
<http://docs.openstack.org/glossary/content/glossary.html>`_.
|
||||||
|
|
||||||
|
.. _scoring_engine_definition:
|
||||||
|
|
||||||
|
Scoring Engine
|
||||||
|
==============
|
||||||
|
|
||||||
|
.. watcher-term:: watcher.api.controllers.v1.scoring_engine
|
||||||
|
|
||||||
.. _sla_definition:
|
.. _sla_definition:
|
||||||
|
|
||||||
|
|||||||
@@ -5,11 +5,12 @@
|
|||||||
hide methods
|
hide methods
|
||||||
hide stereotypes
|
hide stereotypes
|
||||||
|
|
||||||
table(goal) {
|
table(goals) {
|
||||||
primary_key(id: Integer)
|
primary_key(id: Integer)
|
||||||
uuid : String[36]
|
uuid : String[36]
|
||||||
name : String[63]
|
name : String[63]
|
||||||
display_name : String[63]
|
display_name : String[63]
|
||||||
|
efficacy_specification : JSONEncodedList, nullable
|
||||||
|
|
||||||
created_at : DateTime
|
created_at : DateTime
|
||||||
updated_at : DateTime
|
updated_at : DateTime
|
||||||
@@ -18,12 +19,13 @@ table(goal) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
table(strategy) {
|
table(strategies) {
|
||||||
primary_key(id: Integer)
|
primary_key(id: Integer)
|
||||||
foreign_key(goal_id : Integer)
|
foreign_key(goal_id : Integer)
|
||||||
uuid : String[36]
|
uuid : String[36]
|
||||||
name : String[63]
|
name : String[63]
|
||||||
display_name : String[63]
|
display_name : String[63]
|
||||||
|
parameters_spec : JSONEncodedDict, nullable
|
||||||
|
|
||||||
created_at : DateTime
|
created_at : DateTime
|
||||||
updated_at : DateTime
|
updated_at : DateTime
|
||||||
@@ -32,7 +34,7 @@ table(strategy) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
table(audit_template) {
|
table(audit_templates) {
|
||||||
primary_key(id: Integer)
|
primary_key(id: Integer)
|
||||||
foreign_key("goal_id : Integer")
|
foreign_key("goal_id : Integer")
|
||||||
foreign_key("strategy_id : Integer, nullable")
|
foreign_key("strategy_id : Integer, nullable")
|
||||||
@@ -50,14 +52,17 @@ table(audit_template) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
table(audit) {
|
table(audits) {
|
||||||
primary_key(id: Integer)
|
primary_key(id: Integer)
|
||||||
foreign_key("audit_template_id : Integer")
|
foreign_key("goal_id : Integer")
|
||||||
|
foreign_key("strategy_id : Integer, nullable")
|
||||||
uuid : String[36]
|
uuid : String[36]
|
||||||
audit_type : String[20]
|
audit_type : String[20]
|
||||||
state : String[20], nullable
|
state : String[20], nullable
|
||||||
deadline :DateTime, nullable
|
deadline : DateTime, nullable
|
||||||
interval : Integer, nullable
|
interval : Integer, nullable
|
||||||
|
parameters : JSONEncodedDict, nullable
|
||||||
|
host_aggregate : Integer, nullable
|
||||||
|
|
||||||
created_at : DateTime
|
created_at : DateTime
|
||||||
updated_at : DateTime
|
updated_at : DateTime
|
||||||
@@ -66,9 +71,10 @@ table(audit) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
table(action_plan) {
|
table(action_plans) {
|
||||||
primary_key(id: Integer)
|
primary_key(id: Integer)
|
||||||
foreign_key("audit_id : Integer, nullable")
|
foreign_key("audit_id : Integer, nullable")
|
||||||
|
foreign_key("strategy_id : Integer")
|
||||||
uuid : String[36]
|
uuid : String[36]
|
||||||
first_action_id : Integer
|
first_action_id : Integer
|
||||||
state : String[20], nullable
|
state : String[20], nullable
|
||||||
@@ -81,7 +87,7 @@ table(action_plan) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
table(action) {
|
table(actions) {
|
||||||
primary_key(id: Integer)
|
primary_key(id: Integer)
|
||||||
foreign_key("action_plan_id : Integer")
|
foreign_key("action_plan_id : Integer")
|
||||||
uuid : String[36]
|
uuid : String[36]
|
||||||
@@ -97,7 +103,7 @@ table(action) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
table(efficacy_indicator) {
|
table(efficacy_indicators) {
|
||||||
primary_key(id: Integer)
|
primary_key(id: Integer)
|
||||||
foreign_key("action_plan_id : Integer")
|
foreign_key("action_plan_id : Integer")
|
||||||
uuid : String[36]
|
uuid : String[36]
|
||||||
@@ -112,12 +118,27 @@ table(efficacy_indicator) {
|
|||||||
deleted : Integer
|
deleted : Integer
|
||||||
}
|
}
|
||||||
|
|
||||||
"goal" <.. "strategy" : Foreign Key
|
table(scoring_engines) {
|
||||||
"goal" <.. "audit_template" : Foreign Key
|
primary_key(id: Integer)
|
||||||
"strategy" <.. "audit_template" : Foreign Key
|
uuid : String[36]
|
||||||
"audit_template" <.. "audit" : Foreign Key
|
name : String[63]
|
||||||
"action_plan" <.. "action" : Foreign Key
|
description : String[255], nullable
|
||||||
"action_plan" <.. "efficacy_indicator" : Foreign Key
|
metainfo : Text, nullable
|
||||||
"audit" <.. "action_plan" : Foreign Key
|
|
||||||
|
created_at : DateTime
|
||||||
|
updated_at : DateTime
|
||||||
|
deleted_at : DateTime
|
||||||
|
deleted : Integer
|
||||||
|
}
|
||||||
|
|
||||||
|
"goals" <.. "strategies" : Foreign Key
|
||||||
|
"goals" <.. "audit_templates" : Foreign Key
|
||||||
|
"strategies" <.. "audit_templates" : Foreign Key
|
||||||
|
"goals" <.. "audits" : Foreign Key
|
||||||
|
"strategies" <.. "audits" : Foreign Key
|
||||||
|
"action_plans" <.. "actions" : Foreign Key
|
||||||
|
"action_plans" <.. "efficacy_indicators" : Foreign Key
|
||||||
|
"strategies" <.. "action_plans" : Foreign Key
|
||||||
|
"audits" <.. "action_plans" : Foreign Key
|
||||||
|
|
||||||
@enduml
|
@enduml
|
||||||
|
|||||||
Binary file not shown.
|
Before Width: | Height: | Size: 64 KiB After Width: | Height: | Size: 56 KiB |
@@ -57,6 +57,7 @@ Getting Started
|
|||||||
deploy/configuration
|
deploy/configuration
|
||||||
deploy/conf-files
|
deploy/conf-files
|
||||||
dev/testing
|
dev/testing
|
||||||
|
dev/rally_link
|
||||||
|
|
||||||
API References
|
API References
|
||||||
--------------
|
--------------
|
||||||
@@ -74,6 +75,7 @@ Plugins
|
|||||||
|
|
||||||
dev/plugin/base-setup
|
dev/plugin/base-setup
|
||||||
dev/plugin/goal-plugin
|
dev/plugin/goal-plugin
|
||||||
|
dev/plugin/scoring-engine-plugin
|
||||||
dev/plugin/strategy-plugin
|
dev/plugin/strategy-plugin
|
||||||
dev/plugin/cdmc-plugin
|
dev/plugin/cdmc-plugin
|
||||||
dev/plugin/action-plugin
|
dev/plugin/action-plugin
|
||||||
|
|||||||
42
rally-jobs/README.rst
Normal file
42
rally-jobs/README.rst
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
Rally job
|
||||||
|
=========
|
||||||
|
|
||||||
|
We provide, with Watcher, a Rally plugin you can use to benchmark the optimization service.
|
||||||
|
|
||||||
|
To launch this task with configured Rally you just need to run:
|
||||||
|
|
||||||
|
::
|
||||||
|
|
||||||
|
rally task start watcher/rally-jobs/watcher.yaml
|
||||||
|
|
||||||
|
Structure
|
||||||
|
---------
|
||||||
|
|
||||||
|
* plugins - directory where you can add rally plugins. Almost everything in
|
||||||
|
Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic
|
||||||
|
cleanup resources, ....
|
||||||
|
|
||||||
|
* extra - all files from this directory will be copy pasted to gates, so you
|
||||||
|
are able to use absolute paths in rally tasks.
|
||||||
|
Files will be located in ~/.rally/extra/*
|
||||||
|
|
||||||
|
* watcher.yaml is a task that is run in gates against OpenStack
|
||||||
|
deployed by DevStack
|
||||||
|
|
||||||
|
|
||||||
|
Useful links
|
||||||
|
------------
|
||||||
|
|
||||||
|
* How to install: http://docs.openstack.org/developer/rally/install.html
|
||||||
|
|
||||||
|
* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
|
||||||
|
|
||||||
|
* More about Rally: https://rally.readthedocs.org/en/latest/
|
||||||
|
|
||||||
|
* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html
|
||||||
|
|
||||||
|
* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html
|
||||||
|
|
||||||
|
* About plugins: https://rally.readthedocs.org/en/latest/plugins.html
|
||||||
|
|
||||||
|
* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
|
||||||
67
rally-jobs/watcher-watcher.yaml
Normal file
67
rally-jobs/watcher-watcher.yaml
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
---
|
||||||
|
Watcher.create_audit_and_delete:
|
||||||
|
-
|
||||||
|
runner:
|
||||||
|
type: "constant"
|
||||||
|
times: 10
|
||||||
|
concurrency: 2
|
||||||
|
context:
|
||||||
|
users:
|
||||||
|
tenants: 2
|
||||||
|
users_per_tenant: 2
|
||||||
|
audit_templates:
|
||||||
|
audit_templates_per_admin: 5
|
||||||
|
fill_strategy: "round_robin"
|
||||||
|
params:
|
||||||
|
- goal:
|
||||||
|
name: "dummy"
|
||||||
|
strategy:
|
||||||
|
name: "dummy"
|
||||||
|
extra: {}
|
||||||
|
sla:
|
||||||
|
failure_rate:
|
||||||
|
max: 0
|
||||||
|
|
||||||
|
Watcher.create_audit_template_and_delete:
|
||||||
|
-
|
||||||
|
args:
|
||||||
|
goal:
|
||||||
|
name: "dummy"
|
||||||
|
strategy:
|
||||||
|
name: "dummy"
|
||||||
|
extra: {}
|
||||||
|
runner:
|
||||||
|
type: "constant"
|
||||||
|
times: 10
|
||||||
|
concurrency: 2
|
||||||
|
sla:
|
||||||
|
failure_rate:
|
||||||
|
max: 0
|
||||||
|
|
||||||
|
Watcher.list_audit_templates:
|
||||||
|
-
|
||||||
|
runner:
|
||||||
|
type: "constant"
|
||||||
|
times: 10
|
||||||
|
concurrency: 2
|
||||||
|
context:
|
||||||
|
users:
|
||||||
|
tenants: 2
|
||||||
|
users_per_tenant: 2
|
||||||
|
audit_templates:
|
||||||
|
audit_templates_per_admin: 5
|
||||||
|
fill_strategy: "random"
|
||||||
|
params:
|
||||||
|
- goal:
|
||||||
|
name: "workload_balancing"
|
||||||
|
strategy:
|
||||||
|
name: "workload_stabilization"
|
||||||
|
extra: {}
|
||||||
|
- goal:
|
||||||
|
name: "dummy"
|
||||||
|
strategy:
|
||||||
|
name: "dummy"
|
||||||
|
extra: {}
|
||||||
|
sla:
|
||||||
|
failure_rate:
|
||||||
|
max: 0
|
||||||
@@ -7,11 +7,12 @@ enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' #
|
|||||||
jsonpatch>=1.1 # BSD
|
jsonpatch>=1.1 # BSD
|
||||||
keystoneauth1>=2.10.0 # Apache-2.0
|
keystoneauth1>=2.10.0 # Apache-2.0
|
||||||
keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0
|
keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0
|
||||||
|
lxml>=2.3 # BSD
|
||||||
oslo.concurrency>=3.8.0 # Apache-2.0
|
oslo.concurrency>=3.8.0 # Apache-2.0
|
||||||
oslo.cache>=1.5.0 # Apache-2.0
|
oslo.cache>=1.5.0 # Apache-2.0
|
||||||
oslo.config>=3.14.0 # Apache-2.0
|
oslo.config>=3.14.0 # Apache-2.0
|
||||||
oslo.context>=2.9.0 # Apache-2.0
|
oslo.context>=2.9.0 # Apache-2.0
|
||||||
oslo.db>=4.10.0 # Apache-2.0
|
oslo.db!=4.13.1,!=4.13.2,>=4.10.0 # Apache-2.0
|
||||||
oslo.i18n>=2.1.0 # Apache-2.0
|
oslo.i18n>=2.1.0 # Apache-2.0
|
||||||
oslo.log>=1.14.0 # Apache-2.0
|
oslo.log>=1.14.0 # Apache-2.0
|
||||||
oslo.messaging>=5.2.0 # Apache-2.0
|
oslo.messaging>=5.2.0 # Apache-2.0
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[metadata]
|
[metadata]
|
||||||
name = python-watcher
|
name = python-watcher
|
||||||
summary = Watcher takes advantage of CEP and ML algorithms/metaheuristics to improve physical resources usage through better VM placement. Watcher can improve your cloud optimization by reducing energy footprint and increasing profits.
|
summary = OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds.
|
||||||
description-file =
|
description-file =
|
||||||
README.rst
|
README.rst
|
||||||
author = OpenStack
|
author = OpenStack
|
||||||
|
|||||||
13
tox.ini
13
tox.ini
@@ -1,16 +1,17 @@
|
|||||||
[tox]
|
[tox]
|
||||||
minversion = 1.6
|
minversion = 1.8
|
||||||
envlist = py35,py34,py27,pep8
|
envlist = py35,py34,py27,pep8
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
usedevelop = True
|
usedevelop = True
|
||||||
whitelist_externals = find
|
whitelist_externals = find
|
||||||
install_command = pip install -U {opts} {packages}
|
install_command =
|
||||||
|
constraints: pip install -U --force-reinstall -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/newton} {opts} {packages}
|
||||||
|
pip install -U {opts} {packages}
|
||||||
setenv =
|
setenv =
|
||||||
VIRTUAL_ENV={envdir}
|
VIRTUAL_ENV={envdir}
|
||||||
deps = -r{toxinidir}/requirements.txt
|
deps = -r{toxinidir}/test-requirements.txt
|
||||||
-r{toxinidir}/test-requirements.txt
|
|
||||||
commands =
|
commands =
|
||||||
find . -type f -name "*.pyc" -delete
|
find . -type f -name "*.pyc" -delete
|
||||||
find . -type d -name "__pycache__" -delete
|
find . -type d -name "__pycache__" -delete
|
||||||
@@ -20,7 +21,7 @@ commands =
|
|||||||
commands =
|
commands =
|
||||||
doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst
|
doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst
|
||||||
flake8
|
flake8
|
||||||
bandit -r watcher -x tests -n5 -ll
|
bandit -r watcher -x tests -n5 -ll -s B320
|
||||||
|
|
||||||
[testenv:venv]
|
[testenv:venv]
|
||||||
setenv = PYTHONHASHSEED=0
|
setenv = PYTHONHASHSEED=0
|
||||||
@@ -66,4 +67,4 @@ commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasen
|
|||||||
|
|
||||||
[testenv:bandit]
|
[testenv:bandit]
|
||||||
deps = -r{toxinidir}/test-requirements.txt
|
deps = -r{toxinidir}/test-requirements.txt
|
||||||
commands = bandit -r watcher -x tests -n5 -ll
|
commands = bandit -r watcher -x tests -n5 -ll -s B320
|
||||||
|
|||||||
@@ -16,8 +16,10 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
A :ref:`Scoring Engine <scoring_engine_definition>` is an instance of a data
|
A :ref:`Scoring Engine <scoring_engine_definition>` is an executable that has
|
||||||
model, to which a learning data was applied.
|
a well-defined input, a well-defined output, and performs a purely mathematical
|
||||||
|
task. That is, the calculation does not depend on the environment in which it
|
||||||
|
is running - it would produce the same result anywhere.
|
||||||
|
|
||||||
Because there might be multiple algorithms used to build a particular data
|
Because there might be multiple algorithms used to build a particular data
|
||||||
model (and therefore a scoring engine), the usage of scoring engine might
|
model (and therefore a scoring engine), the usage of scoring engine might
|
||||||
|
|||||||
@@ -18,6 +18,7 @@
|
|||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
from pecan import hooks
|
from pecan import hooks
|
||||||
|
from six.moves import http_client
|
||||||
|
|
||||||
from watcher.common import context
|
from watcher.common import context
|
||||||
|
|
||||||
@@ -95,18 +96,20 @@ class NoExceptionTracebackHook(hooks.PecanHook):
|
|||||||
return
|
return
|
||||||
|
|
||||||
# Do nothing if there is no error.
|
# Do nothing if there is no error.
|
||||||
if 200 <= state.response.status_int < 400:
|
# Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not
|
||||||
|
# an error.
|
||||||
|
if (http_client.OK <= state.response.status_int <
|
||||||
|
http_client.BAD_REQUEST):
|
||||||
return
|
return
|
||||||
|
|
||||||
json_body = state.response.json
|
json_body = state.response.json
|
||||||
# Do not remove traceback when server in debug mode (except 'Server'
|
# Do not remove traceback when traceback config is set
|
||||||
# errors when 'debuginfo' will be used for traces).
|
if cfg.CONF.debug:
|
||||||
if cfg.CONF.debug and json_body.get('faultcode') != 'Server':
|
|
||||||
return
|
return
|
||||||
|
|
||||||
faultstring = json_body.get('faultstring')
|
faultstring = json_body.get('faultstring')
|
||||||
traceback_marker = 'Traceback (most recent call last):'
|
traceback_marker = 'Traceback (most recent call last):'
|
||||||
if faultstring and (traceback_marker in faultstring):
|
if faultstring and traceback_marker in faultstring:
|
||||||
# Cut-off traceback.
|
# Cut-off traceback.
|
||||||
faultstring = faultstring.split(traceback_marker, 1)[0]
|
faultstring = faultstring.split(traceback_marker, 1)[0]
|
||||||
# Remove trailing newlines and spaces if any.
|
# Remove trailing newlines and spaces if any.
|
||||||
|
|||||||
@@ -27,14 +27,14 @@ from oslo_serialization import jsonutils
|
|||||||
import six
|
import six
|
||||||
import webob
|
import webob
|
||||||
|
|
||||||
from watcher._i18n import _
|
from watcher._i18n import _, _LE
|
||||||
from watcher._i18n import _LE
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class ParsableErrorMiddleware(object):
|
class ParsableErrorMiddleware(object):
|
||||||
"""Replace error body with something the client can parse."""
|
"""Replace error body with something the client can parse."""
|
||||||
|
|
||||||
def __init__(self, app):
|
def __init__(self, app):
|
||||||
self.app = app
|
self.app = app
|
||||||
|
|
||||||
@@ -59,8 +59,7 @@ class ParsableErrorMiddleware(object):
|
|||||||
# compute the length.
|
# compute the length.
|
||||||
headers = [(h, v)
|
headers = [(h, v)
|
||||||
for (h, v) in headers
|
for (h, v) in headers
|
||||||
if h not in ('Content-Length', 'Content-Type')
|
if h not in ('Content-Length', 'Content-Type')]
|
||||||
]
|
|
||||||
# Save the headers in case we need to modify them.
|
# Save the headers in case we need to modify them.
|
||||||
state['headers'] = headers
|
state['headers'] = headers
|
||||||
return start_response(status, headers, exc_info)
|
return start_response(status, headers, exc_info)
|
||||||
@@ -68,24 +67,27 @@ class ParsableErrorMiddleware(object):
|
|||||||
app_iter = self.app(environ, replacement_start_response)
|
app_iter = self.app(environ, replacement_start_response)
|
||||||
if (state['status_code'] // 100) not in (2, 3):
|
if (state['status_code'] // 100) not in (2, 3):
|
||||||
req = webob.Request(environ)
|
req = webob.Request(environ)
|
||||||
if (req.accept.best_match(['application/json', 'application/xml']
|
if (
|
||||||
) == 'application/xml'):
|
req.accept.best_match(
|
||||||
|
['application/json',
|
||||||
|
'application/xml']) == 'application/xml'
|
||||||
|
):
|
||||||
try:
|
try:
|
||||||
# simple check xml is valid
|
# simple check xml is valid
|
||||||
body = [et.ElementTree.tostring(
|
body = [
|
||||||
et.ElementTree.Element('error_message',
|
et.ElementTree.tostring(
|
||||||
text='\n'.join(app_iter)))]
|
et.ElementTree.Element(
|
||||||
|
'error_message', text='\n'.join(app_iter)))]
|
||||||
except et.ElementTree.ParseError as err:
|
except et.ElementTree.ParseError as err:
|
||||||
LOG.error(_LE('Error parsing HTTP response: %s'), err)
|
LOG.error(_LE('Error parsing HTTP response: %s'), err)
|
||||||
body = [et.ElementTree.tostring(
|
body = ['<error_message>%s'
|
||||||
et.ElementTree.Element('error_message',
|
'</error_message>' % state['status_code']]
|
||||||
text=state['status_code']))]
|
|
||||||
state['headers'].append(('Content-Type', 'application/xml'))
|
state['headers'].append(('Content-Type', 'application/xml'))
|
||||||
else:
|
else:
|
||||||
if six.PY3:
|
if six.PY3:
|
||||||
app_iter = [i.decode('utf-8') for i in app_iter]
|
app_iter = [i.decode('utf-8') for i in app_iter]
|
||||||
body = [jsonutils.dumps(
|
body = [jsonutils.dumps(
|
||||||
{'error_message': '\n'.join(app_iter)})]
|
{'error_message': '\n'.join(app_iter)})]
|
||||||
if six.PY3:
|
if six.PY3:
|
||||||
body = [item.encode('utf-8') for item in body]
|
body = [item.encode('utf-8') for item in body]
|
||||||
state['headers'].append(('Content-Type', 'application/json'))
|
state['headers'].append(('Content-Type', 'application/json'))
|
||||||
|
|||||||
@@ -25,6 +25,7 @@ from oslo_log import log as logging
|
|||||||
|
|
||||||
from watcher._i18n import _LI
|
from watcher._i18n import _LI
|
||||||
from watcher.common import service as watcher_service
|
from watcher.common import service as watcher_service
|
||||||
|
from watcher.decision_engine import gmr
|
||||||
from watcher.decision_engine import manager
|
from watcher.decision_engine import manager
|
||||||
from watcher.decision_engine import scheduling
|
from watcher.decision_engine import scheduling
|
||||||
from watcher.decision_engine import sync
|
from watcher.decision_engine import sync
|
||||||
@@ -35,6 +36,7 @@ CONF = cfg.CONF
|
|||||||
|
|
||||||
def main():
|
def main():
|
||||||
watcher_service.prepare_service(sys.argv)
|
watcher_service.prepare_service(sys.argv)
|
||||||
|
gmr.register_gmr_plugins()
|
||||||
|
|
||||||
LOG.info(_LI('Starting Watcher Decision Engine service in PID %s'),
|
LOG.info(_LI('Starting Watcher Decision Engine service in PID %s'),
|
||||||
os.getpid())
|
os.getpid())
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ class RequestContext(context.RequestContext):
|
|||||||
show_deleted=kwargs.pop('show_deleted', False),
|
show_deleted=kwargs.pop('show_deleted', False),
|
||||||
request_id=request_id,
|
request_id=request_id,
|
||||||
resource_uuid=kwargs.pop('resource_uuid', None),
|
resource_uuid=kwargs.pop('resource_uuid', None),
|
||||||
is_admin_project=kwargs.pop('is_admin_project', None),
|
is_admin_project=kwargs.pop('is_admin_project', True),
|
||||||
overwrite=overwrite,
|
overwrite=overwrite,
|
||||||
roles=roles)
|
roles=roles)
|
||||||
|
|
||||||
|
|||||||
@@ -207,7 +207,7 @@ class AuditTemplateAlreadyExists(Conflict):
|
|||||||
|
|
||||||
class AuditTemplateReferenced(Invalid):
|
class AuditTemplateReferenced(Invalid):
|
||||||
msg_fmt = _("AuditTemplate %(audit_template)s is referenced by one or "
|
msg_fmt = _("AuditTemplate %(audit_template)s is referenced by one or "
|
||||||
"multiple audit")
|
"multiple audits")
|
||||||
|
|
||||||
|
|
||||||
class AuditTypeNotFound(Invalid):
|
class AuditTypeNotFound(Invalid):
|
||||||
@@ -317,7 +317,7 @@ class AuthorizationFailure(WatcherException):
|
|||||||
|
|
||||||
|
|
||||||
class KeystoneFailure(WatcherException):
|
class KeystoneFailure(WatcherException):
|
||||||
msg_fmt = _("'Keystone API endpoint is missing''")
|
msg_fmt = _("Keystone API endpoint is missing")
|
||||||
|
|
||||||
|
|
||||||
class ClusterEmpty(WatcherException):
|
class ClusterEmpty(WatcherException):
|
||||||
@@ -336,6 +336,10 @@ class ClusterStateNotDefined(WatcherException):
|
|||||||
msg_fmt = _("The cluster state is not defined")
|
msg_fmt = _("The cluster state is not defined")
|
||||||
|
|
||||||
|
|
||||||
|
class CapacityNotDefined(WatcherException):
|
||||||
|
msg_fmt = _("The capacity %(capacity)s is not defined for '%(resource)s'")
|
||||||
|
|
||||||
|
|
||||||
class NoAvailableStrategyForGoal(WatcherException):
|
class NoAvailableStrategyForGoal(WatcherException):
|
||||||
msg_fmt = _("No strategy could be found to achieve the '%(goal)s' goal.")
|
msg_fmt = _("No strategy could be found to achieve the '%(goal)s' goal.")
|
||||||
|
|
||||||
|
|||||||
@@ -26,6 +26,8 @@ import cinderclient.exceptions as ciexceptions
|
|||||||
import novaclient.exceptions as nvexceptions
|
import novaclient.exceptions as nvexceptions
|
||||||
|
|
||||||
from watcher.common import clients
|
from watcher.common import clients
|
||||||
|
from watcher.common import exception
|
||||||
|
from watcher.common import utils
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
@@ -43,6 +45,24 @@ class NovaHelper(object):
|
|||||||
def get_compute_node_list(self):
|
def get_compute_node_list(self):
|
||||||
return self.nova.hypervisors.list()
|
return self.nova.hypervisors.list()
|
||||||
|
|
||||||
|
def get_compute_node_by_id(self, node_id):
|
||||||
|
"""Get compute node by ID (*not* UUID)"""
|
||||||
|
# We need to pass an object with an 'id' attribute to make it work
|
||||||
|
return self.nova.hypervisors.get(utils.Struct(id=node_id))
|
||||||
|
|
||||||
|
def get_compute_node_by_hostname(self, node_hostname):
|
||||||
|
"""Get compute node by ID (*not* UUID)"""
|
||||||
|
# We need to pass an object with an 'id' attribute to make it work
|
||||||
|
try:
|
||||||
|
compute_nodes = self.nova.hypervisors.search(node_hostname)
|
||||||
|
if len(compute_nodes) != 1:
|
||||||
|
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||||
|
|
||||||
|
return self.get_compute_node_by_id(compute_nodes[0].id)
|
||||||
|
except Exception as exc:
|
||||||
|
LOG.exception(exc)
|
||||||
|
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||||
|
|
||||||
def find_instance(self, instance_id):
|
def find_instance(self, instance_id):
|
||||||
search_opts = {'all_tenants': True}
|
search_opts = {'all_tenants': True}
|
||||||
instances = self.nova.servers.list(detailed=True,
|
instances = self.nova.servers.list(detailed=True,
|
||||||
|
|||||||
@@ -272,4 +272,4 @@ def prepare_service(argv=(), conf=cfg.CONF):
|
|||||||
conf.log_opt_values(LOG, logging.DEBUG)
|
conf.log_opt_values(LOG, logging.DEBUG)
|
||||||
|
|
||||||
gmr.TextGuruMeditation.register_section(_('Plugins'), opts.show_plugins)
|
gmr.TextGuruMeditation.register_section(_('Plugins'), opts.show_plugins)
|
||||||
gmr.TextGuruMeditation.setup_autorun(version)
|
gmr.TextGuruMeditation.setup_autorun(version, conf=conf)
|
||||||
|
|||||||
@@ -165,7 +165,7 @@ def extend_with_strict_schema(validator_class):
|
|||||||
raise exception.AuditParameterNotAllowed(parameter=para)
|
raise exception.AuditParameterNotAllowed(parameter=para)
|
||||||
|
|
||||||
for error in validate_properties(
|
for error in validate_properties(
|
||||||
validator, properties, instance, schema
|
validator, properties, instance, schema
|
||||||
):
|
):
|
||||||
yield error
|
yield error
|
||||||
|
|
||||||
|
|||||||
48
watcher/decision_engine/gmr.py
Normal file
48
watcher/decision_engine/gmr.py
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
# -*- encoding: utf-8 -*-
|
||||||
|
# Copyright (c) 2016 b<>com
|
||||||
|
#
|
||||||
|
# Authors: Vincent FRANCOISE <vincent.francoise@b-com.com>
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
from oslo_reports import guru_meditation_report as gmr
|
||||||
|
|
||||||
|
from watcher._i18n import _
|
||||||
|
from watcher.decision_engine.model.collector import manager
|
||||||
|
|
||||||
|
|
||||||
|
def register_gmr_plugins():
|
||||||
|
"""Register GMR plugins that are specific to watcher-decision-engine."""
|
||||||
|
gmr.TextGuruMeditation.register_section(_('CDMCs'), show_models)
|
||||||
|
|
||||||
|
|
||||||
|
def show_models():
|
||||||
|
"""Create a formatted output of all the CDMs
|
||||||
|
|
||||||
|
Mainly used as a Guru Meditation Report (GMR) plugin
|
||||||
|
"""
|
||||||
|
mgr = manager.CollectorManager()
|
||||||
|
|
||||||
|
output = []
|
||||||
|
for name, cdmc in mgr.get_collectors().items():
|
||||||
|
output.append("")
|
||||||
|
output.append("~" * len(name))
|
||||||
|
output.append(name)
|
||||||
|
output.append("~" * len(name))
|
||||||
|
output.append("")
|
||||||
|
|
||||||
|
cdmc_struct = cdmc.cluster_data_model.to_string()
|
||||||
|
output.append(cdmc_struct)
|
||||||
|
|
||||||
|
return "\n".join(output)
|
||||||
@@ -83,7 +83,7 @@ class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector):
|
|||||||
for n in nodes:
|
for n in nodes:
|
||||||
service = self.wrapper.nova.services.find(id=n.service['id'])
|
service = self.wrapper.nova.services.find(id=n.service['id'])
|
||||||
# create node in cluster_model_collector
|
# create node in cluster_model_collector
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(n.id)
|
||||||
node.uuid = service.host
|
node.uuid = service.host
|
||||||
node.hostname = n.hypervisor_hostname
|
node.hostname = n.hypervisor_hostname
|
||||||
# set capacity
|
# set capacity
|
||||||
@@ -105,7 +105,10 @@ class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector):
|
|||||||
# set capacity
|
# set capacity
|
||||||
self.wrapper.get_flavor_instance(v, flavor_cache)
|
self.wrapper.get_flavor_instance(v, flavor_cache)
|
||||||
mem.set_capacity(instance, v.flavor['ram'])
|
mem.set_capacity(instance, v.flavor['ram'])
|
||||||
|
# FIXME: update all strategies to use disk_capacity
|
||||||
|
# for instances instead of disk
|
||||||
disk.set_capacity(instance, v.flavor['disk'])
|
disk.set_capacity(instance, v.flavor['disk'])
|
||||||
|
disk_capacity.set_capacity(instance, v.flavor['disk'])
|
||||||
num_cores.set_capacity(instance, v.flavor['vcpus'])
|
num_cores.set_capacity(instance, v.flavor['vcpus'])
|
||||||
|
|
||||||
model.map_instance(instance, node)
|
model.map_instance(instance, node)
|
||||||
|
|||||||
@@ -28,8 +28,9 @@ class ServiceState(enum.Enum):
|
|||||||
|
|
||||||
class ComputeNode(compute_resource.ComputeResource):
|
class ComputeNode(compute_resource.ComputeResource):
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self, id):
|
||||||
super(ComputeNode, self).__init__()
|
super(ComputeNode, self).__init__()
|
||||||
|
self.id = id
|
||||||
self._state = ServiceState.ONLINE.value
|
self._state = ServiceState.ONLINE.value
|
||||||
self._status = ServiceState.ENABLED.value
|
self._status = ServiceState.ENABLED.value
|
||||||
|
|
||||||
|
|||||||
@@ -16,6 +16,8 @@
|
|||||||
|
|
||||||
import enum
|
import enum
|
||||||
|
|
||||||
|
from watcher.common import exception
|
||||||
|
|
||||||
|
|
||||||
class ResourceType(enum.Enum):
|
class ResourceType(enum.Enum):
|
||||||
cpu_cores = 'num_cores'
|
cpu_cores = 'num_cores'
|
||||||
@@ -50,12 +52,12 @@ class Resource(object):
|
|||||||
def unset_capacity(self, element):
|
def unset_capacity(self, element):
|
||||||
del self.mapping[element.uuid]
|
del self.mapping[element.uuid]
|
||||||
|
|
||||||
def get_capacity_from_id(self, uuid):
|
def get_capacity_by_uuid(self, uuid):
|
||||||
if str(uuid) in self.mapping.keys():
|
try:
|
||||||
return self.mapping[str(uuid)]
|
return self.mapping[str(uuid)]
|
||||||
else:
|
except KeyError:
|
||||||
# TODO(jed) throw exception
|
raise exception.CapacityNotDefined(
|
||||||
return None
|
capacity=self.name.value, resource=str(uuid))
|
||||||
|
|
||||||
def get_capacity(self, element):
|
def get_capacity(self, element):
|
||||||
return self.get_capacity_from_id(element.uuid)
|
return self.get_capacity_by_uuid(element.uuid)
|
||||||
|
|||||||
@@ -58,9 +58,9 @@ class Mapping(object):
|
|||||||
:param node: the node
|
:param node: the node
|
||||||
:param instance: the virtual machine or instance
|
:param instance: the virtual machine or instance
|
||||||
"""
|
"""
|
||||||
self.unmap_from_id(node.uuid, instance.uuid)
|
self.unmap_by_uuid(node.uuid, instance.uuid)
|
||||||
|
|
||||||
def unmap_from_id(self, node_uuid, instance_uuid):
|
def unmap_by_uuid(self, node_uuid, instance_uuid):
|
||||||
"""Remove the instance (by id) from the node (by id)
|
"""Remove the instance (by id) from the node (by id)
|
||||||
|
|
||||||
:rtype : object
|
:rtype : object
|
||||||
@@ -84,15 +84,15 @@ class Mapping(object):
|
|||||||
return self.compute_node_mapping
|
return self.compute_node_mapping
|
||||||
|
|
||||||
def get_node_from_instance(self, instance):
|
def get_node_from_instance(self, instance):
|
||||||
return self.get_node_from_instance_id(instance.uuid)
|
return self.get_node_by_instance_uuid(instance.uuid)
|
||||||
|
|
||||||
def get_node_from_instance_id(self, instance_uuid):
|
def get_node_by_instance_uuid(self, instance_uuid):
|
||||||
"""Getting host information from the guest instance
|
"""Getting host information from the guest instance
|
||||||
|
|
||||||
:param instance: the uuid of the instance
|
:param instance: the uuid of the instance
|
||||||
:return: node
|
:return: node
|
||||||
"""
|
"""
|
||||||
return self.model.get_node_from_id(
|
return self.model.get_node_by_uuid(
|
||||||
self.instance_mapping[str(instance_uuid)])
|
self.instance_mapping[str(instance_uuid)])
|
||||||
|
|
||||||
def get_node_instances(self, node):
|
def get_node_instances(self, node):
|
||||||
@@ -101,9 +101,9 @@ class Mapping(object):
|
|||||||
:param node:
|
:param node:
|
||||||
:return:
|
:return:
|
||||||
"""
|
"""
|
||||||
return self.get_node_instances_from_id(node.uuid)
|
return self.get_node_instances_by_uuid(node.uuid)
|
||||||
|
|
||||||
def get_node_instances_from_id(self, node_uuid):
|
def get_node_instances_by_uuid(self, node_uuid):
|
||||||
if str(node_uuid) in self.compute_node_mapping.keys():
|
if str(node_uuid) in self.compute_node_mapping.keys():
|
||||||
return self.compute_node_mapping[str(node_uuid)]
|
return self.compute_node_mapping[str(node_uuid)]
|
||||||
else:
|
else:
|
||||||
|
|||||||
@@ -14,6 +14,9 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
import collections
|
||||||
|
|
||||||
|
from lxml import etree
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from watcher._i18n import _
|
from watcher._i18n import _
|
||||||
@@ -74,9 +77,9 @@ class ModelRoot(object):
|
|||||||
:type node: str or :py:class:`~.Instance`
|
:type node: str or :py:class:`~.Instance`
|
||||||
"""
|
"""
|
||||||
if isinstance(instance, six.string_types):
|
if isinstance(instance, six.string_types):
|
||||||
instance = self.get_instance_from_id(instance)
|
instance = self.get_instance_by_uuid(instance)
|
||||||
if isinstance(node, six.string_types):
|
if isinstance(node, six.string_types):
|
||||||
node = self.get_node_from_id(node)
|
node = self.get_node_by_uuid(node)
|
||||||
|
|
||||||
self.add_instance(instance)
|
self.add_instance(instance)
|
||||||
self.mapping.map(node, instance)
|
self.mapping.map(node, instance)
|
||||||
@@ -90,17 +93,18 @@ class ModelRoot(object):
|
|||||||
:type node: str or :py:class:`~.Instance`
|
:type node: str or :py:class:`~.Instance`
|
||||||
"""
|
"""
|
||||||
if isinstance(instance, six.string_types):
|
if isinstance(instance, six.string_types):
|
||||||
instance = self.get_instance_from_id(instance)
|
instance = self.get_instance_by_uuid(instance)
|
||||||
if isinstance(node, six.string_types):
|
if isinstance(node, six.string_types):
|
||||||
node = self.get_node_from_id(node)
|
node = self.get_node_by_uuid(node)
|
||||||
|
|
||||||
self.add_instance(instance)
|
self.add_instance(instance)
|
||||||
self.mapping.unmap(node, instance)
|
self.mapping.unmap(node, instance)
|
||||||
|
|
||||||
def delete_instance(self, instance, node):
|
def delete_instance(self, instance, node=None):
|
||||||
self.remove_instance(instance)
|
if node is not None:
|
||||||
|
self.mapping.unmap(node, instance)
|
||||||
|
|
||||||
self.mapping.unmap(node, instance)
|
self.remove_instance(instance)
|
||||||
|
|
||||||
for resource in self.resource.values():
|
for resource in self.resource.values():
|
||||||
try:
|
try:
|
||||||
@@ -127,17 +131,17 @@ class ModelRoot(object):
|
|||||||
def get_all_compute_nodes(self):
|
def get_all_compute_nodes(self):
|
||||||
return self._nodes
|
return self._nodes
|
||||||
|
|
||||||
def get_node_from_id(self, node_uuid):
|
def get_node_by_uuid(self, node_uuid):
|
||||||
if str(node_uuid) not in self._nodes:
|
if str(node_uuid) not in self._nodes:
|
||||||
raise exception.ComputeNodeNotFound(name=node_uuid)
|
raise exception.ComputeNodeNotFound(name=node_uuid)
|
||||||
return self._nodes[str(node_uuid)]
|
return self._nodes[str(node_uuid)]
|
||||||
|
|
||||||
def get_instance_from_id(self, uuid):
|
def get_instance_by_uuid(self, uuid):
|
||||||
if str(uuid) not in self._instances:
|
if str(uuid) not in self._instances:
|
||||||
raise exception.InstanceNotFound(name=uuid)
|
raise exception.InstanceNotFound(name=uuid)
|
||||||
return self._instances[str(uuid)]
|
return self._instances[str(uuid)]
|
||||||
|
|
||||||
def get_node_from_instance_id(self, instance_uuid):
|
def get_node_by_instance_uuid(self, instance_uuid):
|
||||||
"""Getting host information from the guest instance
|
"""Getting host information from the guest instance
|
||||||
|
|
||||||
:param instance_uuid: the uuid of the instance
|
:param instance_uuid: the uuid of the instance
|
||||||
@@ -145,7 +149,7 @@ class ModelRoot(object):
|
|||||||
"""
|
"""
|
||||||
if str(instance_uuid) not in self.mapping.instance_mapping:
|
if str(instance_uuid) not in self.mapping.instance_mapping:
|
||||||
raise exception.InstanceNotFound(name=instance_uuid)
|
raise exception.InstanceNotFound(name=instance_uuid)
|
||||||
return self.get_node_from_id(
|
return self.get_node_by_uuid(
|
||||||
self.mapping.instance_mapping[str(instance_uuid)])
|
self.mapping.instance_mapping[str(instance_uuid)])
|
||||||
|
|
||||||
def get_all_instances(self):
|
def get_all_instances(self):
|
||||||
@@ -157,5 +161,115 @@ class ModelRoot(object):
|
|||||||
def create_resource(self, r):
|
def create_resource(self, r):
|
||||||
self.resource[str(r.name)] = r
|
self.resource[str(r.name)] = r
|
||||||
|
|
||||||
def get_resource_from_id(self, resource_id):
|
def get_resource_by_uuid(self, resource_id):
|
||||||
return self.resource[str(resource_id)]
|
return self.resource[str(resource_id)]
|
||||||
|
|
||||||
|
def get_node_instances(self, node):
|
||||||
|
return self.mapping.get_node_instances(node)
|
||||||
|
|
||||||
|
def _build_compute_node_element(self, compute_node):
|
||||||
|
attrib = collections.OrderedDict(
|
||||||
|
id=six.text_type(compute_node.id), uuid=compute_node.uuid,
|
||||||
|
human_id=compute_node.human_id, hostname=compute_node.hostname,
|
||||||
|
state=compute_node.state, status=compute_node.status)
|
||||||
|
|
||||||
|
for resource_name, resource in sorted(
|
||||||
|
self.resource.items(), key=lambda x: x[0]):
|
||||||
|
res_value = resource.get_capacity(compute_node)
|
||||||
|
if res_value is not None:
|
||||||
|
attrib[resource_name] = six.text_type(res_value)
|
||||||
|
|
||||||
|
compute_node_el = etree.Element("ComputeNode", attrib=attrib)
|
||||||
|
|
||||||
|
return compute_node_el
|
||||||
|
|
||||||
|
def _build_instance_element(self, instance):
|
||||||
|
attrib = collections.OrderedDict(
|
||||||
|
uuid=instance.uuid, human_id=instance.human_id,
|
||||||
|
hostname=instance.hostname, state=instance.state)
|
||||||
|
|
||||||
|
for resource_name, resource in sorted(
|
||||||
|
self.resource.items(), key=lambda x: x[0]):
|
||||||
|
res_value = resource.get_capacity(instance)
|
||||||
|
if res_value is not None:
|
||||||
|
attrib[resource_name] = six.text_type(res_value)
|
||||||
|
|
||||||
|
instance_el = etree.Element("Instance", attrib=attrib)
|
||||||
|
|
||||||
|
return instance_el
|
||||||
|
|
||||||
|
def to_string(self):
|
||||||
|
root = etree.Element("ModelRoot")
|
||||||
|
# Build compute node tree
|
||||||
|
for cn in sorted(self.get_all_compute_nodes().values(),
|
||||||
|
key=lambda cn: cn.uuid):
|
||||||
|
compute_node_el = self._build_compute_node_element(cn)
|
||||||
|
|
||||||
|
# Build mapped instance tree
|
||||||
|
node_instance_uuids = self.get_node_instances(cn)
|
||||||
|
for instance_uuid in sorted(node_instance_uuids):
|
||||||
|
instance = self.get_instance_by_uuid(instance_uuid)
|
||||||
|
instance_el = self._build_instance_element(instance)
|
||||||
|
compute_node_el.append(instance_el)
|
||||||
|
|
||||||
|
root.append(compute_node_el)
|
||||||
|
|
||||||
|
# Build unmapped instance tree (i.e. not assigned to any compute node)
|
||||||
|
for instance in sorted(self.get_all_instances().values(),
|
||||||
|
key=lambda inst: inst.uuid):
|
||||||
|
try:
|
||||||
|
self.get_node_by_instance_uuid(instance.uuid)
|
||||||
|
except exception.InstanceNotFound:
|
||||||
|
root.append(self._build_instance_element(instance))
|
||||||
|
|
||||||
|
return etree.tostring(root, pretty_print=True).decode('utf-8')
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_xml(cls, data):
|
||||||
|
model = cls()
|
||||||
|
root = etree.fromstring(data)
|
||||||
|
|
||||||
|
mem = element.Resource(element.ResourceType.memory)
|
||||||
|
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
||||||
|
disk = element.Resource(element.ResourceType.disk)
|
||||||
|
disk_capacity = element.Resource(element.ResourceType.disk_capacity)
|
||||||
|
model.create_resource(mem)
|
||||||
|
model.create_resource(num_cores)
|
||||||
|
model.create_resource(disk)
|
||||||
|
model.create_resource(disk_capacity)
|
||||||
|
|
||||||
|
for cn in root.findall('.//ComputeNode'):
|
||||||
|
node = element.ComputeNode(cn.get('id'))
|
||||||
|
node.uuid = cn.get('uuid')
|
||||||
|
node.hostname = cn.get('hostname')
|
||||||
|
# set capacity
|
||||||
|
mem.set_capacity(node, int(cn.get(str(mem.name))))
|
||||||
|
disk.set_capacity(node, int(cn.get(str(disk.name))))
|
||||||
|
disk_capacity.set_capacity(
|
||||||
|
node, int(cn.get(str(disk_capacity.name))))
|
||||||
|
num_cores.set_capacity(node, int(cn.get(str(num_cores.name))))
|
||||||
|
node.state = cn.get('state')
|
||||||
|
node.status = cn.get('status')
|
||||||
|
|
||||||
|
model.add_node(node)
|
||||||
|
|
||||||
|
for inst in root.findall('.//Instance'):
|
||||||
|
instance = element.Instance()
|
||||||
|
instance.uuid = inst.get('uuid')
|
||||||
|
instance.state = inst.get('state')
|
||||||
|
|
||||||
|
mem.set_capacity(instance, int(inst.get(str(mem.name))))
|
||||||
|
disk.set_capacity(instance, int(inst.get(str(disk.name))))
|
||||||
|
disk_capacity.set_capacity(
|
||||||
|
instance, int(inst.get(str(disk_capacity.name))))
|
||||||
|
num_cores.set_capacity(
|
||||||
|
instance, int(inst.get(str(num_cores.name))))
|
||||||
|
|
||||||
|
parent = inst.getparent()
|
||||||
|
if parent.tag == 'ComputeNode':
|
||||||
|
node = model.get_node_by_uuid(parent.get('uuid'))
|
||||||
|
model.map_instance(instance, node)
|
||||||
|
else:
|
||||||
|
model.add_instance(instance)
|
||||||
|
|
||||||
|
return model
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ from oslo_log import log
|
|||||||
|
|
||||||
from watcher._i18n import _LI
|
from watcher._i18n import _LI
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
|
from watcher.common import nova_helper
|
||||||
from watcher.decision_engine.model import element
|
from watcher.decision_engine.model import element
|
||||||
from watcher.decision_engine.model.notification import base
|
from watcher.decision_engine.model.notification import base
|
||||||
from watcher.decision_engine.model.notification import filtering
|
from watcher.decision_engine.model.notification import filtering
|
||||||
@@ -29,9 +30,19 @@ LOG = log.getLogger(__name__)
|
|||||||
|
|
||||||
class NovaNotification(base.NotificationEndpoint):
|
class NovaNotification(base.NotificationEndpoint):
|
||||||
|
|
||||||
|
def __init__(self, collector):
|
||||||
|
super(NovaNotification, self).__init__(collector)
|
||||||
|
self._nova = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def nova(self):
|
||||||
|
if self._nova is None:
|
||||||
|
self._nova = nova_helper.NovaHelper()
|
||||||
|
return self._nova
|
||||||
|
|
||||||
def get_or_create_instance(self, uuid):
|
def get_or_create_instance(self, uuid):
|
||||||
try:
|
try:
|
||||||
instance = self.cluster_data_model.get_instance_from_id(uuid)
|
instance = self.cluster_data_model.get_instance_by_uuid(uuid)
|
||||||
except exception.InstanceNotFound:
|
except exception.InstanceNotFound:
|
||||||
# The instance didn't exist yet so we create a new instance object
|
# The instance didn't exist yet so we create a new instance object
|
||||||
LOG.debug("New instance created: %s", uuid)
|
LOG.debug("New instance created: %s", uuid)
|
||||||
@@ -59,13 +70,20 @@ class NovaNotification(base.NotificationEndpoint):
|
|||||||
element.ResourceType.cpu_cores, instance, num_cores)
|
element.ResourceType.cpu_cores, instance, num_cores)
|
||||||
self.update_capacity(
|
self.update_capacity(
|
||||||
element.ResourceType.disk, instance, disk_gb)
|
element.ResourceType.disk, instance, disk_gb)
|
||||||
|
self.update_capacity(
|
||||||
|
element.ResourceType.disk_capacity, instance, disk_gb)
|
||||||
|
|
||||||
node = self.get_or_create_node(instance_data['host'])
|
try:
|
||||||
|
node = self.get_or_create_node(instance_data['host'])
|
||||||
|
except exception.ComputeNodeNotFound as exc:
|
||||||
|
LOG.exception(exc)
|
||||||
|
# If we can't create the node, we consider the instance as unmapped
|
||||||
|
node = None
|
||||||
|
|
||||||
self.update_instance_mapping(instance, node)
|
self.update_instance_mapping(instance, node)
|
||||||
|
|
||||||
def update_capacity(self, resource_id, obj, value):
|
def update_capacity(self, resource_id, obj, value):
|
||||||
resource = self.cluster_data_model.get_resource_from_id(resource_id)
|
resource = self.cluster_data_model.get_resource_by_uuid(resource_id)
|
||||||
resource.set_capacity(obj, value)
|
resource.set_capacity(obj, value)
|
||||||
|
|
||||||
def legacy_update_instance(self, instance, data):
|
def legacy_update_instance(self, instance, data):
|
||||||
@@ -82,34 +100,83 @@ class NovaNotification(base.NotificationEndpoint):
|
|||||||
element.ResourceType.cpu_cores, instance, num_cores)
|
element.ResourceType.cpu_cores, instance, num_cores)
|
||||||
self.update_capacity(
|
self.update_capacity(
|
||||||
element.ResourceType.disk, instance, disk_gb)
|
element.ResourceType.disk, instance, disk_gb)
|
||||||
|
self.update_capacity(
|
||||||
|
element.ResourceType.disk_capacity, instance, disk_gb)
|
||||||
|
|
||||||
node = self.get_or_create_node(data['host'])
|
try:
|
||||||
|
node = self.get_or_create_node(data['host'])
|
||||||
|
except exception.ComputeNodeNotFound as exc:
|
||||||
|
LOG.exception(exc)
|
||||||
|
# If we can't create the node, we consider the instance as unmapped
|
||||||
|
node = None
|
||||||
|
|
||||||
self.update_instance_mapping(instance, node)
|
self.update_instance_mapping(instance, node)
|
||||||
|
|
||||||
|
def update_compute_node(self, node, data):
|
||||||
|
"""Update the compute node using the notification data."""
|
||||||
|
node_data = data['nova_object.data']
|
||||||
|
node.hostname = node_data['host']
|
||||||
|
node.state = (
|
||||||
|
element.ServiceState.OFFLINE.value
|
||||||
|
if node_data['forced_down'] else element.ServiceState.ONLINE.value)
|
||||||
|
node.status = (
|
||||||
|
element.ServiceState.DISABLED.value
|
||||||
|
if node_data['host'] else element.ServiceState.ENABLED.value)
|
||||||
|
|
||||||
|
def create_compute_node(self, node_hostname):
|
||||||
|
"""Update the compute node by querying the Nova API."""
|
||||||
|
try:
|
||||||
|
_node = self.nova.get_compute_node_by_hostname(node_hostname)
|
||||||
|
node = element.ComputeNode(_node.id)
|
||||||
|
node.uuid = node_hostname
|
||||||
|
node.hostname = _node.hypervisor_hostname
|
||||||
|
node.state = _node.state
|
||||||
|
node.status = _node.status
|
||||||
|
|
||||||
|
self.update_capacity(
|
||||||
|
element.ResourceType.memory, node, _node.memory_mb)
|
||||||
|
self.update_capacity(
|
||||||
|
element.ResourceType.cpu_cores, node, _node.vcpus)
|
||||||
|
self.update_capacity(
|
||||||
|
element.ResourceType.disk, node, _node.free_disk_gb)
|
||||||
|
self.update_capacity(
|
||||||
|
element.ResourceType.disk_capacity, node, _node.local_gb)
|
||||||
|
return node
|
||||||
|
except Exception as exc:
|
||||||
|
LOG.exception(exc)
|
||||||
|
LOG.debug("Could not refresh the node %s.", node_hostname)
|
||||||
|
raise exception.ComputeNodeNotFound(name=node_hostname)
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
def get_or_create_node(self, uuid):
|
def get_or_create_node(self, uuid):
|
||||||
if uuid is None:
|
if uuid is None:
|
||||||
LOG.debug("Compute node UUID not provided: skipping")
|
LOG.debug("Compute node UUID not provided: skipping")
|
||||||
return
|
return
|
||||||
try:
|
try:
|
||||||
node = self.cluster_data_model.get_node_from_id(uuid)
|
return self.cluster_data_model.get_node_by_uuid(uuid)
|
||||||
except exception.ComputeNodeNotFound:
|
except exception.ComputeNodeNotFound:
|
||||||
# The node didn't exist yet so we create a new node object
|
# The node didn't exist yet so we create a new node object
|
||||||
|
node = self.create_compute_node(uuid)
|
||||||
LOG.debug("New compute node created: %s", uuid)
|
LOG.debug("New compute node created: %s", uuid)
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = uuid
|
|
||||||
|
|
||||||
self.cluster_data_model.add_node(node)
|
self.cluster_data_model.add_node(node)
|
||||||
|
return node
|
||||||
return node
|
|
||||||
|
|
||||||
def update_instance_mapping(self, instance, node):
|
def update_instance_mapping(self, instance, node):
|
||||||
if not node:
|
if node is None:
|
||||||
|
self.cluster_data_model.add_instance(instance)
|
||||||
LOG.debug("Instance %s not yet attached to any node: skipping",
|
LOG.debug("Instance %s not yet attached to any node: skipping",
|
||||||
instance.uuid)
|
instance.uuid)
|
||||||
return
|
return
|
||||||
try:
|
try:
|
||||||
old_node = self.get_or_create_node(node.uuid)
|
try:
|
||||||
|
old_node = self.get_or_create_node(node.uuid)
|
||||||
|
except exception.ComputeNodeNotFound as exc:
|
||||||
|
LOG.exception(exc)
|
||||||
|
# If we can't create the node,
|
||||||
|
# we consider the instance as unmapped
|
||||||
|
old_node = None
|
||||||
|
|
||||||
LOG.debug("Mapped node %s found", node.uuid)
|
LOG.debug("Mapped node %s found", node.uuid)
|
||||||
if node and node != old_node:
|
if node and node != old_node:
|
||||||
LOG.debug("Unmapping instance %s from %s",
|
LOG.debug("Unmapping instance %s from %s",
|
||||||
@@ -126,8 +193,7 @@ class NovaNotification(base.NotificationEndpoint):
|
|||||||
def delete_instance(self, instance, node):
|
def delete_instance(self, instance, node):
|
||||||
try:
|
try:
|
||||||
self.cluster_data_model.delete_instance(instance, node)
|
self.cluster_data_model.delete_instance(instance, node)
|
||||||
except Exception as exc:
|
except Exception:
|
||||||
LOG.exception(exc)
|
|
||||||
LOG.info(_LI("Instance %s already deleted"), instance.uuid)
|
LOG.info(_LI("Instance %s already deleted"), instance.uuid)
|
||||||
|
|
||||||
|
|
||||||
@@ -150,19 +216,18 @@ class ServiceUpdated(VersionnedNotificationEndpoint):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
|
LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
|
||||||
dict(event=event_type, publisher=publisher_id))
|
"with metadata %(metadata)s") %
|
||||||
|
dict(event=event_type,
|
||||||
|
publisher=publisher_id,
|
||||||
|
metadata=metadata))
|
||||||
node_data = payload['nova_object.data']
|
node_data = payload['nova_object.data']
|
||||||
node_uuid = node_data['host']
|
node_uuid = node_data['host']
|
||||||
node = self.get_or_create_node(node_uuid)
|
try:
|
||||||
|
node = self.get_or_create_node(node_uuid)
|
||||||
node.hostname = node_data['host']
|
self.update_compute_node(node, payload)
|
||||||
node.state = (
|
except exception.ComputeNodeNotFound as exc:
|
||||||
element.ServiceState.OFFLINE.value
|
LOG.exception(exc)
|
||||||
if node_data['forced_down'] else element.ServiceState.ONLINE.value)
|
|
||||||
node.status = (
|
|
||||||
element.ServiceState.DISABLED.value
|
|
||||||
if node_data['host'] else element.ServiceState.ENABLED.value)
|
|
||||||
|
|
||||||
|
|
||||||
class InstanceCreated(VersionnedNotificationEndpoint):
|
class InstanceCreated(VersionnedNotificationEndpoint):
|
||||||
@@ -192,8 +257,11 @@ class InstanceCreated(VersionnedNotificationEndpoint):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
|
LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
|
||||||
dict(event=event_type, publisher=publisher_id))
|
"with metadata %(metadata)s") %
|
||||||
|
dict(event=event_type,
|
||||||
|
publisher=publisher_id,
|
||||||
|
metadata=metadata))
|
||||||
instance_data = payload['nova_object.data']
|
instance_data = payload['nova_object.data']
|
||||||
|
|
||||||
instance_uuid = instance_data['uuid']
|
instance_uuid = instance_data['uuid']
|
||||||
@@ -221,8 +289,11 @@ class InstanceUpdated(VersionnedNotificationEndpoint):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
|
LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
|
||||||
dict(event=event_type, publisher=publisher_id))
|
"with metadata %(metadata)s") %
|
||||||
|
dict(event=event_type,
|
||||||
|
publisher=publisher_id,
|
||||||
|
metadata=metadata))
|
||||||
instance_data = payload['nova_object.data']
|
instance_data = payload['nova_object.data']
|
||||||
instance_uuid = instance_data['uuid']
|
instance_uuid = instance_data['uuid']
|
||||||
instance = self.get_or_create_instance(instance_uuid)
|
instance = self.get_or_create_instance(instance_uuid)
|
||||||
@@ -241,14 +312,22 @@ class InstanceDeletedEnd(VersionnedNotificationEndpoint):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
|
LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
|
||||||
dict(event=event_type, publisher=publisher_id))
|
"with metadata %(metadata)s") %
|
||||||
|
dict(event=event_type,
|
||||||
|
publisher=publisher_id,
|
||||||
|
metadata=metadata))
|
||||||
|
|
||||||
instance_data = payload['nova_object.data']
|
instance_data = payload['nova_object.data']
|
||||||
instance_uuid = instance_data['uuid']
|
instance_uuid = instance_data['uuid']
|
||||||
instance = self.get_or_create_instance(instance_uuid)
|
instance = self.get_or_create_instance(instance_uuid)
|
||||||
|
|
||||||
node = self.get_or_create_node(instance_data['host'])
|
try:
|
||||||
|
node = self.get_or_create_node(instance_data['host'])
|
||||||
|
except exception.ComputeNodeNotFound as exc:
|
||||||
|
LOG.exception(exc)
|
||||||
|
# If we can't create the node, we consider the instance as unmapped
|
||||||
|
node = None
|
||||||
|
|
||||||
self.delete_instance(instance, node)
|
self.delete_instance(instance, node)
|
||||||
|
|
||||||
@@ -264,8 +343,11 @@ class LegacyInstanceUpdated(UnversionnedNotificationEndpoint):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
|
LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
|
||||||
dict(event=event_type, publisher=publisher_id))
|
"with metadata %(metadata)s") %
|
||||||
|
dict(event=event_type,
|
||||||
|
publisher=publisher_id,
|
||||||
|
metadata=metadata))
|
||||||
|
|
||||||
instance_uuid = payload['instance_id']
|
instance_uuid = payload['instance_id']
|
||||||
instance = self.get_or_create_instance(instance_uuid)
|
instance = self.get_or_create_instance(instance_uuid)
|
||||||
@@ -284,8 +366,11 @@ class LegacyInstanceCreatedEnd(UnversionnedNotificationEndpoint):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
|
LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
|
||||||
dict(event=event_type, publisher=publisher_id))
|
"with metadata %(metadata)s") %
|
||||||
|
dict(event=event_type,
|
||||||
|
publisher=publisher_id,
|
||||||
|
metadata=metadata))
|
||||||
|
|
||||||
instance_uuid = payload['instance_id']
|
instance_uuid = payload['instance_id']
|
||||||
instance = self.get_or_create_instance(instance_uuid)
|
instance = self.get_or_create_instance(instance_uuid)
|
||||||
@@ -304,12 +389,20 @@ class LegacyInstanceDeletedEnd(UnversionnedNotificationEndpoint):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
|
LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
|
||||||
dict(event=event_type, publisher=publisher_id))
|
"with metadata %(metadata)s") %
|
||||||
|
dict(event=event_type,
|
||||||
|
publisher=publisher_id,
|
||||||
|
metadata=metadata))
|
||||||
instance_uuid = payload['instance_id']
|
instance_uuid = payload['instance_id']
|
||||||
instance = self.get_or_create_instance(instance_uuid)
|
instance = self.get_or_create_instance(instance_uuid)
|
||||||
|
|
||||||
node = self.get_or_create_node(payload['host'])
|
try:
|
||||||
|
node = self.get_or_create_node(payload['host'])
|
||||||
|
except exception.ComputeNodeNotFound as exc:
|
||||||
|
LOG.exception(exc)
|
||||||
|
# If we can't create the node, we consider the instance as unmapped
|
||||||
|
node = None
|
||||||
|
|
||||||
self.delete_instance(instance, node)
|
self.delete_instance(instance, node)
|
||||||
|
|
||||||
@@ -325,8 +418,11 @@ class LegacyLiveMigratedEnd(UnversionnedNotificationEndpoint):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
def info(self, ctxt, publisher_id, event_type, payload, metadata):
|
||||||
LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
|
LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
|
||||||
dict(event=event_type, publisher=publisher_id))
|
"with metadata %(metadata)s") %
|
||||||
|
dict(event=event_type,
|
||||||
|
publisher=publisher_id,
|
||||||
|
metadata=metadata))
|
||||||
|
|
||||||
instance_uuid = payload['instance_id']
|
instance_uuid = payload['instance_id']
|
||||||
instance = self.get_or_create_instance(instance_uuid)
|
instance = self.get_or_create_instance(instance_uuid)
|
||||||
|
|||||||
@@ -94,6 +94,7 @@ class DefaultPlanner(base.BasePlanner):
|
|||||||
if len(scheduled) == 0:
|
if len(scheduled) == 0:
|
||||||
LOG.warning(_LW("The action plan is empty"))
|
LOG.warning(_LW("The action plan is empty"))
|
||||||
action_plan.first_action_id = None
|
action_plan.first_action_id = None
|
||||||
|
action_plan.state = objects.action_plan.State.SUCCEEDED
|
||||||
action_plan.save()
|
action_plan.save()
|
||||||
else:
|
else:
|
||||||
# create the first action
|
# create the first action
|
||||||
|
|||||||
@@ -15,7 +15,6 @@
|
|||||||
# implied.
|
# implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
|
||||||
|
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
from oslo_serialization import jsonutils
|
from oslo_serialization import jsonutils
|
||||||
@@ -33,6 +32,7 @@ class DummyScorer(base.ScoringEngine):
|
|||||||
Typically a scoring engine would be implemented using machine learning
|
Typically a scoring engine would be implemented using machine learning
|
||||||
techniques. For example, for workload classification problem the solution
|
techniques. For example, for workload classification problem the solution
|
||||||
could consist of the following steps:
|
could consist of the following steps:
|
||||||
|
|
||||||
1. Define a problem to solve: we want to detect the workload on the
|
1. Define a problem to solve: we want to detect the workload on the
|
||||||
machine based on the collected metrics like power consumption,
|
machine based on the collected metrics like power consumption,
|
||||||
temperature, CPU load, memory usage, disk usage, network usage, etc.
|
temperature, CPU load, memory usage, disk usage, network usage, etc.
|
||||||
|
|||||||
@@ -81,8 +81,6 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
self.number_of_released_nodes = 0
|
self.number_of_released_nodes = 0
|
||||||
# set default value for the number of migrations
|
# set default value for the number of migrations
|
||||||
self.number_of_migrations = 0
|
self.number_of_migrations = 0
|
||||||
# set default value for number of allowed migration attempts
|
|
||||||
self.migration_attempts = 0
|
|
||||||
|
|
||||||
# set default value for the efficacy
|
# set default value for the efficacy
|
||||||
self.efficacy = 100
|
self.efficacy = 100
|
||||||
@@ -94,21 +92,14 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
self.threshold_disk = 1
|
self.threshold_disk = 1
|
||||||
self.threshold_cores = 1
|
self.threshold_cores = 1
|
||||||
|
|
||||||
# TODO(jed): target efficacy
|
|
||||||
self.target_efficacy = 60
|
|
||||||
|
|
||||||
# TODO(jed): weight
|
|
||||||
self.weight_cpu = 1
|
|
||||||
self.weight_mem = 1
|
|
||||||
self.weight_disk = 1
|
|
||||||
|
|
||||||
# TODO(jed): bound migration attempts (80 %)
|
|
||||||
self.bound_migration = 0.80
|
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_name(cls):
|
def get_name(cls):
|
||||||
return "basic"
|
return "basic"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def migration_attempts(self):
|
||||||
|
return self.input_parameters.get('migration_attempts', 0)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_display_name(cls):
|
def get_display_name(cls):
|
||||||
return _("Basic offline consolidation")
|
return _("Basic offline consolidation")
|
||||||
@@ -117,6 +108,22 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
def get_translatable_display_name(cls):
|
def get_translatable_display_name(cls):
|
||||||
return "Basic offline consolidation"
|
return "Basic offline consolidation"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_schema(cls):
|
||||||
|
# Mandatory default setting for each element
|
||||||
|
return {
|
||||||
|
"properties": {
|
||||||
|
"migration_attempts": {
|
||||||
|
"description": "Maximum number of combinations to be "
|
||||||
|
"tried by the strategy while searching "
|
||||||
|
"for potential candidates. To remove the "
|
||||||
|
"limit, set it to 0 (by default)",
|
||||||
|
"type": "number",
|
||||||
|
"default": 0
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def ceilometer(self):
|
def ceilometer(self):
|
||||||
if self._ceilometer is None:
|
if self._ceilometer is None:
|
||||||
@@ -127,13 +134,6 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
def ceilometer(self, ceilometer):
|
def ceilometer(self, ceilometer):
|
||||||
self._ceilometer = ceilometer
|
self._ceilometer = ceilometer
|
||||||
|
|
||||||
def compute_attempts(self, size_cluster):
|
|
||||||
"""Upper bound of the number of migration
|
|
||||||
|
|
||||||
:param size_cluster: The size of the cluster
|
|
||||||
"""
|
|
||||||
self.migration_attempts = size_cluster * self.bound_migration
|
|
||||||
|
|
||||||
def check_migration(self, source_node, destination_node,
|
def check_migration(self, source_node, destination_node,
|
||||||
instance_to_migrate):
|
instance_to_migrate):
|
||||||
"""Check if the migration is possible
|
"""Check if the migration is possible
|
||||||
@@ -152,16 +152,16 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
total_cores = 0
|
total_cores = 0
|
||||||
total_disk = 0
|
total_disk = 0
|
||||||
total_mem = 0
|
total_mem = 0
|
||||||
cpu_capacity = self.compute_model.get_resource_from_id(
|
cpu_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
disk_capacity = self.compute_model.get_resource_from_id(
|
disk_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk)
|
element.ResourceType.disk)
|
||||||
memory_capacity = self.compute_model.get_resource_from_id(
|
memory_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory)
|
element.ResourceType.memory)
|
||||||
|
|
||||||
for instance_id in self.compute_model. \
|
for instance_id in self.compute_model.mapping.get_node_instances(
|
||||||
get_mapping().get_node_instances(destination_node):
|
destination_node):
|
||||||
instance = self.compute_model.get_instance_from_id(instance_id)
|
instance = self.compute_model.get_instance_by_uuid(instance_id)
|
||||||
total_cores += cpu_capacity.get_capacity(instance)
|
total_cores += cpu_capacity.get_capacity(instance)
|
||||||
total_disk += disk_capacity.get_capacity(instance)
|
total_disk += disk_capacity.get_capacity(instance)
|
||||||
total_mem += memory_capacity.get_capacity(instance)
|
total_mem += memory_capacity.get_capacity(instance)
|
||||||
@@ -188,27 +188,17 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
:param total_mem: total memory used by the virtual machine
|
:param total_mem: total memory used by the virtual machine
|
||||||
:return: True if the threshold is not exceed
|
:return: True if the threshold is not exceed
|
||||||
"""
|
"""
|
||||||
cpu_capacity = self.compute_model.get_resource_from_id(
|
cpu_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores).get_capacity(destination_node)
|
element.ResourceType.cpu_cores).get_capacity(destination_node)
|
||||||
disk_capacity = self.compute_model.get_resource_from_id(
|
disk_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk).get_capacity(destination_node)
|
element.ResourceType.disk).get_capacity(destination_node)
|
||||||
memory_capacity = self.compute_model.get_resource_from_id(
|
memory_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory).get_capacity(destination_node)
|
element.ResourceType.memory).get_capacity(destination_node)
|
||||||
|
|
||||||
return (cpu_capacity >= total_cores * self.threshold_cores and
|
return (cpu_capacity >= total_cores * self.threshold_cores and
|
||||||
disk_capacity >= total_disk * self.threshold_disk and
|
disk_capacity >= total_disk * self.threshold_disk and
|
||||||
memory_capacity >= total_mem * self.threshold_mem)
|
memory_capacity >= total_mem * self.threshold_mem)
|
||||||
|
|
||||||
def get_allowed_migration_attempts(self):
|
|
||||||
"""Allowed migration
|
|
||||||
|
|
||||||
Maximum allowed number of migrations this allows us to fix
|
|
||||||
the upper bound of the number of migrations.
|
|
||||||
|
|
||||||
:return:
|
|
||||||
"""
|
|
||||||
return self.migration_attempts
|
|
||||||
|
|
||||||
def calculate_weight(self, compute_resource, total_cores_used,
|
def calculate_weight(self, compute_resource, total_cores_used,
|
||||||
total_disk_used, total_memory_used):
|
total_disk_used, total_memory_used):
|
||||||
"""Calculate weight of every resource
|
"""Calculate weight of every resource
|
||||||
@@ -219,13 +209,13 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
:param total_memory_used:
|
:param total_memory_used:
|
||||||
:return:
|
:return:
|
||||||
"""
|
"""
|
||||||
cpu_capacity = self.compute_model.get_resource_from_id(
|
cpu_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores).get_capacity(compute_resource)
|
element.ResourceType.cpu_cores).get_capacity(compute_resource)
|
||||||
|
|
||||||
disk_capacity = self.compute_model.get_resource_from_id(
|
disk_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk).get_capacity(compute_resource)
|
element.ResourceType.disk).get_capacity(compute_resource)
|
||||||
|
|
||||||
memory_capacity = self.compute_model.get_resource_from_id(
|
memory_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory).get_capacity(compute_resource)
|
element.ResourceType.memory).get_capacity(compute_resource)
|
||||||
|
|
||||||
score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
|
score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
|
||||||
@@ -252,11 +242,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
:rtype: float
|
:rtype: float
|
||||||
"""
|
"""
|
||||||
resource_id = "%s_%s" % (node.uuid, node.hostname)
|
resource_id = "%s_%s" % (node.uuid, node.hostname)
|
||||||
host_avg_cpu_util = self.ceilometer. \
|
host_avg_cpu_util = self.ceilometer.statistic_aggregation(
|
||||||
statistic_aggregation(resource_id=resource_id,
|
resource_id=resource_id,
|
||||||
meter_name=self.HOST_CPU_USAGE_METRIC_NAME,
|
meter_name=self.HOST_CPU_USAGE_METRIC_NAME,
|
||||||
period="7200",
|
period="7200",
|
||||||
aggregate='avg')
|
aggregate='avg')
|
||||||
|
|
||||||
if host_avg_cpu_util is None:
|
if host_avg_cpu_util is None:
|
||||||
LOG.error(
|
LOG.error(
|
||||||
@@ -266,10 +256,10 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
metric_name=self.HOST_CPU_USAGE_METRIC_NAME))
|
metric_name=self.HOST_CPU_USAGE_METRIC_NAME))
|
||||||
host_avg_cpu_util = 100
|
host_avg_cpu_util = 100
|
||||||
|
|
||||||
cpu_capacity = self.compute_model.get_resource_from_id(
|
cpu_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores).get_capacity(node)
|
element.ResourceType.cpu_cores).get_capacity(node)
|
||||||
|
|
||||||
total_cores_used = cpu_capacity * (host_avg_cpu_util / 100)
|
total_cores_used = cpu_capacity * (host_avg_cpu_util / 100.0)
|
||||||
|
|
||||||
return self.calculate_weight(node, total_cores_used, 0, 0)
|
return self.calculate_weight(node, total_cores_used, 0, 0)
|
||||||
|
|
||||||
@@ -306,7 +296,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
metric_name=self.INSTANCE_CPU_USAGE_METRIC_NAME))
|
metric_name=self.INSTANCE_CPU_USAGE_METRIC_NAME))
|
||||||
instance_cpu_utilization = 100
|
instance_cpu_utilization = 100
|
||||||
|
|
||||||
cpu_capacity = self.compute_model.get_resource_from_id(
|
cpu_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores).get_capacity(instance)
|
element.ResourceType.cpu_cores).get_capacity(instance)
|
||||||
|
|
||||||
total_cores_used = cpu_capacity * (instance_cpu_utilization / 100.0)
|
total_cores_used = cpu_capacity * (instance_cpu_utilization / 100.0)
|
||||||
@@ -331,11 +321,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
resource_id=resource_id,
|
resource_id=resource_id,
|
||||||
input_parameters=parameters)
|
input_parameters=parameters)
|
||||||
|
|
||||||
def score_of_nodes(self, score):
|
def compute_score_of_nodes(self):
|
||||||
"""Calculate score of nodes based on load by VMs"""
|
"""Calculate score of nodes based on load by VMs"""
|
||||||
|
score = []
|
||||||
for node in self.compute_model.get_all_compute_nodes().values():
|
for node in self.compute_model.get_all_compute_nodes().values():
|
||||||
count = self.compute_model.mapping.get_node_instances_from_id(
|
count = self.compute_model.mapping.get_node_instances(node)
|
||||||
node.uuid)
|
|
||||||
if len(count) > 0:
|
if len(count) > 0:
|
||||||
result = self.calculate_score_node(node)
|
result = self.calculate_score_node(node)
|
||||||
else:
|
else:
|
||||||
@@ -345,16 +335,15 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
score.append((node.uuid, result))
|
score.append((node.uuid, result))
|
||||||
return score
|
return score
|
||||||
|
|
||||||
def node_and_instance_score(self, sorted_score, score):
|
def node_and_instance_score(self, sorted_scores):
|
||||||
"""Get List of VMs from node"""
|
"""Get List of VMs from node"""
|
||||||
node_to_release = sorted_score[len(score) - 1][0]
|
node_to_release = sorted_scores[len(sorted_scores) - 1][0]
|
||||||
instances_to_migrate = (
|
instances_to_migrate = self.compute_model.mapping.get_node_instances(
|
||||||
self.compute_model.mapping.get_node_instances_from_id(
|
self.compute_model.get_node_by_uuid(node_to_release))
|
||||||
node_to_release))
|
|
||||||
|
|
||||||
instance_score = []
|
instance_score = []
|
||||||
for instance_id in instances_to_migrate:
|
for instance_id in instances_to_migrate:
|
||||||
instance = self.compute_model.get_instance_from_id(instance_id)
|
instance = self.compute_model.get_instance_by_uuid(instance_id)
|
||||||
if instance.state == element.InstanceState.ACTIVE.value:
|
if instance.state == element.InstanceState.ACTIVE.value:
|
||||||
instance_score.append(
|
instance_score.append(
|
||||||
(instance_id, self.calculate_score_instance(instance)))
|
(instance_id, self.calculate_score_instance(instance)))
|
||||||
@@ -370,7 +359,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
mig_source_node.uuid,
|
mig_source_node.uuid,
|
||||||
mig_destination_node.uuid)
|
mig_destination_node.uuid)
|
||||||
|
|
||||||
if len(self.compute_model.get_mapping().get_node_instances(
|
if len(self.compute_model.mapping.get_node_instances(
|
||||||
mig_source_node)) == 0:
|
mig_source_node)) == 0:
|
||||||
self.add_change_service_state(mig_source_node.
|
self.add_change_service_state(mig_source_node.
|
||||||
uuid,
|
uuid,
|
||||||
@@ -382,11 +371,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
number_migrations = 0
|
number_migrations = 0
|
||||||
for instance in sorted_instances:
|
for instance in sorted_instances:
|
||||||
for j in range(0, len(sorted_score)):
|
for j in range(0, len(sorted_score)):
|
||||||
mig_instance = self.compute_model.get_instance_from_id(
|
mig_instance = self.compute_model.get_instance_by_uuid(
|
||||||
instance[0])
|
instance[0])
|
||||||
mig_source_node = self.compute_model.get_node_from_id(
|
mig_source_node = self.compute_model.get_node_by_uuid(
|
||||||
node_to_release)
|
node_to_release)
|
||||||
mig_destination_node = self.compute_model.get_node_from_id(
|
mig_destination_node = self.compute_model.get_node_by_uuid(
|
||||||
sorted_score[j][0])
|
sorted_score[j][0])
|
||||||
|
|
||||||
result = self.check_migration(
|
result = self.check_migration(
|
||||||
@@ -411,65 +400,61 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
if not self.compute_model:
|
if not self.compute_model:
|
||||||
raise exception.ClusterStateNotDefined()
|
raise exception.ClusterStateNotDefined()
|
||||||
|
|
||||||
def do_execute(self):
|
if len(self.compute_model.get_all_compute_nodes()) == 0:
|
||||||
# todo(jed) clone model
|
|
||||||
self.efficacy = 100
|
|
||||||
unsuccessful_migration = 0
|
|
||||||
|
|
||||||
first_migration = True
|
|
||||||
size_cluster = len(self.compute_model.get_all_compute_nodes())
|
|
||||||
if size_cluster == 0:
|
|
||||||
raise exception.ClusterEmpty()
|
raise exception.ClusterEmpty()
|
||||||
|
|
||||||
self.compute_attempts(size_cluster)
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|
||||||
|
def do_execute(self):
|
||||||
|
unsuccessful_migration = 0
|
||||||
|
|
||||||
for node_uuid, node in self.compute_model.get_all_compute_nodes(
|
for node_uuid, node in self.compute_model.get_all_compute_nodes(
|
||||||
).items():
|
).items():
|
||||||
node_instances = (self.compute_model.mapping
|
node_instances = self.compute_model.mapping.get_node_instances(
|
||||||
.get_node_instances_from_id(node_uuid))
|
node)
|
||||||
if node_instances:
|
if node_instances:
|
||||||
if node.state == element.ServiceState.ENABLED:
|
if node.state == element.ServiceState.ENABLED:
|
||||||
self.add_change_service_state(
|
self.add_change_service_state(
|
||||||
node_uuid, element.ServiceState.DISABLED.value)
|
node_uuid, element.ServiceState.DISABLED.value)
|
||||||
|
|
||||||
while self.get_allowed_migration_attempts() >= unsuccessful_migration:
|
scores = self.compute_score_of_nodes()
|
||||||
if not first_migration:
|
# Sort compute nodes by Score decreasing
|
||||||
self.efficacy = self.calculate_migration_efficacy()
|
sorted_scores = sorted(scores, reverse=True, key=lambda x: (x[1]))
|
||||||
if self.efficacy < float(self.target_efficacy):
|
LOG.debug("Compute node(s) BFD %s", sorted_scores)
|
||||||
break
|
# Get Node to be released
|
||||||
first_migration = False
|
if len(scores) == 0:
|
||||||
score = []
|
LOG.warning(_LW(
|
||||||
|
"The workloads of the compute nodes"
|
||||||
score = self.score_of_nodes(score)
|
" of the cluster is zero"))
|
||||||
|
return
|
||||||
# Sort compute nodes by Score decreasing
|
|
||||||
sorted_score = sorted(score, reverse=True, key=lambda x: (x[1]))
|
|
||||||
LOG.debug("Compute node(s) BFD %s", sorted_score)
|
|
||||||
|
|
||||||
# Get Node to be released
|
|
||||||
if len(score) == 0:
|
|
||||||
LOG.warning(_LW(
|
|
||||||
"The workloads of the compute nodes"
|
|
||||||
" of the cluster is zero"))
|
|
||||||
break
|
|
||||||
|
|
||||||
|
while sorted_scores and (
|
||||||
|
not self.migration_attempts or
|
||||||
|
self.migration_attempts >= unsuccessful_migration):
|
||||||
node_to_release, instance_score = self.node_and_instance_score(
|
node_to_release, instance_score = self.node_and_instance_score(
|
||||||
sorted_score, score)
|
sorted_scores)
|
||||||
|
|
||||||
# Sort instances by Score
|
# Sort instances by Score
|
||||||
sorted_instances = sorted(
|
sorted_instances = sorted(
|
||||||
instance_score, reverse=True, key=lambda x: (x[1]))
|
instance_score, reverse=True, key=lambda x: (x[1]))
|
||||||
# BFD: Best Fit Decrease
|
# BFD: Best Fit Decrease
|
||||||
LOG.debug("VM(s) BFD %s", sorted_instances)
|
LOG.debug("Instance(s) BFD %s", sorted_instances)
|
||||||
|
|
||||||
migrations = self.calculate_num_migrations(
|
migrations = self.calculate_num_migrations(
|
||||||
sorted_instances, node_to_release, sorted_score)
|
sorted_instances, node_to_release, sorted_scores)
|
||||||
|
|
||||||
unsuccessful_migration = self.unsuccessful_migration_actualization(
|
unsuccessful_migration = self.unsuccessful_migration_actualization(
|
||||||
migrations, unsuccessful_migration)
|
migrations, unsuccessful_migration)
|
||||||
|
|
||||||
|
if not migrations:
|
||||||
|
# We don't have any possible migrations to perform on this node
|
||||||
|
# so we discard the node so we can try to migrate instances
|
||||||
|
# from the next one in the list
|
||||||
|
sorted_scores.pop()
|
||||||
|
|
||||||
infos = {
|
infos = {
|
||||||
"number_of_migrations": self.number_of_migrations,
|
"released_compute_nodes_count": self.number_of_released_nodes,
|
||||||
"number_of_nodes_released": self.number_of_released_nodes,
|
"instance_migrations_count": self.number_of_migrations,
|
||||||
"efficacy": self.efficacy
|
"efficacy": self.efficacy
|
||||||
}
|
}
|
||||||
LOG.debug(infos)
|
LOG.debug(infos)
|
||||||
@@ -479,3 +464,4 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
released_compute_nodes_count=self.number_of_released_nodes,
|
released_compute_nodes_count=self.number_of_released_nodes,
|
||||||
instance_migrations_count=self.number_of_migrations,
|
instance_migrations_count=self.number_of_migrations,
|
||||||
)
|
)
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|||||||
@@ -130,7 +130,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
|||||||
disk_gb_used = 0
|
disk_gb_used = 0
|
||||||
if len(instances) > 0:
|
if len(instances) > 0:
|
||||||
for instance_id in instances:
|
for instance_id in instances:
|
||||||
instance = self.compute_model.get_instance_from_id(instance_id)
|
instance = self.compute_model.get_instance_by_uuid(instance_id)
|
||||||
vcpus_used += cpu_capacity.get_capacity(instance)
|
vcpus_used += cpu_capacity.get_capacity(instance)
|
||||||
memory_mb_used += memory_capacity.get_capacity(instance)
|
memory_mb_used += memory_capacity.get_capacity(instance)
|
||||||
disk_gb_used += disk_capacity.get_capacity(instance)
|
disk_gb_used += disk_capacity.get_capacity(instance)
|
||||||
@@ -147,7 +147,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
|||||||
hosts_need_release = []
|
hosts_need_release = []
|
||||||
hosts_target = []
|
hosts_target = []
|
||||||
for node_id in nodes:
|
for node_id in nodes:
|
||||||
node = self.compute_model.get_node_from_id(
|
node = self.compute_model.get_node_by_uuid(
|
||||||
node_id)
|
node_id)
|
||||||
resource_id = node.uuid
|
resource_id = node.uuid
|
||||||
|
|
||||||
@@ -180,7 +180,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
|||||||
for instance_id in instances_of_src:
|
for instance_id in instances_of_src:
|
||||||
try:
|
try:
|
||||||
# select the first active instance to migrate
|
# select the first active instance to migrate
|
||||||
instance = self.compute_model.get_instance_from_id(
|
instance = self.compute_model.get_instance_by_uuid(
|
||||||
instance_id)
|
instance_id)
|
||||||
if (instance.state !=
|
if (instance.state !=
|
||||||
element.InstanceState.ACTIVE.value):
|
element.InstanceState.ACTIVE.value):
|
||||||
@@ -196,11 +196,11 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
|||||||
|
|
||||||
def filter_dest_servers(self, hosts, instance_to_migrate):
|
def filter_dest_servers(self, hosts, instance_to_migrate):
|
||||||
"""Only return hosts with sufficient available resources"""
|
"""Only return hosts with sufficient available resources"""
|
||||||
cpu_capacity = self.compute_model.get_resource_from_id(
|
cpu_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
disk_capacity = self.compute_model.get_resource_from_id(
|
disk_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk)
|
element.ResourceType.disk)
|
||||||
memory_capacity = self.compute_model.get_resource_from_id(
|
memory_capacity = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory)
|
element.ResourceType.memory)
|
||||||
|
|
||||||
required_cores = cpu_capacity.get_capacity(instance_to_migrate)
|
required_cores = cpu_capacity.get_capacity(instance_to_migrate)
|
||||||
@@ -230,6 +230,8 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
|||||||
if not self.compute_model:
|
if not self.compute_model:
|
||||||
raise wexc.ClusterStateNotDefined()
|
raise wexc.ClusterStateNotDefined()
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|
||||||
def do_execute(self):
|
def do_execute(self):
|
||||||
# the migration plan will be triggered when the outlet temperature
|
# the migration plan will be triggered when the outlet temperature
|
||||||
# reaches threshold
|
# reaches threshold
|
||||||
@@ -284,3 +286,5 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
|||||||
def post_execute(self):
|
def post_execute(self):
|
||||||
self.solution.model = self.compute_model
|
self.solution.model = self.compute_model
|
||||||
# TODO(v-francoise): Add the indicators to the solution
|
# TODO(v-francoise): Add the indicators to the solution
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|||||||
@@ -144,7 +144,7 @@ class UniformAirflow(base.BaseStrategy):
|
|||||||
memory_mb_used = 0
|
memory_mb_used = 0
|
||||||
disk_gb_used = 0
|
disk_gb_used = 0
|
||||||
for instance_id in instances:
|
for instance_id in instances:
|
||||||
instance = self.compute_model.get_instance_from_id(
|
instance = self.compute_model.get_instance_by_uuid(
|
||||||
instance_id)
|
instance_id)
|
||||||
vcpus_used += cap_cores.get_capacity(instance)
|
vcpus_used += cap_cores.get_capacity(instance)
|
||||||
memory_mb_used += cap_mem.get_capacity(instance)
|
memory_mb_used += cap_mem.get_capacity(instance)
|
||||||
@@ -179,7 +179,7 @@ class UniformAirflow(base.BaseStrategy):
|
|||||||
for instance_id in source_instances:
|
for instance_id in source_instances:
|
||||||
try:
|
try:
|
||||||
instance = (self.compute_model.
|
instance = (self.compute_model.
|
||||||
get_instance_from_id(instance_id))
|
get_instance_by_uuid(instance_id))
|
||||||
instances_tobe_migrate.append(instance)
|
instances_tobe_migrate.append(instance)
|
||||||
except wexc.InstanceNotFound:
|
except wexc.InstanceNotFound:
|
||||||
LOG.error(_LE("Instance not found; error: %s"),
|
LOG.error(_LE("Instance not found; error: %s"),
|
||||||
@@ -190,7 +190,7 @@ class UniformAirflow(base.BaseStrategy):
|
|||||||
for instance_id in source_instances:
|
for instance_id in source_instances:
|
||||||
try:
|
try:
|
||||||
instance = (self.compute_model.
|
instance = (self.compute_model.
|
||||||
get_instance_from_id(instance_id))
|
get_instance_by_uuid(instance_id))
|
||||||
if (instance.state !=
|
if (instance.state !=
|
||||||
element.InstanceState.ACTIVE.value):
|
element.InstanceState.ACTIVE.value):
|
||||||
LOG.info(
|
LOG.info(
|
||||||
@@ -209,11 +209,11 @@ class UniformAirflow(base.BaseStrategy):
|
|||||||
def filter_destination_hosts(self, hosts, instances_to_migrate):
|
def filter_destination_hosts(self, hosts, instances_to_migrate):
|
||||||
"""Find instance and host with sufficient available resources"""
|
"""Find instance and host with sufficient available resources"""
|
||||||
|
|
||||||
cap_cores = self.compute_model.get_resource_from_id(
|
cap_cores = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
cap_disk = self.compute_model.get_resource_from_id(
|
cap_disk = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk)
|
element.ResourceType.disk)
|
||||||
cap_mem = self.compute_model.get_resource_from_id(
|
cap_mem = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory)
|
element.ResourceType.memory)
|
||||||
# large instance go first
|
# large instance go first
|
||||||
instances_to_migrate = sorted(
|
instances_to_migrate = sorted(
|
||||||
@@ -265,7 +265,7 @@ class UniformAirflow(base.BaseStrategy):
|
|||||||
overload_hosts = []
|
overload_hosts = []
|
||||||
nonoverload_hosts = []
|
nonoverload_hosts = []
|
||||||
for node_id in nodes:
|
for node_id in nodes:
|
||||||
node = self.compute_model.get_node_from_id(
|
node = self.compute_model.get_node_by_uuid(
|
||||||
node_id)
|
node_id)
|
||||||
resource_id = node.uuid
|
resource_id = node.uuid
|
||||||
airflow = self.ceilometer.statistic_aggregation(
|
airflow = self.ceilometer.statistic_aggregation(
|
||||||
@@ -293,6 +293,8 @@ class UniformAirflow(base.BaseStrategy):
|
|||||||
if not self.compute_model:
|
if not self.compute_model:
|
||||||
raise wexc.ClusterStateNotDefined()
|
raise wexc.ClusterStateNotDefined()
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|
||||||
def do_execute(self):
|
def do_execute(self):
|
||||||
self.threshold_airflow = self.input_parameters.threshold_airflow
|
self.threshold_airflow = self.input_parameters.threshold_airflow
|
||||||
self.threshold_inlet_t = self.input_parameters.threshold_inlet_t
|
self.threshold_inlet_t = self.input_parameters.threshold_inlet_t
|
||||||
@@ -345,3 +347,5 @@ class UniformAirflow(base.BaseStrategy):
|
|||||||
def post_execute(self):
|
def post_execute(self):
|
||||||
self.solution.model = self.compute_model
|
self.solution.model = self.compute_model
|
||||||
# TODO(v-francoise): Add the indicators to the solution
|
# TODO(v-francoise): Add the indicators to the solution
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|||||||
@@ -162,7 +162,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
:param model: model_root object
|
:param model: model_root object
|
||||||
:return: None
|
:return: None
|
||||||
"""
|
"""
|
||||||
instance = model.get_instance_from_id(instance_uuid)
|
instance = model.get_instance_by_uuid(instance_uuid)
|
||||||
|
|
||||||
instance_state_str = self.get_state_str(instance.state)
|
instance_state_str = self.get_state_str(instance.state)
|
||||||
if instance_state_str != element.InstanceState.ACTIVE.value:
|
if instance_state_str != element.InstanceState.ACTIVE.value:
|
||||||
@@ -226,9 +226,9 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
instance_cpu_util = self.ceilometer.statistic_aggregation(
|
instance_cpu_util = self.ceilometer.statistic_aggregation(
|
||||||
resource_id=instance_uuid, meter_name=cpu_util_metric,
|
resource_id=instance_uuid, meter_name=cpu_util_metric,
|
||||||
period=period, aggregate=aggr)
|
period=period, aggregate=aggr)
|
||||||
instance_cpu_cores = model.get_resource_from_id(
|
instance_cpu_cores = model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores).get_capacity(
|
element.ResourceType.cpu_cores).get_capacity(
|
||||||
model.get_instance_from_id(instance_uuid))
|
model.get_instance_by_uuid(instance_uuid))
|
||||||
|
|
||||||
if instance_cpu_util:
|
if instance_cpu_util:
|
||||||
total_cpu_utilization = (
|
total_cpu_utilization = (
|
||||||
@@ -271,7 +271,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
:param aggr: string
|
:param aggr: string
|
||||||
:return: dict(cpu(number of cores used), ram(MB used), disk(B used))
|
:return: dict(cpu(number of cores used), ram(MB used), disk(B used))
|
||||||
"""
|
"""
|
||||||
node_instances = model.mapping.get_node_instances_from_id(
|
node_instances = model.mapping.get_node_instances_by_uuid(
|
||||||
node.uuid)
|
node.uuid)
|
||||||
node_ram_util = 0
|
node_ram_util = 0
|
||||||
node_disk_util = 0
|
node_disk_util = 0
|
||||||
@@ -293,13 +293,13 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
:param model: model_root object
|
:param model: model_root object
|
||||||
:return: dict(cpu(cores), ram(MB), disk(B))
|
:return: dict(cpu(cores), ram(MB), disk(B))
|
||||||
"""
|
"""
|
||||||
node_cpu_capacity = model.get_resource_from_id(
|
node_cpu_capacity = model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores).get_capacity(node)
|
element.ResourceType.cpu_cores).get_capacity(node)
|
||||||
|
|
||||||
node_disk_capacity = model.get_resource_from_id(
|
node_disk_capacity = model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk_capacity).get_capacity(node)
|
element.ResourceType.disk_capacity).get_capacity(node)
|
||||||
|
|
||||||
node_ram_capacity = model.get_resource_from_id(
|
node_ram_capacity = model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory).get_capacity(node)
|
element.ResourceType.memory).get_capacity(node)
|
||||||
return dict(cpu=node_cpu_capacity, ram=node_ram_capacity,
|
return dict(cpu=node_cpu_capacity, ram=node_ram_capacity,
|
||||||
disk=node_disk_capacity)
|
disk=node_disk_capacity)
|
||||||
@@ -498,6 +498,8 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
if not self.compute_model:
|
if not self.compute_model:
|
||||||
raise exception.ClusterStateNotDefined()
|
raise exception.ClusterStateNotDefined()
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|
||||||
def do_execute(self):
|
def do_execute(self):
|
||||||
"""Execute strategy.
|
"""Execute strategy.
|
||||||
|
|
||||||
@@ -548,3 +550,5 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
|
|||||||
released_compute_nodes_count=self.number_of_migrations,
|
released_compute_nodes_count=self.number_of_migrations,
|
||||||
instance_migrations_count=self.number_of_released_nodes,
|
instance_migrations_count=self.number_of_released_nodes,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|||||||
@@ -122,7 +122,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||||||
memory_mb_used = 0
|
memory_mb_used = 0
|
||||||
disk_gb_used = 0
|
disk_gb_used = 0
|
||||||
for instance_id in instances:
|
for instance_id in instances:
|
||||||
instance = self.compute_model.get_instance_from_id(instance_id)
|
instance = self.compute_model.get_instance_by_uuid(instance_id)
|
||||||
vcpus_used += cap_cores.get_capacity(instance)
|
vcpus_used += cap_cores.get_capacity(instance)
|
||||||
memory_mb_used += cap_mem.get_capacity(instance)
|
memory_mb_used += cap_mem.get_capacity(instance)
|
||||||
disk_gb_used += cap_disk.get_capacity(instance)
|
disk_gb_used += cap_disk.get_capacity(instance)
|
||||||
@@ -147,7 +147,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||||||
for inst_id in source_instances:
|
for inst_id in source_instances:
|
||||||
try:
|
try:
|
||||||
# select the first active VM to migrate
|
# select the first active VM to migrate
|
||||||
instance = self.compute_model.get_instance_from_id(
|
instance = self.compute_model.get_instance_by_uuid(
|
||||||
inst_id)
|
inst_id)
|
||||||
if (instance.state !=
|
if (instance.state !=
|
||||||
element.InstanceState.ACTIVE.value):
|
element.InstanceState.ACTIVE.value):
|
||||||
@@ -164,7 +164,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||||||
instance_id)
|
instance_id)
|
||||||
if instance_id:
|
if instance_id:
|
||||||
return (source_node,
|
return (source_node,
|
||||||
self.compute_model.get_instance_from_id(
|
self.compute_model.get_instance_by_uuid(
|
||||||
instance_id))
|
instance_id))
|
||||||
else:
|
else:
|
||||||
LOG.info(_LI("VM not found from node: %s"),
|
LOG.info(_LI("VM not found from node: %s"),
|
||||||
@@ -174,11 +174,11 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||||||
avg_workload, workload_cache):
|
avg_workload, workload_cache):
|
||||||
'''Only return hosts with sufficient available resources'''
|
'''Only return hosts with sufficient available resources'''
|
||||||
|
|
||||||
cap_cores = self.compute_model.get_resource_from_id(
|
cap_cores = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
cap_disk = self.compute_model.get_resource_from_id(
|
cap_disk = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk)
|
element.ResourceType.disk)
|
||||||
cap_mem = self.compute_model.get_resource_from_id(
|
cap_mem = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory)
|
element.ResourceType.memory)
|
||||||
|
|
||||||
required_cores = cap_cores.get_capacity(instance_to_migrate)
|
required_cores = cap_cores.get_capacity(instance_to_migrate)
|
||||||
@@ -222,7 +222,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||||||
if not nodes:
|
if not nodes:
|
||||||
raise wexc.ClusterEmpty()
|
raise wexc.ClusterEmpty()
|
||||||
# get cpu cores capacity of nodes and instances
|
# get cpu cores capacity of nodes and instances
|
||||||
cap_cores = self.compute_model.get_resource_from_id(
|
cap_cores = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
overload_hosts = []
|
overload_hosts = []
|
||||||
nonoverload_hosts = []
|
nonoverload_hosts = []
|
||||||
@@ -232,12 +232,12 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||||||
# use workload_cache to store the workload of VMs for reuse purpose
|
# use workload_cache to store the workload of VMs for reuse purpose
|
||||||
workload_cache = {}
|
workload_cache = {}
|
||||||
for node_id in nodes:
|
for node_id in nodes:
|
||||||
node = self.compute_model.get_node_from_id(
|
node = self.compute_model.get_node_by_uuid(
|
||||||
node_id)
|
node_id)
|
||||||
instances = self.compute_model.mapping.get_node_instances(node)
|
instances = self.compute_model.mapping.get_node_instances(node)
|
||||||
node_workload = 0.0
|
node_workload = 0.0
|
||||||
for instance_id in instances:
|
for instance_id in instances:
|
||||||
instance = self.compute_model.get_instance_from_id(instance_id)
|
instance = self.compute_model.get_instance_by_uuid(instance_id)
|
||||||
try:
|
try:
|
||||||
cpu_util = self.ceilometer.statistic_aggregation(
|
cpu_util = self.ceilometer.statistic_aggregation(
|
||||||
resource_id=instance_id,
|
resource_id=instance_id,
|
||||||
@@ -283,6 +283,8 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||||||
if not self.compute_model:
|
if not self.compute_model:
|
||||||
raise wexc.ClusterStateNotDefined()
|
raise wexc.ClusterStateNotDefined()
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|
||||||
def do_execute(self):
|
def do_execute(self):
|
||||||
"""Strategy execution phase
|
"""Strategy execution phase
|
||||||
|
|
||||||
@@ -344,3 +346,5 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||||||
This can be used to compute the global efficacy
|
This can be used to compute the global efficacy
|
||||||
"""
|
"""
|
||||||
self.solution.model = self.compute_model
|
self.solution.model = self.compute_model
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|||||||
@@ -17,7 +17,7 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
from copy import deepcopy
|
import copy
|
||||||
import itertools
|
import itertools
|
||||||
import math
|
import math
|
||||||
import random
|
import random
|
||||||
@@ -34,40 +34,8 @@ from watcher.decision_engine.model import element
|
|||||||
from watcher.decision_engine.strategy.strategies import base
|
from watcher.decision_engine.strategy.strategies import base
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
metrics = ['cpu_util', 'memory.resident']
|
|
||||||
thresholds_dict = {'cpu_util': 0.2, 'memory.resident': 0.2}
|
|
||||||
weights_dict = {'cpu_util_weight': 1.0, 'memory.resident_weight': 1.0}
|
|
||||||
instance_host_measures = {'cpu_util': 'hardware.cpu.util',
|
|
||||||
'memory.resident': 'hardware.memory.used'}
|
|
||||||
|
|
||||||
ws_opts = [
|
|
||||||
cfg.ListOpt('metrics',
|
|
||||||
default=metrics,
|
|
||||||
required=True,
|
|
||||||
help='Metrics used as rates of cluster loads.'),
|
|
||||||
cfg.DictOpt('thresholds',
|
|
||||||
default=thresholds_dict,
|
|
||||||
help=''),
|
|
||||||
cfg.DictOpt('weights',
|
|
||||||
default=weights_dict,
|
|
||||||
help='These weights used to calculate '
|
|
||||||
'common standard deviation. Name of weight '
|
|
||||||
'contains meter name and _weight suffix.'),
|
|
||||||
cfg.StrOpt('host_choice',
|
|
||||||
default='retry',
|
|
||||||
required=True,
|
|
||||||
help="Method of host's choice."),
|
|
||||||
cfg.IntOpt('retry_count',
|
|
||||||
default=1,
|
|
||||||
required=True,
|
|
||||||
help='Count of random returned hosts.'),
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
CONF.register_opts(ws_opts, 'watcher_strategies.workload_stabilization')
|
|
||||||
|
|
||||||
|
|
||||||
def _set_memoize(conf):
|
def _set_memoize(conf):
|
||||||
oslo_cache.configure(conf)
|
oslo_cache.configure(conf)
|
||||||
@@ -111,14 +79,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
super(WorkloadStabilization, self).__init__(config, osc)
|
super(WorkloadStabilization, self).__init__(config, osc)
|
||||||
self._ceilometer = None
|
self._ceilometer = None
|
||||||
self._nova = None
|
self._nova = None
|
||||||
self.weights = CONF['watcher_strategies.workload_stabilization']\
|
self.weights = None
|
||||||
.weights
|
self.metrics = None
|
||||||
self.metrics = CONF['watcher_strategies.workload_stabilization']\
|
self.thresholds = None
|
||||||
.metrics
|
self.host_choice = None
|
||||||
self.thresholds = CONF['watcher_strategies.workload_stabilization']\
|
self.instance_metrics = None
|
||||||
.thresholds
|
self.retry_count = None
|
||||||
self.host_choice = CONF['watcher_strategies.workload_stabilization']\
|
|
||||||
.host_choice
|
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_name(cls):
|
def get_name(cls):
|
||||||
@@ -132,6 +98,55 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
def get_translatable_display_name(cls):
|
def get_translatable_display_name(cls):
|
||||||
return "Workload stabilization"
|
return "Workload stabilization"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_schema(cls):
|
||||||
|
return {
|
||||||
|
"properties": {
|
||||||
|
"metrics": {
|
||||||
|
"description": "Metrics used as rates of cluster loads.",
|
||||||
|
"type": "array",
|
||||||
|
"default": ["cpu_util", "memory.resident"]
|
||||||
|
},
|
||||||
|
"thresholds": {
|
||||||
|
"description": "Dict where key is a metric and value "
|
||||||
|
"is a trigger value.",
|
||||||
|
"type": "object",
|
||||||
|
"default": {"cpu_util": 0.2, "memory.resident": 0.2}
|
||||||
|
},
|
||||||
|
"weights": {
|
||||||
|
"description": "These weights used to calculate "
|
||||||
|
"common standard deviation. Name of weight"
|
||||||
|
" contains meter name and _weight suffix.",
|
||||||
|
"type": "object",
|
||||||
|
"default": {"cpu_util_weight": 1.0,
|
||||||
|
"memory.resident_weight": 1.0}
|
||||||
|
},
|
||||||
|
"instance_metrics": {
|
||||||
|
"description": "Mapping to get hardware statistics using"
|
||||||
|
" instance metrics",
|
||||||
|
"type": "object",
|
||||||
|
"default": {"cpu_util": "hardware.cpu.util",
|
||||||
|
"memory.resident": "hardware.memory.used"}
|
||||||
|
},
|
||||||
|
"host_choice": {
|
||||||
|
"description": "Method of host's choice. There are cycle,"
|
||||||
|
" retry and fullsearch methods. "
|
||||||
|
"Cycle will iterate hosts in cycle. "
|
||||||
|
"Retry will get some hosts random "
|
||||||
|
"(count defined in retry_count option). "
|
||||||
|
"Fullsearch will return each host "
|
||||||
|
"from list.",
|
||||||
|
"type": "string",
|
||||||
|
"default": "retry"
|
||||||
|
},
|
||||||
|
"retry_count": {
|
||||||
|
"description": "Count of random returned hosts",
|
||||||
|
"type": "number",
|
||||||
|
"default": 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def ceilometer(self):
|
def ceilometer(self):
|
||||||
if self._ceilometer is None:
|
if self._ceilometer is None:
|
||||||
@@ -172,9 +187,9 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
:return: dict
|
:return: dict
|
||||||
"""
|
"""
|
||||||
LOG.debug('get_instance_load started')
|
LOG.debug('get_instance_load started')
|
||||||
instance_vcpus = self.compute_model.get_resource_from_id(
|
instance_vcpus = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores).get_capacity(
|
element.ResourceType.cpu_cores).get_capacity(
|
||||||
self.compute_model.get_instance_from_id(instance_uuid))
|
self.compute_model.get_instance_by_uuid(instance_uuid))
|
||||||
instance_load = {'uuid': instance_uuid, 'vcpus': instance_vcpus}
|
instance_load = {'uuid': instance_uuid, 'vcpus': instance_vcpus}
|
||||||
for meter in self.metrics:
|
for meter in self.metrics:
|
||||||
avg_meter = self.ceilometer.statistic_aggregation(
|
avg_meter = self.ceilometer.statistic_aggregation(
|
||||||
@@ -190,12 +205,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
return instance_load
|
return instance_load
|
||||||
|
|
||||||
def normalize_hosts_load(self, hosts):
|
def normalize_hosts_load(self, hosts):
|
||||||
normalized_hosts = deepcopy(hosts)
|
normalized_hosts = copy.deepcopy(hosts)
|
||||||
for host in normalized_hosts:
|
for host in normalized_hosts:
|
||||||
if 'memory.resident' in normalized_hosts[host]:
|
if 'memory.resident' in normalized_hosts[host]:
|
||||||
h_memory = self.compute_model.get_resource_from_id(
|
h_memory = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory).get_capacity(
|
element.ResourceType.memory).get_capacity(
|
||||||
self.compute_model.get_node_from_id(host))
|
self.compute_model.get_node_by_uuid(host))
|
||||||
normalized_hosts[host]['memory.resident'] /= float(h_memory)
|
normalized_hosts[host]['memory.resident'] /= float(h_memory)
|
||||||
|
|
||||||
return normalized_hosts
|
return normalized_hosts
|
||||||
@@ -205,21 +220,21 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
hosts_load = {}
|
hosts_load = {}
|
||||||
for node_id in self.compute_model.get_all_compute_nodes():
|
for node_id in self.compute_model.get_all_compute_nodes():
|
||||||
hosts_load[node_id] = {}
|
hosts_load[node_id] = {}
|
||||||
host_vcpus = self.compute_model.get_resource_from_id(
|
host_vcpus = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores).get_capacity(
|
element.ResourceType.cpu_cores).get_capacity(
|
||||||
self.compute_model.get_node_from_id(node_id))
|
self.compute_model.get_node_by_uuid(node_id))
|
||||||
hosts_load[node_id]['vcpus'] = host_vcpus
|
hosts_load[node_id]['vcpus'] = host_vcpus
|
||||||
|
|
||||||
for metric in self.metrics:
|
for metric in self.metrics:
|
||||||
avg_meter = self.ceilometer.statistic_aggregation(
|
avg_meter = self.ceilometer.statistic_aggregation(
|
||||||
resource_id=node_id,
|
resource_id=node_id,
|
||||||
meter_name=instance_host_measures[metric],
|
meter_name=self.instance_metrics[metric],
|
||||||
period="60",
|
period="60",
|
||||||
aggregate='avg'
|
aggregate='avg'
|
||||||
)
|
)
|
||||||
if avg_meter is None:
|
if avg_meter is None:
|
||||||
raise exception.NoSuchMetricForHost(
|
raise exception.NoSuchMetricForHost(
|
||||||
metric=instance_host_measures[metric],
|
metric=self.instance_metrics[metric],
|
||||||
host=node_id)
|
host=node_id)
|
||||||
hosts_load[node_id][metric] = avg_meter
|
hosts_load[node_id][metric] = avg_meter
|
||||||
return hosts_load
|
return hosts_load
|
||||||
@@ -263,7 +278,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
:return: list of standard deviation values
|
:return: list of standard deviation values
|
||||||
"""
|
"""
|
||||||
migration_case = []
|
migration_case = []
|
||||||
new_hosts = deepcopy(hosts)
|
new_hosts = copy.deepcopy(hosts)
|
||||||
instance_load = self.get_instance_load(instance_id)
|
instance_load = self.get_instance_load(instance_id)
|
||||||
d_host_vcpus = new_hosts[dst_node_id]['vcpus']
|
d_host_vcpus = new_hosts[dst_node_id]['vcpus']
|
||||||
s_host_vcpus = new_hosts[src_node_id]['vcpus']
|
s_host_vcpus = new_hosts[src_node_id]['vcpus']
|
||||||
@@ -287,27 +302,27 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
def simulate_migrations(self, hosts):
|
def simulate_migrations(self, hosts):
|
||||||
"""Make sorted list of pairs instance:dst_host"""
|
"""Make sorted list of pairs instance:dst_host"""
|
||||||
def yield_nodes(nodes):
|
def yield_nodes(nodes):
|
||||||
ct = CONF['watcher_strategies.workload_stabilization'].retry_count
|
|
||||||
if self.host_choice == 'cycle':
|
if self.host_choice == 'cycle':
|
||||||
for i in itertools.cycle(nodes):
|
for i in itertools.cycle(nodes):
|
||||||
yield [i]
|
yield [i]
|
||||||
if self.host_choice == 'retry':
|
if self.host_choice == 'retry':
|
||||||
while True:
|
while True:
|
||||||
yield random.sample(nodes, ct)
|
yield random.sample(nodes, self.retry_count)
|
||||||
if self.host_choice == 'fullsearch':
|
if self.host_choice == 'fullsearch':
|
||||||
while True:
|
while True:
|
||||||
yield nodes
|
yield nodes
|
||||||
|
|
||||||
instance_host_map = []
|
instance_host_map = []
|
||||||
for source_hp_id in self.compute_model.get_all_compute_nodes():
|
nodes = list(self.compute_model.get_all_compute_nodes())
|
||||||
nodes = list(self.compute_model.get_all_compute_nodes())
|
for source_hp_id in nodes:
|
||||||
nodes.remove(source_hp_id)
|
c_nodes = copy.copy(nodes)
|
||||||
node_list = yield_nodes(nodes)
|
c_nodes.remove(source_hp_id)
|
||||||
|
node_list = yield_nodes(c_nodes)
|
||||||
instances_id = self.compute_model.get_mapping(). \
|
instances_id = self.compute_model.get_mapping(). \
|
||||||
get_node_instances_from_id(source_hp_id)
|
get_node_instances_by_uuid(source_hp_id)
|
||||||
for instance_id in instances_id:
|
for instance_id in instances_id:
|
||||||
min_sd_case = {'value': len(self.metrics)}
|
min_sd_case = {'value': len(self.metrics)}
|
||||||
instance = self.compute_model.get_instance_from_id(instance_id)
|
instance = self.compute_model.get_instance_by_uuid(instance_id)
|
||||||
if instance.state not in [element.InstanceState.ACTIVE.value,
|
if instance.state not in [element.InstanceState.ACTIVE.value,
|
||||||
element.InstanceState.PAUSED.value]:
|
element.InstanceState.PAUSED.value]:
|
||||||
continue
|
continue
|
||||||
@@ -323,7 +338,6 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
'host': dst_node_id, 'value': weighted_sd,
|
'host': dst_node_id, 'value': weighted_sd,
|
||||||
's_host': source_hp_id, 'instance': instance_id}
|
's_host': source_hp_id, 'instance': instance_id}
|
||||||
instance_host_map.append(min_sd_case)
|
instance_host_map.append(min_sd_case)
|
||||||
break
|
|
||||||
return sorted(instance_host_map, key=lambda x: x['value'])
|
return sorted(instance_host_map, key=lambda x: x['value'])
|
||||||
|
|
||||||
def check_threshold(self):
|
def check_threshold(self):
|
||||||
@@ -349,7 +363,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
|
|
||||||
def create_migration_instance(self, mig_instance, mig_source_node,
|
def create_migration_instance(self, mig_instance, mig_source_node,
|
||||||
mig_destination_node):
|
mig_destination_node):
|
||||||
"""Create migration VM """
|
"""Create migration VM"""
|
||||||
if self.compute_model.migrate_instance(
|
if self.compute_model.migrate_instance(
|
||||||
mig_instance, mig_source_node, mig_destination_node):
|
mig_instance, mig_source_node, mig_destination_node):
|
||||||
self.add_migration(mig_instance.uuid, 'live',
|
self.add_migration(mig_instance.uuid, 'live',
|
||||||
@@ -357,10 +371,10 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
mig_destination_node.uuid)
|
mig_destination_node.uuid)
|
||||||
|
|
||||||
def migrate(self, instance_uuid, src_host, dst_host):
|
def migrate(self, instance_uuid, src_host, dst_host):
|
||||||
mig_instance = self.compute_model.get_instance_from_id(instance_uuid)
|
mig_instance = self.compute_model.get_instance_by_uuid(instance_uuid)
|
||||||
mig_source_node = self.compute_model.get_node_from_id(
|
mig_source_node = self.compute_model.get_node_by_uuid(
|
||||||
src_host)
|
src_host)
|
||||||
mig_destination_node = self.compute_model.get_node_from_id(
|
mig_destination_node = self.compute_model.get_node_by_uuid(
|
||||||
dst_host)
|
dst_host)
|
||||||
self.create_migration_instance(mig_instance, mig_source_node,
|
self.create_migration_instance(mig_instance, mig_source_node,
|
||||||
mig_destination_node)
|
mig_destination_node)
|
||||||
@@ -375,6 +389,13 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
if not self.compute_model:
|
if not self.compute_model:
|
||||||
raise exception.ClusterStateNotDefined()
|
raise exception.ClusterStateNotDefined()
|
||||||
|
|
||||||
|
self.weights = self.input_parameters.weights
|
||||||
|
self.metrics = self.input_parameters.metrics
|
||||||
|
self.thresholds = self.input_parameters.thresholds
|
||||||
|
self.host_choice = self.input_parameters.host_choice
|
||||||
|
self.instance_metrics = self.input_parameters.instance_metrics
|
||||||
|
self.retry_count = self.input_parameters.retry_count
|
||||||
|
|
||||||
def do_execute(self):
|
def do_execute(self):
|
||||||
migration = self.check_threshold()
|
migration = self.check_threshold()
|
||||||
if migration:
|
if migration:
|
||||||
@@ -382,13 +403,13 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
min_sd = 1
|
min_sd = 1
|
||||||
balanced = False
|
balanced = False
|
||||||
for instance_host in migration:
|
for instance_host in migration:
|
||||||
dst_hp_disk = self.compute_model.get_resource_from_id(
|
dst_hp_disk = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk).get_capacity(
|
element.ResourceType.disk).get_capacity(
|
||||||
self.compute_model.get_node_from_id(
|
self.compute_model.get_node_by_uuid(
|
||||||
instance_host['host']))
|
instance_host['host']))
|
||||||
instance_disk = self.compute_model.get_resource_from_id(
|
instance_disk = self.compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk).get_capacity(
|
element.ResourceType.disk).get_capacity(
|
||||||
self.compute_model.get_instance_from_id(
|
self.compute_model.get_instance_by_uuid(
|
||||||
instance_host['instance']))
|
instance_host['instance']))
|
||||||
if instance_disk > dst_hp_disk:
|
if instance_disk > dst_hp_disk:
|
||||||
continue
|
continue
|
||||||
@@ -416,3 +437,5 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
This can be used to compute the global efficacy
|
This can be used to compute the global efficacy
|
||||||
"""
|
"""
|
||||||
self.fill_solution()
|
self.fill_solution()
|
||||||
|
|
||||||
|
LOG.debug(self.compute_model.to_string())
|
||||||
|
|||||||
@@ -185,7 +185,7 @@ class Action(base.WatcherObject):
|
|||||||
for field in self.fields:
|
for field in self.fields:
|
||||||
if (hasattr(self, base.get_attrname(field)) and
|
if (hasattr(self, base.get_attrname(field)) and
|
||||||
self[field] != current[field]):
|
self[field] != current[field]):
|
||||||
self[field] = current[field]
|
self[field] = current[field]
|
||||||
|
|
||||||
def soft_delete(self, context=None):
|
def soft_delete(self, context=None):
|
||||||
"""soft Delete the Audit from the DB.
|
"""soft Delete the Audit from the DB.
|
||||||
|
|||||||
@@ -251,7 +251,7 @@ class ActionPlan(base.WatcherObject):
|
|||||||
for field in self.fields:
|
for field in self.fields:
|
||||||
if (hasattr(self, base.get_attrname(field)) and
|
if (hasattr(self, base.get_attrname(field)) and
|
||||||
self[field] != current[field]):
|
self[field] != current[field]):
|
||||||
self[field] = current[field]
|
self[field] = current[field]
|
||||||
|
|
||||||
def soft_delete(self, context=None):
|
def soft_delete(self, context=None):
|
||||||
"""Soft Delete the Action plan from the DB.
|
"""Soft Delete the Action plan from the DB.
|
||||||
|
|||||||
@@ -252,7 +252,7 @@ class Audit(base.WatcherObject):
|
|||||||
for field in self.fields:
|
for field in self.fields:
|
||||||
if (hasattr(self, base.get_attrname(field)) and
|
if (hasattr(self, base.get_attrname(field)) and
|
||||||
self[field] != current[field]):
|
self[field] != current[field]):
|
||||||
self[field] = current[field]
|
self[field] = current[field]
|
||||||
|
|
||||||
def soft_delete(self, context=None):
|
def soft_delete(self, context=None):
|
||||||
"""soft Delete the Audit from the DB.
|
"""soft Delete the Audit from the DB.
|
||||||
|
|||||||
@@ -258,7 +258,7 @@ class AuditTemplate(base.WatcherObject):
|
|||||||
for field in self.fields:
|
for field in self.fields:
|
||||||
if (hasattr(self, base.get_attrname(field)) and
|
if (hasattr(self, base.get_attrname(field)) and
|
||||||
self[field] != current[field]):
|
self[field] != current[field]):
|
||||||
self[field] = current[field]
|
self[field] = current[field]
|
||||||
|
|
||||||
def soft_delete(self, context=None):
|
def soft_delete(self, context=None):
|
||||||
"""soft Delete the :class:`AuditTemplate` from the DB.
|
"""soft Delete the :class:`AuditTemplate` from the DB.
|
||||||
|
|||||||
@@ -182,7 +182,7 @@ class EfficacyIndicator(base.WatcherObject):
|
|||||||
for field in self.fields:
|
for field in self.fields:
|
||||||
if (hasattr(self, base.get_attrname(field)) and
|
if (hasattr(self, base.get_attrname(field)) and
|
||||||
self[field] != current[field]):
|
self[field] != current[field]):
|
||||||
self[field] = current[field]
|
self[field] = current[field]
|
||||||
|
|
||||||
def soft_delete(self, context=None):
|
def soft_delete(self, context=None):
|
||||||
"""Soft Delete the efficacy indicator from the DB.
|
"""Soft Delete the efficacy indicator from the DB.
|
||||||
|
|||||||
@@ -1,17 +1,18 @@
|
|||||||
# Copyright 2014
|
# -*- encoding: utf-8 -*-
|
||||||
# The Cloudscaling Group, Inc.
|
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
# use this file except in compliance with the License. You may obtain a copy
|
# not use this file except in compliance with the License. You may obtain
|
||||||
# of the License at
|
# a copy of the License at
|
||||||
#
|
#
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
#
|
#
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# See the License for the specific language governing permissions and
|
# License for the specific language governing permissions and limitations
|
||||||
# limitations under the License.
|
# under the License.
|
||||||
|
|
||||||
|
"""Tests for the Pecan API hooks."""
|
||||||
|
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
@@ -19,70 +20,107 @@ import mock
|
|||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
import oslo_messaging as messaging
|
import oslo_messaging as messaging
|
||||||
from oslo_serialization import jsonutils
|
from oslo_serialization import jsonutils
|
||||||
|
import six
|
||||||
|
from six.moves import http_client
|
||||||
|
|
||||||
from watcher.api.controllers import root
|
from watcher.api.controllers import root
|
||||||
from watcher.api import hooks
|
from watcher.api import hooks
|
||||||
from watcher.common import context as watcher_context
|
from watcher.common import context
|
||||||
from watcher.tests.api import base as api_base
|
from watcher.tests.api import base
|
||||||
from watcher.tests import base
|
|
||||||
from watcher.tests import fakes
|
|
||||||
|
|
||||||
|
|
||||||
class TestContextHook(base.BaseTestCase):
|
class FakeRequest(object):
|
||||||
|
def __init__(self, headers, context, environ):
|
||||||
def setUp(self):
|
self.headers = headers
|
||||||
super(TestContextHook, self).setUp()
|
self.context = context
|
||||||
self.app = fakes.FakeApp()
|
self.environ = environ or {}
|
||||||
|
self.version = (1, 0)
|
||||||
def test_context_hook_before_method(self):
|
self.host_url = 'http://127.0.0.1:6385'
|
||||||
state = mock.Mock(request=fakes.FakePecanRequest())
|
|
||||||
hook = hooks.ContextHook()
|
|
||||||
hook.before(state)
|
|
||||||
ctx = state.request.context
|
|
||||||
self.assertIsInstance(ctx, watcher_context.RequestContext)
|
|
||||||
self.assertEqual(ctx.auth_token,
|
|
||||||
fakes.fakeAuthTokenHeaders['X-Auth-Token'])
|
|
||||||
self.assertEqual(ctx.project_id,
|
|
||||||
fakes.fakeAuthTokenHeaders['X-Project-Id'])
|
|
||||||
self.assertEqual(ctx.user_id,
|
|
||||||
fakes.fakeAuthTokenHeaders['X-User-Id'])
|
|
||||||
self.assertEqual(ctx.auth_url,
|
|
||||||
fakes.fakeAuthTokenHeaders['X-Auth-Url'])
|
|
||||||
self.assertEqual(ctx.domain_name,
|
|
||||||
fakes.fakeAuthTokenHeaders['X-User-Domain-Name'])
|
|
||||||
self.assertEqual(ctx.domain_id,
|
|
||||||
fakes.fakeAuthTokenHeaders['X-User-Domain-Id'])
|
|
||||||
self.assertIsNone(ctx.auth_token_info)
|
|
||||||
|
|
||||||
def test_context_hook_before_method_auth_info(self):
|
|
||||||
state = mock.Mock(request=fakes.FakePecanRequest())
|
|
||||||
state.request.environ['keystone.token_info'] = 'assert_this'
|
|
||||||
hook = hooks.ContextHook()
|
|
||||||
hook.before(state)
|
|
||||||
ctx = state.request.context
|
|
||||||
self.assertIsInstance(ctx, watcher_context.RequestContext)
|
|
||||||
self.assertEqual(fakes.fakeAuthTokenHeaders['X-Auth-Token'],
|
|
||||||
ctx.auth_token)
|
|
||||||
self.assertEqual('assert_this', ctx.auth_token_info)
|
|
||||||
|
|
||||||
|
|
||||||
class TestNoExceptionTracebackHook(api_base.FunctionalTest):
|
class FakeRequestState(object):
|
||||||
|
def __init__(self, headers=None, context=None, environ=None):
|
||||||
|
self.request = FakeRequest(headers, context, environ)
|
||||||
|
self.response = FakeRequest(headers, context, environ)
|
||||||
|
|
||||||
TRACE = [
|
def set_context(self):
|
||||||
'Traceback (most recent call last):',
|
headers = self.request.headers
|
||||||
' File "/opt/stack/watcher/watcher/openstack/common/rpc/amqp.py",'
|
creds = {
|
||||||
' line 434, in _process_data\\n **args)',
|
'user': headers.get('X-User') or headers.get('X-User-Id'),
|
||||||
' File "/opt/stack/watcher/watcher/openstack/common/rpc/'
|
'domain_id': headers.get('X-User-Domain-Id'),
|
||||||
'dispatcher.py", line 172, in dispatch\\n result ='
|
'domain_name': headers.get('X-User-Domain-Name'),
|
||||||
' getattr(proxyobj, method)(context, **kwargs)']
|
'auth_token': headers.get('X-Auth-Token'),
|
||||||
|
'roles': headers.get('X-Roles', '').split(','),
|
||||||
|
}
|
||||||
|
is_admin = ('admin' in creds['roles'] or
|
||||||
|
'administrator' in creds['roles'])
|
||||||
|
is_public_api = self.request.environ.get('is_public_api', False)
|
||||||
|
|
||||||
|
self.request.context = context.RequestContext(
|
||||||
|
is_admin=is_admin, is_public_api=is_public_api, **creds)
|
||||||
|
|
||||||
|
|
||||||
|
def fake_headers(admin=False):
|
||||||
|
headers = {
|
||||||
|
'X-Auth-Token': '8d9f235ca7464dd7ba46f81515797ea0',
|
||||||
|
'X-Domain-Id': 'None',
|
||||||
|
'X-Domain-Name': 'None',
|
||||||
|
'X-Project-Domain-Id': 'default',
|
||||||
|
'X-Project-Domain-Name': 'Default',
|
||||||
|
'X-Role': '_member_,admin',
|
||||||
|
'X-Roles': '_member_,admin',
|
||||||
|
# 'X-Tenant': 'foo',
|
||||||
|
# 'X-Tenant-Id': 'b4efa69d4ffa4973863f2eefc094f7f8',
|
||||||
|
# 'X-Tenant-Name': 'foo',
|
||||||
|
'X-User': 'foo',
|
||||||
|
'X-User-Domain-Id': 'default',
|
||||||
|
'X-User-Domain-Name': 'Default',
|
||||||
|
'X-User-Id': '604ab2a197c442c2a84aba66708a9e1e',
|
||||||
|
'X-User-Name': 'foo',
|
||||||
|
}
|
||||||
|
if admin:
|
||||||
|
headers.update({
|
||||||
|
'X-Project-Name': 'admin',
|
||||||
|
'X-Role': '_member_,admin',
|
||||||
|
'X-Roles': '_member_,admin',
|
||||||
|
'X-Tenant': 'admin',
|
||||||
|
# 'X-Tenant-Name': 'admin',
|
||||||
|
# 'X-Tenant': 'admin'
|
||||||
|
'X-Tenant-Name': 'admin',
|
||||||
|
'X-Tenant-Id': 'c2a3a69d456a412376efdd9dac38',
|
||||||
|
'X-Project-Name': 'admin',
|
||||||
|
'X-Project-Id': 'c2a3a69d456a412376efdd9dac38',
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
headers.update({
|
||||||
|
'X-Role': '_member_',
|
||||||
|
'X-Roles': '_member_',
|
||||||
|
'X-Tenant': 'foo',
|
||||||
|
'X-Tenant-Name': 'foo',
|
||||||
|
'X-Tenant-Id': 'b4efa69d,4ffa4973863f2eefc094f7f8',
|
||||||
|
'X-Project-Name': 'foo',
|
||||||
|
'X-Project-Id': 'b4efa69d4ffa4973863f2eefc094f7f8',
|
||||||
|
})
|
||||||
|
return headers
|
||||||
|
|
||||||
|
|
||||||
|
class TestNoExceptionTracebackHook(base.FunctionalTest):
|
||||||
|
|
||||||
|
TRACE = ['Traceback (most recent call last):',
|
||||||
|
' File "/opt/stack/watcher/watcher/common/rpc/amqp.py",'
|
||||||
|
' line 434, in _process_data\\n **args)',
|
||||||
|
' File "/opt/stack/watcher/watcher/common/rpc/'
|
||||||
|
'dispatcher.py", line 172, in dispatch\\n result ='
|
||||||
|
' getattr(proxyobj, method)(ctxt, **kwargs)']
|
||||||
MSG_WITHOUT_TRACE = "Test exception message."
|
MSG_WITHOUT_TRACE = "Test exception message."
|
||||||
MSG_WITH_TRACE = "{0}\n{1}".format(MSG_WITHOUT_TRACE, "\n".join(TRACE))
|
MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE)
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(TestNoExceptionTracebackHook, self).setUp()
|
super(TestNoExceptionTracebackHook, self).setUp()
|
||||||
p = mock.patch.object(root.Root, 'convert')
|
p = mock.patch.object(root.Root, 'convert')
|
||||||
self.root_convert_mock = p.start()
|
self.root_convert_mock = p.start()
|
||||||
self.addCleanup(p.stop)
|
self.addCleanup(p.stop)
|
||||||
|
cfg.CONF.set_override('debug', False, enforce_type=True)
|
||||||
|
|
||||||
def test_hook_exception_success(self):
|
def test_hook_exception_success(self):
|
||||||
self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE)
|
self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE)
|
||||||
@@ -96,7 +134,7 @@ class TestNoExceptionTracebackHook(api_base.FunctionalTest):
|
|||||||
def test_hook_remote_error_success(self):
|
def test_hook_remote_error_success(self):
|
||||||
test_exc_type = 'TestException'
|
test_exc_type = 'TestException'
|
||||||
self.root_convert_mock.side_effect = messaging.rpc.RemoteError(
|
self.root_convert_mock.side_effect = messaging.rpc.RemoteError(
|
||||||
test_exc_type, self.MSG_WITHOUT_TRACE, "\n".join(self.TRACE))
|
test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE)
|
||||||
|
|
||||||
response = self.get_json('/', path_prefix='', expect_errors=True)
|
response = self.get_json('/', path_prefix='', expect_errors=True)
|
||||||
|
|
||||||
@@ -106,12 +144,13 @@ class TestNoExceptionTracebackHook(api_base.FunctionalTest):
|
|||||||
# rare thing (happens due to wrong deserialization settings etc.)
|
# rare thing (happens due to wrong deserialization settings etc.)
|
||||||
# we don't care about this garbage.
|
# we don't care about this garbage.
|
||||||
expected_msg = ("Remote error: %s %s"
|
expected_msg = ("Remote error: %s %s"
|
||||||
% (test_exc_type, self.MSG_WITHOUT_TRACE))
|
% (test_exc_type, self.MSG_WITHOUT_TRACE)
|
||||||
|
+ ("\n[u'" if six.PY2 else "\n['"))
|
||||||
actual_msg = jsonutils.loads(
|
actual_msg = jsonutils.loads(
|
||||||
response.json['error_message'])['faultstring']
|
response.json['error_message'])['faultstring']
|
||||||
self.assertEqual(expected_msg, actual_msg)
|
self.assertEqual(expected_msg, actual_msg)
|
||||||
|
|
||||||
def test_hook_without_traceback(self):
|
def _test_hook_without_traceback(self):
|
||||||
msg = "Error message without traceback \n but \n multiline"
|
msg = "Error message without traceback \n but \n multiline"
|
||||||
self.root_convert_mock.side_effect = Exception(msg)
|
self.root_convert_mock.side_effect = Exception(msg)
|
||||||
|
|
||||||
@@ -121,24 +160,118 @@ class TestNoExceptionTracebackHook(api_base.FunctionalTest):
|
|||||||
response.json['error_message'])['faultstring']
|
response.json['error_message'])['faultstring']
|
||||||
self.assertEqual(msg, actual_msg)
|
self.assertEqual(msg, actual_msg)
|
||||||
|
|
||||||
def test_hook_server_debug_on_serverfault(self):
|
def test_hook_without_traceback(self):
|
||||||
|
self._test_hook_without_traceback()
|
||||||
|
|
||||||
|
def test_hook_without_traceback_debug(self):
|
||||||
cfg.CONF.set_override('debug', True, enforce_type=True)
|
cfg.CONF.set_override('debug', True, enforce_type=True)
|
||||||
|
self._test_hook_without_traceback()
|
||||||
|
|
||||||
|
def test_hook_without_traceback_debug_tracebacks(self):
|
||||||
|
cfg.CONF.set_override('debug', True, enforce_type=True)
|
||||||
|
self._test_hook_without_traceback()
|
||||||
|
|
||||||
|
def _test_hook_on_serverfault(self):
|
||||||
self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE)
|
self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE)
|
||||||
|
|
||||||
response = self.get_json('/', path_prefix='', expect_errors=True)
|
response = self.get_json('/', path_prefix='', expect_errors=True)
|
||||||
|
|
||||||
actual_msg = jsonutils.loads(
|
actual_msg = jsonutils.loads(
|
||||||
response.json['error_message'])['faultstring']
|
response.json['error_message'])['faultstring']
|
||||||
self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg)
|
return actual_msg
|
||||||
|
|
||||||
def test_hook_server_debug_on_clientfault(self):
|
def test_hook_on_serverfault(self):
|
||||||
|
cfg.CONF.set_override('debug', False, enforce_type=True)
|
||||||
|
msg = self._test_hook_on_serverfault()
|
||||||
|
self.assertEqual(self.MSG_WITHOUT_TRACE, msg)
|
||||||
|
|
||||||
|
def test_hook_on_serverfault_debug(self):
|
||||||
cfg.CONF.set_override('debug', True, enforce_type=True)
|
cfg.CONF.set_override('debug', True, enforce_type=True)
|
||||||
|
msg = self._test_hook_on_serverfault()
|
||||||
|
self.assertEqual(self.MSG_WITH_TRACE, msg)
|
||||||
|
|
||||||
|
def _test_hook_on_clientfault(self):
|
||||||
client_error = Exception(self.MSG_WITH_TRACE)
|
client_error = Exception(self.MSG_WITH_TRACE)
|
||||||
client_error.code = 400
|
client_error.code = http_client.BAD_REQUEST
|
||||||
self.root_convert_mock.side_effect = client_error
|
self.root_convert_mock.side_effect = client_error
|
||||||
|
|
||||||
response = self.get_json('/', path_prefix='', expect_errors=True)
|
response = self.get_json('/', path_prefix='', expect_errors=True)
|
||||||
|
|
||||||
actual_msg = jsonutils.loads(
|
actual_msg = jsonutils.loads(
|
||||||
response.json['error_message'])['faultstring']
|
response.json['error_message'])['faultstring']
|
||||||
self.assertEqual(self.MSG_WITH_TRACE, actual_msg)
|
return actual_msg
|
||||||
|
|
||||||
|
def test_hook_on_clientfault(self):
|
||||||
|
msg = self._test_hook_on_clientfault()
|
||||||
|
self.assertEqual(self.MSG_WITHOUT_TRACE, msg)
|
||||||
|
|
||||||
|
def test_hook_on_clientfault_debug_tracebacks(self):
|
||||||
|
cfg.CONF.set_override('debug', True, enforce_type=True)
|
||||||
|
msg = self._test_hook_on_clientfault()
|
||||||
|
self.assertEqual(self.MSG_WITH_TRACE, msg)
|
||||||
|
|
||||||
|
|
||||||
|
class TestContextHook(base.FunctionalTest):
|
||||||
|
@mock.patch.object(context, 'RequestContext')
|
||||||
|
def test_context_hook_not_admin(self, mock_ctx):
|
||||||
|
cfg.CONF.set_override(
|
||||||
|
'auth_type', 'password', group='watcher_clients_auth')
|
||||||
|
headers = fake_headers(admin=False)
|
||||||
|
reqstate = FakeRequestState(headers=headers)
|
||||||
|
context_hook = hooks.ContextHook()
|
||||||
|
context_hook.before(reqstate)
|
||||||
|
mock_ctx.assert_called_with(
|
||||||
|
auth_token=headers['X-Auth-Token'],
|
||||||
|
user=headers['X-User'],
|
||||||
|
user_id=headers['X-User-Id'],
|
||||||
|
domain_id=headers['X-User-Domain-Id'],
|
||||||
|
domain_name=headers['X-User-Domain-Name'],
|
||||||
|
auth_url=cfg.CONF.keystone_authtoken.auth_uri,
|
||||||
|
project=headers['X-Project-Name'],
|
||||||
|
project_id=headers['X-Project-Id'],
|
||||||
|
show_deleted=None,
|
||||||
|
auth_token_info=self.token_info,
|
||||||
|
roles=headers['X-Roles'].split(','))
|
||||||
|
|
||||||
|
@mock.patch.object(context, 'RequestContext')
|
||||||
|
def test_context_hook_admin(self, mock_ctx):
|
||||||
|
cfg.CONF.set_override(
|
||||||
|
'auth_type', 'password', group='watcher_clients_auth')
|
||||||
|
headers = fake_headers(admin=True)
|
||||||
|
reqstate = FakeRequestState(headers=headers)
|
||||||
|
context_hook = hooks.ContextHook()
|
||||||
|
context_hook.before(reqstate)
|
||||||
|
mock_ctx.assert_called_with(
|
||||||
|
auth_token=headers['X-Auth-Token'],
|
||||||
|
user=headers['X-User'],
|
||||||
|
user_id=headers['X-User-Id'],
|
||||||
|
domain_id=headers['X-User-Domain-Id'],
|
||||||
|
domain_name=headers['X-User-Domain-Name'],
|
||||||
|
auth_url=cfg.CONF.keystone_authtoken.auth_uri,
|
||||||
|
project=headers['X-Project-Name'],
|
||||||
|
project_id=headers['X-Project-Id'],
|
||||||
|
show_deleted=None,
|
||||||
|
auth_token_info=self.token_info,
|
||||||
|
roles=headers['X-Roles'].split(','))
|
||||||
|
|
||||||
|
@mock.patch.object(context, 'RequestContext')
|
||||||
|
def test_context_hook_public_api(self, mock_ctx):
|
||||||
|
cfg.CONF.set_override(
|
||||||
|
'auth_type', 'password', group='watcher_clients_auth')
|
||||||
|
headers = fake_headers(admin=True)
|
||||||
|
env = {'is_public_api': True}
|
||||||
|
reqstate = FakeRequestState(headers=headers, environ=env)
|
||||||
|
context_hook = hooks.ContextHook()
|
||||||
|
context_hook.before(reqstate)
|
||||||
|
mock_ctx.assert_called_with(
|
||||||
|
auth_token=headers['X-Auth-Token'],
|
||||||
|
user=headers['X-User'],
|
||||||
|
user_id=headers['X-User-Id'],
|
||||||
|
domain_id=headers['X-User-Domain-Id'],
|
||||||
|
domain_name=headers['X-User-Domain-Name'],
|
||||||
|
auth_url=cfg.CONF.keystone_authtoken.auth_uri,
|
||||||
|
project=headers['X-Project-Name'],
|
||||||
|
project_id=headers['X-Project-Id'],
|
||||||
|
show_deleted=None,
|
||||||
|
auth_token_info=self.token_info,
|
||||||
|
roles=headers['X-Roles'].split(','))
|
||||||
|
|||||||
@@ -66,7 +66,7 @@ class TestCase(BaseTestCase):
|
|||||||
|
|
||||||
app_config_path = os.path.join(os.path.dirname(__file__), 'config.py')
|
app_config_path = os.path.join(os.path.dirname(__file__), 'config.py')
|
||||||
self.app = testing.load_test_app(app_config_path)
|
self.app = testing.load_test_app(app_config_path)
|
||||||
token_info = {
|
self.token_info = {
|
||||||
'token': {
|
'token': {
|
||||||
'project': {
|
'project': {
|
||||||
'id': 'fake_project'
|
'id': 'fake_project'
|
||||||
@@ -77,14 +77,16 @@ class TestCase(BaseTestCase):
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
self.context = watcher_context.RequestContext(
|
self.context = watcher_context.RequestContext(
|
||||||
auth_token_info=token_info,
|
auth_token_info=self.token_info,
|
||||||
project_id='fake_project',
|
project_id='fake_project',
|
||||||
user_id='fake_user')
|
user_id='fake_user')
|
||||||
|
|
||||||
|
self.policy = self.useFixture(policy_fixture.PolicyFixture())
|
||||||
|
|
||||||
def make_context(*args, **kwargs):
|
def make_context(*args, **kwargs):
|
||||||
# If context hasn't been constructed with token_info
|
# If context hasn't been constructed with token_info
|
||||||
if not kwargs.get('auth_token_info'):
|
if not kwargs.get('auth_token_info'):
|
||||||
kwargs['auth_token_info'] = copy.deepcopy(token_info)
|
kwargs['auth_token_info'] = copy.deepcopy(self.token_info)
|
||||||
if not kwargs.get('project_id'):
|
if not kwargs.get('project_id'):
|
||||||
kwargs['project_id'] = 'fake_project'
|
kwargs['project_id'] = 'fake_project'
|
||||||
if not kwargs.get('user_id'):
|
if not kwargs.get('user_id'):
|
||||||
|
|||||||
@@ -15,13 +15,9 @@
|
|||||||
|
|
||||||
"""Watcher DB test base class."""
|
"""Watcher DB test base class."""
|
||||||
|
|
||||||
import os
|
|
||||||
import shutil
|
|
||||||
|
|
||||||
import fixtures
|
import fixtures
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
|
|
||||||
from watcher.common import paths
|
|
||||||
from watcher.db import api as dbapi
|
from watcher.db import api as dbapi
|
||||||
from watcher.db.sqlalchemy import api as sqla_api
|
from watcher.db.sqlalchemy import api as sqla_api
|
||||||
from watcher.db.sqlalchemy import migration
|
from watcher.db.sqlalchemy import migration
|
||||||
@@ -38,32 +34,17 @@ _DB_CACHE = None
|
|||||||
|
|
||||||
class Database(fixtures.Fixture):
|
class Database(fixtures.Fixture):
|
||||||
|
|
||||||
def __init__(self, db_api, db_migrate, sql_connection,
|
def __init__(self, db_api, db_migrate, sql_connection):
|
||||||
sqlite_db, sqlite_clean_db):
|
|
||||||
self.sql_connection = sql_connection
|
self.sql_connection = sql_connection
|
||||||
self.sqlite_db = sqlite_db
|
|
||||||
self.sqlite_clean_db = sqlite_clean_db
|
|
||||||
|
|
||||||
self.engine = db_api.get_engine()
|
self.engine = db_api.get_engine()
|
||||||
self.engine.dispose()
|
self.engine.dispose()
|
||||||
conn = self.engine.connect()
|
conn = self.engine.connect()
|
||||||
if sql_connection == "sqlite://":
|
self.setup_sqlite(db_migrate)
|
||||||
self.setup_sqlite(db_migrate)
|
|
||||||
elif sql_connection.startswith('sqlite:///'):
|
|
||||||
testdb = paths.state_path_rel(sqlite_db)
|
|
||||||
if os.path.exists(testdb):
|
|
||||||
return
|
|
||||||
self.setup_sqlite(db_migrate)
|
|
||||||
else:
|
|
||||||
db_migrate.upgrade('head')
|
|
||||||
self.post_migrations()
|
self.post_migrations()
|
||||||
if sql_connection == "sqlite://":
|
|
||||||
conn = self.engine.connect()
|
self._DB = "".join(line for line in conn.connection.iterdump())
|
||||||
self._DB = "".join(line for line in conn.connection.iterdump())
|
self.engine.dispose()
|
||||||
self.engine.dispose()
|
|
||||||
else:
|
|
||||||
cleandb = paths.state_path_rel(sqlite_clean_db)
|
|
||||||
shutil.copyfile(testdb, cleandb)
|
|
||||||
|
|
||||||
def setup_sqlite(self, db_migrate):
|
def setup_sqlite(self, db_migrate):
|
||||||
if db_migrate.version():
|
if db_migrate.version():
|
||||||
@@ -74,14 +55,9 @@ class Database(fixtures.Fixture):
|
|||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(Database, self).setUp()
|
super(Database, self).setUp()
|
||||||
|
|
||||||
if self.sql_connection == "sqlite://":
|
conn = self.engine.connect()
|
||||||
conn = self.engine.connect()
|
conn.connection.executescript(self._DB)
|
||||||
conn.connection.executescript(self._DB)
|
self.addCleanup(self.engine.dispose)
|
||||||
self.addCleanup(self.engine.dispose)
|
|
||||||
else:
|
|
||||||
shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
|
|
||||||
paths.state_path_rel(self.sqlite_db))
|
|
||||||
self.addCleanup(os.unlink, self.sqlite_db)
|
|
||||||
|
|
||||||
def post_migrations(self):
|
def post_migrations(self):
|
||||||
"""Any addition steps that are needed outside of the migrations."""
|
"""Any addition steps that are needed outside of the migrations."""
|
||||||
@@ -95,8 +71,6 @@ class DbTestCase(base.TestCase):
|
|||||||
# To use in-memory SQLite DB
|
# To use in-memory SQLite DB
|
||||||
cfg.CONF.set_override("connection", "sqlite://", group="database",
|
cfg.CONF.set_override("connection", "sqlite://", group="database",
|
||||||
enforce_type=True)
|
enforce_type=True)
|
||||||
cfg.CONF.set_override("sqlite_db", "", group="database",
|
|
||||||
enforce_type=True)
|
|
||||||
|
|
||||||
super(DbTestCase, self).setUp()
|
super(DbTestCase, self).setUp()
|
||||||
|
|
||||||
@@ -105,7 +79,5 @@ class DbTestCase(base.TestCase):
|
|||||||
global _DB_CACHE
|
global _DB_CACHE
|
||||||
if not _DB_CACHE:
|
if not _DB_CACHE:
|
||||||
_DB_CACHE = Database(sqla_api, migration,
|
_DB_CACHE = Database(sqla_api, migration,
|
||||||
sql_connection=CONF.database.connection,
|
sql_connection=CONF.database.connection)
|
||||||
sqlite_db=CONF.database.sqlite_db,
|
|
||||||
sqlite_clean_db='clean.sqlite')
|
|
||||||
self.useFixture(_DB_CACHE)
|
self.useFixture(_DB_CACHE)
|
||||||
|
|||||||
@@ -25,8 +25,7 @@ from watcher.decision_engine.messaging import events
|
|||||||
from watcher.decision_engine.model.collector import manager
|
from watcher.decision_engine.model.collector import manager
|
||||||
from watcher.objects import audit as audit_objects
|
from watcher.objects import audit as audit_objects
|
||||||
from watcher.tests.db import base
|
from watcher.tests.db import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies import \
|
from watcher.tests.decision_engine.model import faker_cluster_state as faker
|
||||||
faker_cluster_state as faker
|
|
||||||
from watcher.tests.objects import utils as obj_utils
|
from watcher.tests.objects import utils as obj_utils
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -23,8 +23,7 @@ from watcher.common import exception
|
|||||||
from watcher.decision_engine.loading import default as default_loading
|
from watcher.decision_engine.loading import default as default_loading
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests import conf_fixture
|
from watcher.tests import conf_fixture
|
||||||
from watcher.tests.decision_engine.strategy.strategies import \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
faker_cluster_state
|
|
||||||
|
|
||||||
|
|
||||||
class TestClusterDataModelCollectorLoader(base.TestCase):
|
class TestClusterDataModelCollectorLoader(base.TestCase):
|
||||||
|
|||||||
@@ -20,8 +20,7 @@ from watcher.decision_engine.audit import oneshot as oneshot_handler
|
|||||||
from watcher.decision_engine.messaging import audit_endpoint
|
from watcher.decision_engine.messaging import audit_endpoint
|
||||||
from watcher.decision_engine.model.collector import manager
|
from watcher.decision_engine.model.collector import manager
|
||||||
from watcher.tests.db import base
|
from watcher.tests.db import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
|
||||||
from watcher.tests.objects import utils as obj_utils
|
from watcher.tests.objects import utils as obj_utils
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
47
watcher/tests/decision_engine/model/data/scenario_1.xml
Normal file
47
watcher/tests/decision_engine/model/data/scenario_1.xml
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_0" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_1" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode human_id="" uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_2" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode human_id="" uuid="Node_2" status="enabled" state="up" id="2" hostname="hostname_2" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_3" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_4" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_5" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode human_id="" uuid="Node_3" status="enabled" state="up" id="3" hostname="hostname_3" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_6" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode human_id="" uuid="Node_4" status="enabled" state="up" id="4" hostname="hostname_4" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_7" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_10" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_11" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_12" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_13" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_14" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_15" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_16" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_17" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_18" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_19" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_20" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_21" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_22" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_23" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_24" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_25" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_26" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_27" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_28" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_29" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_30" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_31" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_32" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_33" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_34" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_8" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_9" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,8 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode hostname="hostname_0" uuid="Node_0" id="0" state="enabled" human_id="" status="enabled" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_0" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_1" uuid="Node_1" id="1" state="enabled" human_id="" status="enabled" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_1" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,13 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode hostname="hostname_0" uuid="Node_0" id="0" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_0" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_1" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_2" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_3" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_4" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_5" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_1" uuid="Node_1" id="1" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64"/>
|
||||||
|
<ComputeNode hostname="hostname_2" uuid="Node_2" id="2" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64"/>
|
||||||
|
<ComputeNode hostname="hostname_3" uuid="Node_3" id="3" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64"/>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,8 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode human_id="" uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="a4cab39b-9828-413a-bf88-f76921bf1517" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode hostname="hostname_0" uuid="Node_0" id="0" state="up" human_id="" status="enabled" ResourceType.cpu_cores="10" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_6" ResourceType.cpu_cores="1" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_7" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_8" ResourceType.cpu_cores="4" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_9" ResourceType.cpu_cores="8" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_1" uuid="Node_1" id="1" state="up" human_id="" status="enabled" ResourceType.cpu_cores="10" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64"/>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" ResourceType.cpu_cores="1" ResourceType.disk="1" ResourceType.disk_capacity="1" ResourceType.memory="1"/>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" ResourceType.cpu_cores="4" ResourceType.disk="4" ResourceType.disk_capacity="4" ResourceType.memory="4">
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_0" hostname="" ResourceType.cpu_cores="4" ResourceType.disk="0" ResourceType.disk_capacity="0" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_1" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode human_id="" uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" ResourceType.cpu_cores="40" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_3" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_4" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode human_id="" uuid="Node_0" status="enabled" state="up" id="0" hostname="hostname_0" ResourceType.cpu_cores="50" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="73b09e16-35b7-4922-804e-e8f5d9b740fc" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="cae81432-1631-4d4e-b29c-6f3acdcde906" hostname="" ResourceType.cpu_cores="15" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode human_id="" uuid="Node_1" status="enabled" state="up" id="1" hostname="hostname_1" ResourceType.cpu_cores="50" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="132">
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_3" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance state="active" human_id="" uuid="INSTANCE_4" hostname="" ResourceType.cpu_cores="10" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode hostname="hostname_0" uuid="Node_0" id="0" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_0" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_1" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_2" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_1" uuid="Node_1" id="1" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_3" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_2" uuid="Node_2" id="2" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_4" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_3" uuid="Node_3" id="3" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_5" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
</ModelRoot>
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
<ModelRoot>
|
||||||
|
<ComputeNode hostname="hostname_0" uuid="Node_0" id="0" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_0" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_1" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_2" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_1" uuid="Node_1" id="1" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_3" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_2" uuid="Node_2" id="2" state="up" human_id="" status="enabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_4" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
<ComputeNode hostname="hostname_3" uuid="Node_3" id="3" state="up" human_id="" status="disabled" ResourceType.cpu_cores="16" ResourceType.disk="250" ResourceType.disk_capacity="250" ResourceType.memory="64">
|
||||||
|
<Instance hostname="" human_id="" state="active" uuid="INSTANCE_5" ResourceType.cpu_cores="2" ResourceType.disk="20" ResourceType.disk_capacity="20" ResourceType.memory="2"/>
|
||||||
|
</ComputeNode>
|
||||||
|
</ModelRoot>
|
||||||
161
watcher/tests/decision_engine/model/faker_cluster_and_metrics.py
Normal file
161
watcher/tests/decision_engine/model/faker_cluster_and_metrics.py
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
# -*- encoding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Authors: Vojtech CIMA <cima@zhaw.ch>
|
||||||
|
# Bruno GRAZIOLI <gaea@zhaw.ch>
|
||||||
|
# Sean MURPHY <murp@zhaw.ch>
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
import mock
|
||||||
|
|
||||||
|
from watcher.decision_engine.model.collector import base
|
||||||
|
from watcher.decision_engine.model import element
|
||||||
|
from watcher.decision_engine.model import model_root as modelroot
|
||||||
|
|
||||||
|
|
||||||
|
class FakerModelCollector(base.BaseClusterDataModelCollector):
    """Fake cluster data model collector backed by XML fixture files.

    Each ``generate_scenario_*`` method deserializes a canned cluster
    model from the ``data`` folder next to this module.
    """

    def __init__(self, config=None, osc=None):
        # Fall back to a mock configuration when none is supplied.
        super(FakerModelCollector, self).__init__(
            mock.Mock() if config is None else config)

    @property
    def notification_endpoints(self):
        # This fake collector does not listen to any notification.
        return []

    def execute(self):
        """Return the default (scenario 1) cluster data model."""
        return self.generate_scenario_1()

    def load_data(self, filename):
        """Read a fixture file from the local ``data`` folder as bytes."""
        data_folder = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "data")
        with open(os.path.join(data_folder, filename), 'rb') as xml_file:
            return xml_file.read()

    def load_model(self, filename):
        """Deserialize a cluster model from an XML fixture file."""
        return modelroot.ModelRoot.from_xml(self.load_data(filename))

    def generate_scenario_1(self):
        """Simulates cluster with 2 nodes and 2 instances using 1:1 mapping"""
        return self.load_model('scenario_1_with_metrics.xml')

    def generate_scenario_2(self):
        """Simulates a cluster

        With 4 nodes and 6 instances all mapped to a single node
        """
        return self.load_model('scenario_2_with_metrics.xml')

    def generate_scenario_3(self):
        """Simulates a cluster

        With 4 nodes and 6 instances all mapped to one node
        """
        return self.load_model('scenario_3_with_metrics.xml')

    def generate_scenario_4(self):
        """Simulates a cluster

        With 4 nodes and 6 instances spread on all nodes
        """
        return self.load_model('scenario_4_with_metrics.xml')
|
|
||||||
|
|
||||||
|
class FakeCeilometerMetrics(object):
    """Fake Ceilometer client returning canned metric values.

    Per-instance metrics are hard-coded by instance UUID, while node CPU
    utilization is derived from the instance-to-node mappings of the
    supplied cluster model.
    """

    def __init__(self, model):
        # Cluster data model used to resolve instance-node mappings.
        self.model = model

    def mock_get_statistics(self, resource_id, meter_name, period=3600,
                            aggregate='avg'):
        """Dispatch a statistics request to the matching fake getter.

        :param resource_id: node or instance UUID
        :param meter_name: Ceilometer meter name
        :param period: unused, kept for signature compatibility
        :param aggregate: unused, kept for signature compatibility
        :returns: the canned metric value, or None for unknown meters
        """
        if meter_name == "compute.node.cpu.percent":
            return self.get_node_cpu_util(resource_id)
        elif meter_name == "cpu_util":
            return self.get_instance_cpu_util(resource_id)
        elif meter_name == "memory.usage":
            return self.get_instance_ram_util(resource_id)
        elif meter_name == "disk.root.size":
            return self.get_instance_disk_root_size(resource_id)

    def get_node_cpu_util(self, r_id):
        """Calculate node utilization dynamically.

        Node CPU utilization considers and correlates with the actual
        instance-node mappings provided within the cluster model.
        Returns relative node CPU utilization <0, 100>.
        :param r_id: resource id
        """
        # Keep only the first two '_'-separated components of the id
        # (e.g. "Node_0_hostname_0" -> "Node_0").  Renamed from `id`
        # to avoid shadowing the builtin.
        node_uuid = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
        instances = self.model.get_mapping().get_node_instances_by_uuid(
            node_uuid)
        util_sum = 0.0
        node_cpu_cores = self.model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity_by_uuid(node_uuid)
        for instance_uuid in instances:
            instance_cpu_cores = self.model.get_resource_by_uuid(
                element.ResourceType.cpu_cores).\
                get_capacity(self.model.get_instance_by_uuid(instance_uuid))
            # Weight each instance's relative utilization by its core count.
            total_cpu_util = instance_cpu_cores * self.get_instance_cpu_util(
                instance_uuid)
            util_sum += total_cpu_util / 100.0
        util_sum /= node_cpu_cores
        return util_sum * 100.0

    def get_instance_cpu_util(self, r_id):
        """Return the canned CPU utilization (%) for an instance UUID."""
        instance_cpu_util = {
            'INSTANCE_0': 10,
            'INSTANCE_1': 30,
            'INSTANCE_2': 60,
            'INSTANCE_3': 20,
            'INSTANCE_4': 40,
            'INSTANCE_5': 50,
            'INSTANCE_6': 100,
            'INSTANCE_7': 100,
            'INSTANCE_8': 100,
            'INSTANCE_9': 100,
        }
        return instance_cpu_util[str(r_id)]

    def get_instance_ram_util(self, r_id):
        """Return the canned RAM usage for an instance UUID."""
        instance_ram_util = {
            'INSTANCE_0': 1,
            'INSTANCE_1': 2,
            'INSTANCE_2': 4,
            'INSTANCE_3': 8,
            'INSTANCE_4': 3,
            'INSTANCE_5': 2,
            'INSTANCE_6': 1,
            'INSTANCE_7': 2,
            'INSTANCE_8': 4,
            'INSTANCE_9': 8,
        }
        return instance_ram_util[str(r_id)]

    def get_instance_disk_root_size(self, r_id):
        """Return the canned root disk size for an instance UUID."""
        instance_disk_util = {
            'INSTANCE_0': 10,
            'INSTANCE_1': 15,
            'INSTANCE_2': 30,
            'INSTANCE_3': 35,
            'INSTANCE_4': 20,
            'INSTANCE_5': 25,
            'INSTANCE_6': 25,
            'INSTANCE_7': 25,
            'INSTANCE_8': 25,
            'INSTANCE_9': 25,
        }
        return instance_disk_util[str(r_id)]
||||||
157
watcher/tests/decision_engine/model/faker_cluster_state.py
Normal file
157
watcher/tests/decision_engine/model/faker_cluster_state.py
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
# -*- encoding: utf-8 -*-
|
||||||
|
# Copyright (c) 2015 b<>com
|
||||||
|
#
|
||||||
|
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
import mock
|
||||||
|
|
||||||
|
from watcher.decision_engine.model.collector import base
|
||||||
|
from watcher.decision_engine.model import element
|
||||||
|
from watcher.decision_engine.model import model_root as modelroot
|
||||||
|
|
||||||
|
|
||||||
|
class FakerModelCollector(base.BaseClusterDataModelCollector):
    """Fake cluster data model collector used by the unit tests.

    Provides one programmatically-built scenario plus several scenarios
    deserialized from XML fixture files in the local ``data`` folder.
    """

    def __init__(self, config=None, osc=None):
        # Default to a mock configuration carrying a 777s period.
        super(FakerModelCollector, self).__init__(
            mock.Mock(period=777) if config is None else config)

    @property
    def notification_endpoints(self):
        # This fake collector does not listen to any notification.
        return []

    def load_data(self, filename):
        """Read a fixture file from the local ``data`` folder as bytes."""
        data_folder = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "data")
        with open(os.path.join(data_folder, filename), 'rb') as xml_file:
            return xml_file.read()

    def load_model(self, filename):
        """Deserialize a cluster model from an XML fixture file."""
        return modelroot.ModelRoot.from_xml(self.load_data(filename))

    def execute(self):
        # Reuse the cached model when available, otherwise build it.
        return self._cluster_data_model or self.build_scenario_1()

    def build_scenario_1(self):
        """Build in memory a cluster of 5 nodes hosting 8 mapped instances."""
        current_state_cluster = modelroot.ModelRoot()
        # number of nodes
        node_count = 5
        # number max of instance per node
        node_instance_count = 7
        # total number of virtual machine
        instance_count = node_count * node_instance_count

        # define resources (CPU, MEM, disk, ...)
        mem = element.Resource(element.ResourceType.memory)
        # 2199.954 Mhz
        num_cores = element.Resource(element.ResourceType.cpu_cores)
        disk = element.Resource(element.ResourceType.disk)
        disk_capacity = element.Resource(element.ResourceType.disk_capacity)

        for resource in (mem, num_cores, disk, disk_capacity):
            current_state_cluster.create_resource(resource)

        for node_id in range(node_count):
            node = element.ComputeNode(node_id)
            node.uuid = "Node_{0}".format(node_id)
            node.hostname = "hostname_{0}".format(node_id)

            mem.set_capacity(node, 132)
            disk.set_capacity(node, 250)
            disk_capacity.set_capacity(node, 250)
            num_cores.set_capacity(node, 40)
            current_state_cluster.add_node(node)

        instances = []
        for instance_id in range(instance_count):
            instance = element.Instance()
            instance.uuid = "INSTANCE_{0}".format(instance_id)
            mem.set_capacity(instance, 2)
            disk.set_capacity(instance, 20)
            disk_capacity.set_capacity(instance, 20)
            num_cores.set_capacity(instance, 10)
            instances.append(instance)
            current_state_cluster.add_instance(instance)

        # Instance placement as (node uuid, instance uuid) pairs.
        placement = (
            ("Node_0", "INSTANCE_0"),
            ("Node_0", "INSTANCE_1"),
            ("Node_1", "INSTANCE_2"),
            ("Node_2", "INSTANCE_3"),
            ("Node_2", "INSTANCE_4"),
            ("Node_2", "INSTANCE_5"),
            ("Node_3", "INSTANCE_6"),
            ("Node_4", "INSTANCE_7"),
        )
        for node_uuid, instance_uuid in placement:
            current_state_cluster.mapping.map(
                current_state_cluster.get_node_by_uuid(node_uuid),
                current_state_cluster.get_instance_by_uuid(instance_uuid))

        return current_state_cluster

    def generate_scenario_1(self):
        return self.load_model('scenario_1.xml')

    def generate_scenario_3_with_2_nodes(self):
        return self.load_model('scenario_3_with_2_nodes.xml')

    def generate_scenario_4_with_1_node_no_instance(self):
        return self.load_model('scenario_4_with_1_node_no_instance.xml')

    def generate_scenario_5_with_instance_disk_0(self):
        return self.load_model('scenario_5_with_instance_disk_0.xml')

    def generate_scenario_6_with_2_nodes(self):
        return self.load_model('scenario_6_with_2_nodes.xml')

    def generate_scenario_7_with_2_nodes(self):
        return self.load_model('scenario_7_with_2_nodes.xml')

    def generate_scenario_8_with_4_nodes(self):
        return self.load_model('scenario_8_with_4_nodes.xml')

    def generate_scenario_9_with_3_active_plus_1_disabled_nodes(self):
        return self.load_model(
            'scenario_9_with_3_active_plus_1_disabled_nodes.xml')
|
||||||
@@ -250,9 +250,3 @@ class FakerMetricsCollector(object):
|
|||||||
|
|
||||||
def get_virtual_machine_capacity(self, instance_uuid):
|
def get_virtual_machine_capacity(self, instance_uuid):
|
||||||
return random.randint(1, 4)
|
return random.randint(1, 4)
|
||||||
|
|
||||||
def get_average_network_incomming(self, node):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def get_average_network_outcomming(self, node):
|
|
||||||
pass
|
|
||||||
@@ -0,0 +1,65 @@
|
|||||||
|
{
|
||||||
|
"event_type": "instance.update",
|
||||||
|
"payload": {
|
||||||
|
"nova_object.data": {
|
||||||
|
"architecture": "x86_64",
|
||||||
|
"audit_period": {
|
||||||
|
"nova_object.data": {
|
||||||
|
"audit_period_beginning": "2012-10-01T00:00:00Z",
|
||||||
|
"audit_period_ending": "2012-10-29T13:42:11Z"},
|
||||||
|
"nova_object.name": "AuditPeriodPayload",
|
||||||
|
"nova_object.namespace": "nova",
|
||||||
|
"nova_object.version": "1.0"
|
||||||
|
},
|
||||||
|
"availability_zone": null,
|
||||||
|
"bandwidth": [],
|
||||||
|
"created_at": "2012-10-29T13:42:11Z",
|
||||||
|
"deleted_at": null,
|
||||||
|
"display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
|
||||||
|
"host": "Node_2",
|
||||||
|
"host_name": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
|
||||||
|
"image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
|
||||||
|
"kernel_id": "",
|
||||||
|
"launched_at": null,
|
||||||
|
"metadata": {},
|
||||||
|
"node": "hostname_0",
|
||||||
|
"old_display_name": null,
|
||||||
|
"os_type": null,
|
||||||
|
"progress": 0,
|
||||||
|
"ramdisk_id": "",
|
||||||
|
"reservation_id": "r-sd3ygfjj",
|
||||||
|
"state": "paused",
|
||||||
|
"task_state": "scheduling",
|
||||||
|
"power_state": "pending",
|
||||||
|
"ip_addresses": [],
|
||||||
|
"state_update": {
|
||||||
|
"nova_object.data": {
|
||||||
|
"old_task_state": null,
|
||||||
|
"new_task_state": null,
|
||||||
|
"old_state": "paused",
|
||||||
|
"state": "paused"},
|
||||||
|
"nova_object.name": "InstanceStateUpdatePayload",
|
||||||
|
"nova_object.namespace": "nova",
|
||||||
|
"nova_object.version": "1.0"},
|
||||||
|
"tenant_id": "6f70656e737461636b20342065766572",
|
||||||
|
"terminated_at": null,
|
||||||
|
"flavor": {
|
||||||
|
"nova_object.name": "FlavorPayload",
|
||||||
|
"nova_object.data": {
|
||||||
|
"flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3",
|
||||||
|
"root_gb": 1,
|
||||||
|
"vcpus": 1,
|
||||||
|
"ephemeral_gb": 0,
|
||||||
|
"memory_mb": 512
|
||||||
|
},
|
||||||
|
"nova_object.version": "1.0",
|
||||||
|
"nova_object.namespace": "nova"
|
||||||
|
},
|
||||||
|
"user_id": "fake",
|
||||||
|
"uuid": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7"},
|
||||||
|
"nova_object.name": "InstanceUpdatePayload",
|
||||||
|
"nova_object.namespace": "nova",
|
||||||
|
"nova_object.version": "1.0"},
|
||||||
|
"priority": "INFO",
|
||||||
|
"publisher_id": "nova-compute:Node_2"
|
||||||
|
}
|
||||||
@@ -0,0 +1,52 @@
|
|||||||
|
{
|
||||||
|
"publisher_id": "compute:Node_2",
|
||||||
|
"event_type": "compute.instance.update",
|
||||||
|
"payload": {
|
||||||
|
"access_ip_v4": null,
|
||||||
|
"access_ip_v6": null,
|
||||||
|
"architecture": null,
|
||||||
|
"audit_period_beginning": "2016-08-17T13:00:00.000000",
|
||||||
|
"audit_period_ending": "2016-08-17T13:56:05.262440",
|
||||||
|
"availability_zone": "nova",
|
||||||
|
"bandwidth": {},
|
||||||
|
"cell_name": "",
|
||||||
|
"created_at": "2016-08-17 13:53:23+00:00",
|
||||||
|
"deleted_at": "",
|
||||||
|
"disk_gb": 1,
|
||||||
|
"display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
|
||||||
|
"ephemeral_gb": 0,
|
||||||
|
"host": "Node_2",
|
||||||
|
"hostname": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
|
||||||
|
"image_meta": {
|
||||||
|
"base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97",
|
||||||
|
"container_format": "bare",
|
||||||
|
"disk_format": "qcow2",
|
||||||
|
"min_disk": "1",
|
||||||
|
"min_ram": "0"
|
||||||
|
},
|
||||||
|
"image_ref_url": "http://10.50.0.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97",
|
||||||
|
"instance_flavor_id": "1",
|
||||||
|
"instance_id": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
|
||||||
|
"instance_type": "m1.tiny",
|
||||||
|
"instance_type_id": 2,
|
||||||
|
"kernel_id": "",
|
||||||
|
"launched_at": "2016-08-17T13:53:35.000000",
|
||||||
|
"memory_mb": 512,
|
||||||
|
"metadata": {},
|
||||||
|
"new_task_state": null,
|
||||||
|
"node": "hostname_0",
|
||||||
|
"old_state": "paused",
|
||||||
|
"old_task_state": null,
|
||||||
|
"os_type": null,
|
||||||
|
"progress": "",
|
||||||
|
"ramdisk_id": "",
|
||||||
|
"reservation_id": "r-0822ymml",
|
||||||
|
"root_gb": 1,
|
||||||
|
"state": "paused",
|
||||||
|
"state_description": "paused",
|
||||||
|
"tenant_id": "a4b4772d93c74d5e8b7c68cdd2a014e1",
|
||||||
|
"terminated_at": "",
|
||||||
|
"user_id": "ce64facc93354bbfa90f4f9f9a3e1e75",
|
||||||
|
"vcpus": 1
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -17,8 +17,7 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
from watcher.decision_engine.model.notification import nova as novanotification
|
from watcher.decision_engine.model.notification import nova as novanotification
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
|
||||||
|
|
||||||
|
|
||||||
class FakeManager(object):
|
class FakeManager(object):
|
||||||
|
|||||||
@@ -23,14 +23,14 @@ from oslo_serialization import jsonutils
|
|||||||
|
|
||||||
from watcher.common import context
|
from watcher.common import context
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
|
from watcher.common import nova_helper
|
||||||
from watcher.common import service as watcher_service
|
from watcher.common import service as watcher_service
|
||||||
from watcher.decision_engine.model import element
|
from watcher.decision_engine.model import element
|
||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.model.notification import nova as novanotification
|
from watcher.decision_engine.model.notification import nova as novanotification
|
||||||
from watcher.tests import base as base_test
|
from watcher.tests import base as base_test
|
||||||
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
from watcher.tests.decision_engine.model.notification import fake_managers
|
from watcher.tests.decision_engine.model.notification import fake_managers
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
|
||||||
import faker_cluster_state
|
|
||||||
|
|
||||||
|
|
||||||
class NotificationTestCase(base_test.TestCase):
|
class NotificationTestCase(base_test.TestCase):
|
||||||
@@ -125,7 +125,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
handler = novanotification.ServiceUpdated(self.fake_cdmc)
|
handler = novanotification.ServiceUpdated(self.fake_cdmc)
|
||||||
|
|
||||||
node0_uuid = 'Node_0'
|
node0_uuid = 'Node_0'
|
||||||
node0 = compute_model.get_node_from_id(node0_uuid)
|
node0 = compute_model.get_node_by_uuid(node0_uuid)
|
||||||
|
|
||||||
message = self.load_message('scenario3_service-update.json')
|
message = self.load_message('scenario3_service-update.json')
|
||||||
|
|
||||||
@@ -151,7 +151,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
handler = novanotification.InstanceUpdated(self.fake_cdmc)
|
handler = novanotification.InstanceUpdated(self.fake_cdmc)
|
||||||
|
|
||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
|
|
||||||
message = self.load_message('scenario3_instance-update.json')
|
message = self.load_message('scenario3_instance-update.json')
|
||||||
|
|
||||||
@@ -167,34 +167,46 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
|
|
||||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||||
|
|
||||||
def test_nova_instance_update_notfound_creates(self):
|
@mock.patch.object(nova_helper, "NovaHelper")
|
||||||
|
def test_nova_instance_update_notfound_still_creates(
|
||||||
|
self, m_nova_helper_cls):
|
||||||
|
m_get_compute_node_by_hostname = mock.Mock(
|
||||||
|
side_effect=lambda uuid: mock.Mock(
|
||||||
|
name='m_get_compute_node_by_hostname',
|
||||||
|
id=3,
|
||||||
|
uuid=uuid,
|
||||||
|
memory_mb=7777,
|
||||||
|
vcpus=42,
|
||||||
|
free_disk_gb=974,
|
||||||
|
local_gb=1337))
|
||||||
|
m_nova_helper_cls.return_value = mock.Mock(
|
||||||
|
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
|
||||||
|
name='m_nova_helper')
|
||||||
|
|
||||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||||
self.fake_cdmc.cluster_data_model = compute_model
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
handler = novanotification.InstanceUpdated(self.fake_cdmc)
|
handler = novanotification.InstanceUpdated(self.fake_cdmc)
|
||||||
|
|
||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
|
||||||
|
|
||||||
message = self.load_message('scenario3_instance-update.json')
|
message = self.load_message('scenario3_notfound_instance-update.json')
|
||||||
|
|
||||||
with mock.patch.object(
|
handler.info(
|
||||||
model_root.ModelRoot, 'get_instance_from_id'
|
ctxt=self.context,
|
||||||
) as m_get_instance_from_id:
|
publisher_id=message['publisher_id'],
|
||||||
m_get_instance_from_id.side_effect = exception.InstanceNotFound(
|
event_type=message['event_type'],
|
||||||
name='TEST')
|
payload=message['payload'],
|
||||||
handler.info(
|
metadata=self.FAKE_METADATA,
|
||||||
ctxt=self.context,
|
)
|
||||||
publisher_id=message['publisher_id'],
|
|
||||||
event_type=message['event_type'],
|
|
||||||
payload=message['payload'],
|
|
||||||
metadata=self.FAKE_METADATA,
|
|
||||||
)
|
|
||||||
|
|
||||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
cpu_capacity = compute_model.get_resource_from_id(
|
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
disk_capacity = compute_model.get_resource_from_id(
|
disk = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk)
|
element.ResourceType.disk)
|
||||||
memory_capacity = compute_model.get_resource_from_id(
|
disk_capacity = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.disk_capacity)
|
||||||
|
memory_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory)
|
element.ResourceType.memory)
|
||||||
|
|
||||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||||
@@ -202,6 +214,60 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
self.assertEqual(1, disk_capacity.get_capacity(instance0))
|
self.assertEqual(1, disk_capacity.get_capacity(instance0))
|
||||||
self.assertEqual(512, memory_capacity.get_capacity(instance0))
|
self.assertEqual(512, memory_capacity.get_capacity(instance0))
|
||||||
|
|
||||||
|
m_get_compute_node_by_hostname.assert_called_once_with('Node_2')
|
||||||
|
node_2 = compute_model.get_node_by_uuid('Node_2')
|
||||||
|
self.assertEqual(7777, memory_capacity.get_capacity(node_2))
|
||||||
|
self.assertEqual(42, cpu_capacity.get_capacity(node_2))
|
||||||
|
self.assertEqual(974, disk.get_capacity(node_2))
|
||||||
|
self.assertEqual(1337, disk_capacity.get_capacity(node_2))
|
||||||
|
|
||||||
|
@mock.patch.object(nova_helper, "NovaHelper")
|
||||||
|
def test_instance_update_node_notfound_set_unmapped(
|
||||||
|
self, m_nova_helper_cls):
|
||||||
|
m_get_compute_node_by_hostname = mock.Mock(
|
||||||
|
side_effect=exception.ComputeNodeNotFound)
|
||||||
|
m_nova_helper_cls.return_value = mock.Mock(
|
||||||
|
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
|
||||||
|
name='m_nova_helper')
|
||||||
|
|
||||||
|
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||||
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
|
handler = novanotification.InstanceUpdated(self.fake_cdmc)
|
||||||
|
|
||||||
|
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
|
||||||
|
|
||||||
|
message = self.load_message(
|
||||||
|
'scenario3_notfound_instance-update.json')
|
||||||
|
|
||||||
|
handler.info(
|
||||||
|
ctxt=self.context,
|
||||||
|
publisher_id=message['publisher_id'],
|
||||||
|
event_type=message['event_type'],
|
||||||
|
payload=message['payload'],
|
||||||
|
metadata=self.FAKE_METADATA,
|
||||||
|
)
|
||||||
|
|
||||||
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
|
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.cpu_cores)
|
||||||
|
disk = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.disk)
|
||||||
|
disk_capacity = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.disk_capacity)
|
||||||
|
memory_capacity = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.memory)
|
||||||
|
|
||||||
|
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||||
|
self.assertEqual(1, cpu_capacity.get_capacity(instance0))
|
||||||
|
self.assertEqual(1, disk.get_capacity(instance0))
|
||||||
|
self.assertEqual(1, disk_capacity.get_capacity(instance0))
|
||||||
|
self.assertEqual(512, memory_capacity.get_capacity(instance0))
|
||||||
|
|
||||||
|
m_get_compute_node_by_hostname.assert_any_call('Node_2')
|
||||||
|
self.assertRaises(
|
||||||
|
exception.ComputeNodeNotFound,
|
||||||
|
compute_model.get_node_by_uuid, 'Node_2')
|
||||||
|
|
||||||
def test_nova_instance_create(self):
|
def test_nova_instance_create(self):
|
||||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||||
self.fake_cdmc.cluster_data_model = compute_model
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
@@ -211,7 +277,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
|
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
exception.InstanceNotFound,
|
exception.InstanceNotFound,
|
||||||
compute_model.get_instance_from_id, instance0_uuid)
|
compute_model.get_instance_by_uuid, instance0_uuid)
|
||||||
|
|
||||||
message = self.load_message('scenario3_instance-create.json')
|
message = self.load_message('scenario3_instance-create.json')
|
||||||
handler.info(
|
handler.info(
|
||||||
@@ -222,12 +288,12 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
|
|
||||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
cpu_capacity = compute_model.get_resource_from_id(
|
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
disk_capacity = compute_model.get_resource_from_id(
|
disk_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk)
|
element.ResourceType.disk)
|
||||||
memory_capacity = compute_model.get_resource_from_id(
|
memory_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory)
|
element.ResourceType.memory)
|
||||||
|
|
||||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||||
@@ -243,7 +309,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
|
|
||||||
# Before
|
# Before
|
||||||
self.assertTrue(compute_model.get_instance_from_id(instance0_uuid))
|
self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid))
|
||||||
for resource in compute_model.resource.values():
|
for resource in compute_model.resource.values():
|
||||||
self.assertIn(instance0_uuid, resource.mapping)
|
self.assertIn(instance0_uuid, resource.mapping)
|
||||||
|
|
||||||
@@ -259,7 +325,7 @@ class TestNovaNotifications(NotificationTestCase):
|
|||||||
# After
|
# After
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
exception.InstanceNotFound,
|
exception.InstanceNotFound,
|
||||||
compute_model.get_instance_from_id, instance0_uuid)
|
compute_model.get_instance_by_uuid, instance0_uuid)
|
||||||
|
|
||||||
for resource in compute_model.resource.values():
|
for resource in compute_model.resource.values():
|
||||||
self.assertNotIn(instance0_uuid, resource.mapping)
|
self.assertNotIn(instance0_uuid, resource.mapping)
|
||||||
@@ -282,7 +348,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2'
|
instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2'
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
exception.InstanceNotFound,
|
exception.InstanceNotFound,
|
||||||
compute_model.get_instance_from_id, instance0_uuid)
|
compute_model.get_instance_by_uuid, instance0_uuid)
|
||||||
|
|
||||||
message = self.load_message(
|
message = self.load_message(
|
||||||
'scenario3_legacy_instance-create-end.json')
|
'scenario3_legacy_instance-create-end.json')
|
||||||
@@ -295,12 +361,12 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
|
|
||||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
cpu_capacity = compute_model.get_resource_from_id(
|
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
disk_capacity = compute_model.get_resource_from_id(
|
disk_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk)
|
element.ResourceType.disk)
|
||||||
memory_capacity = compute_model.get_resource_from_id(
|
memory_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory)
|
element.ResourceType.memory)
|
||||||
|
|
||||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||||
@@ -314,7 +380,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
||||||
|
|
||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
|
|
||||||
message = self.load_message('scenario3_legacy_instance-update.json')
|
message = self.load_message('scenario3_legacy_instance-update.json')
|
||||||
|
|
||||||
@@ -330,7 +396,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
|
|
||||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||||
|
|
||||||
def test_legacy_instance_update_notfound_creates(self):
|
def test_legacy_instance_update_instance_notfound_creates(self):
|
||||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||||
self.fake_cdmc.cluster_data_model = compute_model
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
||||||
@@ -340,9 +406,9 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
message = self.load_message('scenario3_legacy_instance-update.json')
|
message = self.load_message('scenario3_legacy_instance-update.json')
|
||||||
|
|
||||||
with mock.patch.object(
|
with mock.patch.object(
|
||||||
model_root.ModelRoot, 'get_instance_from_id'
|
model_root.ModelRoot, 'get_instance_by_uuid'
|
||||||
) as m_get_instance_from_id:
|
) as m_get_instance_by_uuid:
|
||||||
m_get_instance_from_id.side_effect = exception.InstanceNotFound(
|
m_get_instance_by_uuid.side_effect = exception.InstanceNotFound(
|
||||||
name='TEST')
|
name='TEST')
|
||||||
handler.info(
|
handler.info(
|
||||||
ctxt=self.context,
|
ctxt=self.context,
|
||||||
@@ -352,58 +418,121 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
|
|
||||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||||
|
|
||||||
def test_legacy_instance_update_node_notfound_stil_creates(self):
|
@mock.patch.object(nova_helper, "NovaHelper")
|
||||||
|
def test_legacy_instance_update_node_notfound_still_creates(
|
||||||
|
self, m_nova_helper_cls):
|
||||||
|
m_get_compute_node_by_hostname = mock.Mock(
|
||||||
|
side_effect=lambda uuid: mock.Mock(
|
||||||
|
name='m_get_compute_node_by_hostname',
|
||||||
|
id=3,
|
||||||
|
uuid=uuid,
|
||||||
|
memory_mb=7777,
|
||||||
|
vcpus=42,
|
||||||
|
free_disk_gb=974,
|
||||||
|
local_gb=1337))
|
||||||
|
m_nova_helper_cls.return_value = mock.Mock(
|
||||||
|
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
|
||||||
|
name='m_nova_helper')
|
||||||
|
|
||||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||||
self.fake_cdmc.cluster_data_model = compute_model
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
||||||
|
|
||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
|
||||||
|
|
||||||
message = self.load_message('scenario3_legacy_instance-update.json')
|
message = self.load_message(
|
||||||
|
'scenario3_notfound_legacy_instance-update.json')
|
||||||
|
|
||||||
with mock.patch.object(
|
handler.info(
|
||||||
model_root.ModelRoot, 'get_instance_from_id'
|
ctxt=self.context,
|
||||||
) as m_get_instance_from_id:
|
publisher_id=message['publisher_id'],
|
||||||
m_get_instance_from_id.side_effect = exception.InstanceNotFound(
|
event_type=message['event_type'],
|
||||||
name='TEST')
|
payload=message['payload'],
|
||||||
with mock.patch.object(
|
metadata=self.FAKE_METADATA,
|
||||||
model_root.ModelRoot, 'get_node_from_id'
|
)
|
||||||
) as m_get_node_from_id:
|
|
||||||
m_get_node_from_id.side_effect = exception.ComputeNodeNotFound(
|
|
||||||
name='TEST')
|
|
||||||
handler.info(
|
|
||||||
ctxt=self.context,
|
|
||||||
publisher_id=message['publisher_id'],
|
|
||||||
event_type=message['event_type'],
|
|
||||||
payload=message['payload'],
|
|
||||||
metadata=self.FAKE_METADATA,
|
|
||||||
)
|
|
||||||
|
|
||||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
cpu_capacity = compute_model.get_resource_from_id(
|
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.cpu_cores)
|
element.ResourceType.cpu_cores)
|
||||||
disk_capacity = compute_model.get_resource_from_id(
|
disk = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.disk)
|
element.ResourceType.disk)
|
||||||
memory_capacity = compute_model.get_resource_from_id(
|
disk_capacity = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.disk_capacity)
|
||||||
|
memory_capacity = compute_model.get_resource_by_uuid(
|
||||||
element.ResourceType.memory)
|
element.ResourceType.memory)
|
||||||
|
|
||||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||||
self.assertEqual(1, cpu_capacity.get_capacity(instance0))
|
self.assertEqual(1, cpu_capacity.get_capacity(instance0))
|
||||||
|
self.assertEqual(1, disk.get_capacity(instance0))
|
||||||
self.assertEqual(1, disk_capacity.get_capacity(instance0))
|
self.assertEqual(1, disk_capacity.get_capacity(instance0))
|
||||||
self.assertEqual(512, memory_capacity.get_capacity(instance0))
|
self.assertEqual(512, memory_capacity.get_capacity(instance0))
|
||||||
|
|
||||||
|
m_get_compute_node_by_hostname.assert_any_call('Node_2')
|
||||||
|
node_2 = compute_model.get_node_by_uuid('Node_2')
|
||||||
|
self.assertEqual(7777, memory_capacity.get_capacity(node_2))
|
||||||
|
self.assertEqual(42, cpu_capacity.get_capacity(node_2))
|
||||||
|
self.assertEqual(974, disk.get_capacity(node_2))
|
||||||
|
self.assertEqual(1337, disk_capacity.get_capacity(node_2))
|
||||||
|
|
||||||
|
@mock.patch.object(nova_helper, "NovaHelper")
|
||||||
|
def test_legacy_instance_update_node_notfound_set_unmapped(
|
||||||
|
self, m_nova_helper_cls):
|
||||||
|
m_get_compute_node_by_hostname = mock.Mock(
|
||||||
|
side_effect=exception.ComputeNodeNotFound)
|
||||||
|
m_nova_helper_cls.return_value = mock.Mock(
|
||||||
|
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
|
||||||
|
name='m_nova_helper')
|
||||||
|
|
||||||
|
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||||
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
|
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
||||||
|
|
||||||
|
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
|
||||||
|
|
||||||
|
message = self.load_message(
|
||||||
|
'scenario3_notfound_legacy_instance-update.json')
|
||||||
|
|
||||||
|
handler.info(
|
||||||
|
ctxt=self.context,
|
||||||
|
publisher_id=message['publisher_id'],
|
||||||
|
event_type=message['event_type'],
|
||||||
|
payload=message['payload'],
|
||||||
|
metadata=self.FAKE_METADATA,
|
||||||
|
)
|
||||||
|
|
||||||
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
|
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.cpu_cores)
|
||||||
|
disk = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.disk)
|
||||||
|
disk_capacity = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.disk_capacity)
|
||||||
|
memory_capacity = compute_model.get_resource_by_uuid(
|
||||||
|
element.ResourceType.memory)
|
||||||
|
|
||||||
|
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||||
|
self.assertEqual(1, cpu_capacity.get_capacity(instance0))
|
||||||
|
self.assertEqual(1, disk.get_capacity(instance0))
|
||||||
|
self.assertEqual(1, disk_capacity.get_capacity(instance0))
|
||||||
|
self.assertEqual(512, memory_capacity.get_capacity(instance0))
|
||||||
|
|
||||||
|
m_get_compute_node_by_hostname.assert_any_call('Node_2')
|
||||||
|
self.assertRaises(
|
||||||
|
exception.ComputeNodeNotFound,
|
||||||
|
compute_model.get_node_by_uuid, 'Node_2')
|
||||||
|
|
||||||
def test_legacy_live_migrated_end(self):
|
def test_legacy_live_migrated_end(self):
|
||||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||||
self.fake_cdmc.cluster_data_model = compute_model
|
self.fake_cdmc.cluster_data_model = compute_model
|
||||||
handler = novanotification.LegacyLiveMigratedEnd(self.fake_cdmc)
|
handler = novanotification.LegacyLiveMigratedEnd(self.fake_cdmc)
|
||||||
|
|
||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||||
|
|
||||||
node = compute_model.get_node_from_instance_id(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_0', node.uuid)
|
self.assertEqual('Node_0', node.uuid)
|
||||||
|
|
||||||
message = self.load_message(
|
message = self.load_message(
|
||||||
@@ -415,7 +544,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
payload=message['payload'],
|
payload=message['payload'],
|
||||||
metadata=self.FAKE_METADATA,
|
metadata=self.FAKE_METADATA,
|
||||||
)
|
)
|
||||||
node = compute_model.get_node_from_instance_id(instance0_uuid)
|
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||||
self.assertEqual('Node_1', node.uuid)
|
self.assertEqual('Node_1', node.uuid)
|
||||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||||
|
|
||||||
@@ -427,7 +556,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||||
|
|
||||||
# Before
|
# Before
|
||||||
self.assertTrue(compute_model.get_instance_from_id(instance0_uuid))
|
self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid))
|
||||||
for resource in compute_model.resource.values():
|
for resource in compute_model.resource.values():
|
||||||
self.assertIn(instance0_uuid, resource.mapping)
|
self.assertIn(instance0_uuid, resource.mapping)
|
||||||
|
|
||||||
@@ -444,7 +573,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
|||||||
# After
|
# After
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
exception.InstanceNotFound,
|
exception.InstanceNotFound,
|
||||||
compute_model.get_instance_from_id, instance0_uuid)
|
compute_model.get_instance_by_uuid, instance0_uuid)
|
||||||
|
|
||||||
for resource in compute_model.resource.values():
|
for resource in compute_model.resource.values():
|
||||||
self.assertNotIn(instance0_uuid, resource.mapping)
|
self.assertNotIn(instance0_uuid, resource.mapping)
|
||||||
|
|||||||
@@ -20,8 +20,7 @@ import uuid
|
|||||||
|
|
||||||
from watcher.decision_engine.model import element
|
from watcher.decision_engine.model import element
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies import \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
faker_cluster_state
|
|
||||||
|
|
||||||
|
|
||||||
class TestMapping(base.TestCase):
|
class TestMapping(base.TestCase):
|
||||||
@@ -44,10 +43,10 @@ class TestMapping(base.TestCase):
|
|||||||
node = model.mapping.get_node_from_instance(instance)
|
node = model.mapping.get_node_from_instance(instance)
|
||||||
self.assertEqual('Node_0', node.uuid)
|
self.assertEqual('Node_0', node.uuid)
|
||||||
|
|
||||||
def test_get_node_from_instance_id(self):
|
def test_get_node_by_instance_uuid(self):
|
||||||
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||||
|
|
||||||
nodes = model.mapping.get_node_instances_from_id("BLABLABLA")
|
nodes = model.mapping.get_node_instances_by_uuid("BLABLABLA")
|
||||||
self.assertEqual(0, len(nodes))
|
self.assertEqual(0, len(nodes))
|
||||||
|
|
||||||
def test_get_all_instances(self):
|
def test_get_all_instances(self):
|
||||||
@@ -74,9 +73,9 @@ class TestMapping(base.TestCase):
|
|||||||
instances = model.get_all_instances()
|
instances = model.get_all_instances()
|
||||||
keys = list(instances.keys())
|
keys = list(instances.keys())
|
||||||
instance0 = instances[keys[0]]
|
instance0 = instances[keys[0]]
|
||||||
node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
|
node0 = model.mapping.get_node_by_instance_uuid(instance0.uuid)
|
||||||
instance1 = instances[keys[1]]
|
instance1 = instances[keys[1]]
|
||||||
node1 = model.mapping.get_node_from_instance_id(instance1.uuid)
|
node1 = model.mapping.get_node_by_instance_uuid(instance1.uuid)
|
||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
False,
|
False,
|
||||||
@@ -91,26 +90,24 @@ class TestMapping(base.TestCase):
|
|||||||
True,
|
True,
|
||||||
model.migrate_instance(instance1, node0, node1))
|
model.migrate_instance(instance1, node0, node1))
|
||||||
|
|
||||||
def test_unmap_from_id_log_warning(self):
|
def test_unmap_by_uuid_log_warning(self):
|
||||||
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||||
instances = model.get_all_instances()
|
instances = model.get_all_instances()
|
||||||
keys = list(instances.keys())
|
keys = list(instances.keys())
|
||||||
instance0 = instances[keys[0]]
|
instance0 = instances[keys[0]]
|
||||||
id_ = "{0}".format(uuid.uuid4())
|
uuid_ = "{0}".format(uuid.uuid4())
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(id=1)
|
||||||
node.uuid = id_
|
node.uuid = uuid_
|
||||||
|
|
||||||
model.mapping.unmap_from_id(node.uuid, instance0.uuid)
|
model.mapping.unmap_by_uuid(node.uuid, instance0.uuid)
|
||||||
# self.assertEqual(len(model.mapping.get_node_instances_from_id(
|
|
||||||
# node.uuid)), 1)
|
|
||||||
|
|
||||||
def test_unmap_from_id(self):
|
def test_unmap_by_uuid(self):
|
||||||
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||||
instances = model.get_all_instances()
|
instances = model.get_all_instances()
|
||||||
keys = list(instances.keys())
|
keys = list(instances.keys())
|
||||||
instance0 = instances[keys[0]]
|
instance0 = instances[keys[0]]
|
||||||
node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
|
node0 = model.mapping.get_node_by_instance_uuid(instance0.uuid)
|
||||||
|
|
||||||
model.mapping.unmap_from_id(node0.uuid, instance0.uuid)
|
model.mapping.unmap_by_uuid(node0.uuid, instance0.uuid)
|
||||||
self.assertEqual(0, len(model.mapping.get_node_instances_from_id(
|
self.assertEqual(0, len(model.mapping.get_node_instances_by_uuid(
|
||||||
node0.uuid)))
|
node0.uuid)))
|
||||||
|
|||||||
@@ -15,109 +15,149 @@
|
|||||||
# implied.
|
# implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
|
||||||
import uuid
|
import os
|
||||||
|
|
||||||
|
from lxml import etree
|
||||||
|
from oslo_utils import uuidutils
|
||||||
|
import six
|
||||||
|
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
from watcher.decision_engine.model import element
|
from watcher.decision_engine.model import element
|
||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
|
||||||
|
|
||||||
|
|
||||||
class TestModel(base.TestCase):
|
class TestModel(base.TestCase):
|
||||||
def test_model(self):
|
|
||||||
|
def load_data(self, filename):
|
||||||
|
cwd = os.path.abspath(os.path.dirname(__file__))
|
||||||
|
data_folder = os.path.join(cwd, "data")
|
||||||
|
|
||||||
|
with open(os.path.join(data_folder, filename), 'rb') as xml_file:
|
||||||
|
xml_data = xml_file.read()
|
||||||
|
|
||||||
|
return xml_data
|
||||||
|
|
||||||
|
def load_model(self, filename):
|
||||||
|
return model_root.ModelRoot.from_xml(self.load_data(filename))
|
||||||
|
|
||||||
|
def test_model_structure(self):
|
||||||
fake_cluster = faker_cluster_state.FakerModelCollector()
|
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||||
model = fake_cluster.generate_scenario_1()
|
model = fake_cluster.build_scenario_1()
|
||||||
|
|
||||||
self.assertEqual(5, len(model._nodes))
|
self.assertEqual(5, len(model._nodes))
|
||||||
self.assertEqual(35, len(model._instances))
|
self.assertEqual(35, len(model._instances))
|
||||||
self.assertEqual(5, len(model.mapping.get_mapping()))
|
self.assertEqual(5, len(model.mapping.get_mapping()))
|
||||||
|
|
||||||
|
expected_struct_str = self.load_data('scenario_1.xml')
|
||||||
|
parser = etree.XMLParser(remove_blank_text=True)
|
||||||
|
expected_struct = etree.fromstring(expected_struct_str, parser)
|
||||||
|
model_structure = etree.fromstring(model.to_string(), parser)
|
||||||
|
|
||||||
|
normalized_expected_output = six.BytesIO()
|
||||||
|
normalized_model_output = six.BytesIO()
|
||||||
|
expected_struct.getroottree().write_c14n(normalized_expected_output)
|
||||||
|
model_structure.getroottree().write_c14n(normalized_model_output)
|
||||||
|
|
||||||
|
normalized_expected_struct = normalized_expected_output.getvalue()
|
||||||
|
normalized_model_struct = normalized_model_output.getvalue()
|
||||||
|
|
||||||
|
self.assertEqual(normalized_expected_struct, normalized_model_struct)
|
||||||
|
|
||||||
|
def test_build_model_from_xml(self):
|
||||||
|
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||||
|
|
||||||
|
expected_model = fake_cluster.generate_scenario_1()
|
||||||
|
struct_str = self.load_data('scenario_1.xml')
|
||||||
|
|
||||||
|
model = model_root.ModelRoot.from_xml(struct_str)
|
||||||
|
self.assertEqual(expected_model.to_string(), model.to_string())
|
||||||
|
|
||||||
def test_add_node(self):
|
def test_add_node(self):
|
||||||
model = model_root.ModelRoot()
|
model = model_root.ModelRoot()
|
||||||
id_ = "{0}".format(uuid.uuid4())
|
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(id=1)
|
||||||
node.uuid = id_
|
node.uuid = uuid_
|
||||||
model.add_node(node)
|
model.add_node(node)
|
||||||
self.assertEqual(node, model.get_node_from_id(id_))
|
self.assertEqual(node, model.get_node_by_uuid(uuid_))
|
||||||
|
|
||||||
def test_delete_node(self):
|
def test_delete_node(self):
|
||||||
model = model_root.ModelRoot()
|
model = model_root.ModelRoot()
|
||||||
id_ = "{0}".format(uuid.uuid4())
|
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(id=1)
|
||||||
node.uuid = id_
|
node.uuid = uuid_
|
||||||
model.add_node(node)
|
model.add_node(node)
|
||||||
self.assertEqual(node, model.get_node_from_id(id_))
|
self.assertEqual(node, model.get_node_by_uuid(uuid_))
|
||||||
model.remove_node(node)
|
model.remove_node(node)
|
||||||
self.assertRaises(exception.ComputeNodeNotFound,
|
self.assertRaises(exception.ComputeNodeNotFound,
|
||||||
model.get_node_from_id, id_)
|
model.get_node_by_uuid, uuid_)
|
||||||
|
|
||||||
def test_get_all_compute_nodes(self):
|
def test_get_all_compute_nodes(self):
|
||||||
model = model_root.ModelRoot()
|
model = model_root.ModelRoot()
|
||||||
for _ in range(10):
|
for id_ in range(10):
|
||||||
id_ = "{0}".format(uuid.uuid4())
|
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(id_)
|
||||||
node.uuid = id_
|
node.uuid = uuid_
|
||||||
model.add_node(node)
|
model.add_node(node)
|
||||||
all_nodes = model.get_all_compute_nodes()
|
all_nodes = model.get_all_compute_nodes()
|
||||||
for id_ in all_nodes:
|
for uuid_ in all_nodes:
|
||||||
node = model.get_node_from_id(id_)
|
node = model.get_node_by_uuid(uuid_)
|
||||||
model.assert_node(node)
|
model.assert_node(node)
|
||||||
|
|
||||||
def test_set_get_state_nodes(self):
|
def test_set_get_state_nodes(self):
|
||||||
model = model_root.ModelRoot()
|
model = model_root.ModelRoot()
|
||||||
id_ = "{0}".format(uuid.uuid4())
|
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(id=1)
|
||||||
node.uuid = id_
|
node.uuid = uuid_
|
||||||
model.add_node(node)
|
model.add_node(node)
|
||||||
|
|
||||||
self.assertIn(node.state, [el.value for el in element.ServiceState])
|
self.assertIn(node.state, [el.value for el in element.ServiceState])
|
||||||
|
|
||||||
node = model.get_node_from_id(id_)
|
node = model.get_node_by_uuid(uuid_)
|
||||||
node.state = element.ServiceState.OFFLINE.value
|
node.state = element.ServiceState.OFFLINE.value
|
||||||
self.assertIn(node.state, [el.value for el in element.ServiceState])
|
self.assertIn(node.state, [el.value for el in element.ServiceState])
|
||||||
|
|
||||||
def test_node_from_id_raise(self):
|
def test_node_from_uuid_raise(self):
|
||||||
model = model_root.ModelRoot()
|
model = model_root.ModelRoot()
|
||||||
id_ = "{0}".format(uuid.uuid4())
|
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(id=1)
|
||||||
node.uuid = id_
|
node.uuid = uuid_
|
||||||
model.add_node(node)
|
model.add_node(node)
|
||||||
|
|
||||||
id2 = "{0}".format(uuid.uuid4())
|
uuid2 = "{0}".format(uuidutils.generate_uuid())
|
||||||
self.assertRaises(exception.ComputeNodeNotFound,
|
self.assertRaises(exception.ComputeNodeNotFound,
|
||||||
model.get_node_from_id, id2)
|
model.get_node_by_uuid, uuid2)
|
||||||
|
|
||||||
def test_remove_node_raise(self):
|
def test_remove_node_raise(self):
|
||||||
model = model_root.ModelRoot()
|
model = model_root.ModelRoot()
|
||||||
id_ = "{0}".format(uuid.uuid4())
|
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(id=1)
|
||||||
node.uuid = id_
|
node.uuid = uuid_
|
||||||
model.add_node(node)
|
model.add_node(node)
|
||||||
|
|
||||||
id2 = "{0}".format(uuid.uuid4())
|
uuid2 = "{0}".format(uuidutils.generate_uuid())
|
||||||
node2 = element.ComputeNode()
|
node2 = element.ComputeNode(id=2)
|
||||||
node2.uuid = id2
|
node2.uuid = uuid2
|
||||||
|
|
||||||
self.assertRaises(exception.ComputeNodeNotFound,
|
self.assertRaises(exception.ComputeNodeNotFound,
|
||||||
model.remove_node, node2)
|
model.remove_node, node2)
|
||||||
|
|
||||||
def test_assert_node_raise(self):
|
def test_assert_node_raise(self):
|
||||||
model = model_root.ModelRoot()
|
model = model_root.ModelRoot()
|
||||||
id_ = "{0}".format(uuid.uuid4())
|
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||||
node = element.ComputeNode()
|
node = element.ComputeNode(id=1)
|
||||||
node.uuid = id_
|
node.uuid = uuid_
|
||||||
model.add_node(node)
|
model.add_node(node)
|
||||||
self.assertRaises(exception.IllegalArgumentException,
|
self.assertRaises(exception.IllegalArgumentException,
|
||||||
model.assert_node, "objet_qcq")
|
model.assert_node, "objet_qcq")
|
||||||
|
|
||||||
def test_instance_from_id_raise(self):
|
def test_instance_from_uuid_raise(self):
|
||||||
fake_cluster = faker_cluster_state.FakerModelCollector()
|
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||||
model = fake_cluster.generate_scenario_1()
|
model = fake_cluster.generate_scenario_1()
|
||||||
self.assertRaises(exception.InstanceNotFound,
|
self.assertRaises(exception.InstanceNotFound,
|
||||||
model.get_instance_from_id, "valeur_qcq")
|
model.get_instance_by_uuid, "valeur_qcq")
|
||||||
|
|
||||||
def test_assert_instance_raise(self):
|
def test_assert_instance_raise(self):
|
||||||
model = model_root.ModelRoot()
|
model = model_root.ModelRoot()
|
||||||
|
|||||||
@@ -24,10 +24,8 @@ from watcher.decision_engine.strategy import strategies
|
|||||||
from watcher import objects
|
from watcher import objects
|
||||||
from watcher.tests.db import base
|
from watcher.tests.db import base
|
||||||
from watcher.tests.db import utils as db_utils
|
from watcher.tests.db import utils as db_utils
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
from watcher.tests.decision_engine.model import faker_metrics_collector as fake
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
|
||||||
import faker_metrics_collector as fake
|
|
||||||
from watcher.tests.objects import utils as obj_utils
|
from watcher.tests.objects import utils as obj_utils
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -30,12 +30,12 @@ class TestDummyScorer(base.TestCase):
|
|||||||
def test_metadata(self):
|
def test_metadata(self):
|
||||||
scorer = dummy_scorer.DummyScorer(config=None)
|
scorer = dummy_scorer.DummyScorer(config=None)
|
||||||
self.assertEqual('dummy_scorer', scorer.get_name())
|
self.assertEqual('dummy_scorer', scorer.get_name())
|
||||||
self.assertTrue('Dummy' in scorer.get_description())
|
self.assertIn('Dummy', scorer.get_description())
|
||||||
|
|
||||||
metainfo = scorer.get_metainfo()
|
metainfo = scorer.get_metainfo()
|
||||||
self.assertTrue('feature_columns' in metainfo)
|
self.assertIn('feature_columns', metainfo)
|
||||||
self.assertTrue('result_columns' in metainfo)
|
self.assertIn('result_columns', metainfo)
|
||||||
self.assertTrue('workloads' in metainfo)
|
self.assertIn('workloads', metainfo)
|
||||||
|
|
||||||
def test_calculate_score(self):
|
def test_calculate_score(self):
|
||||||
scorer = dummy_scorer.DummyScorer(config=None)
|
scorer = dummy_scorer.DummyScorer(config=None)
|
||||||
|
|||||||
@@ -1,270 +0,0 @@
|
|||||||
# -*- encoding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# Authors: Vojtech CIMA <cima@zhaw.ch>
|
|
||||||
# Bruno GRAZIOLI <gaea@zhaw.ch>
|
|
||||||
# Sean MURPHY <murp@zhaw.ch>
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from watcher.decision_engine.model.collector import base
|
|
||||||
from watcher.decision_engine.model import element
|
|
||||||
from watcher.decision_engine.model import model_root as modelroot
|
|
||||||
|
|
||||||
|
|
||||||
class FakerModelCollector(base.BaseClusterDataModelCollector):
|
|
||||||
|
|
||||||
def __init__(self, config=None, osc=None):
|
|
||||||
if config is None:
|
|
||||||
config = mock.Mock()
|
|
||||||
super(FakerModelCollector, self).__init__(config)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def notification_endpoints(self):
|
|
||||||
return []
|
|
||||||
|
|
||||||
def execute(self):
|
|
||||||
return self.generate_scenario_1()
|
|
||||||
|
|
||||||
def generate_scenario_1(self):
|
|
||||||
"""Simulates cluster with 2 nodes and 2 instances using 1:1 mapping"""
|
|
||||||
|
|
||||||
current_state_cluster = modelroot.ModelRoot()
|
|
||||||
count_node = 2
|
|
||||||
count_instance = 2
|
|
||||||
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
disk_capacity =\
|
|
||||||
element.Resource(element.ResourceType.disk_capacity)
|
|
||||||
|
|
||||||
current_state_cluster.create_resource(mem)
|
|
||||||
current_state_cluster.create_resource(num_cores)
|
|
||||||
current_state_cluster.create_resource(disk)
|
|
||||||
current_state_cluster.create_resource(disk_capacity)
|
|
||||||
|
|
||||||
for i in range(0, count_node):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
node.state = 'enabled'
|
|
||||||
|
|
||||||
mem.set_capacity(node, 64)
|
|
||||||
disk_capacity.set_capacity(node, 250)
|
|
||||||
num_cores.set_capacity(node, 40)
|
|
||||||
current_state_cluster.add_node(node)
|
|
||||||
|
|
||||||
for i in range(0, count_instance):
|
|
||||||
instance_uuid = "INSTANCE_{0}".format(i)
|
|
||||||
instance = element.Instance()
|
|
||||||
instance.uuid = instance_uuid
|
|
||||||
instance.state = element.InstanceState.ACTIVE.value
|
|
||||||
mem.set_capacity(instance, 2)
|
|
||||||
disk.set_capacity(instance, 20)
|
|
||||||
num_cores.set_capacity(instance, 10)
|
|
||||||
current_state_cluster.add_instance(instance)
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_0"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_0"))
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_1"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_1"))
|
|
||||||
|
|
||||||
return current_state_cluster
|
|
||||||
|
|
||||||
def generate_scenario_2(self):
|
|
||||||
"""Simulates a cluster
|
|
||||||
|
|
||||||
With 4 nodes and 6 instances all mapped to a single node
|
|
||||||
"""
|
|
||||||
|
|
||||||
current_state_cluster = modelroot.ModelRoot()
|
|
||||||
count_node = 4
|
|
||||||
count_instance = 6
|
|
||||||
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
disk_capacity =\
|
|
||||||
element.Resource(element.ResourceType.disk_capacity)
|
|
||||||
|
|
||||||
current_state_cluster.create_resource(mem)
|
|
||||||
current_state_cluster.create_resource(num_cores)
|
|
||||||
current_state_cluster.create_resource(disk)
|
|
||||||
current_state_cluster.create_resource(disk_capacity)
|
|
||||||
|
|
||||||
for i in range(0, count_node):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
node.state = 'up'
|
|
||||||
|
|
||||||
mem.set_capacity(node, 64)
|
|
||||||
disk_capacity.set_capacity(node, 250)
|
|
||||||
num_cores.set_capacity(node, 16)
|
|
||||||
current_state_cluster.add_node(node)
|
|
||||||
|
|
||||||
for i in range(0, count_instance):
|
|
||||||
instance_uuid = "INSTANCE_{0}".format(i)
|
|
||||||
instance = element.Instance()
|
|
||||||
instance.uuid = instance_uuid
|
|
||||||
instance.state = element.InstanceState.ACTIVE.value
|
|
||||||
mem.set_capacity(instance, 2)
|
|
||||||
disk.set_capacity(instance, 20)
|
|
||||||
num_cores.set_capacity(instance, 10)
|
|
||||||
current_state_cluster.add_instance(instance)
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_0"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
|
|
||||||
|
|
||||||
return current_state_cluster
|
|
||||||
|
|
||||||
def generate_scenario_3(self):
|
|
||||||
"""Simulates a cluster
|
|
||||||
|
|
||||||
With 4 nodes and 6 instances all mapped to one node
|
|
||||||
"""
|
|
||||||
|
|
||||||
current_state_cluster = modelroot.ModelRoot()
|
|
||||||
count_node = 2
|
|
||||||
count_instance = 4
|
|
||||||
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
disk_capacity =\
|
|
||||||
element.Resource(element.ResourceType.disk_capacity)
|
|
||||||
|
|
||||||
current_state_cluster.create_resource(mem)
|
|
||||||
current_state_cluster.create_resource(num_cores)
|
|
||||||
current_state_cluster.create_resource(disk)
|
|
||||||
current_state_cluster.create_resource(disk_capacity)
|
|
||||||
|
|
||||||
for i in range(0, count_node):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
node.state = 'up'
|
|
||||||
|
|
||||||
mem.set_capacity(node, 64)
|
|
||||||
disk_capacity.set_capacity(node, 250)
|
|
||||||
num_cores.set_capacity(node, 10)
|
|
||||||
current_state_cluster.add_node(node)
|
|
||||||
|
|
||||||
for i in range(6, 6 + count_instance):
|
|
||||||
instance_uuid = "INSTANCE_{0}".format(i)
|
|
||||||
instance = element.Instance()
|
|
||||||
instance.uuid = instance_uuid
|
|
||||||
instance.state = element.InstanceState.ACTIVE.value
|
|
||||||
mem.set_capacity(instance, 2)
|
|
||||||
disk.set_capacity(instance, 20)
|
|
||||||
num_cores.set_capacity(instance, 2 ** (i-6))
|
|
||||||
current_state_cluster.add_instance(instance)
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_0"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
|
|
||||||
|
|
||||||
return current_state_cluster
|
|
||||||
|
|
||||||
|
|
||||||
class FakeCeilometerMetrics(object):
|
|
||||||
def __init__(self, model):
|
|
||||||
self.model = model
|
|
||||||
|
|
||||||
def mock_get_statistics(self, resource_id, meter_name, period=3600,
|
|
||||||
aggregate='avg'):
|
|
||||||
if meter_name == "compute.node.cpu.percent":
|
|
||||||
return self.get_node_cpu_util(resource_id)
|
|
||||||
elif meter_name == "cpu_util":
|
|
||||||
return self.get_instance_cpu_util(resource_id)
|
|
||||||
elif meter_name == "memory.usage":
|
|
||||||
return self.get_instance_ram_util(resource_id)
|
|
||||||
elif meter_name == "disk.root.size":
|
|
||||||
return self.get_instance_disk_root_size(resource_id)
|
|
||||||
|
|
||||||
def get_node_cpu_util(self, r_id):
|
|
||||||
"""Calculates node utilization dynamicaly.
|
|
||||||
|
|
||||||
node CPU utilization should consider
|
|
||||||
and corelate with actual instance-node mappings
|
|
||||||
provided within a cluster model.
|
|
||||||
Returns relative node CPU utilization <0, 100>.
|
|
||||||
:param r_id: resource id
|
|
||||||
"""
|
|
||||||
|
|
||||||
id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
|
|
||||||
instances = self.model.get_mapping().get_node_instances_from_id(id)
|
|
||||||
util_sum = 0.0
|
|
||||||
node_cpu_cores = self.model.get_resource_from_id(
|
|
||||||
element.ResourceType.cpu_cores).get_capacity_from_id(id)
|
|
||||||
for instance_uuid in instances:
|
|
||||||
instance_cpu_cores = self.model.get_resource_from_id(
|
|
||||||
element.ResourceType.cpu_cores).\
|
|
||||||
get_capacity(self.model.get_instance_from_id(instance_uuid))
|
|
||||||
total_cpu_util = instance_cpu_cores * self.get_instance_cpu_util(
|
|
||||||
instance_uuid)
|
|
||||||
util_sum += total_cpu_util / 100.0
|
|
||||||
util_sum /= node_cpu_cores
|
|
||||||
return util_sum * 100.0
|
|
||||||
|
|
||||||
def get_instance_cpu_util(self, r_id):
|
|
||||||
instance_cpu_util = dict()
|
|
||||||
instance_cpu_util['INSTANCE_0'] = 10
|
|
||||||
instance_cpu_util['INSTANCE_1'] = 30
|
|
||||||
instance_cpu_util['INSTANCE_2'] = 60
|
|
||||||
instance_cpu_util['INSTANCE_3'] = 20
|
|
||||||
instance_cpu_util['INSTANCE_4'] = 40
|
|
||||||
instance_cpu_util['INSTANCE_5'] = 50
|
|
||||||
instance_cpu_util['INSTANCE_6'] = 100
|
|
||||||
instance_cpu_util['INSTANCE_7'] = 100
|
|
||||||
instance_cpu_util['INSTANCE_8'] = 100
|
|
||||||
instance_cpu_util['INSTANCE_9'] = 100
|
|
||||||
return instance_cpu_util[str(r_id)]
|
|
||||||
|
|
||||||
def get_instance_ram_util(self, r_id):
|
|
||||||
instance_ram_util = dict()
|
|
||||||
instance_ram_util['INSTANCE_0'] = 1
|
|
||||||
instance_ram_util['INSTANCE_1'] = 2
|
|
||||||
instance_ram_util['INSTANCE_2'] = 4
|
|
||||||
instance_ram_util['INSTANCE_3'] = 8
|
|
||||||
instance_ram_util['INSTANCE_4'] = 3
|
|
||||||
instance_ram_util['INSTANCE_5'] = 2
|
|
||||||
instance_ram_util['INSTANCE_6'] = 1
|
|
||||||
instance_ram_util['INSTANCE_7'] = 2
|
|
||||||
instance_ram_util['INSTANCE_8'] = 4
|
|
||||||
instance_ram_util['INSTANCE_9'] = 8
|
|
||||||
return instance_ram_util[str(r_id)]
|
|
||||||
|
|
||||||
def get_instance_disk_root_size(self, r_id):
|
|
||||||
instance_disk_util = dict()
|
|
||||||
instance_disk_util['INSTANCE_0'] = 10
|
|
||||||
instance_disk_util['INSTANCE_1'] = 15
|
|
||||||
instance_disk_util['INSTANCE_2'] = 30
|
|
||||||
instance_disk_util['INSTANCE_3'] = 35
|
|
||||||
instance_disk_util['INSTANCE_4'] = 20
|
|
||||||
instance_disk_util['INSTANCE_5'] = 25
|
|
||||||
instance_disk_util['INSTANCE_6'] = 25
|
|
||||||
instance_disk_util['INSTANCE_7'] = 25
|
|
||||||
instance_disk_util['INSTANCE_8'] = 25
|
|
||||||
instance_disk_util['INSTANCE_9'] = 25
|
|
||||||
return instance_disk_util[str(r_id)]
|
|
||||||
@@ -1,384 +0,0 @@
|
|||||||
# -*- encoding: utf-8 -*-
|
|
||||||
# Copyright (c) 2015 b<>com
|
|
||||||
#
|
|
||||||
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from watcher.decision_engine.model.collector import base
|
|
||||||
from watcher.decision_engine.model import element
|
|
||||||
from watcher.decision_engine.model import model_root as modelroot
|
|
||||||
|
|
||||||
|
|
||||||
class FakerModelCollector(base.BaseClusterDataModelCollector):
|
|
||||||
|
|
||||||
def __init__(self, config=None, osc=None):
|
|
||||||
if config is None:
|
|
||||||
config = mock.Mock(period=777)
|
|
||||||
super(FakerModelCollector, self).__init__(config)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def notification_endpoints(self):
|
|
||||||
return []
|
|
||||||
|
|
||||||
def execute(self):
|
|
||||||
return self._cluster_data_model or self.generate_scenario_1()
|
|
||||||
|
|
||||||
def generate_scenario_1(self):
|
|
||||||
instances = []
|
|
||||||
|
|
||||||
current_state_cluster = modelroot.ModelRoot()
|
|
||||||
# number of nodes
|
|
||||||
node_count = 5
|
|
||||||
# number max of instance per node
|
|
||||||
node_instance_count = 7
|
|
||||||
# total number of virtual machine
|
|
||||||
instance_count = (node_count * node_instance_count)
|
|
||||||
|
|
||||||
# define ressouce ( CPU, MEM disk, ... )
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
# 2199.954 Mhz
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
|
|
||||||
current_state_cluster.create_resource(mem)
|
|
||||||
current_state_cluster.create_resource(num_cores)
|
|
||||||
current_state_cluster.create_resource(disk)
|
|
||||||
|
|
||||||
for i in range(0, node_count):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
|
|
||||||
mem.set_capacity(node, 132)
|
|
||||||
disk.set_capacity(node, 250)
|
|
||||||
num_cores.set_capacity(node, 40)
|
|
||||||
current_state_cluster.add_node(node)
|
|
||||||
|
|
||||||
for i in range(0, instance_count):
|
|
||||||
instance_uuid = "INSTANCE_{0}".format(i)
|
|
||||||
instance = element.Instance()
|
|
||||||
instance.uuid = instance_uuid
|
|
||||||
mem.set_capacity(instance, 2)
|
|
||||||
disk.set_capacity(instance, 20)
|
|
||||||
num_cores.set_capacity(instance, 10)
|
|
||||||
instances.append(instance)
|
|
||||||
current_state_cluster.add_instance(instance)
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_0"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_0"))
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_0"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_1"))
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_1"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_2"))
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_2"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_3"))
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_2"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_4"))
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_2"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_5"))
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_3"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_6"))
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_4"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_7"))
|
|
||||||
|
|
||||||
return current_state_cluster
|
|
||||||
|
|
||||||
def map(self, model, h_id, instance_id):
|
|
||||||
model.get_mapping().map(
|
|
||||||
model.get_node_from_id(h_id),
|
|
||||||
model.get_instance_from_id(instance_id))
|
|
||||||
|
|
||||||
def generate_scenario_3_with_2_nodes(self):
|
|
||||||
instances = []
|
|
||||||
|
|
||||||
root = modelroot.ModelRoot()
|
|
||||||
# number of nodes
|
|
||||||
node_count = 2
|
|
||||||
|
|
||||||
# define ressouce ( CPU, MEM disk, ... )
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
# 2199.954 Mhz
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
|
|
||||||
root.create_resource(mem)
|
|
||||||
root.create_resource(num_cores)
|
|
||||||
root.create_resource(disk)
|
|
||||||
|
|
||||||
for i in range(0, node_count):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
|
|
||||||
mem.set_capacity(node, 132)
|
|
||||||
disk.set_capacity(node, 250)
|
|
||||||
num_cores.set_capacity(node, 40)
|
|
||||||
root.add_node(node)
|
|
||||||
|
|
||||||
instance1 = element.Instance()
|
|
||||||
instance1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
|
|
||||||
mem.set_capacity(instance1, 2)
|
|
||||||
disk.set_capacity(instance1, 20)
|
|
||||||
num_cores.set_capacity(instance1, 10)
|
|
||||||
instances.append(instance1)
|
|
||||||
root.add_instance(instance1)
|
|
||||||
|
|
||||||
instance2 = element.Instance()
|
|
||||||
instance2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517"
|
|
||||||
mem.set_capacity(instance2, 2)
|
|
||||||
disk.set_capacity(instance2, 20)
|
|
||||||
num_cores.set_capacity(instance2, 10)
|
|
||||||
instances.append(instance2)
|
|
||||||
root.add_instance(instance2)
|
|
||||||
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
|
||||||
root.get_instance_from_id(str(instance1.uuid)))
|
|
||||||
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
|
||||||
root.get_instance_from_id(str(instance2.uuid)))
|
|
||||||
|
|
||||||
return root
|
|
||||||
|
|
||||||
def generate_scenario_4_with_1_node_no_instance(self):
|
|
||||||
current_state_cluster = modelroot.ModelRoot()
|
|
||||||
# number of nodes
|
|
||||||
node_count = 1
|
|
||||||
|
|
||||||
# define ressouce ( CPU, MEM disk, ... )
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
# 2199.954 Mhz
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
|
|
||||||
current_state_cluster.create_resource(mem)
|
|
||||||
current_state_cluster.create_resource(num_cores)
|
|
||||||
current_state_cluster.create_resource(disk)
|
|
||||||
|
|
||||||
for i in range(0, node_count):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
|
|
||||||
mem.set_capacity(node, 1)
|
|
||||||
disk.set_capacity(node, 1)
|
|
||||||
num_cores.set_capacity(node, 1)
|
|
||||||
current_state_cluster.add_node(node)
|
|
||||||
|
|
||||||
return current_state_cluster
|
|
||||||
|
|
||||||
def generate_scenario_5_with_instance_disk_0(self):
|
|
||||||
instances = []
|
|
||||||
current_state_cluster = modelroot.ModelRoot()
|
|
||||||
# number of nodes
|
|
||||||
node_count = 1
|
|
||||||
# number of instances
|
|
||||||
instance_count = 1
|
|
||||||
|
|
||||||
# define ressouce ( CPU, MEM disk, ... )
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
# 2199.954 Mhz
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
|
|
||||||
current_state_cluster.create_resource(mem)
|
|
||||||
current_state_cluster.create_resource(num_cores)
|
|
||||||
current_state_cluster.create_resource(disk)
|
|
||||||
|
|
||||||
for i in range(0, node_count):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
|
|
||||||
mem.set_capacity(node, 4)
|
|
||||||
disk.set_capacity(node, 4)
|
|
||||||
num_cores.set_capacity(node, 4)
|
|
||||||
current_state_cluster.add_node(node)
|
|
||||||
|
|
||||||
for i in range(0, instance_count):
|
|
||||||
instance_uuid = "INSTANCE_{0}".format(i)
|
|
||||||
instance = element.Instance()
|
|
||||||
instance.uuid = instance_uuid
|
|
||||||
mem.set_capacity(instance, 2)
|
|
||||||
disk.set_capacity(instance, 0)
|
|
||||||
num_cores.set_capacity(instance, 4)
|
|
||||||
instances.append(instance)
|
|
||||||
current_state_cluster.add_instance(instance)
|
|
||||||
|
|
||||||
current_state_cluster.get_mapping().map(
|
|
||||||
current_state_cluster.get_node_from_id("Node_0"),
|
|
||||||
current_state_cluster.get_instance_from_id("INSTANCE_0"))
|
|
||||||
|
|
||||||
return current_state_cluster
|
|
||||||
|
|
||||||
def generate_scenario_6_with_2_nodes(self):
|
|
||||||
instances = []
|
|
||||||
root = modelroot.ModelRoot()
|
|
||||||
# number of nodes
|
|
||||||
node_count = 2
|
|
||||||
|
|
||||||
# define ressouce ( CPU, MEM disk, ... )
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
# 2199.954 Mhz
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
|
|
||||||
root.create_resource(mem)
|
|
||||||
root.create_resource(num_cores)
|
|
||||||
root.create_resource(disk)
|
|
||||||
|
|
||||||
for i in range(0, node_count):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
|
|
||||||
mem.set_capacity(node, 132)
|
|
||||||
disk.set_capacity(node, 250)
|
|
||||||
num_cores.set_capacity(node, 40)
|
|
||||||
root.add_node(node)
|
|
||||||
|
|
||||||
instance1 = element.Instance()
|
|
||||||
instance1.uuid = "INSTANCE_1"
|
|
||||||
mem.set_capacity(instance1, 2)
|
|
||||||
disk.set_capacity(instance1, 20)
|
|
||||||
num_cores.set_capacity(instance1, 10)
|
|
||||||
instances.append(instance1)
|
|
||||||
root.add_instance(instance1)
|
|
||||||
|
|
||||||
instance11 = element.Instance()
|
|
||||||
instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
|
|
||||||
mem.set_capacity(instance11, 2)
|
|
||||||
disk.set_capacity(instance11, 20)
|
|
||||||
num_cores.set_capacity(instance11, 10)
|
|
||||||
instances.append(instance11)
|
|
||||||
root.add_instance(instance11)
|
|
||||||
|
|
||||||
instance2 = element.Instance()
|
|
||||||
instance2.uuid = "INSTANCE_3"
|
|
||||||
mem.set_capacity(instance2, 2)
|
|
||||||
disk.set_capacity(instance2, 20)
|
|
||||||
num_cores.set_capacity(instance2, 10)
|
|
||||||
instances.append(instance2)
|
|
||||||
root.add_instance(instance2)
|
|
||||||
|
|
||||||
instance21 = element.Instance()
|
|
||||||
instance21.uuid = "INSTANCE_4"
|
|
||||||
mem.set_capacity(instance21, 2)
|
|
||||||
disk.set_capacity(instance21, 20)
|
|
||||||
num_cores.set_capacity(instance21, 10)
|
|
||||||
instances.append(instance21)
|
|
||||||
root.add_instance(instance21)
|
|
||||||
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
|
||||||
root.get_instance_from_id(str(instance1.uuid)))
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
|
||||||
root.get_instance_from_id(str(instance11.uuid)))
|
|
||||||
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
|
||||||
root.get_instance_from_id(str(instance2.uuid)))
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
|
||||||
root.get_instance_from_id(str(instance21.uuid)))
|
|
||||||
return root
|
|
||||||
|
|
||||||
def generate_scenario_7_with_2_nodes(self):
|
|
||||||
instances = []
|
|
||||||
root = modelroot.ModelRoot()
|
|
||||||
# number of nodes
|
|
||||||
count_node = 2
|
|
||||||
|
|
||||||
# define ressouce ( CPU, MEM disk, ... )
|
|
||||||
mem = element.Resource(element.ResourceType.memory)
|
|
||||||
# 2199.954 Mhz
|
|
||||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
|
||||||
disk = element.Resource(element.ResourceType.disk)
|
|
||||||
|
|
||||||
root.create_resource(mem)
|
|
||||||
root.create_resource(num_cores)
|
|
||||||
root.create_resource(disk)
|
|
||||||
|
|
||||||
for i in range(0, count_node):
|
|
||||||
node_uuid = "Node_{0}".format(i)
|
|
||||||
node = element.ComputeNode()
|
|
||||||
node.uuid = node_uuid
|
|
||||||
node.hostname = "hostname_{0}".format(i)
|
|
||||||
|
|
||||||
mem.set_capacity(node, 132)
|
|
||||||
disk.set_capacity(node, 250)
|
|
||||||
num_cores.set_capacity(node, 50)
|
|
||||||
root.add_node(node)
|
|
||||||
|
|
||||||
instance1 = element.Instance()
|
|
||||||
instance1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906"
|
|
||||||
mem.set_capacity(instance1, 2)
|
|
||||||
disk.set_capacity(instance1, 20)
|
|
||||||
num_cores.set_capacity(instance1, 15)
|
|
||||||
instances.append(instance1)
|
|
||||||
root.add_instance(instance1)
|
|
||||||
|
|
||||||
instance11 = element.Instance()
|
|
||||||
instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
|
|
||||||
mem.set_capacity(instance11, 2)
|
|
||||||
disk.set_capacity(instance11, 20)
|
|
||||||
num_cores.set_capacity(instance11, 10)
|
|
||||||
instances.append(instance11)
|
|
||||||
root.add_instance(instance11)
|
|
||||||
|
|
||||||
instance2 = element.Instance()
|
|
||||||
instance2.uuid = "INSTANCE_3"
|
|
||||||
mem.set_capacity(instance2, 2)
|
|
||||||
disk.set_capacity(instance2, 20)
|
|
||||||
num_cores.set_capacity(instance2, 10)
|
|
||||||
instances.append(instance2)
|
|
||||||
root.add_instance(instance2)
|
|
||||||
|
|
||||||
instance21 = element.Instance()
|
|
||||||
instance21.uuid = "INSTANCE_4"
|
|
||||||
mem.set_capacity(instance21, 2)
|
|
||||||
disk.set_capacity(instance21, 20)
|
|
||||||
num_cores.set_capacity(instance21, 10)
|
|
||||||
instances.append(instance21)
|
|
||||||
root.add_instance(instance21)
|
|
||||||
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
|
||||||
root.get_instance_from_id(str(instance1.uuid)))
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
|
||||||
root.get_instance_from_id(str(instance11.uuid)))
|
|
||||||
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
|
||||||
root.get_instance_from_id(str(instance2.uuid)))
|
|
||||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
|
||||||
root.get_instance_from_id(str(instance21.uuid)))
|
|
||||||
return root
|
|
||||||
@@ -20,14 +20,14 @@ import collections
|
|||||||
import mock
|
import mock
|
||||||
|
|
||||||
from watcher.applier.loading import default
|
from watcher.applier.loading import default
|
||||||
|
from watcher.common import clients
|
||||||
from watcher.common import exception
|
from watcher.common import exception
|
||||||
|
from watcher.decision_engine.model.collector import nova
|
||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.strategy import strategies
|
from watcher.decision_engine.strategy import strategies
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
from watcher.tests.decision_engine.model import faker_metrics_collector
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
|
||||||
import faker_metrics_collector
|
|
||||||
|
|
||||||
|
|
||||||
class TestBasicConsolidation(base.TestCase):
|
class TestBasicConsolidation(base.TestCase):
|
||||||
@@ -39,9 +39,13 @@ class TestBasicConsolidation(base.TestCase):
|
|||||||
# fake cluster
|
# fake cluster
|
||||||
self.fake_cluster = faker_cluster_state.FakerModelCollector()
|
self.fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||||
|
|
||||||
|
p_osc = mock.patch.object(
|
||||||
|
clients, "OpenStackClients")
|
||||||
|
self.m_osc = p_osc.start()
|
||||||
|
self.addCleanup(p_osc.stop)
|
||||||
|
|
||||||
p_model = mock.patch.object(
|
p_model = mock.patch.object(
|
||||||
strategies.BasicConsolidation, "compute_model",
|
nova.NovaClusterDataModelCollector, "execute")
|
||||||
new_callable=mock.PropertyMock)
|
|
||||||
self.m_model = p_model.start()
|
self.m_model = p_model.start()
|
||||||
self.addCleanup(p_model.stop)
|
self.addCleanup(p_model.stop)
|
||||||
|
|
||||||
@@ -67,39 +71,39 @@ class TestBasicConsolidation(base.TestCase):
|
|||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
node_1_score = 0.023333333333333317
|
node_1_score = 0.023333333333333317
|
||||||
self.assertEqual(node_1_score, self.strategy.calculate_score_node(
|
self.assertEqual(node_1_score, self.strategy.calculate_score_node(
|
||||||
model.get_node_from_id("Node_1")))
|
model.get_node_by_uuid("Node_1")))
|
||||||
node_2_score = 0.26666666666666666
|
node_2_score = 0.26666666666666666
|
||||||
self.assertEqual(node_2_score, self.strategy.calculate_score_node(
|
self.assertEqual(node_2_score, self.strategy.calculate_score_node(
|
||||||
model.get_node_from_id("Node_2")))
|
model.get_node_by_uuid("Node_2")))
|
||||||
node_0_score = 0.023333333333333317
|
node_0_score = 0.023333333333333317
|
||||||
self.assertEqual(node_0_score, self.strategy.calculate_score_node(
|
self.assertEqual(node_0_score, self.strategy.calculate_score_node(
|
||||||
model.get_node_from_id("Node_0")))
|
model.get_node_by_uuid("Node_0")))
|
||||||
|
|
||||||
def test_basic_consolidation_score_instance(self):
|
def test_basic_consolidation_score_instance(self):
|
||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
instance_0 = model.get_instance_from_id("INSTANCE_0")
|
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
|
||||||
instance_0_score = 0.023333333333333317
|
instance_0_score = 0.023333333333333317
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
instance_0_score,
|
instance_0_score,
|
||||||
self.strategy.calculate_score_instance(instance_0))
|
self.strategy.calculate_score_instance(instance_0))
|
||||||
|
|
||||||
instance_1 = model.get_instance_from_id("INSTANCE_1")
|
instance_1 = model.get_instance_by_uuid("INSTANCE_1")
|
||||||
instance_1_score = 0.023333333333333317
|
instance_1_score = 0.023333333333333317
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
instance_1_score,
|
instance_1_score,
|
||||||
self.strategy.calculate_score_instance(instance_1))
|
self.strategy.calculate_score_instance(instance_1))
|
||||||
instance_2 = model.get_instance_from_id("INSTANCE_2")
|
instance_2 = model.get_instance_by_uuid("INSTANCE_2")
|
||||||
instance_2_score = 0.033333333333333326
|
instance_2_score = 0.033333333333333326
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
instance_2_score,
|
instance_2_score,
|
||||||
self.strategy.calculate_score_instance(instance_2))
|
self.strategy.calculate_score_instance(instance_2))
|
||||||
instance_6 = model.get_instance_from_id("INSTANCE_6")
|
instance_6 = model.get_instance_by_uuid("INSTANCE_6")
|
||||||
instance_6_score = 0.02666666666666669
|
instance_6_score = 0.02666666666666669
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
instance_6_score,
|
instance_6_score,
|
||||||
self.strategy.calculate_score_instance(instance_6))
|
self.strategy.calculate_score_instance(instance_6))
|
||||||
instance_7 = model.get_instance_from_id("INSTANCE_7")
|
instance_7 = model.get_instance_by_uuid("INSTANCE_7")
|
||||||
instance_7_score = 0.013333333333333345
|
instance_7_score = 0.013333333333333345
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
instance_7_score,
|
instance_7_score,
|
||||||
@@ -108,7 +112,7 @@ class TestBasicConsolidation(base.TestCase):
|
|||||||
def test_basic_consolidation_score_instance_disk(self):
|
def test_basic_consolidation_score_instance_disk(self):
|
||||||
model = self.fake_cluster.generate_scenario_5_with_instance_disk_0()
|
model = self.fake_cluster.generate_scenario_5_with_instance_disk_0()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
instance_0 = model.get_instance_from_id("INSTANCE_0")
|
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
|
||||||
instance_0_score = 0.023333333333333355
|
instance_0_score = 0.023333333333333355
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
instance_0_score,
|
instance_0_score,
|
||||||
@@ -117,7 +121,7 @@ class TestBasicConsolidation(base.TestCase):
|
|||||||
def test_basic_consolidation_weight(self):
|
def test_basic_consolidation_weight(self):
|
||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
instance_0 = model.get_instance_from_id("INSTANCE_0")
|
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
|
||||||
cores = 16
|
cores = 16
|
||||||
# 80 Go
|
# 80 Go
|
||||||
disk = 80
|
disk = 80
|
||||||
@@ -162,6 +166,14 @@ class TestBasicConsolidation(base.TestCase):
|
|||||||
self.assertFalse(self.strategy.check_threshold(
|
self.assertFalse(self.strategy.check_threshold(
|
||||||
node0, 1000, 1000, 1000))
|
node0, 1000, 1000, 1000))
|
||||||
|
|
||||||
|
def test_basic_consolidation_works_on_model_copy(self):
|
||||||
|
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||||
|
self.m_model.return_value = model
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
model.to_string(), self.strategy.compute_model.to_string())
|
||||||
|
self.assertIsNot(model, self.strategy.compute_model)
|
||||||
|
|
||||||
def test_basic_consolidation_migration(self):
|
def test_basic_consolidation_migration(self):
|
||||||
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
@@ -172,14 +184,37 @@ class TestBasicConsolidation(base.TestCase):
|
|||||||
[action.get('action_type') for action in solution.actions])
|
[action.get('action_type') for action in solution.actions])
|
||||||
|
|
||||||
expected_num_migrations = 1
|
expected_num_migrations = 1
|
||||||
expected_power_state = 0
|
expected_power_state = 1
|
||||||
|
|
||||||
num_migrations = actions_counter.get("migrate", 0)
|
num_migrations = actions_counter.get("migrate", 0)
|
||||||
num_node_state_change = actions_counter.get(
|
num_node_state_change = actions_counter.get(
|
||||||
"change_node_state", 0)
|
"change_nova_service_state", 0)
|
||||||
self.assertEqual(expected_num_migrations, num_migrations)
|
self.assertEqual(expected_num_migrations, num_migrations)
|
||||||
self.assertEqual(expected_power_state, num_node_state_change)
|
self.assertEqual(expected_power_state, num_node_state_change)
|
||||||
|
|
||||||
|
def test_basic_consolidation_execute_scenario_8_with_4_nodes(self):
|
||||||
|
model = self.fake_cluster.generate_scenario_8_with_4_nodes()
|
||||||
|
self.m_model.return_value = model
|
||||||
|
|
||||||
|
solution = self.strategy.execute()
|
||||||
|
|
||||||
|
actions_counter = collections.Counter(
|
||||||
|
[action.get('action_type') for action in solution.actions])
|
||||||
|
|
||||||
|
expected_num_migrations = 5
|
||||||
|
expected_power_state = 3
|
||||||
|
expected_global_efficacy = 60
|
||||||
|
|
||||||
|
num_migrations = actions_counter.get("migrate", 0)
|
||||||
|
num_node_state_change = actions_counter.get(
|
||||||
|
"change_nova_service_state", 0)
|
||||||
|
|
||||||
|
global_efficacy_value = solution.global_efficacy.get("value", 0)
|
||||||
|
|
||||||
|
self.assertEqual(expected_num_migrations, num_migrations)
|
||||||
|
self.assertEqual(expected_power_state, num_node_state_change)
|
||||||
|
self.assertEqual(expected_global_efficacy, global_efficacy_value)
|
||||||
|
|
||||||
def test_exception_stale_cdm(self):
|
def test_exception_stale_cdm(self):
|
||||||
self.fake_cluster.set_cluster_data_model_as_stale()
|
self.fake_cluster.set_cluster_data_model_as_stale()
|
||||||
self.m_model.return_value = self.fake_cluster.cluster_data_model
|
self.m_model.return_value = self.fake_cluster.cluster_data_model
|
||||||
|
|||||||
@@ -21,8 +21,7 @@ from watcher.common import utils
|
|||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.strategy import strategies
|
from watcher.decision_engine.strategy import strategies
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies import \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
faker_cluster_state
|
|
||||||
|
|
||||||
|
|
||||||
class TestDummyStrategy(base.TestCase):
|
class TestDummyStrategy(base.TestCase):
|
||||||
|
|||||||
@@ -21,8 +21,7 @@ from watcher.common import utils
|
|||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.strategy import strategies
|
from watcher.decision_engine.strategy import strategies
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies import \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
faker_cluster_state
|
|
||||||
|
|
||||||
|
|
||||||
class TestDummyWithScorer(base.TestCase):
|
class TestDummyWithScorer(base.TestCase):
|
||||||
|
|||||||
@@ -26,10 +26,8 @@ from watcher.decision_engine.model import element
|
|||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.strategy import strategies
|
from watcher.decision_engine.strategy import strategies
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
from watcher.tests.decision_engine.model import faker_metrics_collector
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
|
||||||
import faker_metrics_collector
|
|
||||||
|
|
||||||
|
|
||||||
class TestOutletTempControl(base.TestCase):
|
class TestOutletTempControl(base.TestCase):
|
||||||
@@ -65,10 +63,10 @@ class TestOutletTempControl(base.TestCase):
|
|||||||
def test_calc_used_res(self):
|
def test_calc_used_res(self):
|
||||||
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
node = model.get_node_from_id('Node_0')
|
node = model.get_node_by_uuid('Node_0')
|
||||||
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
|
cap_cores = model.get_resource_by_uuid(element.ResourceType.cpu_cores)
|
||||||
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
|
cap_mem = model.get_resource_by_uuid(element.ResourceType.memory)
|
||||||
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
|
cap_disk = model.get_resource_by_uuid(element.ResourceType.disk)
|
||||||
cores_used, mem_used, disk_used = self.strategy.calc_used_res(
|
cores_used, mem_used, disk_used = self.strategy.calc_used_res(
|
||||||
node, cap_cores, cap_mem, cap_disk)
|
node, cap_cores, cap_mem, cap_disk)
|
||||||
|
|
||||||
|
|||||||
@@ -26,10 +26,8 @@ from watcher.decision_engine.model import element
|
|||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.strategy import strategies
|
from watcher.decision_engine.strategy import strategies
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
from watcher.tests.decision_engine.model import faker_metrics_collector
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
|
||||||
import faker_metrics_collector
|
|
||||||
|
|
||||||
|
|
||||||
class TestUniformAirflow(base.TestCase):
|
class TestUniformAirflow(base.TestCase):
|
||||||
@@ -70,10 +68,10 @@ class TestUniformAirflow(base.TestCase):
|
|||||||
def test_calc_used_res(self):
|
def test_calc_used_res(self):
|
||||||
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
|
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
node = model.get_node_from_id('Node_0')
|
node = model.get_node_by_uuid('Node_0')
|
||||||
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
|
cap_cores = model.get_resource_by_uuid(element.ResourceType.cpu_cores)
|
||||||
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
|
cap_mem = model.get_resource_by_uuid(element.ResourceType.memory)
|
||||||
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
|
cap_disk = model.get_resource_by_uuid(element.ResourceType.disk)
|
||||||
cores_used, mem_used, disk_used = self.\
|
cores_used, mem_used, disk_used = self.\
|
||||||
strategy.calculate_used_resource(
|
strategy.calculate_used_resource(
|
||||||
node, cap_cores, cap_mem, cap_disk)
|
node, cap_cores, cap_mem, cap_disk)
|
||||||
|
|||||||
@@ -24,8 +24,7 @@ from watcher.common import exception
|
|||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.strategy import strategies
|
from watcher.decision_engine.strategy import strategies
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_and_metrics
|
||||||
import faker_cluster_and_metrics
|
|
||||||
|
|
||||||
|
|
||||||
class TestVMWorkloadConsolidation(base.TestCase):
|
class TestVMWorkloadConsolidation(base.TestCase):
|
||||||
@@ -69,7 +68,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
instance_0 = model.get_instance_from_id("INSTANCE_0")
|
instance_0 = model.get_instance_by_uuid("INSTANCE_0")
|
||||||
instance_util = dict(cpu=1.0, ram=1, disk=10)
|
instance_util = dict(cpu=1.0, ram=1, disk=10)
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
instance_util,
|
instance_util,
|
||||||
@@ -79,7 +78,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
node_0 = model.get_node_from_id("Node_0")
|
node_0 = model.get_node_by_uuid("Node_0")
|
||||||
node_util = dict(cpu=1.0, ram=1, disk=10)
|
node_util = dict(cpu=1.0, ram=1, disk=10)
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
node_util,
|
node_util,
|
||||||
@@ -89,7 +88,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
node_0 = model.get_node_from_id("Node_0")
|
node_0 = model.get_node_by_uuid("Node_0")
|
||||||
node_util = dict(cpu=40, ram=64, disk=250)
|
node_util = dict(cpu=40, ram=64, disk=250)
|
||||||
self.assertEqual(node_util,
|
self.assertEqual(node_util,
|
||||||
self.strategy.get_node_capacity(node_0, model))
|
self.strategy.get_node_capacity(node_0, model))
|
||||||
@@ -98,7 +97,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
node = model.get_node_from_id('Node_0')
|
node = model.get_node_by_uuid('Node_0')
|
||||||
rhu = self.strategy.get_relative_node_utilization(
|
rhu = self.strategy.get_relative_node_utilization(
|
||||||
node, model)
|
node, model)
|
||||||
expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025}
|
expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025}
|
||||||
@@ -116,8 +115,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n1 = model.get_node_from_id('Node_0')
|
n1 = model.get_node_by_uuid('Node_0')
|
||||||
n2 = model.get_node_from_id('Node_1')
|
n2 = model.get_node_by_uuid('Node_1')
|
||||||
instance_uuid = 'INSTANCE_0'
|
instance_uuid = 'INSTANCE_0'
|
||||||
self.strategy.add_migration(instance_uuid, n1, n2, model)
|
self.strategy.add_migration(instance_uuid, n1, n2, model)
|
||||||
self.assertEqual(1, len(self.strategy.solution.actions))
|
self.assertEqual(1, len(self.strategy.solution.actions))
|
||||||
@@ -132,7 +131,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n1 = model.get_node_from_id('Node_0')
|
n1 = model.get_node_by_uuid('Node_0')
|
||||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||||
res = self.strategy.is_overloaded(n1, model, cc)
|
res = self.strategy.is_overloaded(n1, model, cc)
|
||||||
self.assertFalse(res)
|
self.assertFalse(res)
|
||||||
@@ -149,7 +148,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n = model.get_node_from_id('Node_1')
|
n = model.get_node_by_uuid('Node_1')
|
||||||
instance_uuid = 'INSTANCE_0'
|
instance_uuid = 'INSTANCE_0'
|
||||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||||
res = self.strategy.instance_fits(instance_uuid, n, model, cc)
|
res = self.strategy.instance_fits(instance_uuid, n, model, cc)
|
||||||
@@ -163,7 +162,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n = model.get_node_from_id('Node_0')
|
n = model.get_node_by_uuid('Node_0')
|
||||||
self.strategy.add_action_enable_compute_node(n)
|
self.strategy.add_action_enable_compute_node(n)
|
||||||
expected = [{'action_type': 'change_nova_service_state',
|
expected = [{'action_type': 'change_nova_service_state',
|
||||||
'input_parameters': {'state': 'enabled',
|
'input_parameters': {'state': 'enabled',
|
||||||
@@ -174,7 +173,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n = model.get_node_from_id('Node_0')
|
n = model.get_node_by_uuid('Node_0')
|
||||||
self.strategy.add_action_disable_node(n)
|
self.strategy.add_action_disable_node(n)
|
||||||
expected = [{'action_type': 'change_nova_service_state',
|
expected = [{'action_type': 'change_nova_service_state',
|
||||||
'input_parameters': {'state': 'disabled',
|
'input_parameters': {'state': 'disabled',
|
||||||
@@ -185,8 +184,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n1 = model.get_node_from_id('Node_0')
|
n1 = model.get_node_by_uuid('Node_0')
|
||||||
n2 = model.get_node_from_id('Node_1')
|
n2 = model.get_node_by_uuid('Node_1')
|
||||||
instance_uuid = 'INSTANCE_0'
|
instance_uuid = 'INSTANCE_0'
|
||||||
self.strategy.disable_unused_nodes(model)
|
self.strategy.disable_unused_nodes(model)
|
||||||
self.assertEqual(0, len(self.strategy.solution.actions))
|
self.assertEqual(0, len(self.strategy.solution.actions))
|
||||||
@@ -214,8 +213,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_1()
|
model = self.fake_cluster.generate_scenario_1()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n1 = model.get_node_from_id('Node_0')
|
n1 = model.get_node_by_uuid('Node_0')
|
||||||
n2 = model.get_node_from_id('Node_1')
|
n2 = model.get_node_by_uuid('Node_1')
|
||||||
instance_uuid = 'INSTANCE_0'
|
instance_uuid = 'INSTANCE_0'
|
||||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||||
self.strategy.consolidation_phase(model, cc)
|
self.strategy.consolidation_phase(model, cc)
|
||||||
@@ -230,7 +229,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_2()
|
model = self.fake_cluster.generate_scenario_2()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n1 = model.get_node_from_id('Node_0')
|
n1 = model.get_node_by_uuid('Node_0')
|
||||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||||
self.strategy.offload_phase(model, cc)
|
self.strategy.offload_phase(model, cc)
|
||||||
self.strategy.consolidation_phase(model, cc)
|
self.strategy.consolidation_phase(model, cc)
|
||||||
@@ -254,8 +253,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
|
|||||||
model = self.fake_cluster.generate_scenario_3()
|
model = self.fake_cluster.generate_scenario_3()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
self.fake_metrics.model = model
|
self.fake_metrics.model = model
|
||||||
n1 = model.get_node_from_id('Node_0')
|
n1 = model.get_node_by_uuid('Node_0')
|
||||||
n2 = model.get_node_from_id('Node_1')
|
n2 = model.get_node_by_uuid('Node_1')
|
||||||
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
|
||||||
self.strategy.offload_phase(model, cc)
|
self.strategy.offload_phase(model, cc)
|
||||||
expected = [{'action_type': 'migrate',
|
expected = [{'action_type': 'migrate',
|
||||||
|
|||||||
@@ -26,10 +26,8 @@ from watcher.decision_engine.model import element
|
|||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.strategy import strategies
|
from watcher.decision_engine.strategy import strategies
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
from watcher.tests.decision_engine.model import faker_metrics_collector
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
|
||||||
import faker_metrics_collector
|
|
||||||
|
|
||||||
|
|
||||||
class TestWorkloadBalance(base.TestCase):
|
class TestWorkloadBalance(base.TestCase):
|
||||||
@@ -66,10 +64,10 @@ class TestWorkloadBalance(base.TestCase):
|
|||||||
def test_calc_used_res(self):
|
def test_calc_used_res(self):
|
||||||
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
|
model = self.fake_cluster.generate_scenario_6_with_2_nodes()
|
||||||
self.m_model.return_value = model
|
self.m_model.return_value = model
|
||||||
node = model.get_node_from_id('Node_0')
|
node = model.get_node_by_uuid('Node_0')
|
||||||
cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
|
cap_cores = model.get_resource_by_uuid(element.ResourceType.cpu_cores)
|
||||||
cap_mem = model.get_resource_from_id(element.ResourceType.memory)
|
cap_mem = model.get_resource_by_uuid(element.ResourceType.memory)
|
||||||
cap_disk = model.get_resource_from_id(element.ResourceType.disk)
|
cap_disk = model.get_resource_by_uuid(element.ResourceType.disk)
|
||||||
cores_used, mem_used, disk_used = (
|
cores_used, mem_used, disk_used = (
|
||||||
self.strategy.calculate_used_resource(
|
self.strategy.calculate_used_resource(
|
||||||
node, cap_cores, cap_mem, cap_disk))
|
node, cap_cores, cap_mem, cap_disk))
|
||||||
|
|||||||
@@ -19,13 +19,12 @@
|
|||||||
|
|
||||||
import mock
|
import mock
|
||||||
|
|
||||||
|
from watcher.common import utils
|
||||||
from watcher.decision_engine.model import model_root
|
from watcher.decision_engine.model import model_root
|
||||||
from watcher.decision_engine.strategy import strategies
|
from watcher.decision_engine.strategy import strategies
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
import faker_cluster_state
|
from watcher.tests.decision_engine.model import faker_metrics_collector
|
||||||
from watcher.tests.decision_engine.strategy.strategies \
|
|
||||||
import faker_metrics_collector
|
|
||||||
|
|
||||||
|
|
||||||
class TestWorkloadStabilization(base.TestCase):
|
class TestWorkloadStabilization(base.TestCase):
|
||||||
@@ -62,6 +61,26 @@ class TestWorkloadStabilization(base.TestCase):
|
|||||||
self.m_ceilometer.return_value = mock.Mock(
|
self.m_ceilometer.return_value = mock.Mock(
|
||||||
statistic_aggregation=self.fake_metrics.mock_get_statistics)
|
statistic_aggregation=self.fake_metrics.mock_get_statistics)
|
||||||
self.strategy = strategies.WorkloadStabilization(config=mock.Mock())
|
self.strategy = strategies.WorkloadStabilization(config=mock.Mock())
|
||||||
|
self.strategy.input_parameters = utils.Struct()
|
||||||
|
self.strategy.input_parameters.update(
|
||||||
|
{'metrics': ["cpu_util", "memory.resident"],
|
||||||
|
'thresholds': {"cpu_util": 0.2, "memory.resident": 0.2},
|
||||||
|
'weights': {"cpu_util_weight": 1.0,
|
||||||
|
"memory.resident_weight": 1.0},
|
||||||
|
'instance_metrics':
|
||||||
|
{"cpu_util": "hardware.cpu.util",
|
||||||
|
"memory.resident": "hardware.memory.used"},
|
||||||
|
'host_choice': 'retry',
|
||||||
|
'retry_count': 1})
|
||||||
|
self.strategy.metrics = ["cpu_util", "memory.resident"]
|
||||||
|
self.strategy.thresholds = {"cpu_util": 0.2, "memory.resident": 0.2}
|
||||||
|
self.strategy.weights = {"cpu_util_weight": 1.0,
|
||||||
|
"memory.resident_weight": 1.0}
|
||||||
|
self.strategy.instance_metrics = {"cpu_util": "hardware.cpu.util",
|
||||||
|
"memory.resident":
|
||||||
|
"hardware.memory.used"}
|
||||||
|
self.strategy.host_choice = 'retry'
|
||||||
|
self.strategy.retry_count = 1
|
||||||
|
|
||||||
def test_get_instance_load(self):
|
def test_get_instance_load(self):
|
||||||
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
|
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
|
||||||
@@ -138,7 +157,7 @@ class TestWorkloadStabilization(base.TestCase):
|
|||||||
'host': 'Node_1'}]
|
'host': 'Node_1'}]
|
||||||
)
|
)
|
||||||
with mock.patch.object(self.strategy, 'migrate') as mock_migration:
|
with mock.patch.object(self.strategy, 'migrate') as mock_migration:
|
||||||
self.strategy.execute()
|
self.strategy.do_execute()
|
||||||
mock_migration.assert_called_once_with(
|
mock_migration.assert_called_once_with(
|
||||||
'INSTANCE_4', 'Node_2', 'Node_1')
|
'INSTANCE_4', 'Node_2', 'Node_1')
|
||||||
|
|
||||||
@@ -154,7 +173,7 @@ class TestWorkloadStabilization(base.TestCase):
|
|||||||
'host': 'Node_3'}]
|
'host': 'Node_3'}]
|
||||||
)
|
)
|
||||||
with mock.patch.object(self.strategy, 'migrate') as mock_migrate:
|
with mock.patch.object(self.strategy, 'migrate') as mock_migrate:
|
||||||
self.strategy.execute()
|
self.strategy.do_execute()
|
||||||
self.assertEqual(mock_migrate.call_count, 1)
|
self.assertEqual(mock_migrate.call_count, 1)
|
||||||
|
|
||||||
def test_execute_nothing_to_migrate(self):
|
def test_execute_nothing_to_migrate(self):
|
||||||
|
|||||||
@@ -24,8 +24,7 @@ import mock
|
|||||||
from watcher.decision_engine.loading import default as default_loading
|
from watcher.decision_engine.loading import default as default_loading
|
||||||
from watcher.decision_engine import scheduling
|
from watcher.decision_engine import scheduling
|
||||||
from watcher.tests import base
|
from watcher.tests import base
|
||||||
from watcher.tests.decision_engine.strategy.strategies import \
|
from watcher.tests.decision_engine.model import faker_cluster_state
|
||||||
faker_cluster_state
|
|
||||||
|
|
||||||
|
|
||||||
class TestDecisionEngineSchedulingService(base.TestCase):
|
class TestDecisionEngineSchedulingService(base.TestCase):
|
||||||
|
|||||||
@@ -49,6 +49,8 @@ class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest):
|
|||||||
enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup
|
enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup
|
||||||
if cn.get('status') == 'enabled']
|
if cn.get('status') == 'enabled']
|
||||||
|
|
||||||
|
cls.wait_for_compute_node_setup()
|
||||||
|
|
||||||
if len(enabled_compute_nodes) < 2:
|
if len(enabled_compute_nodes) < 2:
|
||||||
raise cls.skipException(
|
raise cls.skipException(
|
||||||
"Less than 2 compute nodes are enabled, "
|
"Less than 2 compute nodes are enabled, "
|
||||||
@@ -62,6 +64,32 @@ class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest):
|
|||||||
return [srv for srv in available_services
|
return [srv for srv in available_services
|
||||||
if srv.get('binary') == 'nova-compute']
|
if srv.get('binary') == 'nova-compute']
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def wait_for_compute_node_setup(cls):
|
||||||
|
|
||||||
|
def _are_compute_nodes_setup():
|
||||||
|
try:
|
||||||
|
hypervisors_client = cls.mgr.hypervisor_client
|
||||||
|
hypervisors = hypervisors_client.list_hypervisors(
|
||||||
|
detail=True)['hypervisors']
|
||||||
|
available_hypervisors = set(
|
||||||
|
hyp['hypervisor_hostname'] for hyp in hypervisors)
|
||||||
|
available_services = set(
|
||||||
|
service['host']
|
||||||
|
for service in cls.get_compute_nodes_setup())
|
||||||
|
|
||||||
|
return (
|
||||||
|
available_hypervisors == available_services and
|
||||||
|
len(hypervisors) >= 2)
|
||||||
|
except Exception:
|
||||||
|
return False
|
||||||
|
|
||||||
|
assert test.call_until_true(
|
||||||
|
func=_are_compute_nodes_setup,
|
||||||
|
duration=600,
|
||||||
|
sleep_for=2
|
||||||
|
)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def rollback_compute_nodes_status(cls):
|
def rollback_compute_nodes_status(cls):
|
||||||
current_compute_nodes_setup = cls.get_compute_nodes_setup()
|
current_compute_nodes_setup = cls.get_compute_nodes_setup()
|
||||||
@@ -107,6 +135,7 @@ class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest):
|
|||||||
"""
|
"""
|
||||||
self.addCleanup(self.rollback_compute_nodes_status)
|
self.addCleanup(self.rollback_compute_nodes_status)
|
||||||
self._create_one_instance_per_host()
|
self._create_one_instance_per_host()
|
||||||
|
|
||||||
_, goal = self.client.show_goal(self.BASIC_GOAL)
|
_, goal = self.client.show_goal(self.BASIC_GOAL)
|
||||||
_, strategy = self.client.show_strategy("basic")
|
_, strategy = self.client.show_strategy("basic")
|
||||||
_, audit_template = self.create_audit_template(
|
_, audit_template = self.create_audit_template(
|
||||||
|
|||||||
Reference in New Issue
Block a user