Cinder model integration

This patch adds Cinder model integration.

Change-Id: I31d5bc5e2bbed885d074d66bf7999d42cec15f10
Implements: blueprint cinder-model-integration
This commit is contained in:
Hidekazu Nakamura
2017-03-28 17:50:10 +09:00
parent 5b6768140f
commit 489356da3a
32 changed files with 2544 additions and 1 deletions

View File

@@ -0,0 +1,126 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from watcher.common import cinder_helper
from watcher.common import clients
from watcher.common import exception
from watcher.tests import base
@mock.patch.object(clients.OpenStackClients, 'cinder')
class TestCinderHelper(base.TestCase):
    """Unit tests for watcher.common.cinder_helper.CinderHelper.

    The class-level patch replaces the cinder client factory on
    OpenStackClients, so every test method receives the patched client
    as the extra ``mock_cinder`` argument.
    """

    def setUp(self):
        super(TestCinderHelper, self).setUp()

    @staticmethod
    def fake_storage_node(**kwargs):
        """Build a mock cinder service entry (a cinder-volume by default)."""
        node = mock.MagicMock()
        node.binary = kwargs.get('binary', 'cinder-volume')
        node.host = kwargs.get('name', 'host@backend')
        return node

    def test_get_storage_node_list(self, mock_cinder):
        node1 = self.fake_storage_node()
        cinder_util = cinder_helper.CinderHelper()
        cinder_util.cinder.services.list.return_value = [node1]
        cinder_util.get_storage_node_list()
        cinder_util.cinder.services.list.assert_called_once_with(
            binary='cinder-volume')

    def test_get_storage_node_by_name_success(self, mock_cinder):
        node1 = self.fake_storage_node()
        cinder_util = cinder_helper.CinderHelper()
        cinder_util.cinder.services.list.return_value = [node1]
        node = cinder_util.get_storage_node_by_name('host@backend')
        self.assertEqual(node1, node)

    def test_get_storage_node_by_name_failure(self, mock_cinder):
        node1 = self.fake_storage_node()
        cinder_util = cinder_helper.CinderHelper()
        cinder_util.cinder.services.list.return_value = [node1]
        self.assertRaisesRegex(
            exception.StorageNodeNotFound,
            "The storage node failure could not be found",
            cinder_util.get_storage_node_by_name, 'failure')

    @staticmethod
    def fake_pool(**kwargs):
        """Build a mock storage pool."""
        pool = mock.MagicMock()
        pool.name = kwargs.get('name', 'host@backend#pool')
        return pool

    def test_get_storage_pool_list(self, mock_cinder):
        pool = self.fake_pool()
        cinder_util = cinder_helper.CinderHelper()
        cinder_util.cinder.pools.list.return_value = [pool]
        cinder_util.get_storage_pool_list()
        cinder_util.cinder.pools.list.assert_called_once_with(detailed=True)

    def test_get_storage_pool_by_name_success(self, mock_cinder):
        pool1 = self.fake_pool()
        cinder_util = cinder_helper.CinderHelper()
        cinder_util.cinder.pools.list.return_value = [pool1]
        pool = cinder_util.get_storage_pool_by_name('host@backend#pool')
        self.assertEqual(pool1, pool)

    def test_get_storage_pool_by_name_failure(self, mock_cinder):
        pool1 = self.fake_pool()
        cinder_util = cinder_helper.CinderHelper()
        # Fixed: stub pools.list (not services.list, as originally written)
        # so the lookup walks the intended pool listing before failing to
        # match 'failure', matching the other pool tests above.
        cinder_util.cinder.pools.list.return_value = [pool1]
        self.assertRaisesRegex(
            exception.PoolNotFound,
            "The pool failure could not be found",
            cinder_util.get_storage_pool_by_name, 'failure')

    @staticmethod
    def fake_volume_type(**kwargs):
        """Build a mock volume type with a volume_backend_name extra spec."""
        volume_type = mock.MagicMock()
        volume_type.name = kwargs.get('name', 'fake_type')
        extra_specs = {'volume_backend_name': 'backend'}
        volume_type.extra_specs = kwargs.get('extra_specs', extra_specs)
        return volume_type

    def test_get_volume_type_list(self, mock_cinder):
        volume_type1 = self.fake_volume_type()
        cinder_util = cinder_helper.CinderHelper()
        cinder_util.cinder.volume_types.list.return_value = [volume_type1]
        cinder_util.get_volume_type_list()
        cinder_util.cinder.volume_types.list.assert_called_once_with()

    def test_get_volume_type_by_backendname_with_backend_exist(
            self, mock_cinder):
        volume_type1 = self.fake_volume_type()
        cinder_util = cinder_helper.CinderHelper()
        cinder_util.cinder.volume_types.list.return_value = [volume_type1]
        volume_type_name = cinder_util.get_volume_type_by_backendname(
            'backend')
        self.assertEqual(volume_type1.name, volume_type_name)

    def test_get_volume_type_by_backendname_with_no_backend_exist(
            self, mock_cinder):
        volume_type1 = self.fake_volume_type()
        cinder_util = cinder_helper.CinderHelper()
        cinder_util.cinder.volume_types.list.return_value = [volume_type1]
        volume_type_name = cinder_util.get_volume_type_by_backendname(
            'nobackend')
        self.assertEqual("", volume_type_name)

View File

@@ -0,0 +1,23 @@
<ModelRoot>
<StorageNode status="enabled" zone="zone_0" state="up" volume_type="type_0" host="host_0@backend_0" human_id="">
<Pool total_capacity_gb="500" name="host_0@backend_0#pool_0" total_volumes="2" provisioned_capacity_gb="80" human_id="" allocated_capacity_gb="80" free_capacity_gb="420" virtual_free="420">
<Volume status="in-use" uuid="VOLUME_0" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_0" human_id="" name="name_0" bootable="false"/>
<Volume status="in-use" uuid="VOLUME_1" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_1" human_id="" name="name_1" bootable="false"/>
</Pool>
<Pool total_capacity_gb="500" name="host_0@backend_0#pool_1" total_volumes="2" provisioned_capacity_gb="80" human_id="" allocated_capacity_gb="80" free_capacity_gb="420" virtual_free="420">
<Volume status="in-use" uuid="VOLUME_2" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_2" human_id="" name="name_2" bootable="false"/>
<Volume status="in-use" uuid="VOLUME_3" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_3" human_id="" name="name_3" bootable="false"/>
</Pool>
</StorageNode>
<StorageNode status="enabled" zone="zone_1" state="up" volume_type="type_1" host="host_1@backend_1" human_id="">
<Pool total_capacity_gb="500" name="host_1@backend_1#pool_0" total_volumes="2" provisioned_capacity_gb="80" human_id="" allocated_capacity_gb="80" free_capacity_gb="420" virtual_free="420">
<Volume status="in-use" uuid="VOLUME_4" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_4" human_id="" name="name_4" bootable="false"/>
<Volume status="in-use" uuid="VOLUME_5" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_5" human_id="" name="name_5" bootable="false"/>
</Pool>
<Pool total_capacity_gb="500" name="host_1@backend_1#pool_1" total_volumes="2" provisioned_capacity_gb="80" human_id="" allocated_capacity_gb="80" free_capacity_gb="420" virtual_free="420">
<Volume status="in-use" uuid="VOLUME_6" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_6" human_id="" name="name_6" bootable="false"/>
<Volume status="in-use" uuid="VOLUME_7" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_7" human_id="" name="name_7" bootable="false"/>
</Pool>
</StorageNode>
<Volume status="in-use" uuid="VOLUME_8" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_8" human_id="" name="name_8" bootable="false"/>
</ModelRoot>

View File

@@ -135,3 +135,123 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
def generate_scenario_9_with_3_active_plus_1_disabled_nodes(self):
return self.load_model(
'scenario_9_with_3_active_plus_1_disabled_nodes.xml')
class FakerStorageModelCollector(base.BaseClusterDataModelCollector):
    """Fake storage cluster data model collector used by the tests.

    Either builds a small storage model programmatically
    (:meth:`build_scenario_1`) or loads it from an XML fixture
    (:meth:`generate_scenario_1`).
    """

    def __init__(self, config=None, osc=None):
        # A mock config with an arbitrary sync period is enough for tests.
        if config is None:
            config = mock.Mock(period=777)
        super(FakerStorageModelCollector, self).__init__(config)

    @property
    def notification_endpoints(self):
        # The fake collector does not listen to any notifications.
        return []

    def load_data(self, filename):
        """Return the raw bytes of an XML fixture stored under ./data/."""
        cwd = os.path.abspath(os.path.dirname(__file__))
        data_folder = os.path.join(cwd, "data")
        with open(os.path.join(data_folder, filename), 'rb') as xml_file:
            xml_data = xml_file.read()
        return xml_data

    def load_model(self, filename):
        """Deserialize an XML fixture into a StorageModelRoot."""
        return modelroot.StorageModelRoot.from_xml(self.load_data(filename))

    def execute(self):
        # Reuse a cached model if one exists, otherwise build scenario 1.
        return self._cluster_data_model or self.build_scenario_1()

    def build_scenario_1(self):
        """Build the 2-node / 4-pool / 9-volume storage model in code.

        The layout mirrors the storage_scenario_1.xml fixture: volumes
        VOLUME_0..VOLUME_7 are mapped two-per-pool; VOLUME_8 is created
        but left unmapped.
        """
        model = modelroot.StorageModelRoot()
        # number of nodes
        node_count = 2
        # number of pools per node
        pool_count = 2
        # number of volumes
        volume_count = 9
        for i in range(0, node_count):
            host = "host_{0}@backend_{0}".format(i)
            zone = "zone_{0}".format(i)
            volume_type = "type_{0}".format(i)
            node_attributes = {
                "host": host,
                "zone": zone,
                "status": 'enabled',
                "state": 'up',
                "volume_type": volume_type,
            }
            node = element.StorageNode(**node_attributes)
            model.add_node(node)
            for j in range(0, pool_count):
                name = "host_{0}@backend_{0}#pool_{1}".format(i, j)
                pool_attributes = {
                    "name": name,
                    "total_volumes": 2,
                    "total_capacity_gb": 500,
                    "free_capacity_gb": 420,
                    "provisioned_capacity_gb": 80,
                    "allocated_capacity_gb": 80,
                    "virtual_free": 420,
                }
                pool = element.Pool(**pool_attributes)
                model.add_pool(pool)
        # Attach every pool to its owning node.
        mappings = [
            ("host_0@backend_0#pool_0", "host_0@backend_0"),
            ("host_0@backend_0#pool_1", "host_0@backend_0"),
            ("host_1@backend_1#pool_0", "host_1@backend_1"),
            ("host_1@backend_1#pool_1", "host_1@backend_1"),
        ]
        for pool_name, node_name in mappings:
            model.map_pool(
                model.get_pool_by_pool_name(pool_name),
                model.get_node_by_name(node_name),
            )
        for k in range(volume_count):
            uuid = "VOLUME_{0}".format(k)
            name = "name_{0}".format(k)
            project_id = "project_{0}".format(k)
            volume_attributes = {
                "size": 40,
                "status": "in-use",
                "uuid": uuid,
                "attachments":
                    '[{"server_id": "server","attachment_id": "attachment"}]',
                "name": name,
                "multiattach": 'True',
                # NOTE(review): the XML fixture uses snapshot_id="" —
                # confirm reusing the volume uuid here is intentional.
                "snapshot_id": uuid,
                "project_id": project_id,
                "metadata": '{"readonly": false,"attached_mode": "rw"}',
                "bootable": 'False'
            }
            volume = element.Volume(**volume_attributes)
            model.add_volume(volume)
        # Attach volumes 0-7 to their pools; VOLUME_8 stays unmapped.
        mappings = [
            ("VOLUME_0", "host_0@backend_0#pool_0"),
            ("VOLUME_1", "host_0@backend_0#pool_0"),
            ("VOLUME_2", "host_0@backend_0#pool_1"),
            ("VOLUME_3", "host_0@backend_0#pool_1"),
            ("VOLUME_4", "host_1@backend_1#pool_0"),
            ("VOLUME_5", "host_1@backend_1#pool_0"),
            ("VOLUME_6", "host_1@backend_1#pool_1"),
            ("VOLUME_7", "host_1@backend_1#pool_1"),
        ]
        for volume_uuid, pool_name in mappings:
            model.map_volume(
                model.get_volume_by_uuid(volume_uuid),
                model.get_pool_by_pool_name(pool_name),
            )
        return model

    def generate_scenario_1(self):
        """Load the equivalent model from the XML fixture."""
        return self.load_model('storage_scenario_1.xml')

View File

@@ -0,0 +1,14 @@
{
"priority": "INFO",
"publisher_id": "capacity.host1@backend1#pool1",
"event_type": "capacity.pool",
"payload": {
"name_to_id": "capacity.host1@backend1#pool1",
"total": 3,
"free": 1,
"allocated": 2,
"provisioned": 2,
"virtual_free": 1,
"reported_at": "2017-05-15T13:42:11Z"
}
}

View File

@@ -0,0 +1,17 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.create.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_00",
"display_name": "name_00",
"size": "40",
"status": "available",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_00",
"metadata": {"readonly": false, "attached_mode": "rw"},
"glance_metadata": {}
}
}

View File

@@ -0,0 +1,14 @@
{
"priority": "INFO",
"publisher_id": "capacity.host_0@backend_0#pool_0",
"event_type": "capacity.pool",
"payload": {
"name_to_id": "host_0@backend_0#pool_0",
"total": 500,
"free": 460,
"allocated": 40,
"provisioned": 40,
"virtual_free": 460,
"reported_at": "2017-05-15T13:42:11Z"
}
}

View File

@@ -0,0 +1,14 @@
{
"priority": "INFO",
"publisher_id": "capacity.host_2@backend_2#pool_0",
"event_type": "capacity.pool",
"payload": {
"name_to_id": "host_2@backend_2#pool_0",
"total": 500,
"free": 460,
"allocated": 40,
"provisioned": 40,
"virtual_free": 460,
"reported_at": "2017-05-15T13:42:11Z"
}
}

View File

@@ -0,0 +1,14 @@
{
"priority": "INFO",
"publisher_id": "capacity.host_0@backend_0#pool_2",
"event_type": "capacity.pool",
"payload": {
"name_to_id": "host_0@backend_0#pool_2",
"total": 500,
"free": 380,
"allocated": 120,
"provisioned": 120,
"virtual_free": 380,
"reported_at": "2017-05-15T13:42:11Z"
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.create.end",
"payload": {
"host": "",
"volume_id": "VOLUME_00",
"display_name": "name_00",
"size": "40",
"status": "error",
"volume_attachment": [],
"snapshot_id": "",
"tenant_id": "project_00",
"metadata": {}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.attach.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_0",
"size": "40",
"status": "in-use",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.create.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_00",
"display_name": "name_00",
"size": "40",
"status": "available",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_00",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_2@backend_2#pool_0",
"event_type": "volume.create.end",
"payload": {
"host": "host_2@backend_2#pool_0",
"volume_id": "VOLUME_00",
"display_name": "name_00",
"size": "40",
"status": "available",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_00",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.delete.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_0",
"size": "40",
"status": "deleting",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.detach.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_0",
"size": "40",
"status": "available",
"volume_attachment": [],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.resize.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_0",
"size": "20",
"status": "in-use",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.update.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_01",
"size": "40",
"status": "enabled",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -17,6 +17,7 @@
# limitations under the License.
from watcher.common import service_manager
from watcher.decision_engine.model.notification import cinder as cnotification
from watcher.decision_engine.model.notification import nova as novanotification
from watcher.tests.decision_engine.model import faker_cluster_state
@@ -65,3 +66,20 @@ class FakeManager(service_manager.ServiceManager):
novanotification.LegacyLiveMigratedEnd(self.fake_cdmc),
novanotification.LegacyInstanceDeletedEnd(self.fake_cdmc),
]
class FakeStorageManager(FakeManager):
    """Fake service manager wired with the cinder notification endpoints."""

    # Shared fake storage cluster data model collector.
    fake_cdmc = faker_cluster_state.FakerStorageModelCollector()

    @property
    def notification_endpoints(self):
        # One endpoint per cinder event the decision engine listens to.
        return [
            cnotification.CapacityNotificationEndpoint(self.fake_cdmc),
            cnotification.VolumeCreateEnd(self.fake_cdmc),
            cnotification.VolumeUpdateEnd(self.fake_cdmc),
            cnotification.VolumeDeleteEnd(self.fake_cdmc),
            cnotification.VolumeAttachEnd(self.fake_cdmc),
            cnotification.VolumeDetachEnd(self.fake_cdmc),
            cnotification.VolumeResizeEnd(self.fake_cdmc),
        ]

View File

@@ -0,0 +1,607 @@
# -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import mock
from oslo_serialization import jsonutils
from watcher.common import cinder_helper
from watcher.common import context
from watcher.common import exception
from watcher.common import service as watcher_service
from watcher.db.sqlalchemy import api as db_api
from watcher.decision_engine.model.notification import cinder as cnotification
from watcher.tests import base as base_test
from watcher.tests.db import utils
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model.notification import fake_managers
class NotificationTestCase(base_test.TestCase):
    """Base test case that can load JSON notification fixtures."""

    @staticmethod
    def load_message(filename):
        """Return the parsed JSON message stored under ./data/."""
        base_dir = os.path.dirname(os.path.abspath(__file__))
        fixture_path = os.path.join(base_dir, "data", filename)
        with open(fixture_path, 'rb') as fp:
            return jsonutils.load(fp)
class TestReceiveCinderNotifications(NotificationTestCase):
    """Check that emitted cinder notifications reach their endpoint.

    Each test patches the ``info`` method of one notification endpoint,
    pushes a fixture message through the decision-engine notification
    dispatcher and verifies the endpoint received it unchanged.
    """

    FAKE_METADATA = {'message_id': None, 'timestamp': None}

    def setUp(self):
        super(TestReceiveCinderNotifications, self).setUp()
        # Make every deserialized request context resolve to the test one.
        p_from_dict = mock.patch.object(context.RequestContext, 'from_dict')
        m_from_dict = p_from_dict.start()
        m_from_dict.return_value = self.context
        self.addCleanup(p_from_dict.stop)
        # Stub the service DB layer so Service() can start without a DB.
        p_get_service_list = mock.patch.object(
            db_api.Connection, 'get_service_list')
        p_update_service = mock.patch.object(
            db_api.Connection, 'update_service')
        m_get_service_list = p_get_service_list.start()
        m_update_service = p_update_service.start()
        fake_service = utils.get_test_service(
            created_at=datetime.datetime.utcnow())
        m_get_service_list.return_value = [fake_service]
        m_update_service.return_value = fake_service.copy()
        self.addCleanup(p_get_service_list.stop)
        self.addCleanup(p_update_service.stop)

    def _dispatch_and_assert(self, m_info, filename, publisher_id,
                             event_type):
        """Dispatch the fixture *filename* and assert endpoint delivery.

        :param m_info: patched ``info`` method of the endpoint under test
        :param filename: JSON fixture to load from ./data/
        :param publisher_id: expected notification publisher id
        :param event_type: expected notification event type
        """
        message = self.load_message(filename)
        expected_message = message['payload']
        de_service = watcher_service.Service(fake_managers.FakeStorageManager)
        incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
        de_service.notification_handler.dispatcher.dispatch(incoming)
        m_info.assert_called_once_with(
            self.context, publisher_id, event_type,
            expected_message, self.FAKE_METADATA)

    @mock.patch.object(cnotification.CapacityNotificationEndpoint, 'info')
    def test_cinder_receive_capacity(self, m_info):
        self._dispatch_and_assert(
            m_info, 'capacity.json',
            'capacity.host1@backend1#pool1', 'capacity.pool')

    @mock.patch.object(cnotification.VolumeCreateEnd, 'info')
    def test_cinder_receive_volume_create_end(self, m_info):
        self._dispatch_and_assert(
            m_info, 'scenario_1_volume-create.json',
            'volume.host_0@backend_0#pool_0', 'volume.create.end')

    @mock.patch.object(cnotification.VolumeUpdateEnd, 'info')
    def test_cinder_receive_volume_update_end(self, m_info):
        self._dispatch_and_assert(
            m_info, 'scenario_1_volume-update.json',
            'volume.host_0@backend_0#pool_0', 'volume.update.end')

    @mock.patch.object(cnotification.VolumeAttachEnd, 'info')
    def test_cinder_receive_volume_attach_end(self, m_info):
        self._dispatch_and_assert(
            m_info, 'scenario_1_volume-attach.json',
            'volume.host_0@backend_0#pool_0', 'volume.attach.end')

    @mock.patch.object(cnotification.VolumeDetachEnd, 'info')
    def test_cinder_receive_volume_detach_end(self, m_info):
        self._dispatch_and_assert(
            m_info, 'scenario_1_volume-detach.json',
            'volume.host_0@backend_0#pool_0', 'volume.detach.end')

    @mock.patch.object(cnotification.VolumeResizeEnd, 'info')
    def test_cinder_receive_volume_resize_end(self, m_info):
        self._dispatch_and_assert(
            m_info, 'scenario_1_volume-resize.json',
            'volume.host_0@backend_0#pool_0', 'volume.resize.end')

    @mock.patch.object(cnotification.VolumeDeleteEnd, 'info')
    def test_cinder_receive_volume_delete_end(self, m_info):
        self._dispatch_and_assert(
            m_info, 'scenario_1_volume-delete.json',
            'volume.host_0@backend_0#pool_0', 'volume.delete.end')
class TestCinderNotifications(NotificationTestCase):
FAKE_METADATA = {'message_id': None, 'timestamp': None}
def setUp(self):
    """Create a fresh fake storage collector for every test."""
    super(TestCinderNotifications, self).setUp()
    # fake cluster
    self.fake_cdmc = faker_cluster_state.FakerStorageModelCollector()
def test_cinder_capacity(self):
    """test consuming capacity"""
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    endpoint = cnotification.CapacityNotificationEndpoint(self.fake_cdmc)
    pool_name = 'host_0@backend_0#pool_0'
    pool = storage_model.get_pool_by_pool_name(pool_name)
    # Sanity-check the model before consuming the notification.
    self.assertEqual(pool_name, pool.name)
    for attr in ('free_capacity_gb', 'virtual_free'):
        self.assertEqual(420, getattr(pool, attr))
    for attr in ('allocated_capacity_gb', 'provisioned_capacity_gb'):
        self.assertEqual(80, getattr(pool, attr))
    msg = self.load_message('scenario_1_capacity.json')
    endpoint.info(
        ctxt=self.context,
        publisher_id=msg['publisher_id'],
        event_type=msg['event_type'],
        payload=msg['payload'],
        metadata=self.FAKE_METADATA,
    )
    # The capacity payload must now be reflected in the pool.
    self.assertEqual(pool_name, pool.name)
    for attr in ('free_capacity_gb', 'virtual_free'):
        self.assertEqual(460, getattr(pool, attr))
    for attr in ('allocated_capacity_gb', 'provisioned_capacity_gb'):
        self.assertEqual(40, getattr(pool, attr))
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_capacity_pool_notfound(self, m_cinder_helper):
    """test consuming capacity, new pool in existing node"""
    # storage_pool_by_name mock
    # configure_mock() is used because Mock(name=...) would set the
    # mock's own name rather than a 'name' attribute.
    return_mock = mock.Mock()
    return_mock.configure_mock(
        name='host_0@backend_0#pool_2',
        total_volumes='2',
        total_capacity_gb='500',
        free_capacity_gb='380',
        provisioned_capacity_gb='120',
        allocated_capacity_gb='120')
    m_get_storage_pool_by_name = mock.Mock(
        side_effect=lambda name: return_mock)
    m_cinder_helper.return_value = mock.Mock(
        get_storage_pool_by_name=m_get_storage_pool_by_name)
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc)
    message = self.load_message('scenario_1_capacity_pool_notfound.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )
    # after consuming message, still pool_0 exists
    pool_0_name = 'host_0@backend_0#pool_0'
    pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
    self.assertEqual(pool_0_name, pool_0.name)
    self.assertEqual(420, pool_0.free_capacity_gb)
    self.assertEqual(420, pool_0.virtual_free)
    self.assertEqual(80, pool_0.allocated_capacity_gb)
    self.assertEqual(80, pool_0.provisioned_capacity_gb)
    # new pool was added
    pool_1_name = 'host_0@backend_0#pool_2'
    m_get_storage_pool_by_name.assert_called_once_with(pool_1_name)
    storage_node = storage_model.get_node_by_pool_name(pool_1_name)
    self.assertEqual('host_0@backend_0', storage_node.host)
    pool_1 = storage_model.get_pool_by_pool_name(pool_1_name)
    self.assertEqual(pool_1_name, pool_1.name)
    self.assertEqual(500, pool_1.total_capacity_gb)
    self.assertEqual(380, pool_1.free_capacity_gb)
    self.assertEqual(120, pool_1.allocated_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_capacity_node_notfound(self, m_cinder_helper):
    """test consuming capacity, new pool in new node"""
    # Pool returned by the patched CinderHelper when the handler looks
    # up the unknown pool.
    return_pool_mock = mock.Mock()
    return_pool_mock.configure_mock(
        name='host_2@backend_2#pool_0',
        total_volumes='2',
        total_capacity_gb='500',
        free_capacity_gb='460',
        provisioned_capacity_gb='40',
        allocated_capacity_gb='40')
    m_get_storage_pool_by_name = mock.Mock(
        side_effect=lambda name: return_pool_mock)
    # storage_node_by_name mock
    return_node_mock = mock.Mock()
    return_node_mock.configure_mock(
        host='host_2@backend_2',
        zone='nova',
        state='up',
        status='enabled')
    m_get_storage_node_by_name = mock.Mock(
        side_effect=lambda name: return_node_mock)
    # NOTE(review): mock.Mock('backend_2') passes 'backend_2' as a spec,
    # not a return value — confirm whether this was meant to return the
    # plain string 'backend_2' instead.
    m_get_volume_type_by_backendname = mock.Mock(
        side_effect=lambda name: mock.Mock('backend_2'))
    m_cinder_helper.return_value = mock.Mock(
        get_storage_pool_by_name=m_get_storage_pool_by_name,
        get_storage_node_by_name=m_get_storage_node_by_name,
        get_volume_type_by_backendname=m_get_volume_type_by_backendname)
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc)
    message = self.load_message('scenario_1_capacity_node_notfound.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )
    # new pool and new node was added
    node_1_name = 'host_2@backend_2'
    pool_1_name = node_1_name + '#pool_0'
    volume_type = 'backend_2'
    m_get_storage_pool_by_name.assert_called_once_with(pool_1_name)
    m_get_storage_node_by_name.assert_called_once_with(node_1_name)
    m_get_volume_type_by_backendname.assert_called_once_with(volume_type)
    # new node was added
    storage_node = storage_model.get_node_by_pool_name(pool_1_name)
    self.assertEqual('host_2@backend_2', storage_node.host)
    # new pool was added
    pool_1 = storage_model.get_pool_by_pool_name(pool_1_name)
    self.assertEqual(pool_1_name, pool_1.name)
    self.assertEqual(500, pool_1.total_capacity_gb)
    self.assertEqual(460, pool_1.free_capacity_gb)
    self.assertEqual(40, pool_1.allocated_capacity_gb)
    self.assertEqual(40, pool_1.provisioned_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_volume_create(self, m_cinder_helper):
    """test creating volume in existing pool and node"""
    # create storage_pool_by_name mock
    # configure_mock() is used because Mock(name=...) would set the
    # mock's own name rather than a 'name' attribute.
    return_pool_mock = mock.Mock()
    return_pool_mock.configure_mock(
        name='host_0@backend_0#pool_0',
        total_volumes='3',
        total_capacity_gb='500',
        free_capacity_gb='380',
        provisioned_capacity_gb='120',
        allocated_capacity_gb='120')
    m_get_storage_pool_by_name = mock.Mock(
        side_effect=lambda name: return_pool_mock)
    m_cinder_helper.return_value = mock.Mock(
        get_storage_pool_by_name=m_get_storage_pool_by_name)
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    handler = cnotification.VolumeCreateEnd(self.fake_cdmc)
    message = self.load_message('scenario_1_volume-create.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )
    # check that volume00 was added to the model
    volume_00_name = 'VOLUME_00'
    volume_00 = storage_model.get_volume_by_uuid(volume_00_name)
    self.assertEqual(volume_00_name, volume_00.uuid)
    self.assertFalse(volume_00.bootable)
    # check that capacity was updated
    pool_0_name = 'host_0@backend_0#pool_0'
    m_get_storage_pool_by_name.assert_called_once_with(pool_0_name)
    pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
    self.assertEqual(pool_0.name, pool_0_name)
    self.assertEqual(3, pool_0.total_volumes)
    self.assertEqual(380, pool_0.free_capacity_gb)
    self.assertEqual(120, pool_0.allocated_capacity_gb)
    self.assertEqual(120, pool_0.provisioned_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_bootable_volume_create(self, m_cinder_helper):
    """test creating bootable volume in existing pool and node"""
    # create storage_pool_by_name mock
    # Same setup as test_cinder_volume_create, but the fixture message
    # marks the volume as bootable.
    return_pool_mock = mock.Mock()
    return_pool_mock.configure_mock(
        name='host_0@backend_0#pool_0',
        total_volumes='3',
        total_capacity_gb='500',
        free_capacity_gb='380',
        provisioned_capacity_gb='120',
        allocated_capacity_gb='120')
    m_get_storage_pool_by_name = mock.Mock(
        side_effect=lambda name: return_pool_mock)
    m_cinder_helper.return_value = mock.Mock(
        get_storage_pool_by_name=m_get_storage_pool_by_name)
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    handler = cnotification.VolumeCreateEnd(self.fake_cdmc)
    message = self.load_message('scenario_1_bootable-volume-create.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )
    # check that volume00 was added to the model
    volume_00_name = 'VOLUME_00'
    volume_00 = storage_model.get_volume_by_uuid(volume_00_name)
    self.assertEqual(volume_00_name, volume_00.uuid)
    self.assertTrue(volume_00.bootable)
    # check that capacity was updated
    pool_0_name = 'host_0@backend_0#pool_0'
    m_get_storage_pool_by_name.assert_called_once_with(pool_0_name)
    pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
    self.assertEqual(pool_0.name, pool_0_name)
    self.assertEqual(3, pool_0.total_volumes)
    self.assertEqual(380, pool_0.free_capacity_gb)
    self.assertEqual(120, pool_0.allocated_capacity_gb)
    self.assertEqual(120, pool_0.provisioned_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_volume_create_pool_notfound(self, m_cinder_helper):
    """Volume created in a pool/node absent from the model.

    The handler is expected to fetch the unknown pool and node from
    Cinder via CinderHelper and add them to the model alongside the
    new volume.
    """
    # Fake pool returned for the unknown pool name; ``name`` must be set
    # through configure_mock() since Mock(name=...) is reserved.
    return_pool_mock = mock.Mock()
    return_pool_mock.configure_mock(
        name='host_2@backend_2#pool_0',
        total_volumes='1',
        total_capacity_gb='500',
        free_capacity_gb='460',
        provisioned_capacity_gb='40',
        allocated_capacity_gb='40')
    m_get_storage_pool_by_name = mock.Mock(
        side_effect=lambda name: return_pool_mock)
    # Fake storage node returned for the unknown host.
    return_node_mock = mock.Mock()
    return_node_mock.configure_mock(
        host='host_2@backend_2',
        zone='nova',
        state='up',
        status='enabled')
    m_get_storage_node_by_name = mock.Mock(
        side_effect=lambda name: return_node_mock)
    # NOTE(review): mock.Mock('backend_2') passes 'backend_2' as the
    # ``spec`` positional argument, not as a name/value -- presumably a
    # plain value or Mock(name='backend_2') was intended; confirm what
    # the handler does with the returned volume type.
    m_get_volume_type_by_backendname = mock.Mock(
        side_effect=lambda name: mock.Mock('backend_2'))
    m_cinder_helper.return_value = mock.Mock(
        get_storage_pool_by_name=m_get_storage_pool_by_name,
        get_storage_node_by_name=m_get_storage_node_by_name,
        get_volume_type_by_backendname=m_get_volume_type_by_backendname)
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    handler = cnotification.VolumeCreateEnd(self.fake_cdmc)
    message = self.load_message(
        'scenario_1_volume-create_pool_notfound.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )
    # check that volume00 was added to the model
    volume_00_name = 'VOLUME_00'
    volume_00 = storage_model.get_volume_by_uuid(volume_00_name)
    self.assertEqual(volume_00_name, volume_00.uuid)
    # check that the previously-unknown pool now carries the capacity
    # figures supplied by the mocked helper
    node_2_name = 'host_2@backend_2'
    pool_0_name = node_2_name + '#pool_0'
    pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
    self.assertEqual(pool_0.name, pool_0_name)
    self.assertEqual(1, pool_0.total_volumes)
    self.assertEqual(460, pool_0.free_capacity_gb)
    self.assertEqual(40, pool_0.allocated_capacity_gb)
    self.assertEqual(40, pool_0.provisioned_capacity_gb)
    # check that the node was looked up (and thus added)
    m_get_storage_node_by_name.assert_called_once_with(node_2_name)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_error_volume_unmapped(self, m_cinder_helper):
    """An error volume is added to the model without any pool lookup."""
    # Any pool lookup would blow up -- the handler must not perform one
    # for a volume in error state.
    m_pool_lookup = mock.Mock(
        side_effect=exception.PoolNotFound(name="TEST"))
    m_cinder_helper.return_value = mock.Mock(
        get_storage_pool_by_name=m_pool_lookup)
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    handler = cnotification.VolumeCreateEnd(self.fake_cdmc)
    message = self.load_message('scenario_1_error-volume-create.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )
    # No pool lookup happened for the error volume.
    m_pool_lookup.assert_not_called()
    # The volume itself still shows up in the model.
    volume_00_name = 'VOLUME_00'
    volume_00 = storage_model.get_volume_by_uuid(volume_00_name)
    self.assertEqual(volume_00_name, volume_00.uuid)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_volume_update(self, m_cinder_helper):
    """A volume.update.end notification renames the volume in the model."""
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    handler = cnotification.VolumeUpdateEnd(self.fake_cdmc)
    # Before the notification the volume carries its scenario name.
    volume_0_name = 'VOLUME_0'
    volume_0 = storage_model.get_volume_by_uuid(volume_0_name)
    self.assertEqual('name_0', volume_0.name)
    # Fake pool for the capacity refresh done by the handler; ``name``
    # goes through configure_mock() because Mock(name=...) is reserved.
    fake_pool = mock.Mock()
    fake_pool.configure_mock(
        name='host_0@backend_0#pool_0',
        total_volumes='2',
        total_capacity_gb='500',
        free_capacity_gb='420',
        provisioned_capacity_gb='80',
        allocated_capacity_gb='80')
    m_pool_lookup = mock.Mock(side_effect=lambda name: fake_pool)
    m_cinder_helper.return_value = mock.Mock(
        get_storage_pool_by_name=m_pool_lookup)
    message = self.load_message('scenario_1_volume-update.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )
    # The volume name was updated from the payload.
    volume_0 = storage_model.get_volume_by_uuid(volume_0_name)
    self.assertEqual('name_01', volume_0.name)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_volume_delete(self, m_cinder_helper):
    """A volume.delete.end notification removes the volume."""
    # Fake pool for the capacity refresh; ``name`` must be set via
    # configure_mock() since Mock(name=...) is reserved.
    fake_pool = mock.Mock()
    fake_pool.configure_mock(
        name='host_0@backend_0#pool_0',
        total_volumes='1',
        total_capacity_gb='500',
        free_capacity_gb='460',
        provisioned_capacity_gb='40',
        allocated_capacity_gb='40')
    m_pool_lookup = mock.Mock(side_effect=lambda name: fake_pool)
    m_cinder_helper.return_value = mock.Mock(
        get_storage_pool_by_name=m_pool_lookup)
    storage_model = self.fake_cdmc.generate_scenario_1()
    self.fake_cdmc.cluster_data_model = storage_model
    handler = cnotification.VolumeDeleteEnd(self.fake_cdmc)
    # The volume exists before the notification is consumed.
    volume_0_uuid = 'VOLUME_0'
    volume_0 = storage_model.get_volume_by_uuid(volume_0_uuid)
    self.assertEqual(volume_0_uuid, volume_0.uuid)
    message = self.load_message('scenario_1_volume-delete.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )
    # ... and is gone afterwards.
    self.assertRaises(
        exception.VolumeNotFound,
        storage_model.get_volume_by_uuid, volume_0_uuid)
    # Pool capacity was refreshed via the mocked helper lookup.
    pool_0_name = 'host_0@backend_0#pool_0'
    m_pool_lookup.assert_called_once_with(pool_0_name)
    pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
    self.assertEqual(pool_0_name, pool_0.name)
    self.assertEqual(1, pool_0.total_volumes)
    self.assertEqual(460, pool_0.free_capacity_gb)
    self.assertEqual(40, pool_0.allocated_capacity_gb)
    self.assertEqual(40, pool_0.provisioned_capacity_gb)

View File

@@ -70,3 +70,85 @@ class TestElement(base.TestCase):
def test_as_xml_element(self):
    """Each scenario's element serializes to XML without raising."""
    self.cls(**self.data).as_xml_element()
class TestStorageElement(base.TestCase):
    """Scenario-based serialization tests for the storage model elements.

    Each scenario supplies ``cls`` (the element class under test) and
    ``data`` (its constructor keyword arguments); ``test_as_xml_element``
    presumably runs once per scenario via the testscenarios machinery in
    the base class -- confirm against ``base.TestCase``.
    """

    scenarios = [
        # StorageNode with every supported field populated.
        ("StorageNode_with_all_fields", dict(
            cls=element.StorageNode,
            data={
                'host': 'host@backend',
                'zone': 'zone',
                'status': 'enabled',
                'state': 'up',
                'volume_type': 'volume_type',
            })),
        # Pool with every field, including the optional virtual_free.
        ("Pool_with_all_fields", dict(
            cls=element.Pool,
            data={
                'name': 'host@backend#pool',
                'total_volumes': 1,
                'total_capacity_gb': 500,
                'free_capacity_gb': 420,
                'provisioned_capacity_gb': 80,
                'allocated_capacity_gb': 80,
                'virtual_free': 420,
            })),
        # Pool without virtual_free: the field must be optional.
        ("Pool_without_virtual_free_fields", dict(
            cls=element.Pool,
            data={
                'name': 'host@backend#pool',
                'total_volumes': 1,
                'total_capacity_gb': 500,
                'free_capacity_gb': 420,
                'provisioned_capacity_gb': 80,
                'allocated_capacity_gb': 80,
            })),
        # Volume with every supported field populated.
        ("Volume_with_all_fields", dict(
            cls=element.Volume,
            data={
                'uuid': 'FAKE_UUID',
                'size': 1,
                'status': 'in-use',
                'attachments': '[{"key": "value"}]',
                'name': 'name',
                'multiattach': 'false',
                'snapshot_id': '',
                'project_id': 'project_id',
                'metadata': '{"key": "value"}',
                'bootable': 'false',
                'human_id': 'human_id',
            })),
        # Volume without bootable: the field must be optional.
        ("Volume_without_bootable_fields", dict(
            cls=element.Volume,
            data={
                'uuid': 'FAKE_UUID',
                'size': 1,
                'status': 'in-use',
                'attachments': '[]',
                'name': 'name',
                'multiattach': 'false',
                'snapshot_id': '',
                'project_id': 'project_id',
                'metadata': '{"key": "value"}',
                'human_id': 'human_id',
            })),
        # Volume without human_id (nor bootable): both must be optional.
        ("Volume_without_human_id_fields", dict(
            cls=element.Volume,
            data={
                'uuid': 'FAKE_UUID',
                'size': 1,
                'status': 'in-use',
                'attachments': '[]',
                'name': 'name',
                'multiattach': 'false',
                'snapshot_id': '',
                'project_id': 'project_id',
                'metadata': '{"key": "value"}',
            })),
    ]

    def test_as_xml_element(self):
        """The scenario's element serializes to XML without raising."""
        el = self.cls(**self.data)
        el.as_xml_element()

View File

@@ -63,6 +63,21 @@ class TestModel(base.TestCase):
model = model_root.ModelRoot.from_xml(struct_str)
self.assertEqual(expected_model.to_string(), model.to_string())
def test_get_node_by_instance_uuid(self):
    """Mapping an instance lets the model resolve its hosting node."""
    model = model_root.ModelRoot()
    node_uuid = uuidutils.generate_uuid()
    node = element.ComputeNode(id=1)
    node.uuid = node_uuid
    model.add_node(node)
    self.assertEqual(node, model.get_node_by_uuid(node_uuid))
    instance_uuid = uuidutils.generate_uuid()
    instance = element.Instance(id=1)
    instance.uuid = instance_uuid
    model.add_instance(instance)
    self.assertEqual(instance, model.get_instance_by_uuid(instance_uuid))
    model.map_instance(instance, node)
    self.assertEqual(node, model.get_node_by_instance_uuid(instance.uuid))
def test_add_node(self):
model = model_root.ModelRoot()
uuid_ = "{0}".format(uuidutils.generate_uuid())
@@ -151,3 +166,204 @@ class TestModel(base.TestCase):
model = model_root.ModelRoot()
self.assertRaises(exception.IllegalArgumentException,
model.assert_instance, "valeur_qcq")
class TestStorageModel(base.TestCase):
def load_data(self, filename):
    """Return the raw bytes of a fixture from this test's data folder."""
    data_folder = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "data")
    with open(os.path.join(data_folder, filename), 'rb') as xml_file:
        return xml_file.read()
def load_model(self, filename):
    """Deserialize a StorageModelRoot from an XML fixture file."""
    xml_data = self.load_data(filename)
    return model_root.StorageModelRoot.from_xml(xml_data)
def test_model_structure(self):
    """Scenario 1 has the expected counts and matches the XML fixture."""
    fake_cluster = faker_cluster_state.FakerStorageModelCollector()
    built = fake_cluster.build_scenario_1()
    self.assertEqual(2, len(built.get_all_storage_nodes()))
    self.assertEqual(9, len(built.get_all_volumes()))
    self.assertEqual(12, len(built.edges()))
    reference = model_root.StorageModelRoot.from_xml(
        self.load_data('storage_scenario_1.xml'))
    self.assertTrue(
        model_root.StorageModelRoot.is_isomorphic(reference, built))
def test_build_model_from_xml(self):
    """A model rebuilt from XML renders identically to the generated one."""
    fake_cluster = faker_cluster_state.FakerStorageModelCollector()
    expected_model = fake_cluster.generate_scenario_1()
    rebuilt = model_root.StorageModelRoot.from_xml(
        self.load_data('storage_scenario_1.xml'))
    self.assertEqual(expected_model.to_string(), rebuilt.to_string())
def test_assert_node_raise(self):
    """assert_node rejects objects that are not StorageNode instances."""
    model = model_root.StorageModelRoot()
    model.add_node(element.StorageNode(host="host@backend"))
    self.assertRaises(exception.IllegalArgumentException,
                      model.assert_node, "obj")
def test_assert_pool_raise(self):
    """assert_pool rejects objects that are not Pool instances."""
    model = model_root.StorageModelRoot()
    model.add_pool(element.Pool(name="host@backend#pool"))
    self.assertRaises(exception.IllegalArgumentException,
                      model.assert_pool, "obj")
def test_assert_volume_raise(self):
    """assert_volume rejects objects that are not Volume instances."""
    model = model_root.StorageModelRoot()
    model.add_volume(element.Volume(uuid=uuidutils.generate_uuid()))
    self.assertRaises(exception.IllegalArgumentException,
                      model.assert_volume, "obj")
def test_add_node(self):
    """An added storage node can be looked up by its host name."""
    model = model_root.StorageModelRoot()
    hostname = "host@backend"
    storage_node = element.StorageNode(host=hostname)
    model.add_node(storage_node)
    self.assertEqual(storage_node, model.get_node_by_name(hostname))
def test_add_pool(self):
    """An added pool can be looked up by its pool name."""
    model = model_root.StorageModelRoot()
    pool_name = "host@backend#pool"
    storage_pool = element.Pool(name=pool_name)
    model.add_pool(storage_pool)
    self.assertEqual(storage_pool, model.get_pool_by_pool_name(pool_name))
def test_remove_node(self):
    """Removing a node makes name lookups raise StorageNodeNotFound."""
    model = model_root.StorageModelRoot()
    hostname = "host@backend"
    storage_node = element.StorageNode(host=hostname)
    model.add_node(storage_node)
    self.assertEqual(storage_node, model.get_node_by_name(hostname))
    model.remove_node(storage_node)
    self.assertRaises(exception.StorageNodeNotFound,
                      model.get_node_by_name, hostname)
def test_remove_pool(self):
    """Removing a pool makes name lookups raise PoolNotFound."""
    model = model_root.StorageModelRoot()
    pool_name = "host@backend#pool"
    storage_pool = element.Pool(name=pool_name)
    model.add_pool(storage_pool)
    self.assertEqual(storage_pool, model.get_pool_by_pool_name(pool_name))
    model.remove_pool(storage_pool)
    self.assertRaises(exception.PoolNotFound,
                      model.get_pool_by_pool_name, pool_name)
def test_map_unmap_pool(self):
    """Mapping links a pool to its node; unmapping severs the link."""
    model = model_root.StorageModelRoot()
    hostname = "host@backend"
    node = element.StorageNode(host=hostname)
    model.add_node(node)
    self.assertEqual(node, model.get_node_by_name(hostname))
    pool_name = "host@backend#pool"
    pool = element.Pool(name=pool_name)
    model.add_pool(pool)
    self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
    model.map_pool(pool, node)
    # assertIn/assertNotIn report the container contents on failure,
    # unlike assertTrue(x in y) which only says "False is not true".
    self.assertIn(pool.name, model.predecessors(node.host))
    model.unmap_pool(pool, node)
    self.assertNotIn(pool.name, model.predecessors(node.host))
def test_add_volume(self):
    """An added volume can be looked up by its uuid."""
    model = model_root.StorageModelRoot()
    volume = element.Volume(uuid=uuidutils.generate_uuid())
    model.add_volume(volume)
    self.assertEqual(volume, model.get_volume_by_uuid(volume.uuid))
def test_remove_volume(self):
    """Removing a volume makes uuid lookups raise VolumeNotFound."""
    model = model_root.StorageModelRoot()
    volume_uuid = uuidutils.generate_uuid()
    volume = element.Volume(uuid=volume_uuid)
    model.add_volume(volume)
    self.assertEqual(volume, model.get_volume_by_uuid(volume_uuid))
    model.remove_volume(volume)
    self.assertRaises(exception.VolumeNotFound,
                      model.get_volume_by_uuid, volume_uuid)
def test_map_unmap_volume(self):
    """Mapping links a volume to its pool; unmapping severs the link."""
    model = model_root.StorageModelRoot()
    pool_name = "host@backend#pool"
    pool = element.Pool(name=pool_name)
    model.add_pool(pool)
    self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
    uuid_ = uuidutils.generate_uuid()
    volume = element.Volume(uuid=uuid_)
    model.add_volume(volume)
    self.assertEqual(volume, model.get_volume_by_uuid(uuid_))
    model.map_volume(volume, pool)
    # assertIn/assertNotIn report the container contents on failure,
    # unlike assertTrue(x in y) which only says "False is not true".
    self.assertIn(volume.uuid, model.predecessors(pool.name))
    model.unmap_volume(volume, pool)
    self.assertNotIn(volume.uuid, model.predecessors(pool.name))
def test_get_all_storage_nodes(self):
    """get_all_storage_nodes returns every node added to the model."""
    model = model_root.StorageModelRoot()
    for i in range(10):
        model.add_node(element.StorageNode(host="host_{0}".format(i)))
    all_nodes = model.get_all_storage_nodes()
    # Guard against a vacuous pass: the loop below would succeed on an
    # empty result, so pin the expected node count first.
    self.assertEqual(10, len(all_nodes))
    for hostname in all_nodes:
        model.assert_node(model.get_node_by_name(hostname))
def test_get_all_volumes(self):
    """get_all_volumes returns every volume added to the model."""
    model = model_root.StorageModelRoot()
    for _ in range(10):
        model.add_volume(element.Volume(uuid=uuidutils.generate_uuid()))
    all_volumes = model.get_all_volumes()
    # Guard against a vacuous pass: the loop below would succeed on an
    # empty result, so pin the expected volume count first.
    self.assertEqual(10, len(all_volumes))
    for vol in all_volumes:
        model.assert_volume(model.get_volume_by_uuid(vol))
def test_get_node_pools(self):
    """get_node_pools lists the pools mapped onto a storage node."""
    model = model_root.StorageModelRoot()
    hostname = "host@backend"
    node = element.StorageNode(host=hostname)
    model.add_node(node)
    self.assertEqual(node, model.get_node_by_name(hostname))
    pool = element.Pool(name="host@backend#pool")
    model.add_pool(pool)
    self.assertEqual(pool, model.get_pool_by_pool_name(pool.name))
    model.map_pool(pool, node)
    self.assertEqual([pool], model.get_node_pools(node))
def test_get_pool_by_volume(self):
    """get_pool_by_volume resolves the pool a volume is mapped to."""
    model = model_root.StorageModelRoot()
    pool = element.Pool(name="host@backend#pool")
    model.add_pool(pool)
    self.assertEqual(pool, model.get_pool_by_pool_name(pool.name))
    volume = element.Volume(uuid=uuidutils.generate_uuid())
    model.add_volume(volume)
    self.assertEqual(volume, model.get_volume_by_uuid(volume.uuid))
    model.map_volume(volume, pool)
    self.assertEqual(pool, model.get_pool_by_volume(volume))
def test_get_pool_volumes(self):
    """get_pool_volumes lists the volumes mapped into a pool."""
    model = model_root.StorageModelRoot()
    pool = element.Pool(name="host@backend#pool")
    model.add_pool(pool)
    self.assertEqual(pool, model.get_pool_by_pool_name(pool.name))
    volume = element.Volume(uuid=uuidutils.generate_uuid())
    model.add_volume(volume)
    self.assertEqual(volume, model.get_volume_by_uuid(volume.uuid))
    model.map_volume(volume, pool)
    self.assertEqual([volume], model.get_pool_volumes(pool))