Merge "Enable storage model collector by default"

This commit is contained in:
Zuul
2025-09-11 19:36:22 +00:00
committed by Gerrit Code Review
10 changed files with 148 additions and 3 deletions

View File

@@ -32,6 +32,11 @@
period: 120
watcher_cluster_data_model_collectors.storage:
period: 120
$CINDER_CONF:
# enable notifications in compute node, by default they are only
# configured in the controller
oslo_messaging_notifications:
driver: messagingv2
devstack_services:
watcher-api: false
watcher-decision-engine: true
@@ -63,6 +68,11 @@
period: 120
watcher_cluster_data_model_collectors.storage:
period: 120
$CINDER_CONF:
# enable notifications in compute node, by default they are only
# configured in the controller
oslo_messaging_notifications:
driver: messagingv2
test-config:
$TEMPEST_CONFIG:
compute:

View File

@@ -0,0 +1,7 @@
---
upgrade:
- |
The storage model collector is now enabled by default if a cinder service exists
in the deployment. This prevents the storage model from becoming stale when
running volume migrations in cases where new volumes are created but the
collector was not enabled in the configuration.

View File

@@ -88,3 +88,16 @@ class KeystoneHelper(object):
message=(_("Domain name seems ambiguous: %s") %
name_or_id))
return domains[0]
def is_service_enabled_by_type(self, svc_type):
    """Check that exactly one enabled service of a type is registered.

    :param svc_type: keystone service type to look up
        (e.g. 'block-storage' or 'volumev3')
    :returns: True if exactly one enabled service of the given type
        exists in the keystone catalog, False otherwise.
    """
    services = self.keystone.services.list(type=svc_type)
    svcs_enabled = [svc for svc in services if svc.enabled]
    if not svcs_enabled:
        # Lazy %-interpolation: the message is only rendered when the
        # warning is actually emitted (oslo.log guideline).
        LOG.warning("No enabled service found for type: %s", svc_type)
        return False
    if len(svcs_enabled) > 1:
        # Ambiguous deployment: cannot tell which service to use.
        LOG.warning("Multiple services found for type: %s", svc_type)
        return False
    # if there is only one enabled service, consider it a valid
    # case
    return True

View File

@@ -22,7 +22,7 @@ collector = cfg.OptGroup(name='collector',
COLLECTOR_OPTS = [
cfg.ListOpt('collector_plugins',
default=['compute'],
default=['compute', 'storage'],
help="""
The cluster data model plugin names.

View File

@@ -18,23 +18,46 @@
# limitations under the License.
from oslo_config import cfg
from oslo_log import log
from watcher.common import clients
from watcher.common import keystone_helper
from watcher.common import utils
from watcher.decision_engine.loading import default
LOG = log.getLogger(__name__)
class CollectorManager(object):
def __init__(self):
def __init__(self, osc=None):
self.collector_loader = default.ClusterDataModelCollectorLoader()
self._collectors = None
self._notification_endpoints = None
self.osc = osc if osc else clients.OpenStackClients()
def is_cinder_enabled(self):
    """Return True if an enabled cinder (block storage) service exists.

    Checks the canonical 'block-storage' service type first, then
    'volumev3', a commonly used alias for the cinder keystone service
    type.
    """
    keystone = keystone_helper.KeystoneHelper(self.osc)
    # any() short-circuits, so 'volumev3' is only queried when no
    # enabled 'block-storage' service was found.
    return any(
        keystone.is_service_enabled_by_type(svc_type=svc_type)
        for svc_type in ('block-storage', 'volumev3'))
def get_collectors(self):
if self._collectors is None:
collectors = utils.Struct()
collector_plugins = cfg.CONF.collector.collector_plugins
for collector_name in collector_plugins:
if collector_name == 'storage':
if not self.is_cinder_enabled():
LOG.warning(
"Block storage service is not enabled,"
" skipping storage collector"
)
continue
collector = self.collector_loader.load(collector_name)
collectors[collector_name] = collector
self._collectors = collectors

View File

@@ -273,7 +273,7 @@ class BaseStrategy(loadable.Loadable, metaclass=abc.ABCMeta):
@property
def collector_manager(self):
if self._collector_manager is None:
self._collector_manager = manager.CollectorManager()
self._collector_manager = manager.CollectorManager(osc=self.osc)
return self._collector_manager
@property

View File

@@ -25,6 +25,7 @@ from watcher.common import service as watcher_service
from watcher.decision_engine.audit import continuous
from watcher.decision_engine import sync
from watcher.tests import base
from watcher.tests.fixtures import watcher as watcher_fixtures
class TestDecisionEngine(base.BaseTestCase):
@@ -49,6 +50,7 @@ class TestDecisionEngine(base.BaseTestCase):
continuous.ContinuousAuditHandler, "start")
self.m_continuoushandler = p_continuoushandler.start()
self.addCleanup(p_continuoushandler.stop)
self.fake_keystone = self.useFixture(watcher_fixtures.KeystoneClient())
def tearDown(self):
super(TestDecisionEngine, self).tearDown()
@@ -57,5 +59,6 @@ class TestDecisionEngine(base.BaseTestCase):
@mock.patch.object(sync.Syncer, "sync", mock.Mock())
@mock.patch.object(service, "launch")
def test_run_de_app(self, m_launch):
decisionengine.main()
self.assertEqual(1, m_launch.call_count)

View File

@@ -0,0 +1,60 @@
# Copyright 2025 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
import keystoneclient.v3.services as ks_service
from watcher.common import keystone_helper
from watcher.tests import base as test
from watcher.tests.fixtures import watcher as watcher_fixtures
class TestKeystoneHelper(test.TestCase):
    """Unit tests for KeystoneHelper.is_service_enabled_by_type."""

    def setUp(self):
        super(TestKeystoneHelper, self).setUp()
        # Replace the real keystone client with a mock so no network
        # connection is attempted; keep a handle on the mocked services
        # manager so each test can stub list() results.
        self.fake_keystone = self.useFixture(watcher_fixtures.KeystoneClient())
        self.keystone_svs = self.fake_keystone.m_keystone.return_value.services
        self.keystone_helper = keystone_helper.KeystoneHelper()

    def test_is_service_enabled(self):
        """A single enabled service of the requested type is valid."""
        self.keystone_svs.list.return_value = [mock.MagicMock(
            spec=ks_service.Service, enabled=True)]
        self.assertTrue(
            self.keystone_helper.is_service_enabled_by_type('block-storage'))
        self.keystone_svs.list.assert_called_once_with(type='block-storage')

    def test_is_service_enabled_not_found(self):
        """No service registered for the type -> not enabled."""
        self.keystone_svs.list.return_value = []
        self.assertFalse(
            self.keystone_helper.is_service_enabled_by_type('block-storage'))
        self.keystone_svs.list.assert_called_once_with(type='block-storage')

    def test_is_service_enabled_with_multiple_services_one_enabled(self):
        """Disabled duplicates are ignored; one enabled service is valid."""
        self.keystone_svs.list.return_value = [
            mock.MagicMock(spec=ks_service.Service, enabled=True),
            mock.MagicMock(spec=ks_service.Service, enabled=False)]
        self.assertTrue(
            self.keystone_helper.is_service_enabled_by_type('block-storage'))
        self.keystone_svs.list.assert_called_once_with(type='block-storage')

    def test_is_service_enabled_multiple_services_two_enabled(self):
        """More than one enabled service is ambiguous -> not enabled."""
        self.keystone_svs.list.return_value = [
            mock.MagicMock(spec=ks_service.Service, enabled=True),
            mock.MagicMock(spec=ks_service.Service, enabled=True)]
        self.assertFalse(
            self.keystone_helper.is_service_enabled_by_type('block-storage'))
        self.keystone_svs.list.assert_called_once_with(type='block-storage')

View File

@@ -32,6 +32,7 @@ from watcher import objects
from watcher.tests import base
from watcher.tests.db import base as db_base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.fixtures import watcher as watcher_fixtures
from watcher.tests.objects import utils as obj_utils
@@ -80,6 +81,10 @@ class TestCancelOngoingAudits(db_base.DbTestCase):
@mock.patch.object(objects.audit.Audit, 'list')
class TestDecisionEngineSchedulingService(base.TestCase):
def setUp(self):
super(TestDecisionEngineSchedulingService, self).setUp()
self.fake_keystone = self.useFixture(watcher_fixtures.KeystoneClient())
@mock.patch.object(
default_loading.ClusterDataModelCollectorLoader, 'load')
@mock.patch.object(

View File

@@ -17,8 +17,13 @@
import logging as std_logging
import os
from unittest import mock
import fixtures
import keystoneclient.v3.client as ks_client
import keystoneclient.v3.services as ks_service
from watcher.common.clients import OpenStackClients
class NullHandler(std_logging.Handler):
@@ -113,3 +118,22 @@ class StandardLogging(fixtures.Fixture):
self.useFixture(
fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))
class KeystoneClient(fixtures.Fixture):
    """Fixture to mock the keystone client.

    Prevents the OpenStackClients from actually connecting to the
    keystone server.

    Mocks the services.get and services.list methods.
    """

    def setUp(self):
        super(KeystoneClient, self).setUp()
        # Patch OpenStackClients.keystone so any code requesting a
        # keystone client receives a MagicMock spec'd on the real
        # client class instead of opening a connection.
        mock_osc = mock.patch.object(
            OpenStackClients, "keystone",
            return_value=mock.MagicMock(spec=ks_client.Client))
        self.m_keystone = mock_osc.start()
        # Spec the services manager so tests can stub list()/get()
        # while unknown attribute access still raises.
        self.m_keystone.return_value.services = mock.MagicMock(
            spec=ks_service.ServiceManager)
        self.addCleanup(mock_osc.stop)