diff --git a/dashboard/mock_data.py b/dashboard/mock_data.py
index 84fad84..4efee31 100644
--- a/dashboard/mock_data.py
+++ b/dashboard/mock_data.py
@@ -1,4 +1,5 @@
 """Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus)."""
+
 import json
 
@@ -35,15 +36,17 @@ def get_mock_context():
             "scope": "Full Cluster",
             "cpu_weight": "1.0",
             "ram_weight": "1.0",
-            "migrations": json.dumps([
-                {
-                    "instanceName": "instance-1",
-                    "source": "compute-0",
-                    "destination": "compute-3",
-                    "flavor": "m1.small",
-                    "impact": "Low",
-                }
-            ]),
+            "migrations": json.dumps(
+                [
+                    {
+                        "instanceName": "instance-1",
+                        "source": "compute-0",
+                        "destination": "compute-3",
+                        "flavor": "m1.small",
+                        "impact": "Low",
+                    }
+                ]
+            ),
             "host_labels": json.dumps(host_labels),
             "cpu_current": json.dumps(cpu_current),
             "cpu_projected": json.dumps(cpu_projected),
diff --git a/dashboard/openstack_utils/audits.py b/dashboard/openstack_utils/audits.py
index ae33d38..5bc20a3 100644
--- a/dashboard/openstack_utils/audits.py
+++ b/dashboard/openstack_utils/audits.py
@@ -1,13 +1,12 @@
-import pandas
-
 from copy import copy
 
+import pandas
 from openstack.connection import Connection
-
-from watcher_visio.settings import WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME, PROMETHEUS_METRICS
+from watcher_visio.settings import PROMETHEUS_METRICS, WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME
 
 from dashboard.prometheus_utils.query import query_prometheus
 
+
 def convert_cpu_data(data: list):
     metrics = []
@@ -16,33 +15,37 @@ def convert_cpu_data(data: list):
 
     for entry in data:
         for t, val in entry["values"]:
-            metrics.append({
-                "timestamp": int(t),
-                "host": entry["metric"]["host"],
-                "cpu_usage": float(val),
-                "instance": entry["metric"]["instanceName"]
-            })
+            metrics.append(
+                {
+                    "timestamp": int(t),
+                    "host": entry["metric"]["host"],
+                    "cpu_usage": float(val),
+                    "instance": entry["metric"]["instanceName"],
+                }
+            )
 
     df_cpu = pandas.DataFrame(metrics)
     df_cpu["timestamp"] = pandas.to_datetime(df_cpu["timestamp"], unit="s")
 
     # Aggregate CPU usage per host
     return (
-        df_cpu.groupby(["host", "timestamp"])["cpu_usage"].sum()
-        .groupby("host").mean()
+        df_cpu.groupby(["host", "timestamp"])["cpu_usage"]
+        .sum()
+        .groupby("host")
+        .mean()
         .reset_index()
     )
 
 
 def get_current_cluster_cpu(connection: Connection) -> dict:
     """Return current per-host CPU state for the cluster (no Watcher dependency)."""
-    cpu_data = query_prometheus(PROMETHEUS_METRICS['cpu_usage'])
+    cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
     cpu_metrics = convert_cpu_data(data=cpu_data)
     if cpu_metrics.empty:
         return {"host_labels": [], "cpu_current": []}
     return {
-        "host_labels": cpu_metrics['host'].to_list(),
-        "cpu_current": cpu_metrics['cpu_usage'].to_list(),
+        "host_labels": cpu_metrics["host"].to_list(),
+        "cpu_current": cpu_metrics["cpu_usage"].to_list(),
     }
 
@@ -50,43 +53,38 @@ def get_audits(connection: Connection) -> list[dict] | None:
     session = connection.session
 
     watcher_endpoint = connection.endpoint_for(
-        service_type=WATCHER_ENDPOINT_NAME,
-        interface=WATCHER_INTERFACE_NAME
+        service_type=WATCHER_ENDPOINT_NAME, interface=WATCHER_INTERFACE_NAME
     )
 
     # Collect instances prometheus metrics
-    cpu_data = query_prometheus(PROMETHEUS_METRICS['cpu_usage'])
+    cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
     cpu_metrics = convert_cpu_data(data=cpu_data)
 
     # Fetch audit list
-    audits_resp = session.get(
-        f"{watcher_endpoint}/v1/audits"
-    )
+    audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
     audits_resp.raise_for_status()
-    audits_resp = audits_resp.json().get('audits') or []
+    audits_resp = audits_resp.json().get("audits") or []
 
     # Fetch action plan list
-    actionplans_resp = session.get(
-        f"{watcher_endpoint}/v1/action_plans"
-    )
+    actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
     actionplans_resp.raise_for_status()
-    actionplans_resp = actionplans_resp.json().get('action_plans') or []
+    actionplans_resp = actionplans_resp.json().get("action_plans") or []
 
     # Filtering audits by PENDING state
-    pending_audits = [plan for plan in actionplans_resp if plan['state'] == "RECOMMENDED"]
+    pending_audits = [plan for plan in actionplans_resp if plan["state"] == "RECOMMENDED"]
 
     result = []
     for item in pending_audits:
         projected_cpu_data = copy(cpu_data)
-        audit_resp = session.get(
-            f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}"
-        )
+        audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}")
         audit_resp.raise_for_status()
         audit_resp = audit_resp.json()
 
-        actionplan = next(filter(lambda x: x.get("audit_uuid") == audit_resp['uuid'], actionplans_resp), None)
+        actionplan = next(
+            filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], actionplans_resp), None
+        )
         if actionplan is None:
             continue
@@ -94,49 +92,55 @@ def get_audits(connection: Connection) -> list[dict] | None:
             f"{watcher_endpoint}/v1/actions/?action_plan_uuid={actionplan['uuid']}"
         )
         actions_resp.raise_for_status()
-        actions_resp = actions_resp.json().get('actions') or []
+        actions_resp = actions_resp.json().get("actions") or []
 
         migrations = []
         mapping = {}
         for action in actions_resp:
-            action_resp = session.get(
-                f"{watcher_endpoint}/v1/actions/{action['uuid']}"
-            )
+            action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
             action_resp.raise_for_status()
             action_resp = action_resp.json()
 
-            server = connection.get_server_by_id(action_resp['input_parameters']['resource_id'])
-            params = action_resp['input_parameters']
-            mapping[params['resource_name']] = params['destination_node']
+            server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"])
+            params = action_resp["input_parameters"]
+            mapping[params["resource_name"]] = params["destination_node"]
 
-            migrations.append({
-                "instanceName": action_resp['input_parameters']['resource_name'],
-                "source": action_resp['input_parameters']['source_node'],
-                "destination": action_resp['input_parameters']['destination_node'],
-                "flavor": server.flavor.name,
-                "impact": 'Low'
-            })
+            migrations.append(
+                {
+                    "instanceName": action_resp["input_parameters"]["resource_name"],
+                    "source": action_resp["input_parameters"]["source_node"],
+                    "destination": action_resp["input_parameters"]["destination_node"],
+                    "flavor": server.flavor.name,
+                    "impact": "Low",
+                }
+            )
 
         for entry in projected_cpu_data:
-            if (instance := entry['metric']['instanceName']) in mapping:
-                entry['metric']['host'] = mapping[instance]
+            if (instance := entry["metric"]["instanceName"]) in mapping:
+                entry["metric"]["host"] = mapping[instance]
 
         projected_cpu_metrics = convert_cpu_data(projected_cpu_data)
 
-        result.append({
-            "id": audit_resp['uuid'],
-            "name": audit_resp['name'],
-            "created_at": audit_resp['created_at'],
-            "strategy": audit_resp['strategy_name'],
-            "goal": audit_resp['goal_name'],
-            "type": audit_resp['audit_type'],
-            "scope": audit_resp['scope'],
-            "cpu_weight": audit_resp['parameters'].get('weights', {}).get('instance_cpu_usage_weight', "none"),
-            "ram_weight": audit_resp['parameters'].get('weights', {}).get('instance_ram_usage_weight', "none"),
-            "migrations": migrations,
-            "host_labels": cpu_metrics['host'].to_list(),
-            "cpu_current": cpu_metrics['cpu_usage'].to_list(),
-            "cpu_projected": projected_cpu_metrics['cpu_usage'].to_list(),
-        })
+        result.append(
+            {
+                "id": audit_resp["uuid"],
+                "name": audit_resp["name"],
+                "created_at": audit_resp["created_at"],
+                "strategy": audit_resp["strategy_name"],
+                "goal": audit_resp["goal_name"],
+                "type": audit_resp["audit_type"],
+                "scope": audit_resp["scope"],
+                "cpu_weight": audit_resp["parameters"]
+                .get("weights", {})
+                .get("instance_cpu_usage_weight", "none"),
+                "ram_weight": audit_resp["parameters"]
+                .get("weights", {})
+                .get("instance_ram_usage_weight", "none"),
+                "migrations": migrations,
+                "host_labels": cpu_metrics["host"].to_list(),
+                "cpu_current": cpu_metrics["cpu_usage"].to_list(),
+                "cpu_projected": projected_cpu_metrics["cpu_usage"].to_list(),
+            }
+        )
 
     return result
diff --git a/dashboard/openstack_utils/connect.py b/dashboard/openstack_utils/connect.py
index 798ad54..557c684 100644
--- a/dashboard/openstack_utils/connect.py
+++ b/dashboard/openstack_utils/connect.py
@@ -1,6 +1,5 @@
 import openstack
 from openstack.connection import Connection
-
 from watcher_visio.settings import OPENSTACK_CLOUD, OPENSTACK_REGION_NAME
 
diff --git a/dashboard/openstack_utils/flavor.py b/dashboard/openstack_utils/flavor.py
index 179b124..5eb56bd 100644
--- a/dashboard/openstack_utils/flavor.py
+++ b/dashboard/openstack_utils/flavor.py
@@ -1,10 +1,11 @@
+from collections import Counter
+
 from openstack.connection import Connection
-from collections import Counter
 
 
 def get_flavor_list(connection: Connection) -> dict:
     servers = list(connection.compute.servers(all_projects=True))
-    flavor_ids = [s.flavor['id'] for s in servers if 'id' in s.flavor]
+    flavor_ids = [s.flavor["id"] for s in servers if "id" in s.flavor]
     flavor_count = Counter(flavor_ids).most_common()
 
     flavors = list(flavor_count)
@@ -13,10 +14,7 @@ def get_flavor_list(connection: Connection) -> dict:
     placeholder = {"name": "—", "count": 0}
     for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]:
         if len(flavors) > idx:
-            result[f"{prefix}_common_flavor"] = {
-                "name": flavors[idx][0],
-                "count": flavors[idx][1]
-            }
+            result[f"{prefix}_common_flavor"] = {"name": flavors[idx][0], "count": flavors[idx][1]}
         else:
             result[f"{prefix}_common_flavor"] = placeholder
 
diff --git a/dashboard/prometheus_utils/query.py b/dashboard/prometheus_utils/query.py
index b1a4a2c..80be3af 100644
--- a/dashboard/prometheus_utils/query.py
+++ b/dashboard/prometheus_utils/query.py
@@ -1,5 +1,4 @@
 import requests
-
 from watcher_visio.settings import PROMETHEUS_URL
 
 # Timeout for lightweight health check (seconds)
diff --git a/dashboard/templatetags/mathfilters.py b/dashboard/templatetags/mathfilters.py
index 2142957..7d602d9 100644
--- a/dashboard/templatetags/mathfilters.py
+++ b/dashboard/templatetags/mathfilters.py
@@ -2,37 +2,41 @@ from django import template
 
 register = template.Library()
 
+
 @register.filter
 def div(a, b):
     try:
         return float(a) / float(b)
-    except:
+    except (TypeError, ValueError, ZeroDivisionError):
         return 0
 
+
 @register.filter
 def mul(a, b):
     try:
         return float(a) * float(b)
-    except:
+    except (TypeError, ValueError):
         return 0
 
+
 @register.filter
 def sub(a, b):
     try:
         return float(a) - float(b)
-    except:
+    except (TypeError, ValueError):
         return 0
 
+
 @register.filter
-def convert_bytes(bytes_value, target_unit='GB'):
+def convert_bytes(bytes_value, target_unit="GB"):
     """
     Convert bytes to specific unit
-    
+
     Args:
         bytes_value: Size in bytes
         target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB')
         precision: Number of decimal places
-    
+
     Returns:
         Float value in target unit
     """
@@ -41,16 +45,16 @@ def convert_bytes(bytes_value, target_unit='GB'):
     except (ValueError, TypeError):
         return 0.0
     conversion_factors = {
-        'B': 1,
-        'KB': 1024,
-        'MB': 1024 * 1024,
-        'GB': 1024 * 1024 * 1024,
-        'TB': 1024 * 1024 * 1024 * 1024,
+        "B": 1,
+        "KB": 1024,
+        "MB": 1024 * 1024,
+        "GB": 1024 * 1024 * 1024,
+        "TB": 1024 * 1024 * 1024 * 1024,
     }
-    
+
     target_unit = target_unit.upper()
     if target_unit not in conversion_factors:
-        target_unit = 'MB'
-    
+        target_unit = "MB"
+
     result = bytes_value / conversion_factors[target_unit]
     return round(result, 1)
diff --git a/dashboard/tests/test_audits.py b/dashboard/tests/test_audits.py
index f0ef331..88ecd73 100644
--- a/dashboard/tests/test_audits.py
+++ b/dashboard/tests/test_audits.py
@@ -1,4 +1,5 @@
 """Tests for dashboard.openstack_utils.audits."""
+
 from unittest.mock import MagicMock, patch
 
 from django.test import TestCase
@@ -31,8 +32,10 @@ class ConvertCpuDataTest(TestCase):
         self.assertEqual(len(hosts), 2)
         self.assertIn("compute-0", hosts)
         self.assertIn("compute-1", hosts)
-        # compute-0: (10+20)/2 for ts 1000 and 5 for ts 1000 -> groupby host,timestamp sum -> then groupby host mean
-        # For compute-0: two timestamps 1000 (10+5=15) and 1001 (20). Mean over timestamps = (15+20)/2 = 17.5
+        # compute-0: (10+20)/2 for ts 1000 and 5 for ts 1000 -> groupby host,timestamp sum
+        # -> then groupby host mean
+        # For compute-0: two timestamps 1000 (10+5=15) and 1001 (20).
+        # Mean over timestamps = (15+20)/2 = 17.5
         # For compute-1: one value 30
         by_host = result.set_index("host")["cpu_usage"]
         self.assertAlmostEqual(by_host["compute-0"], 17.5)
@@ -60,11 +63,14 @@ class GetCurrentClusterCpuTest(TestCase):
     @patch("dashboard.openstack_utils.audits.query_prometheus")
     def test_returns_host_labels_and_cpu_current(self, mock_query, mock_convert):
         import pandas as pd
+
         mock_query.return_value = [{"metric": {"host": "h0"}, "values": [[0, "1.0"]]}]
-        mock_convert.return_value = pd.DataFrame({
-            "host": ["compute-0", "compute-1"],
-            "cpu_usage": [25.0, 35.0],
-        })
+        mock_convert.return_value = pd.DataFrame(
+            {
+                "host": ["compute-0", "compute-1"],
+                "cpu_usage": [25.0, 35.0],
+            }
+        )
         conn = MagicMock()
         result = get_current_cluster_cpu(conn)
         self.assertEqual(result["host_labels"], ["compute-0", "compute-1"])
diff --git a/dashboard/tests/test_flavor.py b/dashboard/tests/test_flavor.py
index c0bf0ba..3846910 100644
--- a/dashboard/tests/test_flavor.py
+++ b/dashboard/tests/test_flavor.py
@@ -1,4 +1,5 @@
 """Tests for dashboard.openstack_utils.flavor."""
+
 from unittest.mock import MagicMock
 
 from django.test import TestCase
diff --git a/dashboard/tests/test_mathfilters.py b/dashboard/tests/test_mathfilters.py
index 1827d76..d8ee7df 100644
--- a/dashboard/tests/test_mathfilters.py
+++ b/dashboard/tests/test_mathfilters.py
@@ -1,8 +1,9 @@
 """Tests for dashboard.templatetags.mathfilters."""
 
-from django.test import TestCase
-from django.template import Template, Context
-from dashboard.templatetags.mathfilters import div, mul, sub, convert_bytes
+from django.template import Context, Template
+from django.test import TestCase
+
+from dashboard.templatetags.mathfilters import convert_bytes, div, mul, sub
 
 
 class DivFilterTest(TestCase):
@@ -48,25 +49,25 @@ class SubFilterTest(TestCase):
 class ConvertBytesFilterTest(TestCase):
     """Tests for the convert_bytes template filter."""
 
-    def test_convert_to_B(self):
+    def test_convert_to_b(self):
         self.assertEqual(convert_bytes(1024, "B"), 1024.0)
 
-    def test_convert_to_KB(self):
+    def test_convert_to_kb(self):
         self.assertEqual(convert_bytes(2048, "KB"), 2.0)
 
-    def test_convert_to_MB(self):
+    def test_convert_to_mb(self):
         self.assertEqual(convert_bytes(1024 * 1024 * 3, "MB"), 3.0)
 
-    def test_convert_to_GB(self):
-        self.assertEqual(convert_bytes(1024 ** 3 * 5, "GB"), 5.0)
+    def test_convert_to_gb(self):
+        self.assertEqual(convert_bytes(1024**3 * 5, "GB"), 5.0)
 
-    def test_convert_to_TB(self):
-        self.assertEqual(convert_bytes(1024 ** 4, "TB"), 1.0)
+    def test_convert_to_tb(self):
+        self.assertEqual(convert_bytes(1024**4, "TB"), 1.0)
 
-    def test_convert_default_GB(self):
-        self.assertEqual(convert_bytes(1024 ** 3 * 2), 2.0)
+    def test_convert_default_gb(self):
+        self.assertEqual(convert_bytes(1024**3 * 2), 2.0)
 
-    def test_convert_invalid_unit_fallback_to_MB(self):
+    def test_convert_invalid_unit_fallback_to_mb(self):
         self.assertEqual(convert_bytes(1024 * 1024, "invalid"), 1.0)
         self.assertEqual(convert_bytes(1024 * 1024, "xyz"), 1.0)
@@ -79,8 +80,8 @@ class ConvertBytesFilterTest(TestCase):
         self.assertEqual(convert_bytes(1536 * 1024 * 1024, "GB"), 1.5)
 
     def test_convert_case_insensitive_unit(self):
-        self.assertEqual(convert_bytes(1024 ** 3, "gb"), 1.0)
-        self.assertEqual(convert_bytes(1024 ** 3, "GB"), 1.0)
+        self.assertEqual(convert_bytes(1024**3, "gb"), 1.0)
+        self.assertEqual(convert_bytes(1024**3, "GB"), 1.0)
 
 
 class MathfiltersTemplateIntegrationTest(TestCase):
@@ -100,4 +101,4 @@ class MathfiltersTemplateIntegrationTest(TestCase):
 
     def test_convert_bytes_in_template(self):
         t = Template("{% load mathfilters %}{{ bytes|convert_bytes:'GB' }}")
-        self.assertEqual(t.render(Context({"bytes": 1024 ** 3 * 2})), "2.0")
+        self.assertEqual(t.render(Context({"bytes": 1024**3 * 2})), "2.0")
diff --git a/dashboard/tests/test_mock_data.py b/dashboard/tests/test_mock_data.py
index 1d5b929..fc44467 100644
--- a/dashboard/tests/test_mock_data.py
+++ b/dashboard/tests/test_mock_data.py
@@ -1,4 +1,5 @@
 """Tests for dashboard.mock_data."""
+
 import json
 
 from django.test import TestCase
@@ -11,7 +12,17 @@ class GetMockContextTest(TestCase):
 
     def test_returns_all_top_level_keys(self):
         ctx = get_mock_context()
-        expected_keys = {"region", "pcpu", "vcpu", "pram", "vram", "vm", "flavors", "audits", "current_cluster"}
+        expected_keys = {
+            "region",
+            "pcpu",
+            "vcpu",
+            "pram",
+            "vram",
+            "vm",
+            "flavors",
+            "audits",
+            "current_cluster",
+        }
         self.assertEqual(set(ctx.keys()), expected_keys)
 
     def test_region_structure(self):
diff --git a/dashboard/tests/test_prometheus_query.py b/dashboard/tests/test_prometheus_query.py
index 4393f24..78db95d 100644
--- a/dashboard/tests/test_prometheus_query.py
+++ b/dashboard/tests/test_prometheus_query.py
@@ -1,5 +1,6 @@
 """Tests for dashboard.prometheus_utils.query."""
-from unittest.mock import patch, MagicMock
+
+from unittest.mock import MagicMock, patch
 
 from django.test import TestCase
@@ -12,13 +13,7 @@ class QueryPrometheusTest(TestCase):
     @patch("dashboard.prometheus_utils.query.requests.get")
     def test_single_result_returns_value_string(self, mock_get):
         mock_response = MagicMock()
-        mock_response.json.return_value = {
-            "data": {
-                "result": [
-                    {"value": ["1234567890", "42"]}
-                ]
-            }
-        }
+        mock_response.json.return_value = {"data": {"result": [{"value": ["1234567890", "42"]}]}}
         mock_response.raise_for_status = MagicMock()
         mock_get.return_value = mock_response
diff --git a/dashboard/tests/test_views.py b/dashboard/tests/test_views.py
index 30335dc..042fff6 100644
--- a/dashboard/tests/test_views.py
+++ b/dashboard/tests/test_views.py
@@ -1,18 +1,17 @@
 """Tests for dashboard.views."""
-import json
-from unittest.mock import patch, MagicMock
-from django.test import TestCase, RequestFactory
+import json
+from unittest.mock import MagicMock, patch
+
 from django.core.cache import cache
+from django.test import RequestFactory, TestCase
 
 from dashboard.views import (
-    index,
-    collect_context,
-    collect_stats,
-    collect_audits,
-    api_stats,
     api_audits,
     api_source_status,
+    api_stats,
+    collect_context,
+    index,
 )
@@ -21,10 +20,31 @@ def _minimal_render_context(region_name="test", first_flavor_name="f1", vm_count=
     return {
         "region": {"name": region_name, "hosts_total": 1},
         "pcpu": {"total": 1, "usage": 0, "free": 1, "used_percentage": 0},
-        "vcpu": {"total": 2, "allocated": 1, "free": 1, "allocated_percentage": 50, "overcommit_ratio": 1, "overcommit_max": 2},
+        "vcpu": {
+            "total": 2,
+            "allocated": 1,
+            "free": 1,
+            "allocated_percentage": 50,
+            "overcommit_ratio": 1,
+            "overcommit_max": 2,
+        },
         "pram": {"total": 1024**3, "usage": 0, "free": 1024**3, "used_percentage": 0},
-        "vram": {"total": 1024**3, "allocated": 0, "free": 1024**3, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 1},
-        "vm": {"count": vm_count, "active": vm_count, "stopped": 0, "avg_cpu": 1, "avg_ram": 0, "density": float(vm_count)},
+        "vram": {
+            "total": 1024**3,
+            "allocated": 0,
+            "free": 1024**3,
+            "allocated_percentage": 0,
+            "overcommit_ratio": 0,
+            "overcommit_max": 1,
+        },
+        "vm": {
+            "count": vm_count,
+            "active": vm_count,
+            "stopped": 0,
+            "avg_cpu": 1,
+            "avg_ram": 0,
+            "density": float(vm_count),
+        },
         "flavors": {
             "first_common_flavor": {"name": first_flavor_name, "count": vm_count},
             "second_common_flavor": {"name": "—", "count": 0},
@@ -81,10 +101,18 @@
     @patch("dashboard.views.get_flavor_list")
     @patch("dashboard.views.get_connection")
     def test_collect_context_structure_and_calculation(
-        self, mock_get_connection, mock_get_flavor_list, mock_get_audits, mock_fetch_metrics, mock_get_current_cluster_cpu
+        self,
+        mock_get_connection,
+        mock_get_flavor_list,
+        mock_get_audits,
+        mock_fetch_metrics,
+        mock_get_current_cluster_cpu,
     ):
         mock_get_connection.return_value = self._make_mock_connection("my-region")
-        mock_get_current_cluster_cpu.return_value = {"host_labels": ["h0", "h1"], "cpu_current": [30.0, 40.0]}
+        mock_get_current_cluster_cpu.return_value = {
+            "host_labels": ["h0", "h1"],
+            "cpu_current": [30.0, 40.0],
+        }
         mock_get_flavor_list.return_value = {
             "first_common_flavor": {"name": "m1.small", "count": 5},
             "second_common_flavor": {"name": "—", "count": 0},
@@ -125,6 +153,7 @@
         self.assertEqual(len(context["audits"]), 1)
         # Serialized for JS
         import json
+
         self.assertIsInstance(context["audits"][0]["migrations"], str)
         self.assertEqual(json.loads(context["audits"][0]["host_labels"]), ["h0", "h1"])
         self.assertIn("current_cluster", context)
@@ -187,7 +216,15 @@
     @patch("dashboard.views.settings")
     def test_api_stats_uses_cache(self, mock_settings, mock_collect_stats):
         mock_settings.DASHBOARD_CACHE_TTL = 120
-        cached = {"region": {"name": "cached", "hosts_total": 1}, "pcpu": {}, "pram": {}, "vcpu": {}, "vram": {}, "vm": {}, "flavors": {}}
+        cached = {
+            "region": {"name": "cached", "hosts_total": 1},
+            "pcpu": {},
+            "pram": {},
+            "vcpu": {},
+            "vram": {},
+            "vm": {},
+            "flavors": {},
+        }
         cache.clear()
         cache.set("dashboard_stats", cached, timeout=120)
         request = self.factory.get("/api/stats/")
@@ -219,13 +256,24 @@ class ApiAuditsTest(TestCase):
                 "scope": "Full Cluster",
                 "cpu_weight": "1.0",
                 "ram_weight": "1.0",
-                "migrations": [{"instanceName": "i1", "source": "h0", "destination": "h1", "flavor": "m1.small", "impact": "Low"}],
+                "migrations": [
+                    {
+                        "instanceName": "i1",
+                        "source": "h0",
+                        "destination": "h1",
+                        "flavor": "m1.small",
+                        "impact": "Low",
+                    }
+                ],
                 "host_labels": ["h0", "h1"],
                 "cpu_current": [30.0, 40.0],
                 "cpu_projected": [35.0, 35.0],
             }
         ]
-        mock_get_current_cluster_cpu.return_value = {"host_labels": ["h0", "h1"], "cpu_current": [30.0, 40.0]}
+        mock_get_current_cluster_cpu.return_value = {
+            "host_labels": ["h0", "h1"],
+            "cpu_current": [30.0, 40.0],
+        }
         cache.clear()
         request = self.factory.get("/api/audits/")
         with patch("dashboard.views.settings") as mock_settings:
@@ -246,9 +294,20 @@ class ApiAuditsTest(TestCase):
     @patch("dashboard.views.get_current_cluster_cpu")
     @patch("dashboard.views.collect_audits")
     @patch("dashboard.views.settings")
-    def test_api_audits_uses_cache(self, mock_settings, mock_collect_audits, mock_get_current_cluster_cpu):
+    def test_api_audits_uses_cache(
+        self, mock_settings, mock_collect_audits, mock_get_current_cluster_cpu
+    ):
         mock_settings.DASHBOARD_CACHE_TTL = 120
-        cached_audits = [{"id": "cached-1", "name": "Cached Audit", "migrations": "[]", "host_labels": "[]", "cpu_current": "[]", "cpu_projected": "[]"}]
+        cached_audits = [
+            {
+                "id": "cached-1",
+                "name": "Cached Audit",
+                "migrations": "[]",
+                "host_labels": "[]",
+                "cpu_current": "[]",
+                "cpu_projected": "[]",
+            }
+        ]
         cached_cluster = {"host_labels": ["cached-h0"], "cpu_current": [10.0]}
         cache.clear()
         cache.set("dashboard_audits", cached_audits, timeout=120)
diff --git a/dashboard/urls.py b/dashboard/urls.py
index 0121c33..8e79809 100644
--- a/dashboard/urls.py
+++ b/dashboard/urls.py
@@ -1,9 +1,10 @@
 from django.urls import path
+
 from . import views
 
 urlpatterns = [
-    path('', views.index, name='index'),
-    path('api/stats/', views.api_stats),
-    path('api/audits/', views.api_audits),
-    path('api/source-status/', views.api_source_status),
-]
\ No newline at end of file
+    path("", views.index, name="index"),
+    path("api/stats/", views.api_stats),
+    path("api/audits/", views.api_audits),
+    path("api/source-status/", views.api_source_status),
+]
diff --git a/dashboard/views.py b/dashboard/views.py
index 3a86dc8..3da163b 100644
--- a/dashboard/views.py
+++ b/dashboard/views.py
@@ -5,23 +5,32 @@ from django.conf import settings
 from django.core.cache import cache
 from django.http import JsonResponse
 from django.shortcuts import render
+
+from dashboard.mock_data import get_mock_context
+from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu
 from dashboard.openstack_utils.connect import check_openstack, get_connection
 from dashboard.openstack_utils.flavor import get_flavor_list
 from dashboard.prometheus_utils.query import check_prometheus, query_prometheus
-from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu
-from dashboard.mock_data import get_mock_context
 
 # Prometheus queries run in parallel (query_key -> query string)
 _PROMETHEUS_QUERIES = {
     "hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
-    "pcpu_total": "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))",
+    "pcpu_total": (
+        "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) "
+        "without (cpu,mode))"
+    ),
     "pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
     "vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
-    "vcpu_overcommit_max": "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})",
+    "vcpu_overcommit_max": (
+        "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
+    ),
     "pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
     "pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
     "vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
-    "vram_overcommit_max": "avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))",
+    "vram_overcommit_max": (
+        "avg(avg_over_time("
+        "openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
+    ),
     "vm_count": "sum(libvirt_domain_state_code)",
     "vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
 }
@@ -44,7 +53,9 @@ def _fetch_prometheus_metrics():
             else:
                 result[key] = int(raw)
         except (ValueError, TypeError):
-            result[key] = 0 if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max") else 0
+            result[key] = (
+                0 if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max") else 0
+            )
 
     return result
@@ -221,8 +232,22 @@ def _skeleton_context():
         "region": {"name": "—", "hosts_total": 0},
         "pcpu": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
         "pram": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
-        "vcpu": {"total": 0, "allocated": 0, "free": 0, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 0},
-        "vram": {"total": 0, "allocated": 0, "free": 0, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 0},
+        "vcpu": {
+            "total": 0,
+            "allocated": 0,
+            "free": 0,
+            "allocated_percentage": 0,
+            "overcommit_ratio": 0,
+            "overcommit_max": 0,
+        },
+        "vram": {
+            "total": 0,
+            "allocated": 0,
+            "free": 0,
+            "allocated_percentage": 0,
+            "overcommit_ratio": 0,
+            "overcommit_max": 0,
+        },
         "vm": {"count": 0, "active": 0, "stopped": 0, "avg_cpu": 0, "avg_ram": 0, "density": 0},
         "flavors": empty_flavors,
         "audits": [],
@@ -270,10 +295,12 @@ def api_audits(request):
 def api_source_status(request):
     """Return status of Prometheus and OpenStack data sources (ok / error / mock)."""
     if getattr(settings, "USE_MOCK_DATA", False):
-        return JsonResponse({
-            "prometheus": {"status": "mock"},
-            "openstack": {"status": "mock"},
-        })
+        return JsonResponse(
+            {
+                "prometheus": {"status": "mock"},
+                "openstack": {"status": "mock"},
+            }
+        )
 
     cache_key = "dashboard_source_status"
     cache_ttl = getattr(settings, "SOURCE_STATUS_CACHE_TTL", 30)
@@ -284,4 +311,4 @@ def api_source_status(request):
             "openstack": check_openstack(),
         }
         cache.set(cache_key, data, timeout=cache_ttl)
-    return JsonResponse(data)
\ No newline at end of file
+    return JsonResponse(data)
diff --git a/manage.py b/manage.py
index 5e0bec0..1be308e 100644
--- a/manage.py
+++ b/manage.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 """Django's command-line utility for administrative tasks."""
+
 import os
 import sys
 
 
 def main():
     """Run administrative tasks."""
-    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'watcher_visio.settings')
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "watcher_visio.settings")
     try:
         from django.core.management import execute_from_command_line
     except ImportError as exc:
@@ -18,5 +19,5 @@ def main():
     execute_from_command_line(sys.argv)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/watcher_visio/asgi.py b/watcher_visio/asgi.py
index b957ebc..903585c 100644
--- a/watcher_visio/asgi.py
+++ b/watcher_visio/asgi.py
@@ -11,6 +11,6 @@ import os
 
 from django.core.asgi import get_asgi_application
 
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'watcher_visio.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "watcher_visio.settings")
 
 application = get_asgi_application()
diff --git a/watcher_visio/settings.py b/watcher_visio/settings.py
index 8211dc2..f3c6de1 100644
--- a/watcher_visio/settings.py
+++ b/watcher_visio/settings.py
@@ -24,31 +24,31 @@ USE_MOCK_DATA = os.environ.get("USE_MOCK_DATA", "false").lower() in ("1", "true"
 # See https://docs.djangoproject.com/en/5.2/howto/deployment/checklist/
 
 # SECURITY WARNING: keep the secret key used in production secret!
-SECRET_KEY = 'django-insecure-747*14ir*49hoo6c2225)kxr%4^am0ub_s-m^_7i4cctu)v$g8'
+SECRET_KEY = "django-insecure-747*14ir*49hoo6c2225)kxr%4^am0ub_s-m^_7i4cctu)v$g8"
 
 # SECURITY WARNING: don't run with debug turned on in production!
 DEBUG = True
 
-ALLOWED_HOSTS = ['*']
+ALLOWED_HOSTS = ["*"]
 
 # Application definition
 
 INSTALLED_APPS = [
-    'django.contrib.admin',
-    'django.contrib.auth',
-    'django.contrib.contenttypes',
-    'django.contrib.sessions',
-    'django.contrib.messages',
-    'django.contrib.staticfiles',
-    'dashboard',
+    "django.contrib.admin",
+    "django.contrib.auth",
+    "django.contrib.contenttypes",
+    "django.contrib.sessions",
+    "django.contrib.messages",
+    "django.contrib.staticfiles",
+    "dashboard",
 ]
 
 # Prometheus settings (environment override recommended)
 PROMETHEUS_URL = "http://10.226.74.53:9090/"
 PROMETHEUS_METRICS = {
     "cpu_usage": "rate(libvirt_domain_info_cpu_time_seconds_total)[300s]",
-    "ram_usage": "avg_over_time(libvirt_domain_info_memory_usage_bytes[300s]"
+    "ram_usage": "avg_over_time(libvirt_domain_info_memory_usage_bytes[300s]",
 }
 
 # Openstack cloud settings
@@ -60,45 +60,45 @@ WATCHER_ENDPOINT_NAME = "infra-optim"
 WATCHER_INTERFACE_NAME = "public"
 
 MIDDLEWARE = [
-    'django.middleware.security.SecurityMiddleware',
-    'django.contrib.sessions.middleware.SessionMiddleware',
-    'django.middleware.common.CommonMiddleware',
-    'django.middleware.csrf.CsrfViewMiddleware',
-    'django.contrib.auth.middleware.AuthenticationMiddleware',
-    'django.contrib.messages.middleware.MessageMiddleware',
-    'django.middleware.clickjacking.XFrameOptionsMiddleware',
+    "django.middleware.security.SecurityMiddleware",
+    "django.contrib.sessions.middleware.SessionMiddleware",
+    "django.middleware.common.CommonMiddleware",
+    "django.middleware.csrf.CsrfViewMiddleware",
+    "django.contrib.auth.middleware.AuthenticationMiddleware",
+    "django.contrib.messages.middleware.MessageMiddleware",
+    "django.middleware.clickjacking.XFrameOptionsMiddleware",
 ]
 
 # COOP ignored on non-HTTPS / non-localhost; disable to avoid console warning
 SECURE_CROSS_ORIGIN_OPENER_POLICY = None
 
-ROOT_URLCONF = 'watcher_visio.urls'
+ROOT_URLCONF = "watcher_visio.urls"
 
 TEMPLATES = [
     {
-        'BACKEND': 'django.template.backends.django.DjangoTemplates',
-        'DIRS': [BASE_DIR / 'templates'],
-        'APP_DIRS': True,
-        'OPTIONS': {
-            'context_processors': [
-                'django.template.context_processors.request',
-                'django.contrib.auth.context_processors.auth',
-                'django.contrib.messages.context_processors.messages',
+        "BACKEND": "django.template.backends.django.DjangoTemplates",
+        "DIRS": [BASE_DIR / "templates"],
+        "APP_DIRS": True,
+        "OPTIONS": {
+            "context_processors": [
+                "django.template.context_processors.request",
+                "django.contrib.auth.context_processors.auth",
+                "django.contrib.messages.context_processors.messages",
             ],
         },
     },
 ]
 
-WSGI_APPLICATION = 'watcher_visio.wsgi.application'
+WSGI_APPLICATION = "watcher_visio.wsgi.application"
 
 
 # Database
 # https://docs.djangoproject.com/en/5.2/ref/settings/#databases
 
 DATABASES = {
-    'default': {
-        'ENGINE': 'django.db.backends.sqlite3',
-        'NAME': BASE_DIR / 'db.sqlite3',
+    "default": {
+        "ENGINE": "django.db.backends.sqlite3",
+        "NAME": BASE_DIR / "db.sqlite3",
     }
 }
@@ -108,16 +108,16 @@
 
 AUTH_PASSWORD_VALIDATORS = [
     {
-        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
+        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
     },
     {
-        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
+        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
     {
-        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
+        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
     },
     {
-        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
+        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
     },
 ]
@@ -125,9 +125,9 @@ AUTH_PASSWORD_VALIDATORS = [
 # Internationalization
 # https://docs.djangoproject.com/en/5.2/topics/i18n/
 
-LANGUAGE_CODE = 'en-us'
+LANGUAGE_CODE = "en-us"
 
-TIME_ZONE = 'UTC'
+TIME_ZONE = "UTC"
 
 USE_I18N = True
@@ -137,7 +137,7 @@ USE_TZ = True
 # Static files (CSS, JavaScript, Images)
 # https://docs.djangoproject.com/en/5.2/howto/static-files/
 
-STATIC_URL = '/static/'
+STATIC_URL = "/static/"
 
 STATICFILES_DIRS = [
     BASE_DIR / "static",
@@ -148,13 +148,13 @@ STATIC_ROOT = BASE_DIR / "staticfiles"
 # Default primary key field type
 # https://docs.djangoproject.com/en/5.2/ref/settings/#default-auto-field
 
-DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
+DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
 
 # Dashboard cache (reduces load on OpenStack/Prometheus and allows concurrent users)
 CACHES = {
-    'default': {
-        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
-        'LOCATION': 'watcher-visio-dashboard',
+    "default": {
+        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
+        "LOCATION": "watcher-visio-dashboard",
     }
 }
 DASHBOARD_CACHE_TTL = 120  # seconds
diff --git a/watcher_visio/urls.py b/watcher_visio/urls.py
index 2b37c86..80ed067 100644
--- a/watcher_visio/urls.py
+++ b/watcher_visio/urls.py
@@ -14,13 +14,17 @@ Including another URLconf
     1. Import the include() function: from django.urls import include, path
     2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
 """
+
 from django.conf import settings
 from django.contrib import admin
-from django.urls import path, include
+from django.urls import include, path
 from django.views.generic import RedirectView
 
 urlpatterns = [
-    path('admin/', admin.site.urls),
-    path('favicon.ico', RedirectView.as_view(url=settings.STATIC_URL + 'favicon.ico', permanent=False)),
-    path('', include('dashboard.urls')),
+    path("admin/", admin.site.urls),
+    path(
+        "favicon.ico",
+        RedirectView.as_view(url=settings.STATIC_URL + "favicon.ico", permanent=False),
+    ),
+    path("", include("dashboard.urls")),
 ]
diff --git a/watcher_visio/wsgi.py b/watcher_visio/wsgi.py
index bdd1418..95245d8 100644
--- a/watcher_visio/wsgi.py
+++ b/watcher_visio/wsgi.py
@@ -11,6 +11,6 @@ import os
 
 from django.core.wsgi import get_wsgi_application
 
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'watcher_visio.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "watcher_visio.settings")
 
 application = get_wsgi_application()