diff --git a/dashboard/mock_data.py b/dashboard/mock_data.py index 4efee31..ba5d774 100644 --- a/dashboard/mock_data.py +++ b/dashboard/mock_data.py @@ -1,6 +1,9 @@ """Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus).""" -import json +from dashboard.serializers import ( + serialize_audit_for_response, + serialize_current_cluster_for_template, +) def get_mock_context(): @@ -21,11 +24,24 @@ def get_mock_context(): vram_total = pram_total * vram_overcommit_max # Two sample audits with serialized fields for JS - host_labels = ["compute-0", "compute-1", "compute-2", "compute-3", "compute-4", "compute-5"] - cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 48.5] - cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0] + host_labels = [ + "compute-0", + "compute-1", + "compute-2", + "compute-3", + "compute-4", + "compute-5", + "compute-6", + "compute-7", + "compute-8", + "compute-9", + "compute-10", + "compute-11", + ] + cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 32.1, 36.4, 29.2, 42.2, 41.3, 28.3, 33.3] + cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0, 43.0, 43.0, 44.0, 48.0, 47.0, 49.0] - audits = [ + audits_raw = [ { "id": "mock-audit-uuid-1", "name": "Mock audit (balanced)", @@ -36,20 +52,18 @@ def get_mock_context(): "scope": "Full Cluster", "cpu_weight": "1.0", "ram_weight": "1.0", - "migrations": json.dumps( - [ - { - "instanceName": "instance-1", - "source": "compute-0", - "destination": "compute-3", - "flavor": "m1.small", - "impact": "Low", - } - ] - ), - "host_labels": json.dumps(host_labels), - "cpu_current": json.dumps(cpu_current), - "cpu_projected": json.dumps(cpu_projected), + "migrations": [ + { + "instanceName": "instance-1", + "source": "compute-0", + "destination": "compute-3", + "flavor": "m1.small", + "impact": "Low", + } + ], + "host_labels": host_labels, + "cpu_current": cpu_current, + "cpu_projected": cpu_projected, }, { "id": "mock-audit-uuid-2", @@ -61,12 +75,13 @@ def get_mock_context(): "scope": "Full 
Cluster", "cpu_weight": "1.0", "ram_weight": "1.0", - "migrations": json.dumps([]), - "host_labels": json.dumps(host_labels), - "cpu_current": json.dumps(cpu_current), - "cpu_projected": json.dumps([40.0, 42.0, 50.0, 43.0, 36.0, 45.0]), + "migrations": [], + "host_labels": host_labels, + "cpu_current": cpu_current, + "cpu_projected": [40.0, 42.0, 50.0, 43.0, 36.0, 45.0, 41.0, 44.0, 42.0, 46.0, 43.0, 47.0], }, ] + audits = [serialize_audit_for_response(a) for a in audits_raw] return { "region": { @@ -115,8 +130,7 @@ def get_mock_context(): "third_common_flavor": {"name": "m1.large", "count": 4}, }, "audits": audits, - "current_cluster": { - "host_labels": json.dumps(host_labels), - "cpu_current": json.dumps(cpu_current), - }, + "current_cluster": serialize_current_cluster_for_template( + {"host_labels": host_labels, "cpu_current": cpu_current} + ), } diff --git a/dashboard/openstack_utils/audits.py b/dashboard/openstack_utils/audits.py index 5bc20a3..a5a7cc4 100644 --- a/dashboard/openstack_utils/audits.py +++ b/dashboard/openstack_utils/audits.py @@ -49,41 +49,81 @@ def get_current_cluster_cpu(connection: Connection) -> dict: } +def _fetch_audits_and_action_plans(session, watcher_endpoint): + """GET audits and action_plans from Watcher API. Returns (audits_list, action_plans_list).""" + audits_resp = session.get(f"{watcher_endpoint}/v1/audits") + audits_resp.raise_for_status() + audits_list = audits_resp.json().get("audits") or [] + + actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans") + actionplans_resp.raise_for_status() + action_plans_list = actionplans_resp.json().get("action_plans") or [] + + return audits_list, action_plans_list + + +def _fetch_migrations_for_audit( + connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp +): + """ + Fetch action details for the given action plan and build migrations list and + instance->destination mapping. Returns (migrations, mapping). 
+ """ + migrations = [] + mapping = {} + for action in actions_resp: + action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}") + action_resp.raise_for_status() + action_resp = action_resp.json() + + server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"]) + params = action_resp["input_parameters"] + mapping[params["resource_name"]] = params["destination_node"] + + migrations.append( + { + "instanceName": params["resource_name"], + "source": params["source_node"], + "destination": params["destination_node"], + "flavor": server.flavor.name, + "impact": "Low", + } + ) + return migrations, mapping + + +def _build_projected_cpu_metrics(cpu_data, mapping): + """ + Apply instance->destination mapping to a copy of cpu_data and return + aggregated CPU metrics DataFrame (host, cpu_usage). + """ + projected_cpu_data = copy(cpu_data) + for entry in projected_cpu_data: + if (instance := entry["metric"]["instanceName"]) in mapping: + entry["metric"]["host"] = mapping[instance] + return convert_cpu_data(projected_cpu_data) + + def get_audits(connection: Connection) -> list[dict] | None: session = connection.session - watcher_endpoint = connection.endpoint_for( service_type=WATCHER_ENDPOINT_NAME, interface=WATCHER_INTERFACE_NAME ) - # Collect instances prometheus metrics cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"]) - cpu_metrics = convert_cpu_data(data=cpu_data) - # Fetch audit list - audits_resp = session.get(f"{watcher_endpoint}/v1/audits") - audits_resp.raise_for_status() - audits_resp = audits_resp.json().get("audits") or [] - - # Fetch action plan list - actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans") - actionplans_resp.raise_for_status() - actionplans_resp = actionplans_resp.json().get("action_plans") or [] - - # Filtering audits by PENDING state - pending_audits = [plan for plan in actionplans_resp if plan["state"] == "RECOMMENDED"] + _, action_plans_list = 
_fetch_audits_and_action_plans(session, watcher_endpoint) + pending_audits = [plan for plan in action_plans_list if plan["state"] == "RECOMMENDED"] result = [] for item in pending_audits: - projected_cpu_data = copy(cpu_data) - audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}") audit_resp.raise_for_status() audit_resp = audit_resp.json() actionplan = next( - filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], actionplans_resp), None + filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], action_plans_list), None ) if actionplan is None: continue @@ -94,32 +134,10 @@ def get_audits(connection: Connection) -> list[dict] | None: actions_resp.raise_for_status() actions_resp = actions_resp.json().get("actions") or [] - migrations = [] - mapping = {} - for action in actions_resp: - action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}") - action_resp.raise_for_status() - action_resp = action_resp.json() - - server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"]) - params = action_resp["input_parameters"] - mapping[params["resource_name"]] = params["destination_node"] - - migrations.append( - { - "instanceName": action_resp["input_parameters"]["resource_name"], - "source": action_resp["input_parameters"]["source_node"], - "destination": action_resp["input_parameters"]["destination_node"], - "flavor": server.flavor.name, - "impact": "Low", - } - ) - - for entry in projected_cpu_data: - if (instance := entry["metric"]["instanceName"]) in mapping: - entry["metric"]["host"] = mapping[instance] - - projected_cpu_metrics = convert_cpu_data(projected_cpu_data) + migrations, mapping = _fetch_migrations_for_audit( + connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp + ) + projected_cpu_metrics = _build_projected_cpu_metrics(cpu_data, mapping) result.append( { diff --git a/dashboard/openstack_utils/flavor.py b/dashboard/openstack_utils/flavor.py index 
5eb56bd..0b0240b 100644 --- a/dashboard/openstack_utils/flavor.py +++ b/dashboard/openstack_utils/flavor.py @@ -1,21 +1,21 @@ -from collections import Counter - -from openstack.connection import Connection - - -def get_flavor_list(connection: Connection) -> dict: - servers = list(connection.compute.servers(all_projects=True)) - flavor_ids = [s.flavor["id"] for s in servers if "id" in s.flavor] - flavor_count = Counter(flavor_ids).most_common() - - flavors = list(flavor_count) - - result = {} - placeholder = {"name": "—", "count": 0} - for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]: - if len(flavors) > idx: - result[f"{prefix}_common_flavor"] = {"name": flavors[idx][0], "count": flavors[idx][1]} - else: - result[f"{prefix}_common_flavor"] = placeholder - - return result +from collections import Counter + +from openstack.connection import Connection + + +def get_flavor_list(connection: Connection) -> dict: + servers = list(connection.compute.servers(all_projects=True)) + flavor_ids = [s.flavor["id"] for s in servers if "id" in s.flavor] + flavor_count = Counter(flavor_ids).most_common() + + flavors = list(flavor_count) + + result = {} + placeholder = {"name": "—", "count": 0} + for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]: + if len(flavors) > idx: + result[f"{prefix}_common_flavor"] = {"name": flavors[idx][0], "count": flavors[idx][1]} + else: + result[f"{prefix}_common_flavor"] = placeholder + + return result diff --git a/dashboard/prometheus_utils/query.py b/dashboard/prometheus_utils/query.py index 0a5d093..03f4553 100644 --- a/dashboard/prometheus_utils/query.py +++ b/dashboard/prometheus_utils/query.py @@ -1,9 +1,37 @@ +from concurrent.futures import ThreadPoolExecutor, as_completed + import requests from watcher_visio.settings import PROMETHEUS_URL # Timeout for lightweight health check (seconds) CHECK_TIMEOUT = 5 +# Dashboard Prometheus queries (query_key -> query string), run in parallel +DASHBOARD_QUERIES = { + 
"hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})", + "pcpu_total": ( + "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) " + "without (cpu,mode))" + ), + "pcpu_usage": "sum(node_load5{job='node_exporter_compute'})", + "vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)", + "vcpu_overcommit_max": ( + "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})" + ), + "pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})", + "pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})", + "vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)", + "vram_overcommit_max": ( + "avg(avg_over_time(" + "openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))" + ), + "vm_count": "sum(libvirt_domain_state_code)", + "vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})", +} + +# Keys that should be parsed as float (rest as int) +DASHBOARD_FLOAT_KEYS = frozenset(("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max")) + def check_prometheus() -> dict: """ @@ -36,3 +64,23 @@ def query_prometheus(query: str) -> str | list[str]: return result else: return result[0]["value"][1] + + +def fetch_dashboard_metrics() -> dict: + """Run all dashboard Prometheus queries in parallel and return a dict of name -> value.""" + result = {} + with ThreadPoolExecutor(max_workers=len(DASHBOARD_QUERIES)) as executor: + future_to_key = { + executor.submit(query_prometheus, query=q): key for key, q in DASHBOARD_QUERIES.items() + } + for future in as_completed(future_to_key): + key = future_to_key[future] + try: + raw = future.result() + if key in DASHBOARD_FLOAT_KEYS: + result[key] = float(raw) + else: + result[key] = int(raw) + except (ValueError, TypeError): + result[key] = float(0) if key in DASHBOARD_FLOAT_KEYS else 0 + return result diff --git a/dashboard/serializers.py b/dashboard/serializers.py new file mode 100644 index 
0000000..e47abf2 --- /dev/null +++ b/dashboard/serializers.py @@ -0,0 +1,32 @@ +"""Serialization helpers for dashboard context and API responses.""" + +import json + + +def _ensure_json_str(value): + """Return value as JSON string; if already a string, return as-is.""" + return value if isinstance(value, str) else json.dumps(value) + + +def serialize_audit_for_response(audit: dict) -> dict: + """ + Return a copy of the audit dict with migrations, host_labels, cpu_current, + and cpu_projected serialized as JSON strings (for template/API response). + """ + result = dict(audit) + result["migrations"] = _ensure_json_str(audit.get("migrations")) + result["host_labels"] = _ensure_json_str(audit.get("host_labels")) + result["cpu_current"] = _ensure_json_str(audit.get("cpu_current")) + result["cpu_projected"] = _ensure_json_str(audit.get("cpu_projected")) + return result + + +def serialize_current_cluster_for_template(current_cluster: dict) -> dict: + """ + Return current_cluster with host_labels and cpu_current as JSON strings + for template embedding (e.g. in index.html). 
+ """ + return { + "host_labels": _ensure_json_str(current_cluster.get("host_labels")), + "cpu_current": _ensure_json_str(current_cluster.get("cpu_current")), + } diff --git a/dashboard/stats.py b/dashboard/stats.py new file mode 100644 index 0000000..a6eb75a --- /dev/null +++ b/dashboard/stats.py @@ -0,0 +1,76 @@ +"""Dashboard statistics building and cache key constants.""" + +# Cache keys used by views +CACHE_KEY_STATS = "dashboard_stats" +CACHE_KEY_AUDITS = "dashboard_audits" +CACHE_KEY_CURRENT_CLUSTER = "dashboard_current_cluster" +CACHE_KEY_SOURCE_STATUS = "dashboard_source_status" + +# Empty structures for skeleton context (same shape as build_stats output) +EMPTY_FLAVORS = { + "first_common_flavor": {"name": "—", "count": 0}, + "second_common_flavor": None, + "third_common_flavor": None, +} + + +def build_stats(metrics: dict, region_name: str, flavors: dict) -> dict: + """ + Build stats dict from raw metrics and OpenStack-derived data. + Returns region, pcpu, vcpu, pram, vram, vm, flavors (no audits/current_cluster). 
+ """ + hosts_total = metrics.get("hosts_total") or 1 + pcpu_total = metrics.get("pcpu_total", 0) + pcpu_usage = metrics.get("pcpu_usage", 0) + vcpu_allocated = metrics.get("vcpu_allocated", 0) + vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0) + pram_total = metrics.get("pram_total", 0) + pram_usage = metrics.get("pram_usage", 0) + vram_allocated = metrics.get("vram_allocated", 0) + vram_overcommit_max = metrics.get("vram_overcommit_max", 0) + vm_count = metrics.get("vm_count", 0) + vm_active = metrics.get("vm_active", 0) + + vcpu_total = pcpu_total * vcpu_overcommit_max + vram_total = pram_total * vram_overcommit_max + + return { + "region": {"name": region_name, "hosts_total": hosts_total}, + "pcpu": { + "total": pcpu_total, + "usage": pcpu_usage, + "free": pcpu_total - pcpu_usage, + "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0, + }, + "vcpu": { + "total": vcpu_total, + "allocated": vcpu_allocated, + "free": vcpu_total - vcpu_allocated, + "allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0, + "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0, + "overcommit_max": vcpu_overcommit_max, + }, + "pram": { + "total": pram_total, + "usage": pram_usage, + "free": pram_total - pram_usage, + "used_percentage": (pram_usage / pram_total * 100) if pram_total else 0, + }, + "vram": { + "total": vram_total, + "allocated": vram_allocated, + "free": vram_total - vram_allocated, + "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0, + "overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0, + "overcommit_max": vram_overcommit_max, + }, + "vm": { + "count": vm_count, + "active": vm_active, + "stopped": vm_count - vm_active, + "avg_cpu": vcpu_allocated / vm_count if vm_count else 0, + "avg_ram": vram_allocated / vm_count if vm_count else 0, + "density": vm_count / hosts_total if hosts_total else 0, + }, + "flavors": flavors, + } diff --git 
a/dashboard/templatetags/mathfilters.py b/dashboard/templatetags/mathfilters.py index 7d602d9..22dafa6 100644 --- a/dashboard/templatetags/mathfilters.py +++ b/dashboard/templatetags/mathfilters.py @@ -1,60 +1,60 @@ -from django import template - -register = template.Library() - - -@register.filter -def div(a, b): - try: - return float(a) / float(b) - except (TypeError, ValueError, ZeroDivisionError): - return 0 - - -@register.filter -def mul(a, b): - try: - return float(a) * float(b) - except (TypeError, ValueError): - return 0 - - -@register.filter -def sub(a, b): - try: - return float(a) - float(b) - except (TypeError, ValueError): - return 0 - - -@register.filter -def convert_bytes(bytes_value, target_unit="GB"): - """ - Convert bytes to specific unit - - Args: - bytes_value: Size in bytes - target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB') - precision: Number of decimal places - - Returns: - Float value in target unit - """ - try: - bytes_value = float(bytes_value) - except (ValueError, TypeError): - return 0.0 - conversion_factors = { - "B": 1, - "KB": 1024, - "MB": 1024 * 1024, - "GB": 1024 * 1024 * 1024, - "TB": 1024 * 1024 * 1024 * 1024, - } - - target_unit = target_unit.upper() - if target_unit not in conversion_factors: - target_unit = "MB" - - result = bytes_value / conversion_factors[target_unit] - return round(result, 1) +from django import template + +register = template.Library() + + +@register.filter +def div(a, b): + try: + return float(a) / float(b) + except (TypeError, ValueError, ZeroDivisionError): + return 0 + + +@register.filter +def mul(a, b): + try: + return float(a) * float(b) + except (TypeError, ValueError): + return 0 + + +@register.filter +def sub(a, b): + try: + return float(a) - float(b) + except (TypeError, ValueError): + return 0 + + +@register.filter +def convert_bytes(bytes_value, target_unit="GB"): + """ + Convert bytes to specific unit + + Args: + bytes_value: Size in bytes + target_unit: Target unit ('B', 'KB', 'MB', 
'GB', 'TB') + precision: Number of decimal places + + Returns: + Float value in target unit + """ + try: + bytes_value = float(bytes_value) + except (ValueError, TypeError): + return 0.0 + conversion_factors = { + "B": 1, + "KB": 1024, + "MB": 1024 * 1024, + "GB": 1024 * 1024 * 1024, + "TB": 1024 * 1024 * 1024 * 1024, + } + + target_unit = target_unit.upper() + if target_unit not in conversion_factors: + target_unit = "MB" + + result = bytes_value / conversion_factors[target_unit] + return round(result, 1) diff --git a/dashboard/tests/test_serializers.py b/dashboard/tests/test_serializers.py new file mode 100644 index 0000000..4ca50e6 --- /dev/null +++ b/dashboard/tests/test_serializers.py @@ -0,0 +1,57 @@ +"""Tests for dashboard.serializers.""" + +import json + +from django.test import TestCase + +from dashboard.serializers import ( + serialize_audit_for_response, + serialize_current_cluster_for_template, +) + + +class SerializeAuditForResponseTest(TestCase): + def test_serializes_list_fields_to_json_strings(self): + audit = { + "id": "audit-1", + "name": "Test", + "migrations": [{"instanceName": "i1", "source": "h1", "destination": "h2"}], + "host_labels": ["h1", "h2"], + "cpu_current": [10.0, 20.0], + "cpu_projected": [15.0, 25.0], + } + result = serialize_audit_for_response(audit) + self.assertEqual(result["id"], "audit-1") + self.assertEqual(result["name"], "Test") + self.assertEqual(json.loads(result["migrations"]), audit["migrations"]) + self.assertEqual(json.loads(result["host_labels"]), audit["host_labels"]) + self.assertEqual(json.loads(result["cpu_current"]), audit["cpu_current"]) + self.assertEqual(json.loads(result["cpu_projected"]), audit["cpu_projected"]) + + def test_leaves_already_serialized_strings_unchanged(self): + audit = { + "id": "a", + "migrations": "[1,2]", + "host_labels": "[]", + "cpu_current": "[0]", + "cpu_projected": "[0]", + } + result = serialize_audit_for_response(audit) + self.assertEqual(result["migrations"], "[1,2]") + 
self.assertEqual(result["host_labels"], "[]") + self.assertEqual(result["cpu_current"], "[0]") + self.assertEqual(result["cpu_projected"], "[0]") + + +class SerializeCurrentClusterForTemplateTest(TestCase): + def test_serializes_lists_to_json_strings(self): + cluster = {"host_labels": ["c0", "c1"], "cpu_current": [30.0, 40.0]} + result = serialize_current_cluster_for_template(cluster) + self.assertEqual(json.loads(result["host_labels"]), cluster["host_labels"]) + self.assertEqual(json.loads(result["cpu_current"]), cluster["cpu_current"]) + + def test_leaves_already_serialized_strings_unchanged(self): + cluster = {"host_labels": "[]", "cpu_current": "[]"} + result = serialize_current_cluster_for_template(cluster) + self.assertEqual(result["host_labels"], "[]") + self.assertEqual(result["cpu_current"], "[]") diff --git a/dashboard/tests/test_views.py b/dashboard/tests/test_views.py index 042fff6..813e0dd 100644 --- a/dashboard/tests/test_views.py +++ b/dashboard/tests/test_views.py @@ -96,7 +96,7 @@ class CollectContextTest(TestCase): return conn @patch("dashboard.views.get_current_cluster_cpu") - @patch("dashboard.views._fetch_prometheus_metrics") + @patch("dashboard.views.fetch_dashboard_metrics") @patch("dashboard.views.get_audits") @patch("dashboard.views.get_flavor_list") @patch("dashboard.views.get_connection") @@ -152,8 +152,6 @@ class CollectContextTest(TestCase): self.assertEqual(context["flavors"]["first_common_flavor"]["name"], "m1.small") self.assertEqual(len(context["audits"]), 1) # Serialized for JS - import json - self.assertIsInstance(context["audits"][0]["migrations"], str) self.assertEqual(json.loads(context["audits"][0]["host_labels"]), ["h0", "h1"]) self.assertIn("current_cluster", context) @@ -167,7 +165,7 @@ class ApiStatsTest(TestCase): def setUp(self): self.factory = RequestFactory() - @patch("dashboard.views._fetch_prometheus_metrics") + @patch("dashboard.views.fetch_dashboard_metrics") @patch("dashboard.views.get_flavor_list") 
@patch("dashboard.views.get_connection") def test_api_stats_returns_json_with_expected_keys( diff --git a/dashboard/views.py b/dashboard/views.py index 3da163b..4252b43 100644 --- a/dashboard/views.py +++ b/dashboard/views.py @@ -1,6 +1,3 @@ -import json -from concurrent.futures import ThreadPoolExecutor, as_completed - from django.conf import settings from django.core.cache import cache from django.http import JsonResponse @@ -10,53 +7,36 @@ from dashboard.mock_data import get_mock_context from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu from dashboard.openstack_utils.connect import check_openstack, get_connection from dashboard.openstack_utils.flavor import get_flavor_list -from dashboard.prometheus_utils.query import check_prometheus, query_prometheus - -# Prometheus queries run in parallel (query_key -> query string) -_PROMETHEUS_QUERIES = { - "hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})", - "pcpu_total": ( - "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) " - "without (cpu,mode))" - ), - "pcpu_usage": "sum(node_load5{job='node_exporter_compute'})", - "vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)", - "vcpu_overcommit_max": ( - "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})" - ), - "pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})", - "pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})", - "vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)", - "vram_overcommit_max": ( - "avg(avg_over_time(" - "openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))" - ), - "vm_count": "sum(libvirt_domain_state_code)", - "vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})", -} +from dashboard.prometheus_utils.query import check_prometheus, fetch_dashboard_metrics +from dashboard.serializers import ( + serialize_audit_for_response, + 
serialize_current_cluster_for_template, +) +from dashboard.stats import ( + CACHE_KEY_AUDITS, + CACHE_KEY_CURRENT_CLUSTER, + CACHE_KEY_SOURCE_STATUS, + CACHE_KEY_STATS, + EMPTY_FLAVORS, + build_stats, +) -def _fetch_prometheus_metrics(): - """Run all Prometheus queries in parallel and return a dict of name -> value.""" - result = {} - with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor: - future_to_key = { - executor.submit(query_prometheus, query=q): key - for key, q in _PROMETHEUS_QUERIES.items() - } - for future in as_completed(future_to_key): - key = future_to_key[future] - try: - raw = future.result() - if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max"): - result[key] = float(raw) - else: - result[key] = int(raw) - except (ValueError, TypeError): - result[key] = ( - 0 if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max") else 0 - ) - return result +def _empty_metrics(): + """Metrics dict with zero/default values for skeleton context.""" + return { + "hosts_total": 0, + "pcpu_total": 0, + "pcpu_usage": 0, + "vcpu_allocated": 0, + "vcpu_overcommit_max": 0, + "pram_total": 0, + "pram_usage": 0, + "vram_allocated": 0, + "vram_overcommit_max": 0, + "vm_count": 0, + "vm_active": 0, + } def collect_context(): @@ -64,86 +44,11 @@ def collect_context(): region_name = connection._compute_region flavors = get_flavor_list(connection=connection) audits = get_audits(connection=connection) - - metrics = _fetch_prometheus_metrics() - hosts_total = metrics.get("hosts_total") or 1 - pcpu_total = metrics.get("pcpu_total", 0) - pcpu_usage = metrics.get("pcpu_usage", 0) - vcpu_allocated = metrics.get("vcpu_allocated", 0) - vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0) - pram_total = metrics.get("pram_total", 0) - pram_usage = metrics.get("pram_usage", 0) - vram_allocated = metrics.get("vram_allocated", 0) - vram_overcommit_max = metrics.get("vram_overcommit_max", 0) - vm_count = metrics.get("vm_count", 0) - 
vm_active = metrics.get("vm_active", 0) - - vcpu_total = pcpu_total * vcpu_overcommit_max - vram_total = pram_total * vram_overcommit_max - - context = { - # <--- Region data ---> - "region": { - "name": region_name, - "hosts_total": hosts_total, - }, - # <--- CPU data ---> - # pCPU data - "pcpu": { - "total": pcpu_total, - "usage": pcpu_usage, - "free": pcpu_total - pcpu_usage, - "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0, - }, - # vCPU data - "vcpu": { - "total": vcpu_total, - "allocated": vcpu_allocated, - "free": vcpu_total - vcpu_allocated, - "allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0, - "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0, - "overcommit_max": vcpu_overcommit_max, - }, - # <--- RAM data ---> - # pRAM data - "pram": { - "total": pram_total, - "usage": pram_usage, - "free": pram_total - pram_usage, - "used_percentage": (pram_usage / pram_total * 100) if pram_total else 0, - }, - # vRAM data - "vram": { - "total": vram_total, - "allocated": vram_allocated, - "free": vram_total - vram_allocated, - "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0, - "overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0, - "overcommit_max": vram_overcommit_max, - }, - # <--- VM data ---> - "vm": { - "count": vm_count, - "active": vm_active, - "stopped": vm_count - vm_active, - "avg_cpu": vcpu_allocated / vm_count if vm_count else 0, - "avg_ram": vram_allocated / vm_count if vm_count else 0, - "density": vm_count / hosts_total if hosts_total else 0, - }, - "flavors": flavors, - "audits": audits, - } + metrics = fetch_dashboard_metrics() + context = build_stats(metrics, region_name, flavors) + context["audits"] = [serialize_audit_for_response(a) for a in audits] current_cluster = get_current_cluster_cpu(connection) - context["current_cluster"] = { - "host_labels": json.dumps(current_cluster["host_labels"]), - "cpu_current": 
json.dumps(current_cluster["cpu_current"]), - } - # Serialize audit list fields for JavaScript so cached context is render-ready - for audit in context["audits"]: - audit["migrations"] = json.dumps(audit["migrations"]) - audit["host_labels"] = json.dumps(audit["host_labels"]) - audit["cpu_current"] = json.dumps(audit["cpu_current"]) - audit["cpu_projected"] = json.dumps(audit["cpu_projected"]) + context["current_cluster"] = serialize_current_cluster_for_template(current_cluster) return context @@ -152,110 +57,27 @@ def collect_stats(): connection = get_connection() region_name = connection._compute_region flavors = get_flavor_list(connection=connection) - metrics = _fetch_prometheus_metrics() - hosts_total = metrics.get("hosts_total") or 1 - pcpu_total = metrics.get("pcpu_total", 0) - pcpu_usage = metrics.get("pcpu_usage", 0) - vcpu_allocated = metrics.get("vcpu_allocated", 0) - vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0) - pram_total = metrics.get("pram_total", 0) - pram_usage = metrics.get("pram_usage", 0) - vram_allocated = metrics.get("vram_allocated", 0) - vram_overcommit_max = metrics.get("vram_overcommit_max", 0) - vm_count = metrics.get("vm_count", 0) - vm_active = metrics.get("vm_active", 0) - vcpu_total = pcpu_total * vcpu_overcommit_max - vram_total = pram_total * vram_overcommit_max - return { - "region": {"name": region_name, "hosts_total": hosts_total}, - "pcpu": { - "total": pcpu_total, - "usage": pcpu_usage, - "free": pcpu_total - pcpu_usage, - "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0, - }, - "vcpu": { - "total": vcpu_total, - "allocated": vcpu_allocated, - "free": vcpu_total - vcpu_allocated, - "allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0, - "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0, - "overcommit_max": vcpu_overcommit_max, - }, - "pram": { - "total": pram_total, - "usage": pram_usage, - "free": pram_total - pram_usage, - 
"used_percentage": (pram_usage / pram_total * 100) if pram_total else 0, - }, - "vram": { - "total": vram_total, - "allocated": vram_allocated, - "free": vram_total - vram_allocated, - "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0, - "overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0, - "overcommit_max": vram_overcommit_max, - }, - "vm": { - "count": vm_count, - "active": vm_active, - "stopped": vm_count - vm_active, - "avg_cpu": vcpu_allocated / vm_count if vm_count else 0, - "avg_ram": vram_allocated / vm_count if vm_count else 0, - "density": vm_count / hosts_total if hosts_total else 0, - }, - "flavors": flavors, - } + metrics = fetch_dashboard_metrics() + return build_stats(metrics, region_name, flavors) def collect_audits(): """Build audits list with serialized fields for frontend.""" connection = get_connection() audits = get_audits(connection=connection) - for audit in audits: - audit["migrations"] = json.dumps(audit["migrations"]) - audit["host_labels"] = json.dumps(audit["host_labels"]) - audit["cpu_current"] = json.dumps(audit["cpu_current"]) - audit["cpu_projected"] = json.dumps(audit["cpu_projected"]) - return audits + return [serialize_audit_for_response(a) for a in audits] def _skeleton_context(): """Minimal context for skeleton-only index render.""" - empty_flavors = { - "first_common_flavor": {"name": "—", "count": 0}, - "second_common_flavor": None, - "third_common_flavor": None, - } - return { - "skeleton": True, - "region": {"name": "—", "hosts_total": 0}, - "pcpu": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0}, - "pram": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0}, - "vcpu": { - "total": 0, - "allocated": 0, - "free": 0, - "allocated_percentage": 0, - "overcommit_ratio": 0, - "overcommit_max": 0, - }, - "vram": { - "total": 0, - "allocated": 0, - "free": 0, - "allocated_percentage": 0, - "overcommit_ratio": 0, - "overcommit_max": 0, - }, - "vm": {"count": 0, 
"active": 0, "stopped": 0, "avg_cpu": 0, "avg_ram": 0, "density": 0}, - "flavors": empty_flavors, - "audits": [], - "current_cluster": { - "host_labels": "[]", - "cpu_current": "[]", - }, + context = build_stats(_empty_metrics(), "—", EMPTY_FLAVORS) + context["skeleton"] = True + context["audits"] = [] + context["current_cluster"] = { + "host_labels": "[]", + "cpu_current": "[]", } + return context def index(request): @@ -267,28 +89,25 @@ def index(request): def api_stats(request): - cache_key = "dashboard_stats" cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120) - data = cache.get(cache_key) + data = cache.get(CACHE_KEY_STATS) if data is None: data = collect_stats() - cache.set(cache_key, data, timeout=cache_ttl) + cache.set(CACHE_KEY_STATS, data, timeout=cache_ttl) return JsonResponse(data) def api_audits(request): - cache_key_audits = "dashboard_audits" - cache_key_cluster = "dashboard_current_cluster" cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120) - audits = cache.get(cache_key_audits) - current_cluster = cache.get(cache_key_cluster) + audits = cache.get(CACHE_KEY_AUDITS) + current_cluster = cache.get(CACHE_KEY_CURRENT_CLUSTER) if audits is None: audits = collect_audits() - cache.set(cache_key_audits, audits, timeout=cache_ttl) + cache.set(CACHE_KEY_AUDITS, audits, timeout=cache_ttl) if current_cluster is None: connection = get_connection() current_cluster = get_current_cluster_cpu(connection) - cache.set(cache_key_cluster, current_cluster, timeout=cache_ttl) + cache.set(CACHE_KEY_CURRENT_CLUSTER, current_cluster, timeout=cache_ttl) return JsonResponse({"audits": audits, "current_cluster": current_cluster}) @@ -302,13 +121,12 @@ def api_source_status(request): } ) - cache_key = "dashboard_source_status" cache_ttl = getattr(settings, "SOURCE_STATUS_CACHE_TTL", 30) - data = cache.get(cache_key) + data = cache.get(CACHE_KEY_SOURCE_STATUS) if data is None: data = { "prometheus": check_prometheus(), "openstack": check_openstack(), } - 
cache.set(cache_key, data, timeout=cache_ttl) + cache.set(CACHE_KEY_SOURCE_STATUS, data, timeout=cache_ttl) return JsonResponse(data) diff --git a/docs/api_context.md b/docs/api_context.md new file mode 100644 index 0000000..b6c6399 --- /dev/null +++ b/docs/api_context.md @@ -0,0 +1,80 @@ +# Dashboard API and context contract + +This document describes the structure of data passed to the index template and returned by the dashboard API endpoints. Cache keys are defined in `dashboard/stats.py`. + +## Index page context (server-rendered) + +When the index is rendered with full data (e.g. `USE_MOCK_DATA=True` or after JS loads from API), the template receives a context with these top-level keys: + +| Key | Description | +|-----|-------------| +| `region` | `{ "name": str, "hosts_total": int }` | +| `pcpu` | Physical CPU: `total`, `usage`, `free`, `used_percentage` | +| `vcpu` | Virtual CPU: `total`, `allocated`, `free`, `allocated_percentage`, `overcommit_ratio`, `overcommit_max` | +| `pram` | Physical RAM (bytes): `total`, `usage`, `free`, `used_percentage` | +| `vram` | Virtual RAM (bytes): `total`, `allocated`, `free`, `allocated_percentage`, `overcommit_ratio`, `overcommit_max` | +| `vm` | VMs: `count`, `active`, `stopped`, `avg_cpu`, `avg_ram`, `density` | +| `flavors` | `first_common_flavor`, `second_common_flavor`, `third_common_flavor` — each `{ "name": str, "count": int }` or `None`. The `name` may be a human-readable flavor name or a flavor UUID depending on OpenStack. | +| `audits` | List of audit objects (see below). For template, `migrations`, `host_labels`, `cpu_current`, `cpu_projected` are JSON strings. | +| `current_cluster` | `{ "host_labels": str (JSON array), "cpu_current": str (JSON array) }` for embedding in the page. | +| `skeleton` | Optional boolean; when true, stats placeholders are shown and data is loaded via API. 
| + +## Single audit object (for template / API response) + +When serialized for the template or for `api/audits`, each audit has: + +| Field | Type | Description | +|-------|------|-------------| +| `id` | str | Audit UUID | +| `name` | str | Audit name | +| `created_at` | str | ISO 8601 datetime | +| `strategy` | str | Strategy name | +| `goal` | str | Goal name | +| `type` | str | e.g. `ONESHOT` | +| `scope` | str | e.g. `Full Cluster` | +| `cpu_weight` | str | Weight parameter | +| `ram_weight` | str | Weight parameter | +| `migrations` | str (template) / list (API raw) | JSON string of migration list, or list of `{ instanceName, source, destination, flavor, impact }` | +| `host_labels` | str (template) / list (API raw) | JSON string of host names, or list | +| `cpu_current` | str (template) / list (API raw) | JSON string of CPU usage per host, or list of numbers | +| `cpu_projected` | str (template) / list (API raw) | JSON string of projected CPU per host, or list of numbers | + +For the **index template**, `migrations`, `host_labels`, `cpu_current`, and `cpu_projected` are always JSON strings so they can be embedded in the page. For **api/audits**, `audits` are returned with these four fields as JSON strings (same as template). The **current_cluster** in the API response uses raw lists (see below). + +## GET /api/stats/ + +Returns a JSON object with the same keys as the index context, **excluding** `audits`, `current_cluster`, and `skeleton`: `region`, `pcpu`, `vcpu`, `pram`, `vram`, `vm`, `flavors`. All numeric values are numbers; sizes are in bytes where applicable. + +## GET /api/audits/ + +Returns: + +```json +{ + "audits": [ /* list of audit objects with migrations, host_labels, cpu_current, cpu_projected as JSON strings */ ], + "current_cluster": { + "host_labels": [ "compute-0", "compute-1", ... ], + "cpu_current": [ 30.5, 42.1, ... ] + } +} +``` + +Here `audits` use the same serialized form as the template (JSON strings for list fields). 
The `current_cluster` is returned with **raw lists** (not JSON strings) so the frontend can use them directly without parsing. + +## GET /api/source-status/ + +Returns: + +```json +{ + "prometheus": { "status": "ok" | "error" | "mock", "message"?: "..." }, + "openstack": { "status": "ok" | "error" | "mock", "message"?: "..." } +} +``` + +## Cache keys (dashboard/stats.py) + +- `CACHE_KEY_STATS` — stats for `/api/stats/` +- `CACHE_KEY_AUDITS` — serialized audits list +- `CACHE_KEY_CURRENT_CLUSTER` — raw current_cluster (host_labels, cpu_current lists) +- `CACHE_KEY_SOURCE_STATUS` — source status result diff --git a/package.json b/package.json index f45326e..b28524d 100644 --- a/package.json +++ b/package.json @@ -1,29 +1,29 @@ -{ - "name": "watcher-visio", - "version": "1.0.0", - "description": "", - "main": "index.js", - "scripts": { - "build": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --minify", - "dev": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --watch" - }, - "repository": { - "type": "git", - "url": "https://git.arnike.ru/Arnike/watcher-visio.git" - }, - "keywords": [], - "author": "", - "license": "ISC", - "type": "commonjs", - "devDependencies": { - "@fontsource/dm-sans": "^5.2.8", - "@tailwindcss/typography": "^0.5.19", - "autoprefixer": "^10.4.22", - "daisyui": "^5.5.5", - "postcss": "^8.5.6", - "tailwindcss": "^4.1.17" - }, - "dependencies": { - "@tailwindcss/cli": "^4.1.17" - } -} +{ + "name": "watcher-visio", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "build": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --minify", + "dev": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --watch" + }, + "repository": { + "type": "git", + "url": "https://git.arnike.ru/Arnike/watcher-visio.git" + }, + "keywords": [], + "author": "", + "license": "ISC", + "type": "commonjs", + "devDependencies": { + "@fontsource/dm-sans": 
"^5.2.8", + "@tailwindcss/typography": "^0.5.19", + "autoprefixer": "^10.4.22", + "daisyui": "^5.5.5", + "postcss": "^8.5.6", + "tailwindcss": "^4.1.17" + }, + "dependencies": { + "@tailwindcss/cli": "^4.1.17" + } +} diff --git a/static/css/main.css b/static/css/main.css index 672fd8e..fd9a0fa 100644 --- a/static/css/main.css +++ b/static/css/main.css @@ -96,7 +96,7 @@ --border: 1px; --depth: 1; --noise: 0; - --chart-grid-color: color-mix(in oklch, var(--color-base-content) 22%, transparent); + --chart-grid-color: color-mix(in oklch, var(--color-base-content) 10%, transparent); } @plugin "daisyui/theme" { @@ -133,7 +133,7 @@ --border: 1px; --depth: 1; --noise: 0; - --chart-grid-color: color-mix(in oklch, var(--color-base-content) 22%, transparent); + --chart-grid-color: color-mix(in oklch, var(--color-base-content) 10%, transparent); } /* VTB gradient (both themes) */ @@ -229,16 +229,64 @@ label.swap:focus-within:not(.theme-toggle) { @source "../../templates"; /* --- Print (Save as PDF) --- */ +@page { + size: A4; + margin: 15mm; +} + @media print { + /* Force printable area width (A4 minus margins) so layout doesn't use screen width */ + html { + width: 180mm !important; + min-width: 180mm !important; + max-width: 180mm !important; + margin: 0 !important; + padding: 0 !important; + overflow-x: hidden !important; + } + body { + width: 180mm !important; + min-width: 180mm !important; + max-width: 180mm !important; + margin: 0 !important; + padding: 0 !important; + overflow-x: hidden !important; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; + box-sizing: border-box !important; + } + body *, + body *::before, + body *::after { + box-sizing: border-box !important; + } + /* Allow flex/grid children to shrink so they don't force overflow */ + body * { + min-width: 0 !important; + } .no-print { display: none !important; } .print-only { display: block !important; } + /* Main and content: stay within body width */ + main.container { + width: 100% 
!important; + max-width: 100% !important; + margin: 0 !important; + padding: 0.5rem 0.5rem 0 !important; + min-width: 0 !important; + } + #dashboard-content { + width: 100% !important; + max-width: 100% !important; + min-width: 0 !important; + overflow-x: hidden !important; + padding: 0.5rem 0 !important; + } /* Keep card backgrounds and colors when printing */ .card, - main, .badge, .progress { -webkit-print-color-adjust: exact; @@ -253,13 +301,49 @@ label.swap:focus-within:not(.theme-toggle) { break-inside: avoid; page-break-inside: avoid; } - /* Reduce top padding so content starts higher */ - main { - padding-top: 0.5rem !important; + /* Tables: fit to page, allow column shrink */ + .overflow-x-auto { + max-width: 100% !important; + overflow-x: visible !important; + } + .table { + table-layout: fixed !important; + width: 100% !important; + max-width: 100% !important; + } + .table td, + .table th { + overflow: hidden; + text-overflow: ellipsis; + } + /* Chart: constrain so it doesn't overflow (canvas has fixed size from Chart.js) */ + section[aria-label="CPU distribution chart"] .card-body { + max-width: 100% !important; + overflow: hidden !important; + } + section[aria-label="CPU distribution chart"] .h-48, + section[aria-label="CPU distribution chart"] [class*="h-48"] { + max-width: 100% !important; + min-height: 10rem !important; + } + section[aria-label="CPU distribution chart"] canvas { + max-width: 100% !important; + height: auto !important; + } + /* Navbar fits page width */ + .navbar { + width: 100% !important; + max-width: 100% !important; } } @media screen { .print-only { display: none !important; } +} + +/* Multi-line tooltip (formula): preserve newlines from data-tip */ +.tooltip-formula[data-tip]::before { + white-space: pre-line; + text-align: left; } \ No newline at end of file diff --git a/static/js/dashboard.js b/static/js/dashboard.js new file mode 100644 index 0000000..e57d1ca --- /dev/null +++ b/static/js/dashboard.js @@ -0,0 +1,360 @@ +/** + 
* Dashboard logic: stats rendering, audit selector, CPU chart, migration table. + * + * Expected globals (set by index.html / inline script): + * - SKELETON_MODE (boolean): whether to fetch data from API instead of using embedded context + * - CURRENT_CLUSTER: { host_labels, cpu_current } for "current" cluster chart when no audits + * - auditData: object keyed by audit id, each value { name, migrations, hostData: { labels, current, projected } } + * - INITIAL_AUDIT_ID: first audit id to select when not in skeleton mode + * + * Required DOM element ids: + * - auditSelector, previewCpu, previewRam, previewScope, previewStrategy + * - regionBadge, auditsCount, migrationTableBody, migrationCount, cpuDistributionChart + * - currentCpuMean, currentCpuStd, currentCpuStdBlock + * - elements with data-stats="..." for renderStats() + * + * Depends on: utils.js (formatBytes, getCSSVar, calculateStats, escapeHtml, formatAuditDate) + */ +(function() { + var cpuDistributionChart = null; + var escapeHtml = typeof window.escapeHtml === 'function' ? 
window.escapeHtml : function(text) { + if (text == null) return ''; + var s = String(text); + return s.replace(/&/g, '&').replace(//g, '>').replace(/"/g, '"').replace(/'/g, '''); + }; + + // --- Initialization: audit selector change (preview panel) --- + document.getElementById('auditSelector').addEventListener('change', function(e) { + var option = this.options[this.selectedIndex]; + if (!option) return; + document.getElementById('previewCpu').textContent = option.dataset.cpu || '1.0'; + document.getElementById('previewRam').textContent = option.dataset.ram || '1.0'; + document.getElementById('previewScope').textContent = option.dataset.scope || 'Full Cluster'; + document.getElementById('previewStrategy').textContent = option.dataset.strategy || 'Balanced'; + }); + + // --- Stats: setStat, setProgress, renderStats --- + function setStat(key, text) { + document.querySelectorAll('[data-stats="' + key + '"]').forEach(function(el) { + el.textContent = text; + el.classList.remove('animate-pulse'); + }); + } + function setProgress(key, value) { + document.querySelectorAll('[data-stats="' + key + '"]').forEach(function(el) { + if (el.tagName === 'PROGRESS') { + el.value = value; + el.classList.remove('animate-pulse'); + } + }); + } + + function renderStats(data) { + if (!data) return; + var el = function(k) { return document.querySelector('[data-stats="' + k + '"]'); }; + var regionBadge = document.getElementById('regionBadge'); + if (regionBadge) regionBadge.textContent = data.region && data.region.name ? 
data.region.name : '—'; + setStat('pcpu.usage', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1)); + setStat('pcpu.total', Number((data.pcpu && data.pcpu.total) || 0).toFixed(1)); + setStat('pcpu.used_percentage', Number((data.pcpu && data.pcpu.used_percentage) || 0).toFixed(1) + '%'); + setStat('pcpu.usage_val', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1) + ' CPU'); + setProgress('pcpu.progress', (data.pcpu && data.pcpu.used_percentage) || 0); + setStat('pcpu.free', Number((data.pcpu && data.pcpu.free) || 0).toFixed(1)); + var pramUsageGb = formatBytes(data.pram && data.pram.usage, 'GB'); + var pramTotalGb = formatBytes(data.pram && data.pram.total, 'GB'); + var pramFreeGb = formatBytes(data.pram && data.pram.free, 'GB'); + setStat('pram.usage_gb', pramUsageGb); + setStat('pram.total_gb', pramTotalGb); + setStat('pram.used_percentage', Number((data.pram && data.pram.used_percentage) || 0).toFixed(1) + '%'); + setStat('pram.usage_gb_val', pramUsageGb + ' GB'); + setProgress('pram.progress', (data.pram && data.pram.used_percentage) || 0); + setStat('pram.free_gb', pramFreeGb + ' GB'); + setStat('vm.active', String(data.vm && data.vm.active)); + setStat('vm.stopped', String(data.vm && data.vm.stopped)); + setStat('vm.count', String(data.vm && data.vm.count)); + setStat('flavors.first_name', data.flavors && data.flavors.first_common_flavor ? 
data.flavors.first_common_flavor.name : '—'); + setStat('vm.avg_cpu', Number((data.vm && data.vm.avg_cpu) || 0).toFixed(1)); + setStat('vm.density', Number((data.vm && data.vm.density) || 0).toFixed(1) + '/host'); + setStat('vcpu.allocated_total', ((data.vcpu && data.vcpu.allocated) || 0) + ' / ' + ((data.vcpu && data.vcpu.total) || 0) + ' vCPU'); + setProgress('vcpu.progress', (data.vcpu && data.vcpu.allocated_percentage) || 0); + setStat('vcpu.allocated_percentage', Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '%'); + var vcpuOver = el('vcpu.overcommit'); + if (vcpuOver) { + vcpuOver.textContent = 'overcommit: ' + Number((data.vcpu && data.vcpu.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vcpu && data.vcpu.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '% allocated'; + vcpuOver.classList.remove('animate-pulse'); + } + var vramAllocGb = formatBytes(data.vram && data.vram.allocated, 'GB'); + var vramTotalGb = formatBytes(data.vram && data.vram.total, 'GB'); + setStat('vram.allocated_total', vramAllocGb + ' / ' + vramTotalGb + ' GB'); + setProgress('vram.progress', (data.vram && data.vram.allocated_percentage) || 0); + setStat('vram.allocated_percentage', Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '%'); + var vramOver = el('vram.overcommit'); + if (vramOver) { + vramOver.textContent = 'overcommit: ' + Number((data.vram && data.vram.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vram && data.vram.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '% allocated'; + vramOver.classList.remove('animate-pulse'); + } + setStat('flavors.first_count', (data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0) + ' instances'); + var vmCount = data.vm && data.vm.count ? 
data.vm.count : 0; + var firstCount = data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0; + setStat('flavors.first_share', (vmCount ? Math.round(firstCount / vmCount * 100) : 0) + '%'); + setStat('flavors.second_name', data.flavors && data.flavors.second_common_flavor ? data.flavors.second_common_flavor.name : '—'); + setStat('flavors.second_count', data.flavors && data.flavors.second_common_flavor ? String(data.flavors.second_common_flavor.count) : '—'); + setStat('flavors.third_name', data.flavors && data.flavors.third_common_flavor ? data.flavors.third_common_flavor.name : '—'); + setStat('flavors.third_count', data.flavors && data.flavors.third_common_flavor ? String(data.flavors.third_common_flavor.count) : '—'); + document.querySelectorAll('[data-stats]').forEach(function(n) { n.classList.remove('animate-pulse'); }); + } + + // --- Audits: renderAudits, loadSelectedAudit --- + function renderAudits(auditsList) { + if (!auditsList || !auditsList.length) { + var countEl = document.getElementById('auditsCount'); + if (countEl) countEl.textContent = '0 available'; + var sel = document.getElementById('auditSelector'); + if (sel) { sel.disabled = false; sel.innerHTML = ''; } + return; + } + window.auditData = {}; + auditsList.forEach(function(a) { + window.auditData[a.id] = { + name: a.name, + migrations: typeof a.migrations === 'string' ? JSON.parse(a.migrations) : a.migrations, + hostData: { + labels: typeof a.host_labels === 'string' ? JSON.parse(a.host_labels) : a.host_labels, + current: typeof a.cpu_current === 'string' ? JSON.parse(a.cpu_current) : a.cpu_current, + projected: typeof a.cpu_projected === 'string' ? 
JSON.parse(a.cpu_projected) : a.cpu_projected + } + }; + }); + var sel = document.getElementById('auditSelector'); + if (sel) { + sel.disabled = false; + sel.innerHTML = ''; + auditsList.forEach(function(audit) { + var opt = document.createElement('option'); + opt.value = audit.id; + opt.setAttribute('data-cpu', audit.cpu_weight || '1.0'); + opt.setAttribute('data-ram', audit.ram_weight || '1.0'); + opt.setAttribute('data-scope', audit.scope || 'Full Cluster'); + opt.setAttribute('data-strategy', audit.strategy || 'Balanced'); + opt.setAttribute('data-goal', audit.goal || ''); + var dateStr = formatAuditDate(audit.created_at); + opt.textContent = audit.name + ' (' + dateStr + ')'; + sel.appendChild(opt); + }); + } + var countEl = document.getElementById('auditsCount'); + if (countEl) countEl.textContent = auditsList.length + ' available'; + if (auditsList.length > 0) { + document.getElementById('auditSelector').dispatchEvent(new Event('change')); + loadSelectedAudit(); + } + } + + window.loadSelectedAudit = function() { + var auditId = document.getElementById('auditSelector').value; + updateMigrationTable(auditId); + updateCPUCharts(auditId); + }; + + // --- Migration table: updateMigrationTable --- + function updateMigrationTable(auditId) { + var tbody = document.getElementById('migrationTableBody'); + var migrationCount = document.getElementById('migrationCount'); + var data = window.auditData && window.auditData[auditId]; + + if (!data || !data.migrations || data.migrations.length === 0) { + tbody.innerHTML = '
' + escapeHtml(migration.flavor) + '