From 6a27fecb132fdb39e2631dd60f05eaef9f6899c8 Mon Sep 17 00:00:00 2001 From: Nikolay Tatarinov Date: Sat, 7 Feb 2026 18:39:28 +0300 Subject: [PATCH 01/10] Add dashboard statistics module and refactor views for metrics handling - Introduced a new `stats.py` module to encapsulate dashboard statistics building and cache key constants. - Refactored `views.py` to utilize the new `build_stats` function for constructing metrics context, improving code organization and readability. - Updated Prometheus query handling to streamline metrics fetching with a new `fetch_dashboard_metrics` function. - Enhanced test cases to reflect changes in metrics fetching and context building, ensuring accurate functionality. - Added new HTML templates for displaying detailed resource allocation and flavor statistics on the dashboard. --- dashboard/prometheus_utils/query.py | 48 + dashboard/stats.py | 76 ++ dashboard/tests/test_views.py | 6 +- dashboard/views.py | 259 +----- static/js/dashboard.js | 309 +++++++ templates/dashboard/_allocation_flavors.html | 151 ++++ templates/dashboard/_audit_section.html | 71 ++ templates/dashboard/_chart_migrations.html | 53 ++ templates/dashboard/_stats_cards.html | 162 ++++ templates/index.html | 871 +------------------ watcher_visio/settings.py | 2 +- 11 files changed, 925 insertions(+), 1083 deletions(-) create mode 100644 dashboard/stats.py create mode 100644 static/js/dashboard.js create mode 100644 templates/dashboard/_allocation_flavors.html create mode 100644 templates/dashboard/_audit_section.html create mode 100644 templates/dashboard/_chart_migrations.html create mode 100644 templates/dashboard/_stats_cards.html diff --git a/dashboard/prometheus_utils/query.py b/dashboard/prometheus_utils/query.py index 0a5d093..03f4553 100644 --- a/dashboard/prometheus_utils/query.py +++ b/dashboard/prometheus_utils/query.py @@ -1,9 +1,37 @@ +from concurrent.futures import ThreadPoolExecutor, as_completed + import requests from watcher_visio.settings import 
PROMETHEUS_URL # Timeout for lightweight health check (seconds) CHECK_TIMEOUT = 5 +# Dashboard Prometheus queries (query_key -> query string), run in parallel +DASHBOARD_QUERIES = { + "hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})", + "pcpu_total": ( + "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) " + "without (cpu,mode))" + ), + "pcpu_usage": "sum(node_load5{job='node_exporter_compute'})", + "vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)", + "vcpu_overcommit_max": ( + "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})" + ), + "pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})", + "pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})", + "vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)", + "vram_overcommit_max": ( + "avg(avg_over_time(" + "openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))" + ), + "vm_count": "sum(libvirt_domain_state_code)", + "vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})", +} + +# Keys that should be parsed as float (rest as int) +DASHBOARD_FLOAT_KEYS = frozenset(("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max")) + def check_prometheus() -> dict: """ @@ -36,3 +64,23 @@ def query_prometheus(query: str) -> str | list[str]: return result else: return result[0]["value"][1] + + +def fetch_dashboard_metrics() -> dict: + """Run all dashboard Prometheus queries in parallel and return a dict of name -> value.""" + result = {} + with ThreadPoolExecutor(max_workers=len(DASHBOARD_QUERIES)) as executor: + future_to_key = { + executor.submit(query_prometheus, query=q): key for key, q in DASHBOARD_QUERIES.items() + } + for future in as_completed(future_to_key): + key = future_to_key[future] + try: + raw = future.result() + if key in DASHBOARD_FLOAT_KEYS: + result[key] = float(raw) + else: + result[key] = int(raw) + except (ValueError, 
TypeError): + result[key] = float(0) if key in DASHBOARD_FLOAT_KEYS else 0 + return result diff --git a/dashboard/stats.py b/dashboard/stats.py new file mode 100644 index 0000000..a6eb75a --- /dev/null +++ b/dashboard/stats.py @@ -0,0 +1,76 @@ +"""Dashboard statistics building and cache key constants.""" + +# Cache keys used by views +CACHE_KEY_STATS = "dashboard_stats" +CACHE_KEY_AUDITS = "dashboard_audits" +CACHE_KEY_CURRENT_CLUSTER = "dashboard_current_cluster" +CACHE_KEY_SOURCE_STATUS = "dashboard_source_status" + +# Empty structures for skeleton context (same shape as build_stats output) +EMPTY_FLAVORS = { + "first_common_flavor": {"name": "—", "count": 0}, + "second_common_flavor": None, + "third_common_flavor": None, +} + + +def build_stats(metrics: dict, region_name: str, flavors: dict) -> dict: + """ + Build stats dict from raw metrics and OpenStack-derived data. + Returns region, pcpu, vcpu, pram, vram, vm, flavors (no audits/current_cluster). + """ + hosts_total = metrics.get("hosts_total") or 1 + pcpu_total = metrics.get("pcpu_total", 0) + pcpu_usage = metrics.get("pcpu_usage", 0) + vcpu_allocated = metrics.get("vcpu_allocated", 0) + vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0) + pram_total = metrics.get("pram_total", 0) + pram_usage = metrics.get("pram_usage", 0) + vram_allocated = metrics.get("vram_allocated", 0) + vram_overcommit_max = metrics.get("vram_overcommit_max", 0) + vm_count = metrics.get("vm_count", 0) + vm_active = metrics.get("vm_active", 0) + + vcpu_total = pcpu_total * vcpu_overcommit_max + vram_total = pram_total * vram_overcommit_max + + return { + "region": {"name": region_name, "hosts_total": hosts_total}, + "pcpu": { + "total": pcpu_total, + "usage": pcpu_usage, + "free": pcpu_total - pcpu_usage, + "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0, + }, + "vcpu": { + "total": vcpu_total, + "allocated": vcpu_allocated, + "free": vcpu_total - vcpu_allocated, + "allocated_percentage": 
(vcpu_allocated / vcpu_total * 100) if vcpu_total else 0, + "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0, + "overcommit_max": vcpu_overcommit_max, + }, + "pram": { + "total": pram_total, + "usage": pram_usage, + "free": pram_total - pram_usage, + "used_percentage": (pram_usage / pram_total * 100) if pram_total else 0, + }, + "vram": { + "total": vram_total, + "allocated": vram_allocated, + "free": vram_total - vram_allocated, + "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0, + "overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0, + "overcommit_max": vram_overcommit_max, + }, + "vm": { + "count": vm_count, + "active": vm_active, + "stopped": vm_count - vm_active, + "avg_cpu": vcpu_allocated / vm_count if vm_count else 0, + "avg_ram": vram_allocated / vm_count if vm_count else 0, + "density": vm_count / hosts_total if hosts_total else 0, + }, + "flavors": flavors, + } diff --git a/dashboard/tests/test_views.py b/dashboard/tests/test_views.py index 042fff6..813e0dd 100644 --- a/dashboard/tests/test_views.py +++ b/dashboard/tests/test_views.py @@ -96,7 +96,7 @@ class CollectContextTest(TestCase): return conn @patch("dashboard.views.get_current_cluster_cpu") - @patch("dashboard.views._fetch_prometheus_metrics") + @patch("dashboard.views.fetch_dashboard_metrics") @patch("dashboard.views.get_audits") @patch("dashboard.views.get_flavor_list") @patch("dashboard.views.get_connection") @@ -152,8 +152,6 @@ class CollectContextTest(TestCase): self.assertEqual(context["flavors"]["first_common_flavor"]["name"], "m1.small") self.assertEqual(len(context["audits"]), 1) # Serialized for JS - import json - self.assertIsInstance(context["audits"][0]["migrations"], str) self.assertEqual(json.loads(context["audits"][0]["host_labels"]), ["h0", "h1"]) self.assertIn("current_cluster", context) @@ -167,7 +165,7 @@ class ApiStatsTest(TestCase): def setUp(self): self.factory = RequestFactory() - 
@patch("dashboard.views._fetch_prometheus_metrics") + @patch("dashboard.views.fetch_dashboard_metrics") @patch("dashboard.views.get_flavor_list") @patch("dashboard.views.get_connection") def test_api_stats_returns_json_with_expected_keys( diff --git a/dashboard/views.py b/dashboard/views.py index 3da163b..c6aa561 100644 --- a/dashboard/views.py +++ b/dashboard/views.py @@ -1,5 +1,4 @@ import json -from concurrent.futures import ThreadPoolExecutor, as_completed from django.conf import settings from django.core.cache import cache @@ -10,53 +9,32 @@ from dashboard.mock_data import get_mock_context from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu from dashboard.openstack_utils.connect import check_openstack, get_connection from dashboard.openstack_utils.flavor import get_flavor_list -from dashboard.prometheus_utils.query import check_prometheus, query_prometheus - -# Prometheus queries run in parallel (query_key -> query string) -_PROMETHEUS_QUERIES = { - "hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})", - "pcpu_total": ( - "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) " - "without (cpu,mode))" - ), - "pcpu_usage": "sum(node_load5{job='node_exporter_compute'})", - "vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)", - "vcpu_overcommit_max": ( - "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})" - ), - "pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})", - "pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})", - "vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)", - "vram_overcommit_max": ( - "avg(avg_over_time(" - "openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))" - ), - "vm_count": "sum(libvirt_domain_state_code)", - "vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})", -} +from dashboard.prometheus_utils.query import 
check_prometheus, fetch_dashboard_metrics +from dashboard.stats import ( + CACHE_KEY_AUDITS, + CACHE_KEY_CURRENT_CLUSTER, + CACHE_KEY_SOURCE_STATUS, + CACHE_KEY_STATS, + EMPTY_FLAVORS, + build_stats, +) -def _fetch_prometheus_metrics(): - """Run all Prometheus queries in parallel and return a dict of name -> value.""" - result = {} - with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor: - future_to_key = { - executor.submit(query_prometheus, query=q): key - for key, q in _PROMETHEUS_QUERIES.items() - } - for future in as_completed(future_to_key): - key = future_to_key[future] - try: - raw = future.result() - if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max"): - result[key] = float(raw) - else: - result[key] = int(raw) - except (ValueError, TypeError): - result[key] = ( - 0 if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max") else 0 - ) - return result +def _empty_metrics(): + """Metrics dict with zero/default values for skeleton context.""" + return { + "hosts_total": 0, + "pcpu_total": 0, + "pcpu_usage": 0, + "vcpu_allocated": 0, + "vcpu_overcommit_max": 0, + "pram_total": 0, + "pram_usage": 0, + "vram_allocated": 0, + "vram_overcommit_max": 0, + "vm_count": 0, + "vm_active": 0, + } def collect_context(): @@ -64,81 +42,14 @@ def collect_context(): region_name = connection._compute_region flavors = get_flavor_list(connection=connection) audits = get_audits(connection=connection) - - metrics = _fetch_prometheus_metrics() - hosts_total = metrics.get("hosts_total") or 1 - pcpu_total = metrics.get("pcpu_total", 0) - pcpu_usage = metrics.get("pcpu_usage", 0) - vcpu_allocated = metrics.get("vcpu_allocated", 0) - vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0) - pram_total = metrics.get("pram_total", 0) - pram_usage = metrics.get("pram_usage", 0) - vram_allocated = metrics.get("vram_allocated", 0) - vram_overcommit_max = metrics.get("vram_overcommit_max", 0) - vm_count = metrics.get("vm_count", 0) - 
vm_active = metrics.get("vm_active", 0) - - vcpu_total = pcpu_total * vcpu_overcommit_max - vram_total = pram_total * vram_overcommit_max - - context = { - # <--- Region data ---> - "region": { - "name": region_name, - "hosts_total": hosts_total, - }, - # <--- CPU data ---> - # pCPU data - "pcpu": { - "total": pcpu_total, - "usage": pcpu_usage, - "free": pcpu_total - pcpu_usage, - "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0, - }, - # vCPU data - "vcpu": { - "total": vcpu_total, - "allocated": vcpu_allocated, - "free": vcpu_total - vcpu_allocated, - "allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0, - "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0, - "overcommit_max": vcpu_overcommit_max, - }, - # <--- RAM data ---> - # pRAM data - "pram": { - "total": pram_total, - "usage": pram_usage, - "free": pram_total - pram_usage, - "used_percentage": (pram_usage / pram_total * 100) if pram_total else 0, - }, - # vRAM data - "vram": { - "total": vram_total, - "allocated": vram_allocated, - "free": vram_total - vram_allocated, - "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0, - "overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0, - "overcommit_max": vram_overcommit_max, - }, - # <--- VM data ---> - "vm": { - "count": vm_count, - "active": vm_active, - "stopped": vm_count - vm_active, - "avg_cpu": vcpu_allocated / vm_count if vm_count else 0, - "avg_ram": vram_allocated / vm_count if vm_count else 0, - "density": vm_count / hosts_total if hosts_total else 0, - }, - "flavors": flavors, - "audits": audits, - } + metrics = fetch_dashboard_metrics() + context = build_stats(metrics, region_name, flavors) + context["audits"] = audits current_cluster = get_current_cluster_cpu(connection) context["current_cluster"] = { "host_labels": json.dumps(current_cluster["host_labels"]), "cpu_current": json.dumps(current_cluster["cpu_current"]), } - # Serialize 
audit list fields for JavaScript so cached context is render-ready for audit in context["audits"]: audit["migrations"] = json.dumps(audit["migrations"]) audit["host_labels"] = json.dumps(audit["host_labels"]) @@ -152,60 +63,8 @@ def collect_stats(): connection = get_connection() region_name = connection._compute_region flavors = get_flavor_list(connection=connection) - metrics = _fetch_prometheus_metrics() - hosts_total = metrics.get("hosts_total") or 1 - pcpu_total = metrics.get("pcpu_total", 0) - pcpu_usage = metrics.get("pcpu_usage", 0) - vcpu_allocated = metrics.get("vcpu_allocated", 0) - vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0) - pram_total = metrics.get("pram_total", 0) - pram_usage = metrics.get("pram_usage", 0) - vram_allocated = metrics.get("vram_allocated", 0) - vram_overcommit_max = metrics.get("vram_overcommit_max", 0) - vm_count = metrics.get("vm_count", 0) - vm_active = metrics.get("vm_active", 0) - vcpu_total = pcpu_total * vcpu_overcommit_max - vram_total = pram_total * vram_overcommit_max - return { - "region": {"name": region_name, "hosts_total": hosts_total}, - "pcpu": { - "total": pcpu_total, - "usage": pcpu_usage, - "free": pcpu_total - pcpu_usage, - "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0, - }, - "vcpu": { - "total": vcpu_total, - "allocated": vcpu_allocated, - "free": vcpu_total - vcpu_allocated, - "allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0, - "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0, - "overcommit_max": vcpu_overcommit_max, - }, - "pram": { - "total": pram_total, - "usage": pram_usage, - "free": pram_total - pram_usage, - "used_percentage": (pram_usage / pram_total * 100) if pram_total else 0, - }, - "vram": { - "total": vram_total, - "allocated": vram_allocated, - "free": vram_total - vram_allocated, - "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0, - "overcommit_ratio": (vram_allocated / 
pram_total) if pram_total else 0, - "overcommit_max": vram_overcommit_max, - }, - "vm": { - "count": vm_count, - "active": vm_active, - "stopped": vm_count - vm_active, - "avg_cpu": vcpu_allocated / vm_count if vm_count else 0, - "avg_ram": vram_allocated / vm_count if vm_count else 0, - "density": vm_count / hosts_total if hosts_total else 0, - }, - "flavors": flavors, - } + metrics = fetch_dashboard_metrics() + return build_stats(metrics, region_name, flavors) def collect_audits(): @@ -222,40 +81,14 @@ def collect_audits(): def _skeleton_context(): """Minimal context for skeleton-only index render.""" - empty_flavors = { - "first_common_flavor": {"name": "—", "count": 0}, - "second_common_flavor": None, - "third_common_flavor": None, - } - return { - "skeleton": True, - "region": {"name": "—", "hosts_total": 0}, - "pcpu": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0}, - "pram": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0}, - "vcpu": { - "total": 0, - "allocated": 0, - "free": 0, - "allocated_percentage": 0, - "overcommit_ratio": 0, - "overcommit_max": 0, - }, - "vram": { - "total": 0, - "allocated": 0, - "free": 0, - "allocated_percentage": 0, - "overcommit_ratio": 0, - "overcommit_max": 0, - }, - "vm": {"count": 0, "active": 0, "stopped": 0, "avg_cpu": 0, "avg_ram": 0, "density": 0}, - "flavors": empty_flavors, - "audits": [], - "current_cluster": { - "host_labels": "[]", - "cpu_current": "[]", - }, + context = build_stats(_empty_metrics(), "—", EMPTY_FLAVORS) + context["skeleton"] = True + context["audits"] = [] + context["current_cluster"] = { + "host_labels": "[]", + "cpu_current": "[]", } + return context def index(request): @@ -267,28 +100,25 @@ def index(request): def api_stats(request): - cache_key = "dashboard_stats" cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120) - data = cache.get(cache_key) + data = cache.get(CACHE_KEY_STATS) if data is None: data = collect_stats() - cache.set(cache_key, data, timeout=cache_ttl) + 
cache.set(CACHE_KEY_STATS, data, timeout=cache_ttl) return JsonResponse(data) def api_audits(request): - cache_key_audits = "dashboard_audits" - cache_key_cluster = "dashboard_current_cluster" cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120) - audits = cache.get(cache_key_audits) - current_cluster = cache.get(cache_key_cluster) + audits = cache.get(CACHE_KEY_AUDITS) + current_cluster = cache.get(CACHE_KEY_CURRENT_CLUSTER) if audits is None: audits = collect_audits() - cache.set(cache_key_audits, audits, timeout=cache_ttl) + cache.set(CACHE_KEY_AUDITS, audits, timeout=cache_ttl) if current_cluster is None: connection = get_connection() current_cluster = get_current_cluster_cpu(connection) - cache.set(cache_key_cluster, current_cluster, timeout=cache_ttl) + cache.set(CACHE_KEY_CURRENT_CLUSTER, current_cluster, timeout=cache_ttl) return JsonResponse({"audits": audits, "current_cluster": current_cluster}) @@ -302,13 +132,12 @@ def api_source_status(request): } ) - cache_key = "dashboard_source_status" cache_ttl = getattr(settings, "SOURCE_STATUS_CACHE_TTL", 30) - data = cache.get(cache_key) + data = cache.get(CACHE_KEY_SOURCE_STATUS) if data is None: data = { "prometheus": check_prometheus(), "openstack": check_openstack(), } - cache.set(cache_key, data, timeout=cache_ttl) + cache.set(CACHE_KEY_SOURCE_STATUS, data, timeout=cache_ttl) return JsonResponse(data) diff --git a/static/js/dashboard.js b/static/js/dashboard.js new file mode 100644 index 0000000..cd5c444 --- /dev/null +++ b/static/js/dashboard.js @@ -0,0 +1,309 @@ +/** + * Dashboard logic: stats rendering, audit selector, CPU chart, migration table. + * Expects globals: SKELETON_MODE, CURRENT_CLUSTER, auditData (set by index.html). 
+ * Depends on: utils.js (formatBytes, getCSSVar, calculateStats) + */ +(function() { + var cpuDistributionChart = null; + + document.getElementById('auditSelector').addEventListener('change', function(e) { + var option = this.options[this.selectedIndex]; + if (!option) return; + document.getElementById('previewCpu').textContent = option.dataset.cpu || '1.0'; + document.getElementById('previewRam').textContent = option.dataset.ram || '1.0'; + document.getElementById('previewScope').textContent = option.dataset.scope || 'Full Cluster'; + document.getElementById('previewStrategy').textContent = option.dataset.strategy || 'Balanced'; + }); + + function setStat(key, text) { + document.querySelectorAll('[data-stats="' + key + '"]').forEach(function(el) { + el.textContent = text; + el.classList.remove('animate-pulse'); + }); + } + function setProgress(key, value) { + document.querySelectorAll('[data-stats="' + key + '"]').forEach(function(el) { + if (el.tagName === 'PROGRESS') { + el.value = value; + el.classList.remove('animate-pulse'); + } + }); + } + + function renderStats(data) { + if (!data) return; + var el = function(k) { return document.querySelector('[data-stats="' + k + '"]'); }; + var regionBadge = document.getElementById('regionBadge'); + if (regionBadge) regionBadge.textContent = data.region && data.region.name ? 
data.region.name : '—'; + setStat('pcpu.usage', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1)); + setStat('pcpu.total', String((data.pcpu && data.pcpu.total) || 0)); + setStat('pcpu.used_percentage', Number((data.pcpu && data.pcpu.used_percentage) || 0).toFixed(1) + '%'); + setStat('pcpu.usage_val', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1) + ' CPU'); + setProgress('pcpu.progress', (data.pcpu && data.pcpu.used_percentage) || 0); + setStat('pcpu.free', String((data.pcpu && data.pcpu.free) || 0)); + var pramUsageGb = formatBytes(data.pram && data.pram.usage, 'GB'); + var pramTotalGb = formatBytes(data.pram && data.pram.total, 'GB'); + var pramFreeGb = formatBytes(data.pram && data.pram.free, 'GB'); + setStat('pram.usage_gb', pramUsageGb); + setStat('pram.total_gb', pramTotalGb); + setStat('pram.used_percentage', Number((data.pram && data.pram.used_percentage) || 0).toFixed(1) + '%'); + setStat('pram.usage_gb_val', pramUsageGb + ' GB'); + setProgress('pram.progress', (data.pram && data.pram.used_percentage) || 0); + setStat('pram.free_gb', pramFreeGb + ' GB'); + setStat('vm.active', String(data.vm && data.vm.active)); + setStat('vm.stopped', String(data.vm && data.vm.stopped)); + setStat('vm.count', String(data.vm && data.vm.count)); + setStat('flavors.first_name', data.flavors && data.flavors.first_common_flavor ? 
data.flavors.first_common_flavor.name : '—'); + setStat('vm.avg_cpu', Number((data.vm && data.vm.avg_cpu) || 0).toFixed(1)); + setStat('vm.density', Number((data.vm && data.vm.density) || 0).toFixed(1) + '/host'); + setStat('vcpu.allocated_total', ((data.vcpu && data.vcpu.allocated) || 0) + ' / ' + ((data.vcpu && data.vcpu.total) || 0) + ' vCPU'); + setProgress('vcpu.progress', (data.vcpu && data.vcpu.allocated_percentage) || 0); + setStat('vcpu.allocated_percentage', Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '%'); + var vcpuOver = el('vcpu.overcommit'); + if (vcpuOver) { + vcpuOver.textContent = 'overcommit: ' + Number((data.vcpu && data.vcpu.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vcpu && data.vcpu.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '% allocated'; + vcpuOver.classList.remove('animate-pulse'); + } + var vramAllocGb = formatBytes(data.vram && data.vram.allocated, 'GB'); + var vramTotalGb = formatBytes(data.vram && data.vram.total, 'GB'); + setStat('vram.allocated_total', vramAllocGb + ' / ' + vramTotalGb + ' GB'); + setProgress('vram.progress', (data.vram && data.vram.allocated_percentage) || 0); + setStat('vram.allocated_percentage', Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '%'); + var vramOver = el('vram.overcommit'); + if (vramOver) { + vramOver.textContent = 'overcommit: ' + Number((data.vram && data.vram.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vram && data.vram.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '% allocated'; + vramOver.classList.remove('animate-pulse'); + } + setStat('flavors.first_count', (data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0) + ' instances'); + var vmCount = data.vm && data.vm.count ? 
data.vm.count : 0; + var firstCount = data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0; + setStat('flavors.first_share', (vmCount ? Math.round(firstCount / vmCount * 100) : 0) + '%'); + setStat('flavors.second_name', data.flavors && data.flavors.second_common_flavor ? data.flavors.second_common_flavor.name : '—'); + setStat('flavors.second_count', data.flavors && data.flavors.second_common_flavor ? String(data.flavors.second_common_flavor.count) : '—'); + setStat('flavors.third_name', data.flavors && data.flavors.third_common_flavor ? data.flavors.third_common_flavor.name : '—'); + setStat('flavors.third_count', data.flavors && data.flavors.third_common_flavor ? String(data.flavors.third_common_flavor.count) : '—'); + document.querySelectorAll('[data-stats]').forEach(function(n) { n.classList.remove('animate-pulse'); }); + } + + function renderAudits(auditsList) { + if (!auditsList || !auditsList.length) { + var countEl = document.getElementById('auditsCount'); + if (countEl) countEl.textContent = '0 available'; + var sel = document.getElementById('auditSelector'); + if (sel) { sel.disabled = false; sel.innerHTML = ''; } + return; + } + window.auditData = {}; + auditsList.forEach(function(a) { + window.auditData[a.id] = { + name: a.name, + migrations: typeof a.migrations === 'string' ? JSON.parse(a.migrations) : a.migrations, + hostData: { + labels: typeof a.host_labels === 'string' ? JSON.parse(a.host_labels) : a.host_labels, + current: typeof a.cpu_current === 'string' ? JSON.parse(a.cpu_current) : a.cpu_current, + projected: typeof a.cpu_projected === 'string' ? 
JSON.parse(a.cpu_projected) : a.cpu_projected + } + }; + }); + var sel = document.getElementById('auditSelector'); + if (sel) { + sel.disabled = false; + sel.innerHTML = ''; + auditsList.forEach(function(audit) { + var opt = document.createElement('option'); + opt.value = audit.id; + opt.setAttribute('data-cpu', audit.cpu_weight || '1.0'); + opt.setAttribute('data-ram', audit.ram_weight || '1.0'); + opt.setAttribute('data-scope', audit.scope || 'Full Cluster'); + opt.setAttribute('data-strategy', audit.strategy || 'Balanced'); + opt.setAttribute('data-goal', audit.goal || ''); + var dateStr = audit.created_at ? new Date(audit.created_at).toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) : ''; + opt.textContent = audit.name + ' (' + dateStr + ')'; + sel.appendChild(opt); + }); + } + var countEl = document.getElementById('auditsCount'); + if (countEl) countEl.textContent = auditsList.length + ' available'; + if (auditsList.length > 0) { + document.getElementById('auditSelector').dispatchEvent(new Event('change')); + loadSelectedAudit(); + } + } + + window.loadSelectedAudit = function() { + var auditId = document.getElementById('auditSelector').value; + updateMigrationTable(auditId); + updateCPUCharts(auditId); + }; + + function updateMigrationTable(auditId) { + var tbody = document.getElementById('migrationTableBody'); + var migrationCount = document.getElementById('migrationCount'); + var data = window.auditData && window.auditData[auditId]; + + if (!data || !data.migrations || data.migrations.length === 0) { + tbody.innerHTML = 'No migration actions recommended'; + migrationCount.textContent = '0 actions'; + return; + } + + var html = ''; + data.migrations.forEach(function(migration) { + var impact = migration.impact || 'Low'; + var impactClass = { 'Low': 'badge-success', 'Medium': 'badge-warning', 'High': 'badge-error' }[impact] || 'badge-neutral'; + html += '
' + migration.instanceName + '
' + migration.source + '' + migration.destination + '
' + migration.flavor + '' + impact + ''; + }); + tbody.innerHTML = html; + migrationCount.textContent = data.migrations.length + ' action' + (data.migrations.length !== 1 ? 's' : ''); + } + + function updateCPUCharts(auditId) { + var data = window.auditData && window.auditData[auditId]; + if (!data || !data.hostData) return; + + var ctx = document.getElementById('cpuDistributionChart').getContext('2d'); + var currentStats = calculateStats(data.hostData.current); + + document.getElementById('currentCpuMean').textContent = currentStats.mean.toFixed(1); + document.getElementById('currentCpuStd').textContent = (currentStats.std * 0.5).toFixed(1); + + if (cpuDistributionChart) cpuDistributionChart.destroy(); + + var colors = { + primary: getCSSVar('--color-primary'), + secondary: getCSSVar('--color-secondary'), + accent: getCSSVar('--color-accent'), + neutral: getCSSVar('--color-neutral'), + info: getCSSVar('--color-info'), + success: getCSSVar('--color-success'), + warning: getCSSVar('--color-warning'), + error: getCSSVar('--color-error') + }; + var textColor = getCSSVar('--color-base-content'); + var gridColor = getCSSVar('--chart-grid-color') || textColor; + + cpuDistributionChart = new Chart(ctx, { + type: 'bar', + data: { + labels: data.hostData.labels, + datasets: [ + { label: 'Current', data: data.hostData.current.slice(), backgroundColor: colors.info + '40', borderColor: colors.info, borderWidth: 1, borderRadius: 3 }, + { label: 'Projected', data: data.hostData.projected.slice(), backgroundColor: colors.warning + '40', borderColor: colors.warning, borderWidth: 1, borderRadius: 3 } + ] + }, + options: { + responsive: true, + maintainAspectRatio: false, + animation: { + onComplete: function() { + var chart = this.chart || this; + if (chart._hidingDataset === undefined) return; + var i = chart._hidingDataset; + chart.getDatasetMeta(i).hidden = true; + chart.data.datasets[i].data = chart._cpuOriginalData[i].slice(); + delete chart._hidingDataset; + 
chart.update('none'); + } + }, + plugins: { + legend: { + display: true, + position: 'top', + align: 'center', + onClick: function(e, legendItem, legend) { + var i = legendItem.datasetIndex; + var chart = legend.chart; + var len = chart.data.labels.length; + if (chart.isDatasetVisible(i)) { + chart._hidingDataset = i; + chart.data.datasets[i].data = Array(len).fill(0); + chart.update(); + } else { + chart.data.datasets[i].data = Array(len).fill(0); + chart.show(i); + chart.update('none'); + chart.data.datasets[i].data = chart._cpuOriginalData[i].slice(); + chart.update(); + } + }, + labels: { + usePointStyle: true, + pointStyle: 'rect', + boxWidth: 14, + boxHeight: 14, + padding: 12, + color: textColor, + generateLabels: function(chart) { + var datasets = chart.data.datasets; + var labelColor = getCSSVar('--color-base-content') || textColor; + return datasets.map(function(ds, i) { + return { text: ds.label, fillStyle: ds.borderColor, strokeStyle: ds.borderColor, lineWidth: 1, fontColor: labelColor, color: labelColor, hidden: !chart.isDatasetVisible(i), datasetIndex: i }; + }); + } + } + }, + tooltip: { + callbacks: { label: function(ctx) { return ctx.dataset.label + ': ' + Number(ctx.parsed.y).toFixed(2) + '% CPU'; } } + }, + annotation: { + annotations: { + MeanLine: { type: 'line', yMin: currentStats.mean, yMax: currentStats.mean, borderColor: colors.success, borderWidth: 2, borderDash: [] }, + upperStdLine: { type: 'line', yMin: currentStats.mean + currentStats.std * 0.5, yMax: currentStats.mean + currentStats.std * 0.5, borderColor: colors.error, borderWidth: 1, borderDash: [5, 5] }, + lowerStdLine: { type: 'line', yMin: currentStats.mean > currentStats.std * 0.5 ? currentStats.mean - currentStats.std * 0.5 : 0, yMax: currentStats.mean > currentStats.std * 0.5 ? 
currentStats.mean - currentStats.std * 0.5 : 0, borderColor: colors.error, borderWidth: 1, borderDash: [5, 5] } + } + } + }, + scales: { + y: { beginAtZero: true, max: 100, grid: { drawBorder: false, color: gridColor }, ticks: { color: textColor, callback: function(value) { return value + '%'; } } }, + x: { grid: { display: false }, ticks: { display: false }, barPercentage: 1, categoryPercentage: 0.85 } + } + } + }); + cpuDistributionChart._cpuOriginalData = [ data.hostData.current.slice(), data.hostData.projected.slice() ]; + } + + document.addEventListener('DOMContentLoaded', function() { + if (typeof SKELETON_MODE !== 'undefined' && SKELETON_MODE) { + Promise.all([ + fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); }), + fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); }) + ]).then(function(results) { + renderStats(results[0]); + renderAudits(results[1].audits); + if (!results[1].audits || results[1].audits.length === 0) { + var cc = results[1].current_cluster; + if (cc && cc.host_labels && cc.cpu_current && cc.host_labels.length) { + window.auditData = window.auditData || {}; + window.auditData.current = { hostData: { labels: cc.host_labels, current: cc.cpu_current, projected: cc.cpu_current } }; + updateCPUCharts('current'); + } + } + }).catch(function(err) { + var msg = err.status ? 'Failed to load data (' + err.status + ')' : 'Failed to load data'; + var countEl = document.getElementById('auditsCount'); + if (countEl) countEl.textContent = msg; + fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d) renderStats(d); }); + fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d && d.audits) renderAudits(d.audits); }); + }); + } else { + var initialAudit = typeof INITIAL_AUDIT_ID !== 'undefined' ? 
INITIAL_AUDIT_ID : ''; + if (initialAudit && window.auditData && window.auditData[initialAudit]) { + document.getElementById('auditSelector').dispatchEvent(new Event('change')); + loadSelectedAudit(); + } else if (!initialAudit && typeof CURRENT_CLUSTER !== 'undefined' && CURRENT_CLUSTER && CURRENT_CLUSTER.host_labels && CURRENT_CLUSTER.host_labels.length) { + window.auditData = window.auditData || {}; + window.auditData.current = { hostData: { labels: CURRENT_CLUSTER.host_labels, current: CURRENT_CLUSTER.cpu_current, projected: CURRENT_CLUSTER.cpu_current } }; + updateCPUCharts('current'); + } + } + }); + + document.addEventListener('themechange', function() { + if (cpuDistributionChart) { + var auditId = document.getElementById('auditSelector').value; + cpuDistributionChart.destroy(); + cpuDistributionChart = null; + if (auditId) updateCPUCharts(auditId); + } + }); +})(); diff --git a/templates/dashboard/_allocation_flavors.html b/templates/dashboard/_allocation_flavors.html new file mode 100644 index 0000000..efb177d --- /dev/null +++ b/templates/dashboard/_allocation_flavors.html @@ -0,0 +1,151 @@ +{% load mathfilters %} + +
+ +
+
+

+ + + + Resource Allocation +

+ {% if skeleton %} +
+
+ CPU Allocation + — / — vCPU +
+
+ + —% +
+
+
+
+
+ RAM Allocation + — / — GB +
+
+ + —% +
+
+
+ {% else %} + +
+
+ CPU Allocation + {{ vcpu.allocated }} / {{ vcpu.total }} vCPU +
+
+ + {{ vcpu.allocated_percentage|floatformat:1 }}% +
+
+ overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max|floatformat:1 }} + {{ vcpu.allocated_percentage|floatformat:1 }}% allocated +
+
+ + +
+
+ RAM Allocation + {{ vram.allocated|convert_bytes }} / {{ vram.total|convert_bytes }} GB +
+
+ + {{ vram.allocated_percentage|floatformat:1 }}% +
+
+ overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max|floatformat:1 }} + {{ vram.allocated_percentage|floatformat:1 }}% allocated +
+
+ {% endif %} +
+
+ + +
+
+

+ + + + + Top Flavors +

+ {% if skeleton %} +
+
+
+ + — instances +
+
+ Share + —% +
+
+
+
+
+
+ +
+ +
+
+
+
+ +
+ +
+
+
+ {% else %} +
+ +
+
+ {{ flavors.first_common_flavor.name }} + {{ flavors.first_common_flavor.count }} instances +
+
+ Share + {{ flavors.first_common_flavor.count|div:vm.count|mul:100|floatformat:0 }}% +
+
+ + +
+ {% if flavors.second_common_flavor %} +
+
+
+ {{ flavors.second_common_flavor.name }} +
+ {{ flavors.second_common_flavor.count }} +
+ {% endif %} + + {% if flavors.third_common_flavor %} +
+
+
+ {{ flavors.third_common_flavor.name }} +
+ {{ flavors.third_common_flavor.count }} +
+ {% endif %} +
+
+ {% endif %} +
+
+
diff --git a/templates/dashboard/_audit_section.html b/templates/dashboard/_audit_section.html new file mode 100644 index 0000000..37a7654 --- /dev/null +++ b/templates/dashboard/_audit_section.html @@ -0,0 +1,71 @@ + +
+
+
+
+
+

Audit Analysis

+
Select an audit to analyze resource distribution
+
+
+ {% if skeleton %}Loading…{% else %}{{ audits|length }} available{% endif %} + +
+
+ +
+ + +
+
+
+
diff --git a/templates/dashboard/_chart_migrations.html b/templates/dashboard/_chart_migrations.html new file mode 100644 index 0000000..5af42aa --- /dev/null +++ b/templates/dashboard/_chart_migrations.html @@ -0,0 +1,53 @@ + +
+
+
+

CPU Distribution (Current vs Projected)

+
+ +
+
+
+
+ Mean: 0% +
+
+
+ ±0.5σ: 0% +
+
+
+
+
+ + +
+
+
+
+

Migration Actions

+
Select audit
+
+ +
+ + + + + + + + + + + + + + +
InstanceSource → DestinationFlavorImpact
+ No audit selected. Load an audit to view migration recommendations. +
+
+
+
+
diff --git a/templates/dashboard/_stats_cards.html b/templates/dashboard/_stats_cards.html new file mode 100644 index 0000000..5f935fa --- /dev/null +++ b/templates/dashboard/_stats_cards.html @@ -0,0 +1,162 @@ +{% load mathfilters %} + +
+ +
+
+ {% if skeleton %} +
+
+

CPU Utilization

+
/ CPU
+
+
—%
+
+
+
+ Used + +
+ +
+ Free + +
+
+ {% else %} +
+
+

CPU Utilization

+
{{ pcpu.usage|floatformat:1 }} / {{ pcpu.total }} CPU
+
+
{{ pcpu.used_percentage|floatformat:1 }}%
+
+
+
+ Used + {{ pcpu.usage|floatformat:1 }} CPU +
+ +
+ Free + {{ pcpu.free }} CPU +
+
+ {% endif %} +
+
+ + +
+
+ {% if skeleton %} +
+
+

RAM Utilization

+
/ GB
+
+
—%
+
+
+
+ Used + +
+ +
+ Free + +
+
+ {% else %} +
+
+

RAM Utilization

+
{{ pram.usage|convert_bytes }} / {{ pram.total|convert_bytes }} GB
+
+
{{ pram.used_percentage|floatformat:1 }}%
+
+
+
+ Used + {{ pram.usage|convert_bytes }} GB +
+ +
+ Free + {{ pram.free|convert_bytes }} GB +
+
+ {% endif %} +
+
+ + +
+
+ {% if skeleton %} +
+
+

Instances

+
active / stopped
+
+
+
+
+
+
+
+ Most Used Flavor +
+ +
+
+
+
+ Avg. vCPU/VM +
+ +
+
+
+
+ Density +
+ +
+
+ {% else %} +
+
+

Instances

+
{{ vm.active }} active / {{ vm.stopped }} stopped
+
+
{{ vm.count }}
+
+
+
+
+
+ Most Used Flavor +
+ {{ flavors.first_common_flavor.name }} +
+
+
+
+ Avg. vCPU/VM +
+ {{ vm.avg_cpu|floatformat:1 }} +
+
+
+
+ Density +
+ {{ vm.density|floatformat:1 }}/host +
+
+ {% endif %} +
+
+
diff --git a/templates/index.html b/templates/index.html index 3b68434..d61a959 100644 --- a/templates/index.html +++ b/templates/index.html @@ -11,444 +11,10 @@ {% block content %}
- -
- -
-
- {% if skeleton %} -
-
-

CPU Utilization

-
/ CPU
-
-
—%
-
-
-
- Used - -
- -
- Free - -
-
- {% else %} -
-
-

CPU Utilization

-
{{ pcpu.usage|floatformat:1 }} / {{ pcpu.total }} CPU
-
-
{{ pcpu.used_percentage|floatformat:1 }}%
-
-
-
- Used - {{ pcpu.usage|floatformat:1 }} CPU -
- -
- Free - {{ pcpu.free }} CPU -
-
- {% endif %} -
-
- - -
-
- {% if skeleton %} -
-
-

RAM Utilization

-
/ GB
-
-
—%
-
-
-
- Used - -
- -
- Free - -
-
- {% else %} -
-
-

RAM Utilization

-
{{ pram.usage|convert_bytes }} / {{ pram.total|convert_bytes }} GB
-
-
{{ pram.used_percentage|floatformat:1 }}%
-
-
-
- Used - {{ pram.usage|convert_bytes }} GB -
- -
- Free - {{ pram.free|convert_bytes }} GB -
-
- {% endif %} -
-
- - -
-
- {% if skeleton %} -
-
-

Instances

-
active / stopped
-
-
-
-
-
-
-
- Most Used Flavor -
- -
-
-
-
- Avg. vCPU/VM -
- -
-
-
-
- Density -
- -
-
- {% else %} -
-
-

Instances

-
{{ vm.active }} active / {{ vm.stopped }} stopped
-
-
{{ vm.count }}
-
-
-
-
-
- Most Used Flavor -
- {{ flavors.first_common_flavor.name }} -
-
-
-
- Avg. vCPU/VM -
- {{ vm.avg_cpu|floatformat:1 }} -
-
-
-
- Density -
- {{ vm.density|floatformat:1 }}/host -
-
- {% endif %} -
-
-
- - -
- -
-
-

- - - - Resource Allocation -

- {% if skeleton %} -
-
- CPU Allocation - — / — vCPU -
-
- - —% -
-
-
-
-
- RAM Allocation - — / — GB -
-
- - —% -
-
-
- {% else %} - -
-
- CPU Allocation - {{ vcpu.allocated }} / {{ vcpu.total }} vCPU -
-
- - {{ vcpu.allocated_percentage|floatformat:1 }}% -
-
- overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max|floatformat:1 }} - {{ vcpu.allocated_percentage|floatformat:1 }}% allocated -
-
- - -
-
- RAM Allocation - {{ vram.allocated|convert_bytes }} / {{ vram.total|convert_bytes }} GB -
-
- - {{ vram.allocated_percentage|floatformat:1 }}% -
-
- overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max|floatformat:1 }} - {{ vram.allocated_percentage|floatformat:1 }}% allocated -
-
- {% endif %} -
-
- - -
-
-

- - - - - Top Flavors -

- {% if skeleton %} -
-
-
- - — instances -
-
- Share - —% -
-
-
-
-
-
- -
- -
-
-
-
- -
- -
-
-
- {% else %} -
- -
-
- {{ flavors.first_common_flavor.name }} - {{ flavors.first_common_flavor.count }} instances -
-
- Share - {{ flavors.first_common_flavor.count|div:vm.count|mul:100|floatformat:0 }}% -
-
- - -
- {% if flavors.second_common_flavor %} -
-
-
- {{ flavors.second_common_flavor.name }} -
- {{ flavors.second_common_flavor.count }} -
- {% endif %} - - {% if flavors.third_common_flavor %} -
-
-
- {{ flavors.third_common_flavor.name }} -
- {{ flavors.third_common_flavor.count }} -
- {% endif %} -
-
- {% endif %} -
-
-
- - -
-
-
-
-
-

Audit Analysis

-
Select an audit to analyze resource distribution
-
-
- {% if skeleton %}Loading…{% else %}{{ audits|length }} available{% endif %} - -
-
- -
- - -
-
-
-
- - -
-
-
-

CPU Distribution (Current vs Projected)

-
- -
-
-
-
- Mean: 0% -
-
-
- ±0.5σ: 0% -
-
-
-
-
- - -
-
-
-
-

Migration Actions

-
Select audit
-
- -
- - - - - - - - - - - - - - -
InstanceSource → DestinationFlavorImpact
- No audit selected. Load an audit to view migration recommendations. -
-
-
-
-
+ {% include "dashboard/_stats_cards.html" %} + {% include "dashboard/_allocation_flavors.html" %} + {% include "dashboard/_audit_section.html" %} + {% include "dashboard/_chart_migrations.html" %}
{% endblock %} @@ -457,7 +23,7 @@ const SKELETON_MODE = {{ skeleton|yesno:"true,false" }}; const CURRENT_CLUSTER = {% if current_cluster %}{ "host_labels": {{ current_cluster.host_labels|safe }}, "cpu_current": {{ current_cluster.cpu_current|safe }} }{% else %}null{% endif %}; - let auditData = { + window.auditData = { {% if not skeleton %} {% for audit in audits %} "{{ audit.id }}": { @@ -472,430 +38,9 @@ {% endfor %} {% endif %} }; - - document.getElementById('auditSelector').addEventListener('change', function(e) { - const option = this.options[this.selectedIndex]; - if (!option) return; - document.getElementById('previewCpu').textContent = option.dataset.cpu || '1.0'; - document.getElementById('previewRam').textContent = option.dataset.ram || '1.0'; - document.getElementById('previewScope').textContent = option.dataset.scope || 'Full Cluster'; - document.getElementById('previewStrategy').textContent = option.dataset.strategy || 'Balanced'; - }); - - let cpuDistributionChart = null; - - function setStat(key, text) { - document.querySelectorAll('[data-stats="' + key + '"]').forEach(function(el) { - el.textContent = text; - el.classList.remove('animate-pulse'); - }); - } - function setProgress(key, value) { - document.querySelectorAll('[data-stats="' + key + '"]').forEach(function(el) { - if (el.tagName === 'PROGRESS') { - el.value = value; - el.classList.remove('animate-pulse'); - } - }); - } - - function renderStats(data) { - if (!data) return; - var el = function(k) { return document.querySelector('[data-stats="' + k + '"]'); }; - var regionBadge = document.getElementById('regionBadge'); - if (regionBadge) regionBadge.textContent = data.region && data.region.name ? 
data.region.name : '—'; - setStat('pcpu.usage', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1)); - setStat('pcpu.total', String((data.pcpu && data.pcpu.total) || 0)); - setStat('pcpu.used_percentage', Number((data.pcpu && data.pcpu.used_percentage) || 0).toFixed(1) + '%'); - setStat('pcpu.usage_val', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1) + ' CPU'); - setProgress('pcpu.progress', (data.pcpu && data.pcpu.used_percentage) || 0); - setStat('pcpu.free', String((data.pcpu && data.pcpu.free) || 0)); - var pramUsageGb = formatBytes(data.pram && data.pram.usage, 'GB'); - var pramTotalGb = formatBytes(data.pram && data.pram.total, 'GB'); - var pramFreeGb = formatBytes(data.pram && data.pram.free, 'GB'); - setStat('pram.usage_gb', pramUsageGb); - setStat('pram.total_gb', pramTotalGb); - setStat('pram.used_percentage', Number((data.pram && data.pram.used_percentage) || 0).toFixed(1) + '%'); - setStat('pram.usage_gb_val', pramUsageGb + ' GB'); - setProgress('pram.progress', (data.pram && data.pram.used_percentage) || 0); - setStat('pram.free_gb', pramFreeGb + ' GB'); - setStat('vm.active', String(data.vm && data.vm.active)); - setStat('vm.stopped', String(data.vm && data.vm.stopped)); - setStat('vm.count', String(data.vm && data.vm.count)); - setStat('flavors.first_name', data.flavors && data.flavors.first_common_flavor ? 
data.flavors.first_common_flavor.name : '—'); - setStat('vm.avg_cpu', Number((data.vm && data.vm.avg_cpu) || 0).toFixed(1)); - setStat('vm.density', Number((data.vm && data.vm.density) || 0).toFixed(1) + '/host'); - setStat('vcpu.allocated_total', ((data.vcpu && data.vcpu.allocated) || 0) + ' / ' + ((data.vcpu && data.vcpu.total) || 0) + ' vCPU'); - setProgress('vcpu.progress', (data.vcpu && data.vcpu.allocated_percentage) || 0); - setStat('vcpu.allocated_percentage', Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '%'); - var vcpuOver = el('vcpu.overcommit'); - if (vcpuOver) { - vcpuOver.textContent = 'overcommit: ' + Number((data.vcpu && data.vcpu.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vcpu && data.vcpu.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '% allocated'; - vcpuOver.classList.remove('animate-pulse'); - } - var vramAllocGb = formatBytes(data.vram && data.vram.allocated, 'GB'); - var vramTotalGb = formatBytes(data.vram && data.vram.total, 'GB'); - setStat('vram.allocated_total', vramAllocGb + ' / ' + vramTotalGb + ' GB'); - setProgress('vram.progress', (data.vram && data.vram.allocated_percentage) || 0); - setStat('vram.allocated_percentage', Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '%'); - var vramOver = el('vram.overcommit'); - if (vramOver) { - vramOver.textContent = 'overcommit: ' + Number((data.vram && data.vram.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vram && data.vram.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '% allocated'; - vramOver.classList.remove('animate-pulse'); - } - setStat('flavors.first_count', (data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0) + ' instances'); - var vmCount = data.vm && data.vm.count ? 
data.vm.count : 0; - var firstCount = data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0; - setStat('flavors.first_share', (vmCount ? Math.round(firstCount / vmCount * 100) : 0) + '%'); - setStat('flavors.second_name', data.flavors && data.flavors.second_common_flavor ? data.flavors.second_common_flavor.name : '—'); - setStat('flavors.second_count', data.flavors && data.flavors.second_common_flavor ? String(data.flavors.second_common_flavor.count) : '—'); - setStat('flavors.third_name', data.flavors && data.flavors.third_common_flavor ? data.flavors.third_common_flavor.name : '—'); - setStat('flavors.third_count', data.flavors && data.flavors.third_common_flavor ? String(data.flavors.third_common_flavor.count) : '—'); - document.querySelectorAll('[data-stats]').forEach(function(n) { n.classList.remove('animate-pulse'); }); - } - - function renderAudits(auditsList) { - if (!auditsList || !auditsList.length) { - var countEl = document.getElementById('auditsCount'); - if (countEl) countEl.textContent = '0 available'; - var sel = document.getElementById('auditSelector'); - if (sel) { sel.disabled = false; sel.innerHTML = ''; } - return; - } - auditData = {}; - auditsList.forEach(function(a) { - auditData[a.id] = { - name: a.name, - migrations: typeof a.migrations === 'string' ? JSON.parse(a.migrations) : a.migrations, - hostData: { - labels: typeof a.host_labels === 'string' ? JSON.parse(a.host_labels) : a.host_labels, - current: typeof a.cpu_current === 'string' ? JSON.parse(a.cpu_current) : a.cpu_current, - projected: typeof a.cpu_projected === 'string' ? 
JSON.parse(a.cpu_projected) : a.cpu_projected - } - }; - }); - var sel = document.getElementById('auditSelector'); - if (sel) { - sel.disabled = false; - sel.innerHTML = ''; - auditsList.forEach(function(audit) { - var opt = document.createElement('option'); - opt.value = audit.id; - opt.setAttribute('data-cpu', audit.cpu_weight || '1.0'); - opt.setAttribute('data-ram', audit.ram_weight || '1.0'); - opt.setAttribute('data-scope', audit.scope || 'Full Cluster'); - opt.setAttribute('data-strategy', audit.strategy || 'Balanced'); - opt.setAttribute('data-goal', audit.goal || ''); - var dateStr = audit.created_at ? new Date(audit.created_at).toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) : ''; - opt.textContent = audit.name + ' (' + dateStr + ')'; - sel.appendChild(opt); - }); - } - var countEl = document.getElementById('auditsCount'); - if (countEl) countEl.textContent = auditsList.length + ' available'; - if (auditsList.length > 0) { - document.getElementById('auditSelector').dispatchEvent(new Event('change')); - loadSelectedAudit(); - } - } - - // Load selected audit - function loadSelectedAudit() { - const auditId = document.getElementById('auditSelector').value; - updateMigrationTable(auditId); - updateCPUCharts(auditId); - } - - // Update migration table - function updateMigrationTable(auditId) { - const tbody = document.getElementById('migrationTableBody'); - const migrationCount = document.getElementById('migrationCount'); - const data = auditData[auditId]; - - if (!data || !data.migrations || data.migrations.length === 0) { - tbody.innerHTML = ` - - - No migration actions recommended - - - `; - migrationCount.textContent = '0 actions'; - return; - } - - let html = ''; - data.migrations.forEach(migration => { - const impact = migration.impact || 'Low'; - const impactClass = { - 'Low': 'badge-success', - 'Medium': 'badge-warning', - 'High': 'badge-error' - }[impact] || 'badge-neutral'; - - html += ` - - -
${migration.instanceName}
- - -
- ${migration.source} - - - - ${migration.destination} -
- - - ${migration.flavor} - - - ${impact} - - - `; - }); - - tbody.innerHTML = html; - migrationCount.textContent = `${data.migrations.length} action${data.migrations.length !== 1 ? 's' : ''}`; - } - - // Update CPU chart (combined current vs projected) - function updateCPUCharts(auditId) { - const data = auditData[auditId]; - if (!data || !data.hostData) return; - - const ctx = document.getElementById('cpuDistributionChart').getContext('2d'); - - const currentStats = calculateStats(data.hostData.current); - - document.getElementById('currentCpuMean').textContent = currentStats.mean.toFixed(1); - document.getElementById('currentCpuStd').textContent = (currentStats.std * 0.5).toFixed(1); - - if (cpuDistributionChart) cpuDistributionChart.destroy(); - - const colors = { - primary: getCSSVar('--color-primary'), - secondary: getCSSVar('--color-secondary'), - accent: getCSSVar('--color-accent'), - neutral: getCSSVar('--color-neutral'), - info: getCSSVar('--color-info'), - success: getCSSVar('--color-success'), - warning: getCSSVar('--color-warning'), - error: getCSSVar('--color-error') - }; - const textColor = getCSSVar('--color-base-content'); - const gridColor = getCSSVar('--chart-grid-color') || textColor; - - cpuDistributionChart = new Chart(ctx, { - type: 'bar', - data: { - labels: data.hostData.labels, - datasets: [ - { - label: 'Current', - data: data.hostData.current.slice(), - backgroundColor: colors.info + '40', - borderColor: colors.info, - borderWidth: 1, - borderRadius: 3 - }, - { - label: 'Projected', - data: data.hostData.projected.slice(), - backgroundColor: colors.warning + '40', - borderColor: colors.warning, - borderWidth: 1, - borderRadius: 3 - } - ] - }, - options: { - responsive: true, - maintainAspectRatio: false, - animation: { - onComplete: function() { - var chart = this; - if (typeof chart.getDatasetMeta !== 'function') chart = chart.chart; - if (!chart || chart._hidingDataset === undefined) return; - var i = chart._hidingDataset; - 
chart.getDatasetMeta(i).hidden = true; - chart.data.datasets[i].data = chart._cpuOriginalData[i].slice(); - delete chart._hidingDataset; - chart.update('none'); - } - }, - plugins: { - legend: { - display: true, - position: 'top', - align: 'center', - onClick: function(e, legendItem, legend) { - const i = legendItem.datasetIndex; - const chart = legend.chart; - const len = chart.data.labels.length; - if (chart.isDatasetVisible(i)) { - chart._hidingDataset = i; - chart.data.datasets[i].data = Array(len).fill(0); - chart.update(); - } else { - chart.data.datasets[i].data = Array(len).fill(0); - chart.show(i); - chart.update('none'); - chart.data.datasets[i].data = chart._cpuOriginalData[i].slice(); - chart.update(); - } - }, - labels: { - usePointStyle: true, - pointStyle: 'rect', - boxWidth: 14, - boxHeight: 14, - padding: 12, - color: textColor, - generateLabels: function(chart) { - const datasets = chart.data.datasets; - const labelColor = getCSSVar('--color-base-content'); - return datasets.map(function(ds, i) { - return { - text: ds.label, - fillStyle: ds.borderColor, - strokeStyle: ds.borderColor, - lineWidth: 1, - fontColor: labelColor, - color: labelColor, - hidden: !chart.isDatasetVisible(i), - datasetIndex: i - }; - }); - } - } - }, - tooltip: { - callbacks: { - label: (ctx) => `${ctx.dataset.label}: ${Number(ctx.parsed.y).toFixed(2)}% CPU` - } - }, - annotation: { - annotations: { - MeanLine: { - type: 'line', - yMin: currentStats.mean.toFixed(1), - yMax: currentStats.mean.toFixed(1), - borderColor: colors.success, - borderWidth: 2, - borderDash: [] - }, - upperStdLine: { - type: 'line', - yMin: (currentStats.mean + currentStats.std * 0.5).toFixed(1), - yMax: (currentStats.mean + currentStats.std * 0.5).toFixed(1), - borderColor: colors.error, - borderWidth: 1, - borderDash: [5, 5] - }, - lowerStdLine: { - type: 'line', - yMin: currentStats.mean > currentStats.std * 0.5 ? 
(currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0, - yMax: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0, - borderColor: colors.error, - borderWidth: 1, - borderDash: [5, 5] - } - } - } - }, - scales: { - y: { - beginAtZero: true, - max: 100, - grid: { drawBorder: false, color: gridColor }, - ticks: { - color: textColor, - callback: value => value + '%' - } - }, - x: { - grid: { display: false }, - ticks: { - display: false - }, - barPercentage: 1, - categoryPercentage: 0.85 - } - } - } - }); - cpuDistributionChart._cpuOriginalData = [ - data.hostData.current.slice(), - data.hostData.projected.slice() - ]; - } - - // Utility functions - function calculateStats(data) { - const mean = data.reduce((a, b) => a + b, 0) / data.length; - const variance = data.reduce((a, b) => a + Math.pow(b - mean, 2), 0) / data.length; - const std = Math.sqrt(variance); - return { mean, std }; - } - - document.addEventListener('DOMContentLoaded', function() { - if (SKELETON_MODE) { - Promise.all([ - fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); }), - fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); }) - ]).then(function(results) { - renderStats(results[0]); - renderAudits(results[1].audits); - if (!results[1].audits || results[1].audits.length === 0) { - var cc = results[1].current_cluster; - if (cc && cc.host_labels && cc.cpu_current && cc.host_labels.length) { - auditData["current"] = { - hostData: { - labels: cc.host_labels, - current: cc.cpu_current, - projected: cc.cpu_current - } - }; - updateCPUCharts('current'); - } - } - }).catch(function(err) { - var msg = err.status ? 'Failed to load data (' + err.status + ')' : 'Failed to load data'; - var countEl = document.getElementById('auditsCount'); - if (countEl) countEl.textContent = msg; - fetch('/api/stats/').then(function(r) { return r.ok ? 
r.json() : null; }).then(function(d) { if (d) renderStats(d); }); - fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d && d.audits) renderAudits(d.audits); }); - }); - } else { - var initialAudit = "{% if audits %}{{ audits.0.id }}{% endif %}"; - if (initialAudit && auditData[initialAudit]) { - document.getElementById('auditSelector').dispatchEvent(new Event('change')); - loadSelectedAudit(); - } else if (!initialAudit && CURRENT_CLUSTER && CURRENT_CLUSTER.host_labels && CURRENT_CLUSTER.host_labels.length) { - auditData["current"] = { - hostData: { - labels: CURRENT_CLUSTER.host_labels, - current: CURRENT_CLUSTER.cpu_current, - projected: CURRENT_CLUSTER.cpu_current - } - }; - updateCPUCharts('current'); - } - } - }); - - document.addEventListener('themechange', function() { - if (cpuDistributionChart) { - const auditId = document.getElementById('auditSelector').value; - cpuDistributionChart.destroy(); - cpuDistributionChart = null; - if (auditId) updateCPUCharts(auditId); - } - }); + var INITIAL_AUDIT_ID = "{% if audits %}{{ audits.0.id }}{% endif %}"; + {% endblock %} {% block css %} @@ -910,4 +55,4 @@ @apply px-1.5 py-0.5 text-xs; } -{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/watcher_visio/settings.py b/watcher_visio/settings.py index f3c6de1..c5dd08b 100644 --- a/watcher_visio/settings.py +++ b/watcher_visio/settings.py @@ -48,7 +48,7 @@ INSTALLED_APPS = [ PROMETHEUS_URL = "http://10.226.74.53:9090/" PROMETHEUS_METRICS = { "cpu_usage": "rate(libvirt_domain_info_cpu_time_seconds_total)[300s]", - "ram_usage": "avg_over_time(libvirt_domain_info_memory_usage_bytes[300s]", + "ram_usage": "avg_over_time(libvirt_domain_info_memory_usage_bytes[300s])", } # Openstack cloud settings From 263379c072b2e7f0b53e6db7f0b8d6b06db8dc6a Mon Sep 17 00:00:00 2001 From: Nikolay Tatarinov Date: Thu, 12 Feb 2026 13:56:05 +0300 Subject: [PATCH 02/10] Update dashboard metrics and enhance CPU statistics 
visualization - Expanded mock data to include additional host labels and updated CPU current and projected values for better representation. - Modified JavaScript to conditionally display projected CPU statistics and standard deviation, improving user experience. - Refactored chart configuration to dynamically handle datasets based on the presence of projected data. - Updated HTML to include a new block for displaying standard deviation, enhancing clarity in CPU metrics presentation. --- dashboard/mock_data.py | 6 +-- package.json | 58 +++++++++++----------- static/js/dashboard.js | 33 ++++++++---- templates/dashboard/_chart_migrations.html | 2 +- 4 files changed, 56 insertions(+), 43 deletions(-) diff --git a/dashboard/mock_data.py b/dashboard/mock_data.py index 4efee31..9e6598b 100644 --- a/dashboard/mock_data.py +++ b/dashboard/mock_data.py @@ -21,9 +21,9 @@ def get_mock_context(): vram_total = pram_total * vram_overcommit_max # Two sample audits with serialized fields for JS - host_labels = ["compute-0", "compute-1", "compute-2", "compute-3", "compute-4", "compute-5"] - cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 48.5] - cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0] + host_labels = ["compute-0", "compute-1", "compute-2", "compute-3", "compute-4", "compute-5", "compute-6", "compute-7", "compute-8", "compute-9", "compute-10", "compute-11"] + cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 32.1, 36.4, 29.2, 42.2, 41.3, 28.3, 33.3] + cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0, 43.0, 43.0, 44.0, 48.0, 47.0, 49.0] audits = [ { diff --git a/package.json b/package.json index f45326e..b28524d 100644 --- a/package.json +++ b/package.json @@ -1,29 +1,29 @@ -{ - "name": "watcher-visio", - "version": "1.0.0", - "description": "", - "main": "index.js", - "scripts": { - "build": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --minify", - "dev": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --watch" - }, - 
"repository": { - "type": "git", - "url": "https://git.arnike.ru/Arnike/watcher-visio.git" - }, - "keywords": [], - "author": "", - "license": "ISC", - "type": "commonjs", - "devDependencies": { - "@fontsource/dm-sans": "^5.2.8", - "@tailwindcss/typography": "^0.5.19", - "autoprefixer": "^10.4.22", - "daisyui": "^5.5.5", - "postcss": "^8.5.6", - "tailwindcss": "^4.1.17" - }, - "dependencies": { - "@tailwindcss/cli": "^4.1.17" - } -} +{ + "name": "watcher-visio", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "build": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --minify", + "dev": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --watch" + }, + "repository": { + "type": "git", + "url": "https://git.arnike.ru/Arnike/watcher-visio.git" + }, + "keywords": [], + "author": "", + "license": "ISC", + "type": "commonjs", + "devDependencies": { + "@fontsource/dm-sans": "^5.2.8", + "@tailwindcss/typography": "^0.5.19", + "autoprefixer": "^10.4.22", + "daisyui": "^5.5.5", + "postcss": "^8.5.6", + "tailwindcss": "^4.1.17" + }, + "dependencies": { + "@tailwindcss/cli": "^4.1.17" + } +} diff --git a/static/js/dashboard.js b/static/js/dashboard.js index cd5c444..410499f 100644 --- a/static/js/dashboard.js +++ b/static/js/dashboard.js @@ -161,11 +161,14 @@ var data = window.auditData && window.auditData[auditId]; if (!data || !data.hostData) return; + var hasProjected = (auditId !== 'current'); var ctx = document.getElementById('cpuDistributionChart').getContext('2d'); var currentStats = calculateStats(data.hostData.current); document.getElementById('currentCpuMean').textContent = currentStats.mean.toFixed(1); document.getElementById('currentCpuStd').textContent = (currentStats.std * 0.5).toFixed(1); + var stdBlock = document.getElementById('currentCpuStdBlock'); + if (stdBlock) stdBlock.style.display = hasProjected ? 
'' : 'none'; if (cpuDistributionChart) cpuDistributionChart.destroy(); @@ -182,14 +185,26 @@ var textColor = getCSSVar('--color-base-content'); var gridColor = getCSSVar('--chart-grid-color') || textColor; + var datasets = [ + { label: 'Current', data: data.hostData.current.slice(), backgroundColor: colors.info + '40', borderColor: colors.info, borderWidth: 1, borderRadius: 3 } + ]; + if (hasProjected) { + datasets.push({ label: 'Projected', data: data.hostData.projected.slice(), backgroundColor: colors.warning + '40', borderColor: colors.warning, borderWidth: 1, borderRadius: 3 }); + } + + var annotationConfig = { + MeanLine: { type: 'line', yMin: currentStats.mean, yMax: currentStats.mean, borderColor: colors.success, borderWidth: 2, borderDash: [] } + }; + if (hasProjected) { + annotationConfig.upperStdLine = { type: 'line', yMin: currentStats.mean + currentStats.std * 0.5, yMax: currentStats.mean + currentStats.std * 0.5, borderColor: colors.error, borderWidth: 1, borderDash: [5, 5] }; + annotationConfig.lowerStdLine = { type: 'line', yMin: currentStats.mean > currentStats.std * 0.5 ? currentStats.mean - currentStats.std * 0.5 : 0, yMax: currentStats.mean > currentStats.std * 0.5 ? 
currentStats.mean - currentStats.std * 0.5 : 0, borderColor: colors.error, borderWidth: 1, borderDash: [5, 5] }; + } + cpuDistributionChart = new Chart(ctx, { type: 'bar', data: { labels: data.hostData.labels, - datasets: [ - { label: 'Current', data: data.hostData.current.slice(), backgroundColor: colors.info + '40', borderColor: colors.info, borderWidth: 1, borderRadius: 3 }, - { label: 'Projected', data: data.hostData.projected.slice(), backgroundColor: colors.warning + '40', borderColor: colors.warning, borderWidth: 1, borderRadius: 3 } - ] + datasets: datasets }, options: { responsive: true, @@ -246,11 +261,7 @@ callbacks: { label: function(ctx) { return ctx.dataset.label + ': ' + Number(ctx.parsed.y).toFixed(2) + '% CPU'; } } }, annotation: { - annotations: { - MeanLine: { type: 'line', yMin: currentStats.mean, yMax: currentStats.mean, borderColor: colors.success, borderWidth: 2, borderDash: [] }, - upperStdLine: { type: 'line', yMin: currentStats.mean + currentStats.std * 0.5, yMax: currentStats.mean + currentStats.std * 0.5, borderColor: colors.error, borderWidth: 1, borderDash: [5, 5] }, - lowerStdLine: { type: 'line', yMin: currentStats.mean > currentStats.std * 0.5 ? currentStats.mean - currentStats.std * 0.5 : 0, yMax: currentStats.mean > currentStats.std * 0.5 ? currentStats.mean - currentStats.std * 0.5 : 0, borderColor: colors.error, borderWidth: 1, borderDash: [5, 5] } - } + annotations: annotationConfig } }, scales: { @@ -259,7 +270,9 @@ } } }); - cpuDistributionChart._cpuOriginalData = [ data.hostData.current.slice(), data.hostData.projected.slice() ]; + cpuDistributionChart._cpuOriginalData = hasProjected + ? 
[ data.hostData.current.slice(), data.hostData.projected.slice() ] + : [ data.hostData.current.slice() ]; } document.addEventListener('DOMContentLoaded', function() { diff --git a/templates/dashboard/_chart_migrations.html b/templates/dashboard/_chart_migrations.html index 5af42aa..0e48e37 100644 --- a/templates/dashboard/_chart_migrations.html +++ b/templates/dashboard/_chart_migrations.html @@ -11,7 +11,7 @@
Mean: 0% -
+
±0.5σ: 0%
From 99f8cb2dec366bf65df0012fbe2d92fc2537a11e Mon Sep 17 00:00:00 2001 From: Nikolay Tatarinov Date: Thu, 12 Feb 2026 14:23:23 +0300 Subject: [PATCH 03/10] Refactor print styles and enhance chart configuration for better visualization - Adjusted chart grid color for improved contrast in visualizations. - Implemented print-specific styles to ensure proper layout and formatting on A4 paper. - Enhanced chart configuration in JavaScript to include grid line width and tick border dash for clearer data representation. - Updated HTML to streamline print functionality, replacing the PDF export button with a direct print command. --- static/css/main.css | 90 +++++++++++++++++++++++++++++++++++++++--- static/js/dashboard.js | 16 +++++++- templates/base.html | 10 +++-- 3 files changed, 105 insertions(+), 11 deletions(-) diff --git a/static/css/main.css b/static/css/main.css index 672fd8e..0c9b4a5 100644 --- a/static/css/main.css +++ b/static/css/main.css @@ -96,7 +96,7 @@ --border: 1px; --depth: 1; --noise: 0; - --chart-grid-color: color-mix(in oklch, var(--color-base-content) 22%, transparent); + --chart-grid-color: color-mix(in oklch, var(--color-base-content) 10%, transparent); } @plugin "daisyui/theme" { @@ -133,7 +133,7 @@ --border: 1px; --depth: 1; --noise: 0; - --chart-grid-color: color-mix(in oklch, var(--color-base-content) 22%, transparent); + --chart-grid-color: color-mix(in oklch, var(--color-base-content) 10%, transparent); } /* VTB gradient (both themes) */ @@ -229,16 +229,64 @@ label.swap:focus-within:not(.theme-toggle) { @source "../../templates"; /* --- Print (Save as PDF) --- */ +@page { + size: A4; + margin: 15mm; +} + @media print { + /* Force printable area width (A4 minus margins) so layout doesn't use screen width */ + html { + width: 180mm !important; + min-width: 180mm !important; + max-width: 180mm !important; + margin: 0 !important; + padding: 0 !important; + overflow-x: hidden !important; + } + body { + width: 180mm !important; + min-width: 180mm 
!important; + max-width: 180mm !important; + margin: 0 !important; + padding: 0 !important; + overflow-x: hidden !important; + -webkit-print-color-adjust: exact; + print-color-adjust: exact; + box-sizing: border-box !important; + } + body *, + body *::before, + body *::after { + box-sizing: border-box !important; + } + /* Allow flex/grid children to shrink so they don't force overflow */ + body * { + min-width: 0 !important; + } .no-print { display: none !important; } .print-only { display: block !important; } + /* Main and content: stay within body width */ + main.container { + width: 100% !important; + max-width: 100% !important; + margin: 0 !important; + padding: 0.5rem 0.5rem 0 !important; + min-width: 0 !important; + } + #dashboard-content { + width: 100% !important; + max-width: 100% !important; + min-width: 0 !important; + overflow-x: hidden !important; + padding: 0.5rem 0 !important; + } /* Keep card backgrounds and colors when printing */ .card, - main, .badge, .progress { -webkit-print-color-adjust: exact; @@ -253,9 +301,39 @@ label.swap:focus-within:not(.theme-toggle) { break-inside: avoid; page-break-inside: avoid; } - /* Reduce top padding so content starts higher */ - main { - padding-top: 0.5rem !important; + /* Tables: fit to page, allow column shrink */ + .overflow-x-auto { + max-width: 100% !important; + overflow-x: visible !important; + } + .table { + table-layout: fixed !important; + width: 100% !important; + max-width: 100% !important; + } + .table td, + .table th { + overflow: hidden; + text-overflow: ellipsis; + } + /* Chart: constrain so it doesn't overflow (canvas has fixed size from Chart.js) */ + section[aria-label="CPU distribution chart"] .card-body { + max-width: 100% !important; + overflow: hidden !important; + } + section[aria-label="CPU distribution chart"] .h-48, + section[aria-label="CPU distribution chart"] [class*="h-48"] { + max-width: 100% !important; + min-height: 10rem !important; + } + section[aria-label="CPU distribution 
chart"] canvas { + max-width: 100% !important; + height: auto !important; + } + /* Navbar fits page width */ + .navbar { + width: 100% !important; + max-width: 100% !important; } } @media screen { diff --git a/static/js/dashboard.js b/static/js/dashboard.js index 410499f..d9419b7 100644 --- a/static/js/dashboard.js +++ b/static/js/dashboard.js @@ -265,7 +265,21 @@ } }, scales: { - y: { beginAtZero: true, max: 100, grid: { drawBorder: false, color: gridColor }, ticks: { color: textColor, callback: function(value) { return value + '%'; } } }, + y: { + beginAtZero: true, + max: 100, + grid: { + drawBorder: false, + color: gridColor, + lineWidth: 0.5, + tickBorderDash: [4, 4] + }, + ticks: { + stepSize: 25, + color: textColor, + callback: function(value) { return value + '%'; } + } + }, x: { grid: { display: false }, ticks: { display: false }, barPercentage: 1, categoryPercentage: 0.85 } } } diff --git a/templates/base.html b/templates/base.html index 02537e4..656d7a4 100644 --- a/templates/base.html +++ b/templates/base.html @@ -7,8 +7,11 @@ {% block title %}SWatcher{% endblock %} - - + {% block imports %} {% endblock %} {% block css %} @@ -22,7 +25,7 @@
From 76eae52d2a8f7cba8639656e8ca46f53d919f7d7 Mon Sep 17 00:00:00 2001 From: Nikolay Tatarinov Date: Thu, 12 Feb 2026 19:38:12 +0300 Subject: [PATCH 08/10] Enhance tooltip functionality for CPU standard deviation in dashboard - Updated the tooltip for the CPU standard deviation metric to include a detailed explanation of the calculation, preserving newlines for better readability. - Added CSS styles to support multi-line tooltips, improving user comprehension of the displayed data. --- static/css/main.css | 6 ++++++ templates/dashboard/_chart_migrations.html | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/static/css/main.css b/static/css/main.css index 0c9b4a5..fd9a0fa 100644 --- a/static/css/main.css +++ b/static/css/main.css @@ -340,4 +340,10 @@ label.swap:focus-within:not(.theme-toggle) { .print-only { display: none !important; } +} + +/* Multi-line tooltip (formula): preserve newlines from data-tip */ +.tooltip-formula[data-tip]::before { + white-space: pre-line; + text-align: left; } \ No newline at end of file diff --git a/templates/dashboard/_chart_migrations.html b/templates/dashboard/_chart_migrations.html index dc8a47d..ec05e6c 100644 --- a/templates/dashboard/_chart_migrations.html +++ b/templates/dashboard/_chart_migrations.html @@ -13,7 +13,7 @@
- + ±0.5σ: 0%
From 656a6bfac412356b7b9ca7a2dd0d06af6eaef2be Mon Sep 17 00:00:00 2001 From: Nikolay Tatarinov Date: Thu, 12 Feb 2026 20:10:09 +0300 Subject: [PATCH 09/10] Refactor dashboard data serialization and mock context for improved clarity - Introduced `serialize_audit_for_response` and `serialize_current_cluster_for_template` functions to handle JSON serialization of audit and cluster data, enhancing data consistency for API responses and template rendering. - Updated `get_mock_context` in `mock_data.py` to utilize the new serialization functions, simplifying the mock data structure and improving readability. - Refactored `collect_context` and `collect_audits` in `views.py` to leverage the new serialization methods, ensuring a cleaner and more maintainable codebase. - Added unit tests for the new serialization functions to ensure correctness and reliability of data formatting. --- dashboard/mock_data.py | 49 ++++++------- dashboard/openstack_utils/audits.py | 106 ++++++++++++++++------------ dashboard/serializers.py | 32 +++++++++ dashboard/tests/test_serializers.py | 57 +++++++++++++++ dashboard/views.py | 25 ++----- docs/api_context.md | 80 +++++++++++++++++++++ static/js/dashboard.js | 32 +++++++-- static/js/utils.js | 22 ++++++ 8 files changed, 313 insertions(+), 90 deletions(-) create mode 100644 dashboard/serializers.py create mode 100644 dashboard/tests/test_serializers.py create mode 100644 docs/api_context.md diff --git a/dashboard/mock_data.py b/dashboard/mock_data.py index eb91535..ba5d774 100644 --- a/dashboard/mock_data.py +++ b/dashboard/mock_data.py @@ -1,6 +1,9 @@ """Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus).""" -import json +from dashboard.serializers import ( + serialize_audit_for_response, + serialize_current_cluster_for_template, +) def get_mock_context(): @@ -38,7 +41,7 @@ def get_mock_context(): cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 32.1, 36.4, 29.2, 42.2, 41.3, 28.3, 33.3] cpu_projected = [42.0, 40.0, 
48.0, 44.0, 35.0, 46.0, 43.0, 43.0, 44.0, 48.0, 47.0, 49.0] - audits = [ + audits_raw = [ { "id": "mock-audit-uuid-1", "name": "Mock audit (balanced)", @@ -49,20 +52,18 @@ def get_mock_context(): "scope": "Full Cluster", "cpu_weight": "1.0", "ram_weight": "1.0", - "migrations": json.dumps( - [ - { - "instanceName": "instance-1", - "source": "compute-0", - "destination": "compute-3", - "flavor": "m1.small", - "impact": "Low", - } - ] - ), - "host_labels": json.dumps(host_labels), - "cpu_current": json.dumps(cpu_current), - "cpu_projected": json.dumps(cpu_projected), + "migrations": [ + { + "instanceName": "instance-1", + "source": "compute-0", + "destination": "compute-3", + "flavor": "m1.small", + "impact": "Low", + } + ], + "host_labels": host_labels, + "cpu_current": cpu_current, + "cpu_projected": cpu_projected, }, { "id": "mock-audit-uuid-2", @@ -74,12 +75,13 @@ def get_mock_context(): "scope": "Full Cluster", "cpu_weight": "1.0", "ram_weight": "1.0", - "migrations": json.dumps([]), - "host_labels": json.dumps(host_labels), - "cpu_current": json.dumps(cpu_current), - "cpu_projected": json.dumps([40.0, 42.0, 50.0, 43.0, 36.0, 45.0]), + "migrations": [], + "host_labels": host_labels, + "cpu_current": cpu_current, + "cpu_projected": [40.0, 42.0, 50.0, 43.0, 36.0, 45.0], }, ] + audits = [serialize_audit_for_response(a) for a in audits_raw] return { "region": { @@ -128,8 +130,7 @@ def get_mock_context(): "third_common_flavor": {"name": "m1.large", "count": 4}, }, "audits": audits, - "current_cluster": { - "host_labels": json.dumps(host_labels), - "cpu_current": json.dumps(cpu_current), - }, + "current_cluster": serialize_current_cluster_for_template( + {"host_labels": host_labels, "cpu_current": cpu_current} + ), } diff --git a/dashboard/openstack_utils/audits.py b/dashboard/openstack_utils/audits.py index 5bc20a3..a5a7cc4 100644 --- a/dashboard/openstack_utils/audits.py +++ b/dashboard/openstack_utils/audits.py @@ -49,41 +49,81 @@ def 
get_current_cluster_cpu(connection: Connection) -> dict: } +def _fetch_audits_and_action_plans(session, watcher_endpoint): + """GET audits and action_plans from Watcher API. Returns (audits_list, action_plans_list).""" + audits_resp = session.get(f"{watcher_endpoint}/v1/audits") + audits_resp.raise_for_status() + audits_list = audits_resp.json().get("audits") or [] + + actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans") + actionplans_resp.raise_for_status() + action_plans_list = actionplans_resp.json().get("action_plans") or [] + + return audits_list, action_plans_list + + +def _fetch_migrations_for_audit( + connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp +): + """ + Fetch action details for the given action plan and build migrations list and + instance->destination mapping. Returns (migrations, mapping). + """ + migrations = [] + mapping = {} + for action in actions_resp: + action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}") + action_resp.raise_for_status() + action_resp = action_resp.json() + + server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"]) + params = action_resp["input_parameters"] + mapping[params["resource_name"]] = params["destination_node"] + + migrations.append( + { + "instanceName": params["resource_name"], + "source": params["source_node"], + "destination": params["destination_node"], + "flavor": server.flavor.name, + "impact": "Low", + } + ) + return migrations, mapping + + +def _build_projected_cpu_metrics(cpu_data, mapping): + """ + Apply instance->destination mapping to a copy of cpu_data and return + aggregated CPU metrics DataFrame (host, cpu_usage). 
+ """ + projected_cpu_data = copy(cpu_data) + for entry in projected_cpu_data: + if (instance := entry["metric"]["instanceName"]) in mapping: + entry["metric"]["host"] = mapping[instance] + return convert_cpu_data(projected_cpu_data) + + def get_audits(connection: Connection) -> list[dict] | None: session = connection.session - watcher_endpoint = connection.endpoint_for( service_type=WATCHER_ENDPOINT_NAME, interface=WATCHER_INTERFACE_NAME ) - # Collect instances prometheus metrics cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"]) - cpu_metrics = convert_cpu_data(data=cpu_data) - # Fetch audit list - audits_resp = session.get(f"{watcher_endpoint}/v1/audits") - audits_resp.raise_for_status() - audits_resp = audits_resp.json().get("audits") or [] - - # Fetch action plan list - actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans") - actionplans_resp.raise_for_status() - actionplans_resp = actionplans_resp.json().get("action_plans") or [] - - # Filtering audits by PENDING state - pending_audits = [plan for plan in actionplans_resp if plan["state"] == "RECOMMENDED"] + _, action_plans_list = _fetch_audits_and_action_plans(session, watcher_endpoint) + pending_audits = [plan for plan in action_plans_list if plan["state"] == "RECOMMENDED"] result = [] for item in pending_audits: - projected_cpu_data = copy(cpu_data) - audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}") audit_resp.raise_for_status() audit_resp = audit_resp.json() actionplan = next( - filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], actionplans_resp), None + filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], action_plans_list), None ) if actionplan is None: continue @@ -94,32 +134,10 @@ def get_audits(connection: Connection) -> list[dict] | None: actions_resp.raise_for_status() actions_resp = actions_resp.json().get("actions") or [] - migrations = [] - mapping = {} - for action in actions_resp: - action_resp = 
session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}") - action_resp.raise_for_status() - action_resp = action_resp.json() - - server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"]) - params = action_resp["input_parameters"] - mapping[params["resource_name"]] = params["destination_node"] - - migrations.append( - { - "instanceName": action_resp["input_parameters"]["resource_name"], - "source": action_resp["input_parameters"]["source_node"], - "destination": action_resp["input_parameters"]["destination_node"], - "flavor": server.flavor.name, - "impact": "Low", - } - ) - - for entry in projected_cpu_data: - if (instance := entry["metric"]["instanceName"]) in mapping: - entry["metric"]["host"] = mapping[instance] - - projected_cpu_metrics = convert_cpu_data(projected_cpu_data) + migrations, mapping = _fetch_migrations_for_audit( + connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp + ) + projected_cpu_metrics = _build_projected_cpu_metrics(cpu_data, mapping) result.append( { diff --git a/dashboard/serializers.py b/dashboard/serializers.py new file mode 100644 index 0000000..e47abf2 --- /dev/null +++ b/dashboard/serializers.py @@ -0,0 +1,32 @@ +"""Serialization helpers for dashboard context and API responses.""" + +import json + + +def _ensure_json_str(value): + """Return value as JSON string; if already a string, return as-is.""" + return value if isinstance(value, str) else json.dumps(value) + + +def serialize_audit_for_response(audit: dict) -> dict: + """ + Return a copy of the audit dict with migrations, host_labels, cpu_current, + and cpu_projected serialized as JSON strings (for template/API response). 
+ """ + result = dict(audit) + result["migrations"] = _ensure_json_str(audit.get("migrations")) + result["host_labels"] = _ensure_json_str(audit.get("host_labels")) + result["cpu_current"] = _ensure_json_str(audit.get("cpu_current")) + result["cpu_projected"] = _ensure_json_str(audit.get("cpu_projected")) + return result + + +def serialize_current_cluster_for_template(current_cluster: dict) -> dict: + """ + Return current_cluster with host_labels and cpu_current as JSON strings + for template embedding (e.g. in index.html). + """ + return { + "host_labels": _ensure_json_str(current_cluster.get("host_labels")), + "cpu_current": _ensure_json_str(current_cluster.get("cpu_current")), + } diff --git a/dashboard/tests/test_serializers.py b/dashboard/tests/test_serializers.py new file mode 100644 index 0000000..4ca50e6 --- /dev/null +++ b/dashboard/tests/test_serializers.py @@ -0,0 +1,57 @@ +"""Tests for dashboard.serializers.""" + +import json + +from django.test import TestCase + +from dashboard.serializers import ( + serialize_audit_for_response, + serialize_current_cluster_for_template, +) + + +class SerializeAuditForResponseTest(TestCase): + def test_serializes_list_fields_to_json_strings(self): + audit = { + "id": "audit-1", + "name": "Test", + "migrations": [{"instanceName": "i1", "source": "h1", "destination": "h2"}], + "host_labels": ["h1", "h2"], + "cpu_current": [10.0, 20.0], + "cpu_projected": [15.0, 25.0], + } + result = serialize_audit_for_response(audit) + self.assertEqual(result["id"], "audit-1") + self.assertEqual(result["name"], "Test") + self.assertEqual(json.loads(result["migrations"]), audit["migrations"]) + self.assertEqual(json.loads(result["host_labels"]), audit["host_labels"]) + self.assertEqual(json.loads(result["cpu_current"]), audit["cpu_current"]) + self.assertEqual(json.loads(result["cpu_projected"]), audit["cpu_projected"]) + + def test_leaves_already_serialized_strings_unchanged(self): + audit = { + "id": "a", + "migrations": "[1,2]", + 
"host_labels": "[]", + "cpu_current": "[0]", + "cpu_projected": "[0]", + } + result = serialize_audit_for_response(audit) + self.assertEqual(result["migrations"], "[1,2]") + self.assertEqual(result["host_labels"], "[]") + self.assertEqual(result["cpu_current"], "[0]") + self.assertEqual(result["cpu_projected"], "[0]") + + +class SerializeCurrentClusterForTemplateTest(TestCase): + def test_serializes_lists_to_json_strings(self): + cluster = {"host_labels": ["c0", "c1"], "cpu_current": [30.0, 40.0]} + result = serialize_current_cluster_for_template(cluster) + self.assertEqual(json.loads(result["host_labels"]), cluster["host_labels"]) + self.assertEqual(json.loads(result["cpu_current"]), cluster["cpu_current"]) + + def test_leaves_already_serialized_strings_unchanged(self): + cluster = {"host_labels": "[]", "cpu_current": "[]"} + result = serialize_current_cluster_for_template(cluster) + self.assertEqual(result["host_labels"], "[]") + self.assertEqual(result["cpu_current"], "[]") diff --git a/dashboard/views.py b/dashboard/views.py index c6aa561..4252b43 100644 --- a/dashboard/views.py +++ b/dashboard/views.py @@ -1,5 +1,3 @@ -import json - from django.conf import settings from django.core.cache import cache from django.http import JsonResponse @@ -10,6 +8,10 @@ from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu from dashboard.openstack_utils.connect import check_openstack, get_connection from dashboard.openstack_utils.flavor import get_flavor_list from dashboard.prometheus_utils.query import check_prometheus, fetch_dashboard_metrics +from dashboard.serializers import ( + serialize_audit_for_response, + serialize_current_cluster_for_template, +) from dashboard.stats import ( CACHE_KEY_AUDITS, CACHE_KEY_CURRENT_CLUSTER, @@ -44,17 +46,9 @@ def collect_context(): audits = get_audits(connection=connection) metrics = fetch_dashboard_metrics() context = build_stats(metrics, region_name, flavors) - context["audits"] = audits + context["audits"] 
= [serialize_audit_for_response(a) for a in audits] current_cluster = get_current_cluster_cpu(connection) - context["current_cluster"] = { - "host_labels": json.dumps(current_cluster["host_labels"]), - "cpu_current": json.dumps(current_cluster["cpu_current"]), - } - for audit in context["audits"]: - audit["migrations"] = json.dumps(audit["migrations"]) - audit["host_labels"] = json.dumps(audit["host_labels"]) - audit["cpu_current"] = json.dumps(audit["cpu_current"]) - audit["cpu_projected"] = json.dumps(audit["cpu_projected"]) + context["current_cluster"] = serialize_current_cluster_for_template(current_cluster) return context @@ -71,12 +65,7 @@ def collect_audits(): """Build audits list with serialized fields for frontend.""" connection = get_connection() audits = get_audits(connection=connection) - for audit in audits: - audit["migrations"] = json.dumps(audit["migrations"]) - audit["host_labels"] = json.dumps(audit["host_labels"]) - audit["cpu_current"] = json.dumps(audit["cpu_current"]) - audit["cpu_projected"] = json.dumps(audit["cpu_projected"]) - return audits + return [serialize_audit_for_response(a) for a in audits] def _skeleton_context(): diff --git a/docs/api_context.md b/docs/api_context.md new file mode 100644 index 0000000..b6c6399 --- /dev/null +++ b/docs/api_context.md @@ -0,0 +1,80 @@ +# Dashboard API and context contract + +This document describes the structure of data passed to the index template and returned by the dashboard API endpoints. Cache keys are defined in `dashboard/stats.py`. + +## Index page context (server-rendered) + +When the index is rendered with full data (e.g. 
`USE_MOCK_DATA=True` or after JS loads from API), the template receives a context with these top-level keys: + +| Key | Description | +|-----|-------------| +| `region` | `{ "name": str, "hosts_total": int }` | +| `pcpu` | Physical CPU: `total`, `usage`, `free`, `used_percentage` | +| `vcpu` | Virtual CPU: `total`, `allocated`, `free`, `allocated_percentage`, `overcommit_ratio`, `overcommit_max` | +| `pram` | Physical RAM (bytes): `total`, `usage`, `free`, `used_percentage` | +| `vram` | Virtual RAM (bytes): `total`, `allocated`, `free`, `allocated_percentage`, `overcommit_ratio`, `overcommit_max` | +| `vm` | VMs: `count`, `active`, `stopped`, `avg_cpu`, `avg_ram`, `density` | +| `flavors` | `first_common_flavor`, `second_common_flavor`, `third_common_flavor` — each `{ "name": str, "count": int }` or `None`. The `name` may be a human-readable flavor name or a flavor UUID depending on OpenStack. | +| `audits` | List of audit objects (see below). For template, `migrations`, `host_labels`, `cpu_current`, `cpu_projected` are JSON strings. | +| `current_cluster` | `{ "host_labels": str (JSON array), "cpu_current": str (JSON array) }` for embedding in the page. | +| `skeleton` | Optional boolean; when true, stats placeholders are shown and data is loaded via API. | + +## Single audit object (for template / API response) + +When serialized for the template or for `api/audits`, each audit has: + +| Field | Type | Description | +|-------|------|-------------| +| `id` | str | Audit UUID | +| `name` | str | Audit name | +| `created_at` | str | ISO 8601 datetime | +| `strategy` | str | Strategy name | +| `goal` | str | Goal name | +| `type` | str | e.g. `ONESHOT` | +| `scope` | str | e.g. 
`Full Cluster` | +| `cpu_weight` | str | Weight parameter | +| `ram_weight` | str | Weight parameter | +| `migrations` | str (template) / list (API raw) | JSON string of migration list, or list of `{ instanceName, source, destination, flavor, impact }` | +| `host_labels` | str (template) / list (API raw) | JSON string of host names, or list | +| `cpu_current` | str (template) / list (API raw) | JSON string of CPU usage per host, or list of numbers | +| `cpu_projected` | str (template) / list (API raw) | JSON string of projected CPU per host, or list of numbers | + +For the **index template**, `migrations`, `host_labels`, `cpu_current`, and `cpu_projected` are always JSON strings so they can be embedded in the page. For **api/audits**, `audits` are returned with these four fields as JSON strings (same as template). The **current_cluster** in the API response uses raw lists (see below). + +## GET /api/stats/ + +Returns a JSON object with the same keys as the index context, **excluding** `audits`, `current_cluster`, and `skeleton`: `region`, `pcpu`, `vcpu`, `pram`, `vram`, `vm`, `flavors`. All numeric values are numbers; sizes are in bytes where applicable. + +## GET /api/audits/ + +Returns: + +```json +{ + "audits": [ /* list of audit objects with migrations, host_labels, cpu_current, cpu_projected as JSON strings */ ], + "current_cluster": { + "host_labels": [ "compute-0", "compute-1", ... ], + "cpu_current": [ 30.5, 42.1, ... ] + } +} +``` + +Here `audits` use the same serialized form as the template (JSON strings for list fields). The `current_cluster` is with **raw lists** (not JSON strings) so the frontend can use them directly without parsing. + +## GET /api/source-status/ + +Returns: + +```json +{ + "prometheus": { "status": "ok" | "error" | "mock", "message"?: "..." }, + "openstack": { "status": "ok" | "error" | "mock", "message"?: "..." 
} +} +``` + +## Cache keys (dashboard/stats.py) + +- `CACHE_KEY_STATS` — stats for `/api/stats/` +- `CACHE_KEY_AUDITS` — serialized audits list +- `CACHE_KEY_CURRENT_CLUSTER` — raw current_cluster (host_labels, cpu_current lists) +- `CACHE_KEY_SOURCE_STATUS` — source status result diff --git a/static/js/dashboard.js b/static/js/dashboard.js index d4e1963..e57d1ca 100644 --- a/static/js/dashboard.js +++ b/static/js/dashboard.js @@ -1,11 +1,29 @@ /** * Dashboard logic: stats rendering, audit selector, CPU chart, migration table. - * Expects globals: SKELETON_MODE, CURRENT_CLUSTER, auditData (set by index.html). - * Depends on: utils.js (formatBytes, getCSSVar, calculateStats) + * + * Expected globals (set by index.html / inline script): + * - SKELETON_MODE (boolean): whether to fetch data from API instead of using embedded context + * - CURRENT_CLUSTER: { host_labels, cpu_current } for "current" cluster chart when no audits + * - auditData: object keyed by audit id, each value { name, migrations, hostData: { labels, current, projected } } + * - INITIAL_AUDIT_ID: first audit id to select when not in skeleton mode + * + * Required DOM element ids: + * - auditSelector, previewCpu, previewRam, previewScope, previewStrategy + * - regionBadge, auditsCount, migrationTableBody, migrationCount, cpuDistributionChart + * - currentCpuMean, currentCpuStd, currentCpuStdBlock + * - elements with data-stats="..." for renderStats() + * + * Depends on: utils.js (formatBytes, getCSSVar, calculateStats, escapeHtml, formatAuditDate) */ (function() { var cpuDistributionChart = null; + var escapeHtml = typeof window.escapeHtml === 'function' ? 
window.escapeHtml : function(text) { + if (text == null) return ''; + var s = String(text); + return s.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;').replace(/'/g, '&#39;'); + }; + // --- Initialization: audit selector change (preview panel) --- document.getElementById('auditSelector').addEventListener('change', function(e) { var option = this.options[this.selectedIndex]; if (!option) return; @@ -15,6 +33,7 @@ document.getElementById('previewStrategy').textContent = option.dataset.strategy || 'Balanced'; }); + // --- Stats: setStat, setProgress, renderStats --- function setStat(key, text) { document.querySelectorAll('[data-stats="' + key + '"]').forEach(function(el) { el.textContent = text; @@ -85,6 +104,7 @@ document.querySelectorAll('[data-stats]').forEach(function(n) { n.classList.remove('animate-pulse'); }); } + // --- Audits: renderAudits, loadSelectedAudit --- function renderAudits(auditsList) { if (!auditsList || !auditsList.length) { var countEl = document.getElementById('auditsCount'); @@ -117,7 +137,7 @@ opt.setAttribute('data-scope', audit.scope || 'Full Cluster'); opt.setAttribute('data-strategy', audit.strategy || 'Balanced'); opt.setAttribute('data-goal', audit.goal || ''); - var dateStr = audit.created_at ? new Date(audit.created_at).toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) : ''; + var dateStr = formatAuditDate(audit.created_at); opt.textContent = audit.name + ' (' + dateStr + ')'; sel.appendChild(opt); }); @@ -136,6 +156,7 @@ updateCPUCharts(auditId); }; + // --- Migration table: updateMigrationTable --- function updateMigrationTable(auditId) { var tbody = document.getElementById('migrationTableBody'); var migrationCount = document.getElementById('migrationCount'); @@ -151,12 +172,13 @@ data.migrations.forEach(function(migration) { var impact = migration.impact || 'Low'; var impactClass = { 'Low': 'badge-success', 'Medium': 'badge-warning', 'High': 'badge-error' }[impact] || 'badge-neutral'; - html += '
' + migration.instanceName + '
' + migration.source + '' + migration.destination + '
' + migration.flavor + '' + impact + ''; + html += '
' + escapeHtml(migration.instanceName) + '
' + escapeHtml(migration.source) + '' + escapeHtml(migration.destination) + '
' + escapeHtml(migration.flavor) + '' + escapeHtml(impact) + ''; }); tbody.innerHTML = html; migrationCount.textContent = data.migrations.length + ' action' + (data.migrations.length !== 1 ? 's' : ''); } + // --- CPU charts: updateCPUCharts --- function updateCPUCharts(auditId) { var data = window.auditData && window.auditData[auditId]; if (!data || !data.hostData) return; @@ -289,6 +311,7 @@ : [ data.hostData.current.slice() ]; } + // --- Initialization: DOMContentLoaded (skeleton vs embedded data) --- document.addEventListener('DOMContentLoaded', function() { if (typeof SKELETON_MODE !== 'undefined' && SKELETON_MODE) { Promise.all([ @@ -325,6 +348,7 @@ } }); + // --- Initialization: theme change (recreate chart) --- document.addEventListener('themechange', function() { if (cpuDistributionChart) { var auditId = document.getElementById('auditSelector').value; diff --git a/static/js/utils.js b/static/js/utils.js index f02c449..ac4fb8b 100644 --- a/static/js/utils.js +++ b/static/js/utils.js @@ -1,3 +1,15 @@ +// Escape for safe HTML text content (prevents XSS when inserting into HTML) +function escapeHtml(text) { + if (text == null) return ''; + const s = String(text); + return s + .replace(/&/g, '&amp;') + .replace(/</g, '&lt;') + .replace(/>/g, '&gt;') + .replace(/"/g, '&quot;') + .replace(/'/g, '&#39;'); +} + // Format bytes to GB (matches Django convert_bytes filter default) function formatBytes(bytes, targetUnit = 'GB') { if (bytes == null || isNaN(Number(bytes))) return '0'; @@ -24,6 +36,16 @@ function getColorWithOpacity(className) { return computedColor; } +// Format audit date for display (ISO string -> short date, e.g. 
"Feb 1") +function formatAuditDate(isoString) { + if (!isoString) return ''; + try { + return new Date(isoString).toLocaleDateString('en-US', { month: 'short', day: 'numeric' }); + } catch (e) { + return ''; + } +} + // Utility function to calculate mean and standard deviation function calculateStats(data) { if (!data || data.length === 0) return { mean: 0, std: 0 }; From d4fc2e920f72b751a64e848a9c1c5cda1a5bda33 Mon Sep 17 00:00:00 2001 From: Nikolay Tatarinov Date: Thu, 12 Feb 2026 20:19:30 +0300 Subject: [PATCH 10/10] Refactor code formatting for improved readability in flavor and math filter utilities - Standardized code formatting in `flavor.py` and `mathfilters.py` for better visual clarity and consistency. - Ensured proper indentation and spacing to enhance maintainability and readability of the utility functions. --- dashboard/openstack_utils/flavor.py | 42 ++++----- dashboard/templatetags/mathfilters.py | 120 +++++++++++++------------- 2 files changed, 81 insertions(+), 81 deletions(-) diff --git a/dashboard/openstack_utils/flavor.py b/dashboard/openstack_utils/flavor.py index 5eb56bd..0b0240b 100644 --- a/dashboard/openstack_utils/flavor.py +++ b/dashboard/openstack_utils/flavor.py @@ -1,21 +1,21 @@ -from collections import Counter - -from openstack.connection import Connection - - -def get_flavor_list(connection: Connection) -> dict: - servers = list(connection.compute.servers(all_projects=True)) - flavor_ids = [s.flavor["id"] for s in servers if "id" in s.flavor] - flavor_count = Counter(flavor_ids).most_common() - - flavors = list(flavor_count) - - result = {} - placeholder = {"name": "—", "count": 0} - for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]: - if len(flavors) > idx: - result[f"{prefix}_common_flavor"] = {"name": flavors[idx][0], "count": flavors[idx][1]} - else: - result[f"{prefix}_common_flavor"] = placeholder - - return result +from collections import Counter + +from openstack.connection import Connection + + +def 
get_flavor_list(connection: Connection) -> dict: + servers = list(connection.compute.servers(all_projects=True)) + flavor_ids = [s.flavor["id"] for s in servers if "id" in s.flavor] + flavor_count = Counter(flavor_ids).most_common() + + flavors = list(flavor_count) + + result = {} + placeholder = {"name": "—", "count": 0} + for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]: + if len(flavors) > idx: + result[f"{prefix}_common_flavor"] = {"name": flavors[idx][0], "count": flavors[idx][1]} + else: + result[f"{prefix}_common_flavor"] = placeholder + + return result diff --git a/dashboard/templatetags/mathfilters.py b/dashboard/templatetags/mathfilters.py index 7d602d9..22dafa6 100644 --- a/dashboard/templatetags/mathfilters.py +++ b/dashboard/templatetags/mathfilters.py @@ -1,60 +1,60 @@ -from django import template - -register = template.Library() - - -@register.filter -def div(a, b): - try: - return float(a) / float(b) - except (TypeError, ValueError, ZeroDivisionError): - return 0 - - -@register.filter -def mul(a, b): - try: - return float(a) * float(b) - except (TypeError, ValueError): - return 0 - - -@register.filter -def sub(a, b): - try: - return float(a) - float(b) - except (TypeError, ValueError): - return 0 - - -@register.filter -def convert_bytes(bytes_value, target_unit="GB"): - """ - Convert bytes to specific unit - - Args: - bytes_value: Size in bytes - target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB') - precision: Number of decimal places - - Returns: - Float value in target unit - """ - try: - bytes_value = float(bytes_value) - except (ValueError, TypeError): - return 0.0 - conversion_factors = { - "B": 1, - "KB": 1024, - "MB": 1024 * 1024, - "GB": 1024 * 1024 * 1024, - "TB": 1024 * 1024 * 1024 * 1024, - } - - target_unit = target_unit.upper() - if target_unit not in conversion_factors: - target_unit = "MB" - - result = bytes_value / conversion_factors[target_unit] - return round(result, 1) +from django import template + +register = 
template.Library() + + +@register.filter +def div(a, b): + try: + return float(a) / float(b) + except (TypeError, ValueError, ZeroDivisionError): + return 0 + + +@register.filter +def mul(a, b): + try: + return float(a) * float(b) + except (TypeError, ValueError): + return 0 + + +@register.filter +def sub(a, b): + try: + return float(a) - float(b) + except (TypeError, ValueError): + return 0 + + +@register.filter +def convert_bytes(bytes_value, target_unit="GB"): + """ + Convert bytes to specific unit + + Args: + bytes_value: Size in bytes + target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB') + precision: Number of decimal places + + Returns: + Float value in target unit + """ + try: + bytes_value = float(bytes_value) + except (ValueError, TypeError): + return 0.0 + conversion_factors = { + "B": 1, + "KB": 1024, + "MB": 1024 * 1024, + "GB": 1024 * 1024 * 1024, + "TB": 1024 * 1024 * 1024 * 1024, + } + + target_unit = target_unit.upper() + if target_unit not in conversion_factors: + target_unit = "MB" + + result = bytes_value / conversion_factors[target_unit] + return round(result, 1)