develop #4

Merged
Arnike merged 10 commits from develop into main 2026-02-12 20:32:19 +03:00
22 changed files with 1505 additions and 1298 deletions

View File

@@ -1,6 +1,9 @@
"""Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus)."""
import json
from dashboard.serializers import (
serialize_audit_for_response,
serialize_current_cluster_for_template,
)
def get_mock_context():
@@ -21,11 +24,24 @@ def get_mock_context():
vram_total = pram_total * vram_overcommit_max
# Two sample audits with serialized fields for JS
host_labels = ["compute-0", "compute-1", "compute-2", "compute-3", "compute-4", "compute-5"]
cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 48.5]
cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0]
host_labels = [
"compute-0",
"compute-1",
"compute-2",
"compute-3",
"compute-4",
"compute-5",
"compute-6",
"compute-7",
"compute-8",
"compute-9",
"compute-10",
"compute-11",
]
cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 32.1, 36.4, 29.2, 42.2, 41.3, 28.3, 33.3]
cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0, 43.0, 43.0, 44.0, 48.0, 47.0, 49.0]
audits = [
audits_raw = [
{
"id": "mock-audit-uuid-1",
"name": "Mock audit (balanced)",
@@ -36,20 +52,18 @@ def get_mock_context():
"scope": "Full Cluster",
"cpu_weight": "1.0",
"ram_weight": "1.0",
"migrations": json.dumps(
[
{
"instanceName": "instance-1",
"source": "compute-0",
"destination": "compute-3",
"flavor": "m1.small",
"impact": "Low",
}
]
),
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
"cpu_projected": json.dumps(cpu_projected),
"migrations": [
{
"instanceName": "instance-1",
"source": "compute-0",
"destination": "compute-3",
"flavor": "m1.small",
"impact": "Low",
}
],
"host_labels": host_labels,
"cpu_current": cpu_current,
"cpu_projected": cpu_projected,
},
{
"id": "mock-audit-uuid-2",
@@ -61,12 +75,13 @@ def get_mock_context():
"scope": "Full Cluster",
"cpu_weight": "1.0",
"ram_weight": "1.0",
"migrations": json.dumps([]),
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
"cpu_projected": json.dumps([40.0, 42.0, 50.0, 43.0, 36.0, 45.0]),
"migrations": [],
"host_labels": host_labels,
"cpu_current": cpu_current,
"cpu_projected": [40.0, 42.0, 50.0, 43.0, 36.0, 45.0],
},
]
audits = [serialize_audit_for_response(a) for a in audits_raw]
return {
"region": {
@@ -115,8 +130,7 @@ def get_mock_context():
"third_common_flavor": {"name": "m1.large", "count": 4},
},
"audits": audits,
"current_cluster": {
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
},
"current_cluster": serialize_current_cluster_for_template(
{"host_labels": host_labels, "cpu_current": cpu_current}
),
}

View File

@@ -49,41 +49,81 @@ def get_current_cluster_cpu(connection: Connection) -> dict:
}
def _fetch_audits_and_action_plans(session, watcher_endpoint):
"""GET audits and action_plans from Watcher API. Returns (audits_list, action_plans_list)."""
audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
audits_resp.raise_for_status()
audits_list = audits_resp.json().get("audits") or []
actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
actionplans_resp.raise_for_status()
action_plans_list = actionplans_resp.json().get("action_plans") or []
return audits_list, action_plans_list
def _fetch_migrations_for_audit(
connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp
):
"""
Fetch action details for the given action plan and build migrations list and
instance->destination mapping. Returns (migrations, mapping).
"""
migrations = []
mapping = {}
for action in actions_resp:
action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
action_resp.raise_for_status()
action_resp = action_resp.json()
server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"])
params = action_resp["input_parameters"]
mapping[params["resource_name"]] = params["destination_node"]
migrations.append(
{
"instanceName": params["resource_name"],
"source": params["source_node"],
"destination": params["destination_node"],
"flavor": server.flavor.name,
"impact": "Low",
}
)
return migrations, mapping
def _build_projected_cpu_metrics(cpu_data, mapping):
    """
    Apply instance->destination mapping to a copy of cpu_data and return
    aggregated CPU metrics DataFrame (host, cpu_usage).

    Bug fix: the previous implementation shallow-copied ``cpu_data`` and then
    mutated the nested ``entry["metric"]`` dicts, which also rewrote the
    caller's ``cpu_data`` in place — so projections leaked into the shared
    metrics and compounded across successive audits. Build fresh entries
    instead so the input is never modified.
    """
    projected_cpu_data = []
    for entry in cpu_data:
        metric = dict(entry["metric"])
        instance = metric["instanceName"]
        if instance in mapping:
            metric["host"] = mapping[instance]
        projected_entry = dict(entry)
        projected_entry["metric"] = metric
        projected_cpu_data.append(projected_entry)
    return convert_cpu_data(projected_cpu_data)
def get_audits(connection: Connection) -> list[dict] | None:
session = connection.session
watcher_endpoint = connection.endpoint_for(
service_type=WATCHER_ENDPOINT_NAME, interface=WATCHER_INTERFACE_NAME
)
# Collect instances prometheus metrics
cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
cpu_metrics = convert_cpu_data(data=cpu_data)
# Fetch audit list
audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
audits_resp.raise_for_status()
audits_resp = audits_resp.json().get("audits") or []
# Fetch action plan list
actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
actionplans_resp.raise_for_status()
actionplans_resp = actionplans_resp.json().get("action_plans") or []
# Filtering audits by PENDING state
pending_audits = [plan for plan in actionplans_resp if plan["state"] == "RECOMMENDED"]
_, action_plans_list = _fetch_audits_and_action_plans(session, watcher_endpoint)
pending_audits = [plan for plan in action_plans_list if plan["state"] == "RECOMMENDED"]
result = []
for item in pending_audits:
projected_cpu_data = copy(cpu_data)
audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}")
audit_resp.raise_for_status()
audit_resp = audit_resp.json()
actionplan = next(
filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], actionplans_resp), None
filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], action_plans_list), None
)
if actionplan is None:
continue
@@ -94,32 +134,10 @@ def get_audits(connection: Connection) -> list[dict] | None:
actions_resp.raise_for_status()
actions_resp = actions_resp.json().get("actions") or []
migrations = []
mapping = {}
for action in actions_resp:
action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
action_resp.raise_for_status()
action_resp = action_resp.json()
server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"])
params = action_resp["input_parameters"]
mapping[params["resource_name"]] = params["destination_node"]
migrations.append(
{
"instanceName": action_resp["input_parameters"]["resource_name"],
"source": action_resp["input_parameters"]["source_node"],
"destination": action_resp["input_parameters"]["destination_node"],
"flavor": server.flavor.name,
"impact": "Low",
}
)
for entry in projected_cpu_data:
if (instance := entry["metric"]["instanceName"]) in mapping:
entry["metric"]["host"] = mapping[instance]
projected_cpu_metrics = convert_cpu_data(projected_cpu_data)
migrations, mapping = _fetch_migrations_for_audit(
connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp
)
projected_cpu_metrics = _build_projected_cpu_metrics(cpu_data, mapping)
result.append(
{

View File

@@ -1,21 +1,21 @@
from collections import Counter
from openstack.connection import Connection
def get_flavor_list(connection: Connection) -> dict:
    """Return the three most common server flavors across all projects.

    Builds {first,second,third}_common_flavor keys, each {"name", "count"};
    ranks with no data get an empty placeholder. "name" holds the flavor id
    reported by the compute API — presumably this may be a UUID rather than
    a display name, depending on the deployment (see docs); verify.
    """
    # List all servers cluster-wide (all projects) so flavor usage is counted globally.
    servers = list(connection.compute.servers(all_projects=True))
    # Some servers may expose a flavor dict without an "id" key; skip those.
    flavor_ids = [s.flavor["id"] for s in servers if "id" in s.flavor]
    flavor_count = Counter(flavor_ids).most_common()
    flavors = list(flavor_count)
    result = {}
    # NOTE(review): the same placeholder dict object is shared by every empty
    # rank; safe only as long as callers treat it as read-only.
    placeholder = {"name": "", "count": 0}
    for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]:
        if len(flavors) > idx:
            result[f"{prefix}_common_flavor"] = {"name": flavors[idx][0], "count": flavors[idx][1]}
        else:
            result[f"{prefix}_common_flavor"] = placeholder
    return result
from collections import Counter
from openstack.connection import Connection
def get_flavor_list(connection: Connection) -> dict:
    """Return the three most common server flavors across all projects.

    Produces {first,second,third}_common_flavor keys, each {"name", "count"};
    ranks with no data fall back to an empty-name placeholder. "name" carries
    the flavor id reported by the compute API (may be a UUID).
    """
    all_servers = connection.compute.servers(all_projects=True)
    # Count flavor ids, skipping servers whose flavor dict lacks an "id" key.
    top_flavors = Counter(
        server.flavor["id"] for server in all_servers if "id" in server.flavor
    ).most_common(3)
    empty_slot = {"name": "", "count": 0}
    ranked = {}
    for position, label in enumerate(("first", "second", "third")):
        if position < len(top_flavors):
            flavor_id, occurrences = top_flavors[position]
            ranked[f"{label}_common_flavor"] = {"name": flavor_id, "count": occurrences}
        else:
            ranked[f"{label}_common_flavor"] = empty_slot
    return ranked

View File

@@ -1,9 +1,37 @@
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
from watcher_visio.settings import PROMETHEUS_URL
# Timeout for lightweight health check (seconds)
CHECK_TIMEOUT = 5
# Dashboard Prometheus queries (query_key -> query string), run in parallel
DASHBOARD_QUERIES = {
"hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
"pcpu_total": (
"sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) "
"without (cpu,mode))"
),
"pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
"vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
"vcpu_overcommit_max": (
"avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
),
"pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
"pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
"vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
"vram_overcommit_max": (
"avg(avg_over_time("
"openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
),
"vm_count": "sum(libvirt_domain_state_code)",
"vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
}
# Keys that should be parsed as float (rest as int)
DASHBOARD_FLOAT_KEYS = frozenset(("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max"))
def check_prometheus() -> dict:
"""
@@ -36,3 +64,23 @@ def query_prometheus(query: str) -> str | list[str]:
return result
else:
return result[0]["value"][1]
def fetch_dashboard_metrics() -> dict:
    """Run all dashboard Prometheus queries in parallel and return a dict of name -> value.

    Keys listed in DASHBOARD_FLOAT_KEYS are parsed as float, the rest as int;
    unparseable results degrade to 0 (0.0 for float keys) instead of raising.
    """
    metrics = {}
    with ThreadPoolExecutor(max_workers=len(DASHBOARD_QUERIES)) as pool:
        pending = {
            pool.submit(query_prometheus, query=expr): name
            for name, expr in DASHBOARD_QUERIES.items()
        }
        for finished in as_completed(pending):
            name = pending[finished]
            # Pick the numeric parser once per key instead of branching twice.
            caster = float if name in DASHBOARD_FLOAT_KEYS else int
            try:
                metrics[name] = caster(finished.result())
            except (ValueError, TypeError):
                metrics[name] = caster(0)
    return metrics

32
dashboard/serializers.py Normal file
View File

@@ -0,0 +1,32 @@
"""Serialization helpers for dashboard context and API responses."""
import json
def _ensure_json_str(value):
"""Return value as JSON string; if already a string, return as-is."""
return value if isinstance(value, str) else json.dumps(value)
def serialize_audit_for_response(audit: dict) -> dict:
    """
    Return a copy of the audit dict with migrations, host_labels, cpu_current,
    and cpu_projected serialized as JSON strings (for template/API response).
    Values that are already strings pass through untouched; other keys are
    copied as-is.
    """
    serialized = dict(audit)
    for field in ("migrations", "host_labels", "cpu_current", "cpu_projected"):
        value = audit.get(field)
        serialized[field] = value if isinstance(value, str) else json.dumps(value)
    return serialized
def serialize_current_cluster_for_template(current_cluster: dict) -> dict:
    """
    Return current_cluster with host_labels and cpu_current as JSON strings
    for template embedding (e.g. in index.html). Values that are already
    strings pass through untouched.
    """
    serialized = {}
    for field in ("host_labels", "cpu_current"):
        value = current_cluster.get(field)
        serialized[field] = value if isinstance(value, str) else json.dumps(value)
    return serialized

76
dashboard/stats.py Normal file
View File

@@ -0,0 +1,76 @@
"""Dashboard statistics building and cache key constants."""
# Cache keys used by views (shared constants so views, tasks, and tests agree
# on the exact key strings)
CACHE_KEY_STATS = "dashboard_stats"
CACHE_KEY_AUDITS = "dashboard_audits"
CACHE_KEY_CURRENT_CLUSTER = "dashboard_current_cluster"
CACHE_KEY_SOURCE_STATUS = "dashboard_source_status"
# Empty structures for skeleton context (same shape as build_stats output)
# NOTE(review): second/third ranks are None here, while get_flavor_list
# returns {"name": "", "count": 0} placeholders for missing ranks — confirm
# the template tolerates both shapes.
EMPTY_FLAVORS = {
    "first_common_flavor": {"name": "", "count": 0},
    "second_common_flavor": None,
    "third_common_flavor": None,
}
def build_stats(metrics: dict, region_name: str, flavors: dict) -> dict:
    """
    Build stats dict from raw metrics and OpenStack-derived data.
    Returns region, pcpu, vcpu, pram, vram, vm, flavors (no audits/current_cluster).
    Missing metric keys default to 0; all division is guarded against zero.
    """
    def _pct(part, whole):
        # Percentage with a falsy-denominator guard (returns 0, not an error).
        return (part / whole * 100) if whole else 0

    def _ratio(part, whole):
        return (part / whole) if whole else 0

    # A missing or zero host count falls back to 1 so density stays defined.
    hosts = metrics.get("hosts_total") or 1
    pcpu_total = metrics.get("pcpu_total", 0)
    pcpu_used = metrics.get("pcpu_usage", 0)
    vcpu_alloc = metrics.get("vcpu_allocated", 0)
    vcpu_oc_max = metrics.get("vcpu_overcommit_max", 0)
    pram_total = metrics.get("pram_total", 0)
    pram_used = metrics.get("pram_usage", 0)
    vram_alloc = metrics.get("vram_allocated", 0)
    vram_oc_max = metrics.get("vram_overcommit_max", 0)
    vms = metrics.get("vm_count", 0)
    vms_active = metrics.get("vm_active", 0)

    # Virtual capacity = physical capacity scaled by the overcommit ceiling.
    vcpu_cap = pcpu_total * vcpu_oc_max
    vram_cap = pram_total * vram_oc_max

    return {
        "region": {"name": region_name, "hosts_total": hosts},
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_used,
            "free": pcpu_total - pcpu_used,
            "used_percentage": _pct(pcpu_used, pcpu_total),
        },
        "vcpu": {
            "total": vcpu_cap,
            "allocated": vcpu_alloc,
            "free": vcpu_cap - vcpu_alloc,
            "allocated_percentage": _pct(vcpu_alloc, vcpu_cap),
            "overcommit_ratio": _ratio(vcpu_alloc, pcpu_total),
            "overcommit_max": vcpu_oc_max,
        },
        "pram": {
            "total": pram_total,
            "usage": pram_used,
            "free": pram_total - pram_used,
            "used_percentage": _pct(pram_used, pram_total),
        },
        "vram": {
            "total": vram_cap,
            "allocated": vram_alloc,
            "free": vram_cap - vram_alloc,
            "allocated_percentage": _pct(vram_alloc, vram_cap),
            "overcommit_ratio": _ratio(vram_alloc, pram_total),
            "overcommit_max": vram_oc_max,
        },
        "vm": {
            "count": vms,
            "active": vms_active,
            "stopped": vms - vms_active,
            "avg_cpu": _ratio(vcpu_alloc, vms),
            "avg_ram": _ratio(vram_alloc, vms),
            "density": _ratio(vms, hosts),
        },
        "flavors": flavors,
    }

View File

@@ -1,60 +1,60 @@
from django import template

register = template.Library()


@register.filter
def div(a, b):
    """Template filter: return a / b as float; 0 on bad input or zero divisor."""
    try:
        return float(a) / float(b)
    except (TypeError, ValueError, ZeroDivisionError):
        return 0


@register.filter
def mul(a, b):
    """Template filter: return a * b as float; 0 when either is non-numeric."""
    try:
        return float(a) * float(b)
    except (TypeError, ValueError):
        return 0


@register.filter
def sub(a, b):
    """Template filter: return a - b as float; 0 when either is non-numeric."""
    try:
        return float(a) - float(b)
    except (TypeError, ValueError):
        return 0


@register.filter
def convert_bytes(bytes_value, target_unit="GB"):
    """
    Convert bytes to a specific unit, rounded to one decimal place.

    Args:
        bytes_value: Size in bytes (anything float() accepts)
        target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB'), case-insensitive;
            unknown units silently fall back to 'MB'
    Returns:
        Float value in target unit; 0.0 for non-numeric input
    """
    try:
        bytes_value = float(bytes_value)
    except (ValueError, TypeError):
        return 0.0
    conversion_factors = {
        "B": 1,
        "KB": 1024,
        "MB": 1024 * 1024,
        "GB": 1024 * 1024 * 1024,
        "TB": 1024 * 1024 * 1024 * 1024,
    }
    target_unit = target_unit.upper()
    if target_unit not in conversion_factors:
        target_unit = "MB"
    result = bytes_value / conversion_factors[target_unit]
    return round(result, 1)
from django import template

register = template.Library()


@register.filter
def div(a, b):
    """Divide a by b; return 0 when inputs are invalid or b is zero."""
    try:
        quotient = float(a) / float(b)
    except (TypeError, ValueError, ZeroDivisionError):
        return 0
    return quotient


@register.filter
def mul(a, b):
    """Multiply a by b; return 0 when either value is not numeric."""
    try:
        product = float(a) * float(b)
    except (TypeError, ValueError):
        return 0
    return product


@register.filter
def sub(a, b):
    """Subtract b from a; return 0 when either value is not numeric."""
    try:
        difference = float(a) - float(b)
    except (TypeError, ValueError):
        return 0
    return difference


@register.filter
def convert_bytes(bytes_value, target_unit="GB"):
    """
    Convert a byte count to the requested unit, rounded to one decimal.

    Args:
        bytes_value: Size in bytes (anything float() accepts)
        target_unit: 'B', 'KB', 'MB', 'GB' or 'TB' (case-insensitive);
            unknown units fall back to 'MB'
    Returns:
        Float value in the target unit; 0.0 for non-numeric input
    """
    try:
        size = float(bytes_value)
    except (ValueError, TypeError):
        return 0.0
    factors = {
        "B": 1,
        "KB": 1024,
        "MB": 1024 * 1024,
        "GB": 1024 * 1024 * 1024,
        "TB": 1024 * 1024 * 1024 * 1024,
    }
    factor = factors.get(target_unit.upper(), factors["MB"])
    return round(size / factor, 1)

View File

@@ -0,0 +1,57 @@
"""Tests for dashboard.serializers."""
import json
from django.test import TestCase
from dashboard.serializers import (
serialize_audit_for_response,
serialize_current_cluster_for_template,
)
class SerializeAuditForResponseTest(TestCase):
    """Tests for serialize_audit_for_response JSON-string serialization."""

    def test_serializes_list_fields_to_json_strings(self):
        payload = {
            "id": "audit-1",
            "name": "Test",
            "migrations": [{"instanceName": "i1", "source": "h1", "destination": "h2"}],
            "host_labels": ["h1", "h2"],
            "cpu_current": [10.0, 20.0],
            "cpu_projected": [15.0, 25.0],
        }
        serialized = serialize_audit_for_response(payload)
        # Non-list fields are copied through untouched.
        self.assertEqual(serialized["id"], "audit-1")
        self.assertEqual(serialized["name"], "Test")
        # List fields round-trip through JSON back to the original values.
        for field in ("migrations", "host_labels", "cpu_current", "cpu_projected"):
            self.assertEqual(json.loads(serialized[field]), payload[field])

    def test_leaves_already_serialized_strings_unchanged(self):
        payload = {
            "id": "a",
            "migrations": "[1,2]",
            "host_labels": "[]",
            "cpu_current": "[0]",
            "cpu_projected": "[0]",
        }
        serialized = serialize_audit_for_response(payload)
        # Already-string fields must pass through byte-for-byte.
        for field in ("migrations", "host_labels", "cpu_current", "cpu_projected"):
            self.assertEqual(serialized[field], payload[field])
class SerializeCurrentClusterForTemplateTest(TestCase):
    """Tests for serialize_current_cluster_for_template."""

    def test_serializes_lists_to_json_strings(self):
        payload = {"host_labels": ["c0", "c1"], "cpu_current": [30.0, 40.0]}
        serialized = serialize_current_cluster_for_template(payload)
        # Each list field round-trips through JSON back to the original value.
        for field in ("host_labels", "cpu_current"):
            self.assertEqual(json.loads(serialized[field]), payload[field])

    def test_leaves_already_serialized_strings_unchanged(self):
        payload = {"host_labels": "[]", "cpu_current": "[]"}
        self.assertEqual(serialize_current_cluster_for_template(payload), payload)

View File

@@ -96,7 +96,7 @@ class CollectContextTest(TestCase):
return conn
@patch("dashboard.views.get_current_cluster_cpu")
@patch("dashboard.views._fetch_prometheus_metrics")
@patch("dashboard.views.fetch_dashboard_metrics")
@patch("dashboard.views.get_audits")
@patch("dashboard.views.get_flavor_list")
@patch("dashboard.views.get_connection")
@@ -152,8 +152,6 @@ class CollectContextTest(TestCase):
self.assertEqual(context["flavors"]["first_common_flavor"]["name"], "m1.small")
self.assertEqual(len(context["audits"]), 1)
# Serialized for JS
import json
self.assertIsInstance(context["audits"][0]["migrations"], str)
self.assertEqual(json.loads(context["audits"][0]["host_labels"]), ["h0", "h1"])
self.assertIn("current_cluster", context)
@@ -167,7 +165,7 @@ class ApiStatsTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
@patch("dashboard.views._fetch_prometheus_metrics")
@patch("dashboard.views.fetch_dashboard_metrics")
@patch("dashboard.views.get_flavor_list")
@patch("dashboard.views.get_connection")
def test_api_stats_returns_json_with_expected_keys(

View File

@@ -1,6 +1,3 @@
import json
from concurrent.futures import ThreadPoolExecutor, as_completed
from django.conf import settings
from django.core.cache import cache
from django.http import JsonResponse
@@ -10,53 +7,36 @@ from dashboard.mock_data import get_mock_context
from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu
from dashboard.openstack_utils.connect import check_openstack, get_connection
from dashboard.openstack_utils.flavor import get_flavor_list
from dashboard.prometheus_utils.query import check_prometheus, query_prometheus
# Prometheus queries run in parallel (query_key -> query string)
_PROMETHEUS_QUERIES = {
"hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
"pcpu_total": (
"sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) "
"without (cpu,mode))"
),
"pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
"vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
"vcpu_overcommit_max": (
"avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
),
"pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
"pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
"vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
"vram_overcommit_max": (
"avg(avg_over_time("
"openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
),
"vm_count": "sum(libvirt_domain_state_code)",
"vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
}
from dashboard.prometheus_utils.query import check_prometheus, fetch_dashboard_metrics
from dashboard.serializers import (
serialize_audit_for_response,
serialize_current_cluster_for_template,
)
from dashboard.stats import (
CACHE_KEY_AUDITS,
CACHE_KEY_CURRENT_CLUSTER,
CACHE_KEY_SOURCE_STATUS,
CACHE_KEY_STATS,
EMPTY_FLAVORS,
build_stats,
)
def _fetch_prometheus_metrics():
"""Run all Prometheus queries in parallel and return a dict of name -> value."""
result = {}
with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor:
future_to_key = {
executor.submit(query_prometheus, query=q): key
for key, q in _PROMETHEUS_QUERIES.items()
}
for future in as_completed(future_to_key):
key = future_to_key[future]
try:
raw = future.result()
if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max"):
result[key] = float(raw)
else:
result[key] = int(raw)
except (ValueError, TypeError):
result[key] = (
0 if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max") else 0
)
return result
def _empty_metrics():
"""Metrics dict with zero/default values for skeleton context."""
return {
"hosts_total": 0,
"pcpu_total": 0,
"pcpu_usage": 0,
"vcpu_allocated": 0,
"vcpu_overcommit_max": 0,
"pram_total": 0,
"pram_usage": 0,
"vram_allocated": 0,
"vram_overcommit_max": 0,
"vm_count": 0,
"vm_active": 0,
}
def collect_context():
@@ -64,86 +44,11 @@ def collect_context():
region_name = connection._compute_region
flavors = get_flavor_list(connection=connection)
audits = get_audits(connection=connection)
metrics = _fetch_prometheus_metrics()
hosts_total = metrics.get("hosts_total") or 1
pcpu_total = metrics.get("pcpu_total", 0)
pcpu_usage = metrics.get("pcpu_usage", 0)
vcpu_allocated = metrics.get("vcpu_allocated", 0)
vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
pram_total = metrics.get("pram_total", 0)
pram_usage = metrics.get("pram_usage", 0)
vram_allocated = metrics.get("vram_allocated", 0)
vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
vm_count = metrics.get("vm_count", 0)
vm_active = metrics.get("vm_active", 0)
vcpu_total = pcpu_total * vcpu_overcommit_max
vram_total = pram_total * vram_overcommit_max
context = {
# <--- Region data --->
"region": {
"name": region_name,
"hosts_total": hosts_total,
},
# <--- CPU data --->
# pCPU data
"pcpu": {
"total": pcpu_total,
"usage": pcpu_usage,
"free": pcpu_total - pcpu_usage,
"used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
},
# vCPU data
"vcpu": {
"total": vcpu_total,
"allocated": vcpu_allocated,
"free": vcpu_total - vcpu_allocated,
"allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
"overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
"overcommit_max": vcpu_overcommit_max,
},
# <--- RAM data --->
# pRAM data
"pram": {
"total": pram_total,
"usage": pram_usage,
"free": pram_total - pram_usage,
"used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
},
# vRAM data
"vram": {
"total": vram_total,
"allocated": vram_allocated,
"free": vram_total - vram_allocated,
"allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
"overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
"overcommit_max": vram_overcommit_max,
},
# <--- VM data --->
"vm": {
"count": vm_count,
"active": vm_active,
"stopped": vm_count - vm_active,
"avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
"avg_ram": vram_allocated / vm_count if vm_count else 0,
"density": vm_count / hosts_total if hosts_total else 0,
},
"flavors": flavors,
"audits": audits,
}
metrics = fetch_dashboard_metrics()
context = build_stats(metrics, region_name, flavors)
context["audits"] = [serialize_audit_for_response(a) for a in audits]
current_cluster = get_current_cluster_cpu(connection)
context["current_cluster"] = {
"host_labels": json.dumps(current_cluster["host_labels"]),
"cpu_current": json.dumps(current_cluster["cpu_current"]),
}
# Serialize audit list fields for JavaScript so cached context is render-ready
for audit in context["audits"]:
audit["migrations"] = json.dumps(audit["migrations"])
audit["host_labels"] = json.dumps(audit["host_labels"])
audit["cpu_current"] = json.dumps(audit["cpu_current"])
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
context["current_cluster"] = serialize_current_cluster_for_template(current_cluster)
return context
@@ -152,110 +57,27 @@ def collect_stats():
connection = get_connection()
region_name = connection._compute_region
flavors = get_flavor_list(connection=connection)
metrics = _fetch_prometheus_metrics()
hosts_total = metrics.get("hosts_total") or 1
pcpu_total = metrics.get("pcpu_total", 0)
pcpu_usage = metrics.get("pcpu_usage", 0)
vcpu_allocated = metrics.get("vcpu_allocated", 0)
vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
pram_total = metrics.get("pram_total", 0)
pram_usage = metrics.get("pram_usage", 0)
vram_allocated = metrics.get("vram_allocated", 0)
vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
vm_count = metrics.get("vm_count", 0)
vm_active = metrics.get("vm_active", 0)
vcpu_total = pcpu_total * vcpu_overcommit_max
vram_total = pram_total * vram_overcommit_max
return {
"region": {"name": region_name, "hosts_total": hosts_total},
"pcpu": {
"total": pcpu_total,
"usage": pcpu_usage,
"free": pcpu_total - pcpu_usage,
"used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
},
"vcpu": {
"total": vcpu_total,
"allocated": vcpu_allocated,
"free": vcpu_total - vcpu_allocated,
"allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
"overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
"overcommit_max": vcpu_overcommit_max,
},
"pram": {
"total": pram_total,
"usage": pram_usage,
"free": pram_total - pram_usage,
"used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
},
"vram": {
"total": vram_total,
"allocated": vram_allocated,
"free": vram_total - vram_allocated,
"allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
"overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
"overcommit_max": vram_overcommit_max,
},
"vm": {
"count": vm_count,
"active": vm_active,
"stopped": vm_count - vm_active,
"avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
"avg_ram": vram_allocated / vm_count if vm_count else 0,
"density": vm_count / hosts_total if hosts_total else 0,
},
"flavors": flavors,
}
metrics = fetch_dashboard_metrics()
return build_stats(metrics, region_name, flavors)
def collect_audits():
"""Build audits list with serialized fields for frontend."""
connection = get_connection()
audits = get_audits(connection=connection)
for audit in audits:
audit["migrations"] = json.dumps(audit["migrations"])
audit["host_labels"] = json.dumps(audit["host_labels"])
audit["cpu_current"] = json.dumps(audit["cpu_current"])
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
return audits
return [serialize_audit_for_response(a) for a in audits]
def _skeleton_context():
"""Minimal context for skeleton-only index render."""
empty_flavors = {
"first_common_flavor": {"name": "", "count": 0},
"second_common_flavor": None,
"third_common_flavor": None,
}
return {
"skeleton": True,
"region": {"name": "", "hosts_total": 0},
"pcpu": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
"pram": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
"vcpu": {
"total": 0,
"allocated": 0,
"free": 0,
"allocated_percentage": 0,
"overcommit_ratio": 0,
"overcommit_max": 0,
},
"vram": {
"total": 0,
"allocated": 0,
"free": 0,
"allocated_percentage": 0,
"overcommit_ratio": 0,
"overcommit_max": 0,
},
"vm": {"count": 0, "active": 0, "stopped": 0, "avg_cpu": 0, "avg_ram": 0, "density": 0},
"flavors": empty_flavors,
"audits": [],
"current_cluster": {
"host_labels": "[]",
"cpu_current": "[]",
},
context = build_stats(_empty_metrics(), "", EMPTY_FLAVORS)
context["skeleton"] = True
context["audits"] = []
context["current_cluster"] = {
"host_labels": "[]",
"cpu_current": "[]",
}
return context
def index(request):
@@ -267,28 +89,25 @@ def index(request):
def api_stats(request):
cache_key = "dashboard_stats"
cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
data = cache.get(cache_key)
data = cache.get(CACHE_KEY_STATS)
if data is None:
data = collect_stats()
cache.set(cache_key, data, timeout=cache_ttl)
cache.set(CACHE_KEY_STATS, data, timeout=cache_ttl)
return JsonResponse(data)
def api_audits(request):
cache_key_audits = "dashboard_audits"
cache_key_cluster = "dashboard_current_cluster"
cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
audits = cache.get(cache_key_audits)
current_cluster = cache.get(cache_key_cluster)
audits = cache.get(CACHE_KEY_AUDITS)
current_cluster = cache.get(CACHE_KEY_CURRENT_CLUSTER)
if audits is None:
audits = collect_audits()
cache.set(cache_key_audits, audits, timeout=cache_ttl)
cache.set(CACHE_KEY_AUDITS, audits, timeout=cache_ttl)
if current_cluster is None:
connection = get_connection()
current_cluster = get_current_cluster_cpu(connection)
cache.set(cache_key_cluster, current_cluster, timeout=cache_ttl)
cache.set(CACHE_KEY_CURRENT_CLUSTER, current_cluster, timeout=cache_ttl)
return JsonResponse({"audits": audits, "current_cluster": current_cluster})
@@ -302,13 +121,12 @@ def api_source_status(request):
}
)
cache_key = "dashboard_source_status"
cache_ttl = getattr(settings, "SOURCE_STATUS_CACHE_TTL", 30)
data = cache.get(cache_key)
data = cache.get(CACHE_KEY_SOURCE_STATUS)
if data is None:
data = {
"prometheus": check_prometheus(),
"openstack": check_openstack(),
}
cache.set(cache_key, data, timeout=cache_ttl)
cache.set(CACHE_KEY_SOURCE_STATUS, data, timeout=cache_ttl)
return JsonResponse(data)

80
docs/api_context.md Normal file
View File

@@ -0,0 +1,80 @@
# Dashboard API and context contract
This document describes the structure of data passed to the index template and returned by the dashboard API endpoints. Cache keys are defined in `dashboard/stats.py`.
## Index page context (server-rendered)
When the index is rendered with full data (e.g. `USE_MOCK_DATA=True` or after JS loads from API), the template receives a context with these top-level keys:
| Key | Description |
|-----|-------------|
| `region` | `{ "name": str, "hosts_total": int }` |
| `pcpu` | Physical CPU: `total`, `usage`, `free`, `used_percentage` |
| `vcpu` | Virtual CPU: `total`, `allocated`, `free`, `allocated_percentage`, `overcommit_ratio`, `overcommit_max` |
| `pram` | Physical RAM (bytes): `total`, `usage`, `free`, `used_percentage` |
| `vram` | Virtual RAM (bytes): `total`, `allocated`, `free`, `allocated_percentage`, `overcommit_ratio`, `overcommit_max` |
| `vm` | VMs: `count`, `active`, `stopped`, `avg_cpu`, `avg_ram`, `density` |
| `flavors` | `first_common_flavor`, `second_common_flavor`, `third_common_flavor` — each `{ "name": str, "count": int }` or `None`. The `name` may be a human-readable flavor name or a flavor UUID depending on OpenStack. |
| `audits` | List of audit objects (see below). For template, `migrations`, `host_labels`, `cpu_current`, `cpu_projected` are JSON strings. |
| `current_cluster` | `{ "host_labels": str (JSON array), "cpu_current": str (JSON array) }` for embedding in the page. |
| `skeleton` | Optional boolean; when true, stats placeholders are shown and data is loaded via API. |
## Single audit object (for template / API response)
When serialized for the template or for `api/audits`, each audit has:
| Field | Type | Description |
|-------|------|-------------|
| `id` | str | Audit UUID |
| `name` | str | Audit name |
| `created_at` | str | ISO 8601 datetime |
| `strategy` | str | Strategy name |
| `goal` | str | Goal name |
| `type` | str | e.g. `ONESHOT` |
| `scope` | str | e.g. `Full Cluster` |
| `cpu_weight` | str | Weight parameter |
| `ram_weight` | str | Weight parameter |
| `migrations` | str (template) / list (API raw) | JSON string of migration list, or list of `{ instanceName, source, destination, flavor, impact }` |
| `host_labels` | str (template) / list (API raw) | JSON string of host names, or list |
| `cpu_current` | str (template) / list (API raw) | JSON string of CPU usage per host, or list of numbers |
| `cpu_projected` | str (template) / list (API raw) | JSON string of projected CPU per host, or list of numbers |
For the **index template**, `migrations`, `host_labels`, `cpu_current`, and `cpu_projected` are always JSON strings so they can be embedded in the page. For **api/audits**, `audits` are returned with these four fields as JSON strings (same as template). The **current_cluster** in the API response uses raw lists (see below).
## GET /api/stats/
Returns a JSON object with the same keys as the index context, **excluding** `audits`, `current_cluster`, and `skeleton`: `region`, `pcpu`, `vcpu`, `pram`, `vram`, `vm`, `flavors`. All numeric values are numbers; sizes are in bytes where applicable.
## GET /api/audits/
Returns:
```json
{
"audits": [ /* list of audit objects with migrations, host_labels, cpu_current, cpu_projected as JSON strings */ ],
"current_cluster": {
"host_labels": [ "compute-0", "compute-1", ... ],
"cpu_current": [ 30.5, 42.1, ... ]
}
}
```
Here `audits` use the same serialized form as the template (JSON strings for list fields). The `current_cluster` is returned with **raw lists** (not JSON strings) so the frontend can use them directly without parsing.
## GET /api/source-status/
Returns:
```json
{
"prometheus": { "status": "ok" | "error" | "mock", "message"?: "..." },
"openstack": { "status": "ok" | "error" | "mock", "message"?: "..." }
}
```
## Cache keys (dashboard/stats.py)
- `CACHE_KEY_STATS` — stats for `/api/stats/`
- `CACHE_KEY_AUDITS` — serialized audits list
- `CACHE_KEY_CURRENT_CLUSTER` — raw current_cluster (host_labels, cpu_current lists)
- `CACHE_KEY_SOURCE_STATUS` — source status result

View File

@@ -1,29 +1,29 @@
{
"name": "watcher-visio",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"build": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --minify",
"dev": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --watch"
},
"repository": {
"type": "git",
"url": "https://git.arnike.ru/Arnike/watcher-visio.git"
},
"keywords": [],
"author": "",
"license": "ISC",
"type": "commonjs",
"devDependencies": {
"@fontsource/dm-sans": "^5.2.8",
"@tailwindcss/typography": "^0.5.19",
"autoprefixer": "^10.4.22",
"daisyui": "^5.5.5",
"postcss": "^8.5.6",
"tailwindcss": "^4.1.17"
},
"dependencies": {
"@tailwindcss/cli": "^4.1.17"
}
}
{
"name": "watcher-visio",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"build": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --minify",
"dev": "npx @tailwindcss/cli -i ./static/css/main.css -o ./static/css/output.css --watch"
},
"repository": {
"type": "git",
"url": "https://git.arnike.ru/Arnike/watcher-visio.git"
},
"keywords": [],
"author": "",
"license": "ISC",
"type": "commonjs",
"devDependencies": {
"@fontsource/dm-sans": "^5.2.8",
"@tailwindcss/typography": "^0.5.19",
"autoprefixer": "^10.4.22",
"daisyui": "^5.5.5",
"postcss": "^8.5.6",
"tailwindcss": "^4.1.17"
},
"dependencies": {
"@tailwindcss/cli": "^4.1.17"
}
}

View File

@@ -96,7 +96,7 @@
--border: 1px;
--depth: 1;
--noise: 0;
--chart-grid-color: color-mix(in oklch, var(--color-base-content) 22%, transparent);
--chart-grid-color: color-mix(in oklch, var(--color-base-content) 10%, transparent);
}
@plugin "daisyui/theme" {
@@ -133,7 +133,7 @@
--border: 1px;
--depth: 1;
--noise: 0;
--chart-grid-color: color-mix(in oklch, var(--color-base-content) 22%, transparent);
--chart-grid-color: color-mix(in oklch, var(--color-base-content) 10%, transparent);
}
/* VTB gradient (both themes) */
@@ -229,16 +229,64 @@ label.swap:focus-within:not(.theme-toggle) {
@source "../../templates";
/* --- Print (Save as PDF) --- */
@page {
size: A4;
margin: 15mm;
}
@media print {
/* Force printable area width (A4 minus margins) so layout doesn't use screen width */
html {
width: 180mm !important;
min-width: 180mm !important;
max-width: 180mm !important;
margin: 0 !important;
padding: 0 !important;
overflow-x: hidden !important;
}
body {
width: 180mm !important;
min-width: 180mm !important;
max-width: 180mm !important;
margin: 0 !important;
padding: 0 !important;
overflow-x: hidden !important;
-webkit-print-color-adjust: exact;
print-color-adjust: exact;
box-sizing: border-box !important;
}
body *,
body *::before,
body *::after {
box-sizing: border-box !important;
}
/* Allow flex/grid children to shrink so they don't force overflow */
body * {
min-width: 0 !important;
}
.no-print {
display: none !important;
}
.print-only {
display: block !important;
}
/* Main and content: stay within body width */
main.container {
width: 100% !important;
max-width: 100% !important;
margin: 0 !important;
padding: 0.5rem 0.5rem 0 !important;
min-width: 0 !important;
}
#dashboard-content {
width: 100% !important;
max-width: 100% !important;
min-width: 0 !important;
overflow-x: hidden !important;
padding: 0.5rem 0 !important;
}
/* Keep card backgrounds and colors when printing */
.card,
main,
.badge,
.progress {
-webkit-print-color-adjust: exact;
@@ -253,13 +301,49 @@ label.swap:focus-within:not(.theme-toggle) {
break-inside: avoid;
page-break-inside: avoid;
}
/* Reduce top padding so content starts higher */
main {
padding-top: 0.5rem !important;
/* Tables: fit to page, allow column shrink */
.overflow-x-auto {
max-width: 100% !important;
overflow-x: visible !important;
}
.table {
table-layout: fixed !important;
width: 100% !important;
max-width: 100% !important;
}
.table td,
.table th {
overflow: hidden;
text-overflow: ellipsis;
}
/* Chart: constrain so it doesn't overflow (canvas has fixed size from Chart.js) */
section[aria-label="CPU distribution chart"] .card-body {
max-width: 100% !important;
overflow: hidden !important;
}
section[aria-label="CPU distribution chart"] .h-48,
section[aria-label="CPU distribution chart"] [class*="h-48"] {
max-width: 100% !important;
min-height: 10rem !important;
}
section[aria-label="CPU distribution chart"] canvas {
max-width: 100% !important;
height: auto !important;
}
/* Navbar fits page width */
.navbar {
width: 100% !important;
max-width: 100% !important;
}
}
@media screen {
.print-only {
display: none !important;
}
}
/* Multi-line tooltip (formula): preserve newlines from data-tip */
.tooltip-formula[data-tip]::before {
white-space: pre-line;
text-align: left;
}

360
static/js/dashboard.js Normal file
View File

@@ -0,0 +1,360 @@
/**
* Dashboard logic: stats rendering, audit selector, CPU chart, migration table.
*
* Expected globals (set by index.html / inline script):
* - SKELETON_MODE (boolean): whether to fetch data from API instead of using embedded context
* - CURRENT_CLUSTER: { host_labels, cpu_current } for "current" cluster chart when no audits
* - auditData: object keyed by audit id, each value { name, migrations, hostData: { labels, current, projected } }
* - INITIAL_AUDIT_ID: first audit id to select when not in skeleton mode
*
* Required DOM element ids:
* - auditSelector, previewCpu, previewRam, previewScope, previewStrategy
* - regionBadge, auditsCount, migrationTableBody, migrationCount, cpuDistributionChart
* - currentCpuMean, currentCpuStd, currentCpuStdBlock
* - elements with data-stats="..." for renderStats()
*
* Depends on: utils.js (formatBytes, getCSSVar, calculateStats, escapeHtml, formatAuditDate)
*/
(function() {
// Singleton Chart.js instance for the CPU distribution chart; rebuilt on
// audit selection and on theme change.
var cpuDistributionChart = null;
// Prefer the shared escapeHtml from utils.js; fall back to a local copy so
// this file still works if utils.js did not load first.
var escapeHtml = typeof window.escapeHtml === 'function' ? window.escapeHtml : function(text) {
if (text == null) return '';
var s = String(text);
return s.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;').replace(/'/g, '&#39;');
};
// --- Initialization: audit selector change (preview panel) ---
// Mirror the selected option's data-* attributes into the preview fields.
// NOTE(review): assumes #auditSelector and the preview elements exist in the
// DOM at script load time — throws otherwise; confirm script ordering.
document.getElementById('auditSelector').addEventListener('change', function(e) {
var option = this.options[this.selectedIndex];
if (!option) return;
document.getElementById('previewCpu').textContent = option.dataset.cpu || '1.0';
document.getElementById('previewRam').textContent = option.dataset.ram || '1.0';
document.getElementById('previewScope').textContent = option.dataset.scope || 'Full Cluster';
document.getElementById('previewStrategy').textContent = option.dataset.strategy || 'Balanced';
});
// --- Stats: setStat, setProgress, renderStats ---
// Write `text` into every element tagged data-stats="<key>" and stop its
// skeleton pulse animation.
function setStat(key, text) {
var nodes = document.querySelectorAll('[data-stats="' + key + '"]');
for (var i = 0; i < nodes.length; i++) {
var node = nodes[i];
node.textContent = text;
node.classList.remove('animate-pulse');
}
}
// Set the value of every <progress> element tagged data-stats="<key>";
// matching non-progress elements are left untouched.
function setProgress(key, value) {
var nodes = document.querySelectorAll('[data-stats="' + key + '"]');
for (var i = 0; i < nodes.length; i++) {
var node = nodes[i];
if (node.tagName !== 'PROGRESS') continue;
node.value = value;
node.classList.remove('animate-pulse');
}
}
// Paint fetched /api/stats/ data into the region badge and every [data-stats]
// placeholder, then clear any remaining skeleton pulses. Tolerates partially
// missing sections: each access is guarded and falls back to 0 or '—'.
function renderStats(data) {
if (!data) return;
// Single-element lookup helper (setStat/setProgress update ALL matches).
var el = function(k) { return document.querySelector('[data-stats="' + k + '"]'); };
var regionBadge = document.getElementById('regionBadge');
if (regionBadge) regionBadge.textContent = data.region && data.region.name ? data.region.name : '—';
// Physical CPU.
setStat('pcpu.usage', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1));
setStat('pcpu.total', Number((data.pcpu && data.pcpu.total) || 0).toFixed(1));
setStat('pcpu.used_percentage', Number((data.pcpu && data.pcpu.used_percentage) || 0).toFixed(1) + '%');
setStat('pcpu.usage_val', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1) + ' CPU');
setProgress('pcpu.progress', (data.pcpu && data.pcpu.used_percentage) || 0);
setStat('pcpu.free', Number((data.pcpu && data.pcpu.free) || 0).toFixed(1));
// Physical RAM — API sends bytes; formatBytes (utils.js) converts to GB.
var pramUsageGb = formatBytes(data.pram && data.pram.usage, 'GB');
var pramTotalGb = formatBytes(data.pram && data.pram.total, 'GB');
var pramFreeGb = formatBytes(data.pram && data.pram.free, 'GB');
setStat('pram.usage_gb', pramUsageGb);
setStat('pram.total_gb', pramTotalGb);
setStat('pram.used_percentage', Number((data.pram && data.pram.used_percentage) || 0).toFixed(1) + '%');
setStat('pram.usage_gb_val', pramUsageGb + ' GB');
setProgress('pram.progress', (data.pram && data.pram.used_percentage) || 0);
setStat('pram.free_gb', pramFreeGb + ' GB');
// VM counters. NOTE(review): String(data.vm && data.vm.count) renders the
// literal text "undefined" when the vm section is absent — confirm intended.
setStat('vm.active', String(data.vm && data.vm.active));
setStat('vm.stopped', String(data.vm && data.vm.stopped));
setStat('vm.count', String(data.vm && data.vm.count));
setStat('flavors.first_name', data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.name : '—');
setStat('vm.avg_cpu', Number((data.vm && data.vm.avg_cpu) || 0).toFixed(1));
setStat('vm.density', Number((data.vm && data.vm.density) || 0).toFixed(1) + '/host');
// Virtual CPU allocation and its overcommit summary line.
setStat('vcpu.allocated_total', ((data.vcpu && data.vcpu.allocated) || 0) + ' / ' + ((data.vcpu && data.vcpu.total) || 0) + ' vCPU');
setProgress('vcpu.progress', (data.vcpu && data.vcpu.allocated_percentage) || 0);
setStat('vcpu.allocated_percentage', Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '%');
var vcpuOver = el('vcpu.overcommit');
if (vcpuOver) {
vcpuOver.textContent = 'overcommit: ' + Number((data.vcpu && data.vcpu.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vcpu && data.vcpu.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '% allocated';
vcpuOver.classList.remove('animate-pulse');
}
// Virtual RAM allocation and its overcommit summary line.
var vramAllocGb = formatBytes(data.vram && data.vram.allocated, 'GB');
var vramTotalGb = formatBytes(data.vram && data.vram.total, 'GB');
setStat('vram.allocated_total', vramAllocGb + ' / ' + vramTotalGb + ' GB');
setProgress('vram.progress', (data.vram && data.vram.allocated_percentage) || 0);
setStat('vram.allocated_percentage', Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '%');
var vramOver = el('vram.overcommit');
if (vramOver) {
vramOver.textContent = 'overcommit: ' + Number((data.vram && data.vram.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vram && data.vram.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '% allocated';
vramOver.classList.remove('animate-pulse');
}
// Top-3 flavors; the first flavor also gets its share of all VMs in percent.
setStat('flavors.first_count', (data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0) + ' instances');
var vmCount = data.vm && data.vm.count ? data.vm.count : 0;
var firstCount = data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0;
setStat('flavors.first_share', (vmCount ? Math.round(firstCount / vmCount * 100) : 0) + '%');
setStat('flavors.second_name', data.flavors && data.flavors.second_common_flavor ? data.flavors.second_common_flavor.name : '—');
setStat('flavors.second_count', data.flavors && data.flavors.second_common_flavor ? String(data.flavors.second_common_flavor.count) : '—');
setStat('flavors.third_name', data.flavors && data.flavors.third_common_flavor ? data.flavors.third_common_flavor.name : '—');
setStat('flavors.third_count', data.flavors && data.flavors.third_common_flavor ? String(data.flavors.third_common_flavor.count) : '—');
// Any placeholder not explicitly set above still stops pulsing.
document.querySelectorAll('[data-stats]').forEach(function(n) { n.classList.remove('animate-pulse'); });
}
// --- Audits: renderAudits, loadSelectedAudit ---
// Populate window.auditData and the audit <select> from an audits list, then
// trigger the initial preview/table/chart render. Per docs/api_context.md the
// list-valued fields (migrations, host_labels, cpu_current, cpu_projected)
// may arrive either as JSON strings (template/API form) or as raw arrays.
function renderAudits(auditsList) {
// Empty or missing list: re-enable the selector with a "No audits"
// placeholder option and show a zero count.
if (!auditsList || !auditsList.length) {
var countEl = document.getElementById('auditsCount');
if (countEl) countEl.textContent = '0 available';
var sel = document.getElementById('auditSelector');
if (sel) { sel.disabled = false; sel.innerHTML = '<option value="">No audits</option>'; }
return;
}
// Rebuild the lookup table keyed by audit id; JSON-string fields are parsed
// once here so downstream code always sees arrays.
window.auditData = {};
auditsList.forEach(function(a) {
window.auditData[a.id] = {
name: a.name,
migrations: typeof a.migrations === 'string' ? JSON.parse(a.migrations) : a.migrations,
hostData: {
labels: typeof a.host_labels === 'string' ? JSON.parse(a.host_labels) : a.host_labels,
current: typeof a.cpu_current === 'string' ? JSON.parse(a.cpu_current) : a.cpu_current,
projected: typeof a.cpu_projected === 'string' ? JSON.parse(a.cpu_projected) : a.cpu_projected
}
};
});
// Rebuild the selector options; the data-* attributes feed the preview
// panel via the 'change' listener registered at the top of this file.
var sel = document.getElementById('auditSelector');
if (sel) {
sel.disabled = false;
sel.innerHTML = '';
auditsList.forEach(function(audit) {
var opt = document.createElement('option');
opt.value = audit.id;
opt.setAttribute('data-cpu', audit.cpu_weight || '1.0');
opt.setAttribute('data-ram', audit.ram_weight || '1.0');
opt.setAttribute('data-scope', audit.scope || 'Full Cluster');
opt.setAttribute('data-strategy', audit.strategy || 'Balanced');
opt.setAttribute('data-goal', audit.goal || '');
var dateStr = formatAuditDate(audit.created_at);
opt.textContent = audit.name + ' (' + dateStr + ')';
sel.appendChild(opt);
});
}
var countEl = document.getElementById('auditsCount');
if (countEl) countEl.textContent = auditsList.length + ' available';
// Select the first audit: fire 'change' to refresh the preview panel, then
// draw its migration table and CPU chart.
if (auditsList.length > 0) {
document.getElementById('auditSelector').dispatchEvent(new Event('change'));
loadSelectedAudit();
}
}
// Refresh the dependent views (migration table + CPU chart) for whichever
// audit is currently selected. Exposed on window for inline handlers.
window.loadSelectedAudit = function() {
var selector = document.getElementById('auditSelector');
var selectedId = selector.value;
updateMigrationTable(selectedId);
updateCPUCharts(selectedId);
};
// --- Migration table: updateMigrationTable ---
// Fill the migration table body for the given audit id; shows a placeholder
// row and "0 actions" when there is nothing to do. Cell text is escaped via
// escapeHtml before being inserted as HTML.
function updateMigrationTable(auditId) {
var tbody = document.getElementById('migrationTableBody');
var countEl = document.getElementById('migrationCount');
var entry = window.auditData && window.auditData[auditId];
var moves = entry && entry.migrations;
if (!moves || moves.length === 0) {
tbody.innerHTML = '<tr><td colspan="4" class="text-center py-6 text-base-content/60">No migration actions recommended</td></tr>';
countEl.textContent = '0 actions';
return;
}
var badgeByImpact = { 'Low': 'badge-success', 'Medium': 'badge-warning', 'High': 'badge-error' };
var rows = moves.map(function(m) {
var impact = m.impact || 'Low';
var impactClass = badgeByImpact[impact] || 'badge-neutral';
return '<tr><td class="font-medium"><div>' + escapeHtml(m.instanceName) + '</div></td><td><div class="flex items-center gap-2"><span class="badge badge-outline badge-xs">' + escapeHtml(m.source) + '</span><svg class="w-3 h-3 text-base-content/30" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 7l5 5m0 0l-5 5m5-5H6"/></svg><span class="badge badge-primary badge-outline badge-xs">' + escapeHtml(m.destination) + '</span></div></td><td><code class="text-xs bg-base-200 px-2 py-1 rounded">' + escapeHtml(m.flavor) + '</code></td><td><span class="badge ' + impactClass + ' badge-xs">' + escapeHtml(impact) + '</span></td></tr>';
});
tbody.innerHTML = rows.join('');
countEl.textContent = moves.length + ' action' + (moves.length !== 1 ? 's' : '');
}
// --- CPU charts: updateCPUCharts ---
// Rebuild the CPU distribution bar chart (Chart.js) for the given audit id,
// destroying any previous chart instance first. Pristine copies of the
// datasets are stored in chart._cpuOriginalData so the custom legend toggle
// (animate bars to zero, then hide — see animation.onComplete) can restore
// the real values afterwards.
function updateCPUCharts(auditId) {
var data = window.auditData && window.auditData[auditId];
if (!data || !data.hostData) return;
// 'current' is the synthetic no-audit entry; it has no projected series.
var hasProjected = (auditId !== 'current');
var ctx = document.getElementById('cpuDistributionChart').getContext('2d');
var currentStats = calculateStats(data.hostData.current);
document.getElementById('currentCpuMean').textContent = currentStats.mean.toFixed(1);
// NOTE(review): std is displayed and plotted scaled by 0.5 (a half-sigma
// band) both here and in the annotation lines below — confirm intentional.
document.getElementById('currentCpuStd').textContent = (currentStats.std * 0.5).toFixed(1);
var stdBlock = document.getElementById('currentCpuStdBlock');
if (stdBlock) stdBlock.style.display = hasProjected ? '' : 'none';
if (cpuDistributionChart) cpuDistributionChart.destroy();
// Theme-aware palette resolved from CSS custom properties (utils.js getCSSVar).
var colors = {
primary: getCSSVar('--color-primary'),
secondary: getCSSVar('--color-secondary'),
accent: getCSSVar('--color-accent'),
neutral: getCSSVar('--color-neutral'),
info: getCSSVar('--color-info'),
success: getCSSVar('--color-success'),
warning: getCSSVar('--color-warning'),
error: getCSSVar('--color-error')
};
var textColor = getCSSVar('--color-base-content');
var gridColor = getCSSVar('--chart-grid-color') || textColor;
// '40' appends a hex alpha byte (~25% opacity) — assumes getCSSVar yields a
// #rrggbb value; TODO confirm for all themes (breaks for oklch()/rgb() strings).
var datasets = [
{ label: 'Current', data: data.hostData.current.slice(), backgroundColor: colors.info + '40', borderColor: colors.info, borderWidth: 1, borderRadius: 3 }
];
if (hasProjected) {
datasets.push({ label: 'Projected', data: data.hostData.projected.slice(), backgroundColor: colors.warning + '40', borderColor: colors.warning, borderWidth: 1, borderRadius: 3 });
}
// Horizontal reference lines (chartjs-plugin-annotation): mean of the
// *current* series, plus a ±0.5·std dashed band when a projection exists.
var annotationConfig = {
MeanLine: { type: 'line', yMin: currentStats.mean, yMax: currentStats.mean, borderColor: colors.success, borderWidth: 2, borderDash: [] }
};
if (hasProjected) {
annotationConfig.upperStdLine = { type: 'line', yMin: currentStats.mean + currentStats.std * 0.5, yMax: currentStats.mean + currentStats.std * 0.5, borderColor: colors.error, borderWidth: 1, borderDash: [5, 5] };
// The lower band is clamped at 0 so it never renders below the axis.
annotationConfig.lowerStdLine = { type: 'line', yMin: currentStats.mean > currentStats.std * 0.5 ? currentStats.mean - currentStats.std * 0.5 : 0, yMax: currentStats.mean > currentStats.std * 0.5 ? currentStats.mean - currentStats.std * 0.5 : 0, borderColor: colors.error, borderWidth: 1, borderDash: [5, 5] };
}
cpuDistributionChart = new Chart(ctx, {
type: 'bar',
data: {
labels: data.hostData.labels,
datasets: datasets
},
options: {
responsive: true,
maintainAspectRatio: false,
animation: {
// Second half of the legend-toggle trick: once the bars finish
// animating down to zero, actually hide the dataset and restore
// its real data from the pristine copy.
onComplete: function() {
var chart = this.chart || this;
if (chart._hidingDataset === undefined) return;
var i = chart._hidingDataset;
chart.getDatasetMeta(i).hidden = true;
chart.data.datasets[i].data = chart._cpuOriginalData[i].slice();
delete chart._hidingDataset;
chart.update('none');
}
},
plugins: {
legend: {
display: true,
position: 'top',
align: 'center',
// Custom toggle: hiding animates bars to zero first (completed in
// animation.onComplete); showing grows them back up from zero.
onClick: function(e, legendItem, legend) {
var i = legendItem.datasetIndex;
var chart = legend.chart;
var len = chart.data.labels.length;
if (chart.isDatasetVisible(i)) {
chart._hidingDataset = i;
chart.data.datasets[i].data = Array(len).fill(0);
chart.update();
} else {
chart.data.datasets[i].data = Array(len).fill(0);
chart.show(i);
chart.update('none');
chart.data.datasets[i].data = chart._cpuOriginalData[i].slice();
chart.update();
}
},
labels: {
usePointStyle: true,
pointStyle: 'rect',
boxWidth: 14,
boxHeight: 14,
padding: 12,
color: textColor,
// Re-resolve the label color on every build so the legend text
// follows the active theme.
generateLabels: function(chart) {
var datasets = chart.data.datasets;
var labelColor = getCSSVar('--color-base-content') || textColor;
return datasets.map(function(ds, i) {
return { text: ds.label, fillStyle: ds.borderColor, strokeStyle: ds.borderColor, lineWidth: 1, fontColor: labelColor, color: labelColor, hidden: !chart.isDatasetVisible(i), datasetIndex: i };
});
}
}
},
tooltip: {
callbacks: { label: function(ctx) { return ctx.dataset.label + ': ' + Number(ctx.parsed.y).toFixed(1) + '% CPU'; } }
},
annotation: {
annotations: annotationConfig
}
},
scales: {
y: {
beginAtZero: true,
max: 100,
grid: {
drawBorder: false,
color: gridColor,
lineWidth: 0.5,
tickBorderDash: [4, 4]
},
ticks: {
stepSize: 25,
color: textColor,
callback: function(value) { return value + '%'; }
}
},
x: { grid: { display: false }, ticks: { display: false }, barPercentage: 1, categoryPercentage: 0.85 }
}
}
});
// Untouched copies used by the legend hide/show logic above.
cpuDistributionChart._cpuOriginalData = hasProjected
? [ data.hostData.current.slice(), data.hostData.projected.slice() ]
: [ data.hostData.current.slice() ];
}
// --- Initialization: DOMContentLoaded (skeleton vs embedded data) ---
// Skeleton mode: the page rendered without data, so fetch stats and audits
// from the API in parallel. Otherwise the template embedded auditData /
// INITIAL_AUDIT_ID / CURRENT_CLUSTER inline and only the first render runs.
document.addEventListener('DOMContentLoaded', function() {
if (typeof SKELETON_MODE !== 'undefined' && SKELETON_MODE) {
Promise.all([
fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); }),
fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); })
]).then(function(results) {
renderStats(results[0]);
renderAudits(results[1].audits);
// No audits available: chart the live cluster's current CPU instead.
// api/audits returns current_cluster with raw lists (not JSON strings).
if (!results[1].audits || results[1].audits.length === 0) {
var cc = results[1].current_cluster;
if (cc && cc.host_labels && cc.cpu_current && cc.host_labels.length) {
window.auditData = window.auditData || {};
window.auditData.current = { hostData: { labels: cc.host_labels, current: cc.cpu_current, projected: cc.cpu_current } };
updateCPUCharts('current');
}
}
}).catch(function(err) {
// Promise.all fails fast: show the error, then retry each endpoint
// independently so one healthy API can still populate its half.
var msg = err.status ? 'Failed to load data (' + err.status + ')' : 'Failed to load data';
var countEl = document.getElementById('auditsCount');
if (countEl) countEl.textContent = msg;
fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d) renderStats(d); });
fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d && d.audits) renderAudits(d.audits); });
});
} else {
// Embedded-data path: fire 'change' for the preview panel, then render;
// with no audits at all, fall back to the server-embedded CURRENT_CLUSTER.
var initialAudit = typeof INITIAL_AUDIT_ID !== 'undefined' ? INITIAL_AUDIT_ID : '';
if (initialAudit && window.auditData && window.auditData[initialAudit]) {
document.getElementById('auditSelector').dispatchEvent(new Event('change'));
loadSelectedAudit();
} else if (!initialAudit && typeof CURRENT_CLUSTER !== 'undefined' && CURRENT_CLUSTER && CURRENT_CLUSTER.host_labels && CURRENT_CLUSTER.host_labels.length) {
window.auditData = window.auditData || {};
window.auditData.current = { hostData: { labels: CURRENT_CLUSTER.host_labels, current: CURRENT_CLUSTER.cpu_current, projected: CURRENT_CLUSTER.cpu_current } };
updateCPUCharts('current');
}
}
});
// --- Initialization: theme change (recreate chart) ---
// Chart colors are resolved from CSS variables at build time, so the chart
// must be rebuilt whenever the theme flips.
// Fix: when no audit is selected the selector value is '' (the chart is
// showing the synthetic 'current' cluster entry); previously the chart was
// destroyed and never rebuilt, leaving a blank canvas after a theme change.
document.addEventListener('themechange', function() {
if (cpuDistributionChart) {
var auditId = document.getElementById('auditSelector').value;
cpuDistributionChart.destroy();
cpuDistributionChart = null;
if (auditId) {
updateCPUCharts(auditId);
} else if (window.auditData && window.auditData.current) {
// Rebuild the no-audit 'current' cluster chart instead of dropping it.
updateCPUCharts('current');
}
}
});
})();

View File

@@ -1,3 +1,15 @@
// Escape for safe HTML text content (prevents XSS when inserting into HTML).
// Single-pass replacement over the five HTML-significant characters.
function escapeHtml(text) {
  if (text == null) return '';
  var entities = { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' };
  return String(text).replace(/[&<>"']/g, function(ch) { return entities[ch]; });
}
// Format bytes to GB (matches Django convert_bytes filter default)
function formatBytes(bytes, targetUnit = 'GB') {
if (bytes == null || isNaN(Number(bytes))) return '0';
@@ -24,6 +36,16 @@ function getColorWithOpacity(className) {
return computedColor;
}
// Format audit date for display (ISO string -> short date, e.g. "Feb 1").
// Returns '' for missing or unparseable input. Fix: `new Date('garbage')`
// does NOT throw — it yields an Invalid Date whose toLocaleDateString()
// returns the literal string "Invalid Date" — so the try/catch alone never
// caught bad input; check getTime() for NaN explicitly.
function formatAuditDate(isoString) {
  if (!isoString) return '';
  try {
    var d = new Date(isoString);
    if (isNaN(d.getTime())) return '';
    return d.toLocaleDateString('en-US', { month: 'short', day: 'numeric' });
  } catch (e) {
    return '';
  }
}
// Utility function to calculate mean and standard deviation
function calculateStats(data) {
if (!data || data.length === 0) return { mean: 0, std: 0 };

View File

@@ -7,8 +7,11 @@
<title>{% block title %}SWatcher{% endblock %}</title>
<link rel="icon" href="{% static 'favicon.ico' %}" type="image/x-icon">
<link rel="stylesheet" href="{% static 'css/output.css' %}">
<script src="{% static 'js/html2canvas-pro.min.js' %}"></script>
<script src="{% static 'js/jspdf.umd.min.js' %}"></script>
<style media="print">
/* Force A4 content width so print layout does not use screen width */
html, body { width: 180mm !important; min-width: 180mm !important; max-width: 180mm !important; margin: 0 !important; padding: 0 !important; overflow-x: hidden !important; }
body * { min-width: 0 !important; box-sizing: border-box !important; }
</style>
{% block imports %}
{% endblock %}
{% block css %}
@@ -22,7 +25,7 @@
</div>
<div class="navbar-end">
<div class="px-1 flex items-center gap-3 pr-10">
<button type="button" id="pdf-export-btn" class="btn btn-ghost btn-sm no-print" onclick="exportDashboardToPdf()" title="Save as PDF" aria-label="Save as PDF">
<button type="button" id="pdf-export-btn" class="btn btn-ghost btn-sm no-print" onclick="window.print()" title="Save as PDF (opens print dialog; choose &quot;Save as PDF&quot;)" aria-label="Save as PDF">
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 10v6m0 0l-3-3m3 3l3-3m2 8H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"/>
</svg>
@@ -74,7 +77,6 @@
localStorage.setItem('theme', newTheme);
});
</script>
<script src="{% static 'js/export-pdf.js' %}"></script>
<script>
(function() {
function updateSourceStatus(el, label, data) {
@@ -105,13 +107,27 @@
var promEl = document.getElementById('source-status-prometheus');
var osEl = document.getElementById('source-status-openstack');
if (!promEl || !osEl) return;
fetch('/api/source-status/').then(function(r) { return r.ok ? r.json() : {}; }).then(function(data) {
updateSourceStatus(promEl, 'Prometheus', data.prometheus);
updateSourceStatus(osEl, 'OpenStack', data.openstack);
}).catch(function() {
updateSourceStatus(promEl, 'Prometheus', { status: 'error', message: 'Failed to fetch status' });
updateSourceStatus(osEl, 'OpenStack', { status: 'error', message: 'Failed to fetch status' });
});
fetch('/api/source-status/')
.then(function(r) {
if (r.ok) return r.json().then(function(data) { return { data: data }; });
return r.json().catch(function() { return {}; }).then(function(body) {
return { error: true, message: (body && body.message) || 'Failed to fetch status' };
});
})
.then(function(result) {
if (result && result.error) {
updateSourceStatus(promEl, 'Prometheus', { status: 'error', message: result.message });
updateSourceStatus(osEl, 'OpenStack', { status: 'error', message: result.message });
} else {
var data = result && result.data;
updateSourceStatus(promEl, 'Prometheus', data && data.prometheus);
updateSourceStatus(osEl, 'OpenStack', data && data.openstack);
}
})
.catch(function() {
updateSourceStatus(promEl, 'Prometheus', { status: 'error', message: 'Failed to fetch status' });
updateSourceStatus(osEl, 'OpenStack', { status: 'error', message: 'Failed to fetch status' });
});
});
})();
</script>

View File

@@ -0,0 +1,151 @@
{% load mathfilters %}
{# Detailed overview row — two cards: #}
{#   1. Resource Allocation: allocated vCPU/vRAM vs. capacity with overcommit ratios. #}
{#   2. Top Flavors: the three most common instance flavors. #}
{# Context vars: vcpu, vram, flavors, vm. When `skeleton` is truthy, placeholder markup is #}
{# rendered instead; its data-stats="…" nodes are presumably filled in later by the #}
{# dashboard JS (setStat/renderStats) — confirm against the including template. #}
{# `convert_bytes` is a project filter, presumably registered globally — verify. #}
<!-- DETAILED OVERVIEW -->
<section class="grid grid-cols-1 lg:grid-cols-2 gap-4" aria-label="Resource allocation and flavors">
<!-- Resource Allocation -->
<div class="card bg-base-100 shadow-sm border-t-gradient-vtb" id="statsAllocationCard">
<div class="card-body p-5">
<h2 class="text-lg font-semibold mb-4 flex items-center gap-2">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 19v-6a2 2 0 00-2-2H5a2 2 0 00-2 2v6a2 2 0 002 2h2a2 2 0 002-2zm0 0V9a2 2 0 012-2h2a2 2 0 012 2v10m-6 0a2 2 0 002 2h2a2 2 0 002-2m0 0V5a2 2 0 012-2h2a2 2 0 012 2v14a2 2 0 01-2 2h-2a2 2 0 01-2-2z"/>
</svg>
Resource Allocation
</h2>
{% if skeleton %}
{# Skeleton placeholders: em-dash text is swapped out by JS once real stats arrive. #}
<div class="mb-4">
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">CPU Allocation</span>
<span class="font-medium animate-pulse" data-stats="vcpu.allocated_total">— / — vCPU</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-primary flex-1 animate-pulse" data-stats="vcpu.progress" value="0" max="100"></progress>
<span class="text-xs font-medium w-12 text-right animate-pulse" data-stats="vcpu.allocated_percentage">—%</span>
</div>
<div class="flex justify-between text-xs mt-1 animate-pulse" data-stats="vcpu.overcommit"></div>
</div>
<div>
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">RAM Allocation</span>
<span class="font-medium animate-pulse" data-stats="vram.allocated_total">— / — GB</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-secondary flex-1 animate-pulse" data-stats="vram.progress" value="0" max="100"></progress>
<span class="text-xs font-medium w-12 text-right animate-pulse" data-stats="vram.allocated_percentage">—%</span>
</div>
<div class="flex justify-between text-xs mt-1 animate-pulse" data-stats="vram.overcommit"></div>
</div>
{% else %}
<!-- CPU Allocation -->
<div class="mb-4">
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">CPU Allocation</span>
<span class="font-medium">{{ vcpu.allocated }} / {{ vcpu.total }} vCPU</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-primary flex-1" value="{{ vcpu.allocated_percentage }}" max="100"></progress>
<span class="text-xs font-medium w-12 text-right">{{ vcpu.allocated_percentage|floatformat:1 }}%</span>
</div>
<div class="flex justify-between text-xs mt-1">
<span class="text-base-content/60">overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max|floatformat:1 }}</span>
<span class="text-base-content/60">{{ vcpu.allocated_percentage|floatformat:1 }}% allocated</span>
</div>
</div>
<!-- RAM Allocation -->
<div>
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">RAM Allocation</span>
<span class="font-medium">{{ vram.allocated|convert_bytes }} / {{ vram.total|convert_bytes }} GB</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-secondary flex-1" value="{{ vram.allocated_percentage }}" max="100"></progress>
<span class="text-xs font-medium w-12 text-right">{{ vram.allocated_percentage|floatformat:1 }}%</span>
</div>
<div class="flex justify-between text-xs mt-1">
<span class="text-base-content/60">overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max|floatformat:1 }}</span>
<span class="text-base-content/60">{{ vram.allocated_percentage|floatformat:1 }}% allocated</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- Flavor Distribution -->
<div class="card bg-base-100 shadow-sm border-t-gradient-vtb" id="statsFlavorsCard">
<div class="card-body p-5">
<h2 class="text-lg font-semibold mb-4 flex items-center gap-2">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M11 3.055A9.001 9.001 0 1020.945 13H11V3.055z"/>
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M20.488 9H15V3.512A9.025 9.025 0 0120.488 9z"/>
</svg>
Top Flavors
</h2>
{% if skeleton %}
<div class="space-y-3">
<div class="bg-base-200/50 rounded-lg p-3">
<div class="flex justify-between items-center mb-1">
<span class="text-sm font-medium animate-pulse" data-stats="flavors.first_name"></span>
<span class="text-xs badge badge-primary animate-pulse" data-stats="flavors.first_count">— instances</span>
</div>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Share</span>
<span class="font-medium animate-pulse" data-stats="flavors.first_share">—%</span>
</div>
</div>
<div class="space-y-2">
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span class="animate-pulse" data-stats="flavors.second_name"></span>
</div>
<span class="text-xs badge badge-outline animate-pulse" data-stats="flavors.second_count"></span>
</div>
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span class="animate-pulse" data-stats="flavors.third_name"></span>
</div>
<span class="text-xs badge badge-outline animate-pulse" data-stats="flavors.third_count"></span>
</div>
</div>
</div>
{% else %}
<div class="space-y-3">
<!-- Most Common -->
<div class="bg-base-200/50 rounded-lg p-3">
<div class="flex justify-between items-center mb-1">
<span class="text-sm font-medium">{{ flavors.first_common_flavor.name }}</span>
<span class="text-xs badge badge-primary">{{ flavors.first_common_flavor.count }} instances</span>
</div>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Share</span>
{# widthratio instead of `|div:vm.count|mul:100`: widthratio returns "0" when #}
{# vm.count is 0, whereas the mathfilters `div` filter raises ZeroDivisionError #}
{# and breaks rendering of the whole page on an empty cluster. #}
<span class="font-medium">{% widthratio flavors.first_common_flavor.count vm.count 100 %}%</span>
</div>
</div>
<!-- Other Flavors -->
<div class="space-y-2">
{% if flavors.second_common_flavor %}
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span>{{ flavors.second_common_flavor.name }}</span>
</div>
<span class="text-xs badge badge-outline">{{ flavors.second_common_flavor.count }}</span>
</div>
{% endif %}
{% if flavors.third_common_flavor %}
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span>{{ flavors.third_common_flavor.name }}</span>
</div>
<span class="text-xs badge badge-outline">{{ flavors.third_common_flavor.count }}</span>
</div>
{% endif %}
</div>
</div>
{% endif %}
</div>
</div>
</section>

View File

@@ -0,0 +1,71 @@
<!-- AUDIT CONTROL -->
{# Audit selector card: lists available audits and exposes each audit's run parameters #}
{# (strategy, scope, CPU/RAM weights) via option data-* attributes and an Info dropdown. #}
{# When `skeleton` is truthy the selector is disabled and shows a "Loading…" placeholder. #}
<section aria-label="Audit analysis">
<div class="card bg-base-100 shadow-sm" id="auditSection">
<div class="card-body p-5">
<div class="flex flex-col sm:flex-row sm:items-center justify-between gap-3 mb-4">
<div>
<h2 class="text-lg font-semibold">Audit Analysis</h2>
<div class="text-base text-base-content/60 mt-0.5">Select an audit to analyze resource distribution</div>
</div>
<div class="flex items-center gap-2">
<span class="text-xs text-base-content/60" id="auditsCount">{% if skeleton %}Loading…{% else %}{{ audits|length }} available{% endif %}</span>
{# Info dropdown: preview* spans are presumably updated by the selector's JS change #}
{# handler from the selected option's data-* attributes — confirm against the including page. #}
<div class="dropdown dropdown-end no-print">
<label tabindex="0" class="btn btn-xs btn-ghost">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 16h-1v-4h-1m1-4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z"/>
</svg>
Info
</label>
<div tabindex="0" class="dropdown-content z-[1] card card-compact w-64 p-2 shadow bg-base-100">
<div class="card-body">
<div class="text-xs space-y-1">
<div class="flex justify-between">
<span class="text-base-content/60">Strategy:</span>
<span id="previewStrategy">Balanced</span>
</div>
<div class="flex justify-between">
<span class="text-base-content/60">Scope:</span>
<span id="previewScope">Full Cluster</span>
</div>
<div class="flex justify-between">
<span class="text-base-content/60">CPU Weight:</span>
<span id="previewCpu">1.0</span>
</div>
<div class="flex justify-between">
<span class="text-base-content/60">RAM Weight:</span>
<span id="previewRam">1.0</span>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="flex flex-col md:flex-row gap-3">
<select id="auditSelector" class="select select-bordered select-sm flex-1" {% if skeleton %}disabled{% endif %}>
{% if skeleton %}
<option value="">Loading…</option>
{% else %}
{# One option per audit; data-* attributes carry the audit's run parameters. #}
{% for audit in audits %}
<option value="{{ audit.id }}"
data-cpu="{{ audit.cpu_weight }}"
data-ram="{{ audit.ram_weight }}"
data-scope="{{ audit.scope }}"
data-strategy="{{ audit.strategy }}"
data-goal="{{ audit.goal }}">
{{ audit.name }} ({{ audit.created_at|date:"M d" }})
</option>
{% endfor %}
{% endif %}
</select>
{# loadSelectedAudit is defined by the including page's script — verify it is in scope. #}
<button type="button" onclick="loadSelectedAudit()" class="btn btn-primary btn-sm gap-2 no-print">
<svg class="w-4 h-4 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 7l5 5m0 0l-5 5m5-5H6"/>
</svg>
Load Analysis
</button>
</div>
</div>
</div>
</section>

View File

@@ -0,0 +1,55 @@
{# CPU distribution chart (current vs projected) and the migration actions table. #}
{# The canvas, mean/σ spans and table body carry static placeholders; they are presumably #}
{# populated by the dashboard JS when an audit is loaded — confirm against the including page. #}
<!-- ANALYSIS VISUALIZATION -->
<section aria-label="CPU distribution chart">
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-5">
<h3 class="text-lg font-semibold mb-4">CPU Distribution (Current vs Projected)</h3>
<div class="h-48">
<canvas id="cpuDistributionChart"></canvas>
</div>
<div class="flex items-center justify-center gap-3 mt-3">
<div class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-success"></div>
<span class="text-success">Mean: <span id="currentCpuMean">0</span>%</span>
</div>
<div id="currentCpuStdBlock" class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-error/60"></div>
{# Fixed tooltip formula: the standard deviation is √(Σ(xᵢ−μ)²/n); the minus sign #}
{# between xᵢ and μ was missing in the original tooltip text. #}
<span class="tooltip tooltip-top tooltip-formula text-error/60 cursor-help" data-tip="Показывает разброс утилизации CPU относительно среднего.&#10;&#10;μ — средняя утилизация CPU по хостам.&#10;σ — стандартное отклонение:&#10;σ = √(Σ(xᵢ−μ)²/n)." aria-label="Формула расчёта ±0.5σ">
<span class="text-error/60">±0.5σ: <span id="currentCpuStd">0</span>%</span>
</span>
</div>
</div>
</div>
</div>
</section>
<!-- MIGRATION ACTIONS -->
<section aria-label="Migration actions">
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-6">
<div class="flex items-center justify-between mb-4">
<h3 class="text-lg font-semibold">Migration Actions</h3>
<div class="badge badge-neutral badge-sm" id="migrationCount">Select audit</div>
</div>
<div class="overflow-x-auto">
<table class="table table-zebra table-sm">
<thead>
<tr class="bg-base-200">
<th class="text-xs font-medium">Instance</th>
<th class="text-xs font-medium">Source → Destination</th>
<th class="text-xs font-medium">Flavor</th>
<th class="text-xs font-medium">Impact</th>
</tr>
</thead>
<tbody id="migrationTableBody" class="text-sm">
<tr>
<td colspan="4" class="text-center py-6 text-base-content/60">
No audit selected. Load an audit to view migration recommendations.
</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
</section>

View File

@@ -0,0 +1,162 @@
{% load mathfilters %}
{# NOTE(review): no mathfilters filters (div/mul/…) are used in this include — #}
{# the load tag above looks unnecessary; confirm before removing. #}
{# Quick stats row — three cards: physical CPU utilization, physical RAM utilization, #}
{# and an instance summary. Context vars: pcpu, pram, vm, flavors. With `skeleton` truthy, #}
{# placeholder markup is rendered; its data-stats="…" nodes are presumably filled in by #}
{# the dashboard JS — confirm against the including template. #}
{# `convert_bytes` is a project filter, presumably registered globally — verify. #}
<!-- QUICK STATS ROW -->
<section class="grid grid-cols-1 lg:grid-cols-3 gap-4" aria-label="Quick stats">
<!-- CPU Utilization -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsPcpuCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">CPU Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="pcpu.usage"></span> / <span data-stats="pcpu.total"></span> CPU</div>
</div>
<div class="text-xl font-bold text-primary animate-pulse" data-stats="pcpu.used_percentage">—%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium animate-pulse" data-stats="pcpu.usage_val"></span>
</div>
<progress class="progress progress-primary w-full animate-pulse" data-stats="pcpu.progress" value="0" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium animate-pulse" data-stats="pcpu.free"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">CPU Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ pcpu.usage|floatformat:1 }} / {{ pcpu.total }} CPU</div>
</div>
<div class="text-xl font-bold text-primary">{{ pcpu.used_percentage|floatformat:1 }}%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium">{{ pcpu.usage|floatformat:1 }} CPU</span>
</div>
<progress class="progress progress-primary w-full" value="{{ pcpu.used_percentage }}" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium">{{ pcpu.free|floatformat:1 }} CPU</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- RAM Utilization -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsPramCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">RAM Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="pram.usage_gb"></span> / <span data-stats="pram.total_gb"></span> GB</div>
</div>
<div class="text-xl font-bold text-secondary animate-pulse" data-stats="pram.used_percentage">—%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium animate-pulse" data-stats="pram.usage_gb_val"></span>
</div>
<progress class="progress progress-secondary w-full animate-pulse" data-stats="pram.progress" value="0" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium animate-pulse" data-stats="pram.free_gb"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">RAM Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ pram.usage|convert_bytes }} / {{ pram.total|convert_bytes }} GB</div>
</div>
<div class="text-xl font-bold text-secondary">{{ pram.used_percentage|floatformat:1 }}%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium">{{ pram.usage|convert_bytes }} GB</span>
</div>
<progress class="progress progress-secondary w-full" value="{{ pram.used_percentage }}" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium">{{ pram.free|convert_bytes }} GB</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- Instance Summary -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsVmCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">Instances</h3>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="vm.active"></span> active / <span data-stats="vm.stopped"></span> stopped</div>
</div>
<div class="text-xl font-bold text-accent animate-pulse" data-stats="vm.count"></div>
</div>
<div class="space-y-3">
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-success"></div>
<span>Most Used Flavor</span>
</div>
<span class="font-medium animate-pulse" data-stats="flavors.first_name"></span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-info"></div>
<span>Avg. vCPU/VM</span>
</div>
<span class="font-medium animate-pulse" data-stats="vm.avg_cpu"></span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-warning"></div>
<span>Density</span>
</div>
<span class="font-medium animate-pulse" data-stats="vm.density"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">Instances</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ vm.active }} active / {{ vm.stopped }} stopped</div>
</div>
<div class="text-xl font-bold text-accent">{{ vm.count }}</div>
</div>
<div class="space-y-3">
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-success"></div>
<span>Most Used Flavor</span>
</div>
<span class="font-medium">{{ flavors.first_common_flavor.name }}</span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-info"></div>
<span>Avg. vCPU/VM</span>
</div>
<span class="font-medium">{{ vm.avg_cpu|floatformat:1 }}</span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-warning"></div>
<span>Density</span>
</div>
<span class="font-medium">{{ vm.density|floatformat:1 }}/host</span>
</div>
</div>
{% endif %}
</div>
</div>
</section>

View File

@@ -11,444 +11,10 @@
{% block content %}
<!-- MAIN DASHBOARD -->
<div id="dashboard-content" class="p-4 space-y-8" {% if skeleton %}data-dashboard="skeleton"{% endif %}>
<!-- QUICK STATS ROW -->
<section class="grid grid-cols-1 lg:grid-cols-3 gap-4" aria-label="Quick stats">
<!-- CPU Utilization -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsPcpuCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">CPU Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="pcpu.usage"></span> / <span data-stats="pcpu.total"></span> CPU</div>
</div>
<div class="text-xl font-bold text-primary animate-pulse" data-stats="pcpu.used_percentage">—%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium animate-pulse" data-stats="pcpu.usage_val"></span>
</div>
<progress class="progress progress-primary w-full animate-pulse" data-stats="pcpu.progress" value="0" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium animate-pulse" data-stats="pcpu.free"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">CPU Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ pcpu.usage|floatformat:1 }} / {{ pcpu.total }} CPU</div>
</div>
<div class="text-xl font-bold text-primary">{{ pcpu.used_percentage|floatformat:1 }}%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium">{{ pcpu.usage|floatformat:1 }} CPU</span>
</div>
<progress class="progress progress-primary w-full" value="{{ pcpu.used_percentage }}" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium">{{ pcpu.free }} CPU</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- RAM Utilization -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsPramCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">RAM Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="pram.usage_gb"></span> / <span data-stats="pram.total_gb"></span> GB</div>
</div>
<div class="text-xl font-bold text-secondary animate-pulse" data-stats="pram.used_percentage">—%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium animate-pulse" data-stats="pram.usage_gb_val"></span>
</div>
<progress class="progress progress-secondary w-full animate-pulse" data-stats="pram.progress" value="0" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium animate-pulse" data-stats="pram.free_gb"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">RAM Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ pram.usage|convert_bytes }} / {{ pram.total|convert_bytes }} GB</div>
</div>
<div class="text-xl font-bold text-secondary">{{ pram.used_percentage|floatformat:1 }}%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium">{{ pram.usage|convert_bytes }} GB</span>
</div>
<progress class="progress progress-secondary w-full" value="{{ pram.used_percentage }}" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium">{{ pram.free|convert_bytes }} GB</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- Instance Summary -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsVmCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">Instances</h3>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="vm.active"></span> active / <span data-stats="vm.stopped"></span> stopped</div>
</div>
<div class="text-xl font-bold text-accent animate-pulse" data-stats="vm.count"></div>
</div>
<div class="space-y-3">
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-success"></div>
<span>Most Used Flavor</span>
</div>
<span class="font-medium animate-pulse" data-stats="flavors.first_name"></span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-info"></div>
<span>Avg. vCPU/VM</span>
</div>
<span class="font-medium animate-pulse" data-stats="vm.avg_cpu"></span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-warning"></div>
<span>Density</span>
</div>
<span class="font-medium animate-pulse" data-stats="vm.density"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">Instances</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ vm.active }} active / {{ vm.stopped }} stopped</div>
</div>
<div class="text-xl font-bold text-accent">{{ vm.count }}</div>
</div>
<div class="space-y-3">
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-success"></div>
<span>Most Used Flavor</span>
</div>
<span class="font-medium">{{ flavors.first_common_flavor.name }}</span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-info"></div>
<span>Avg. vCPU/VM</span>
</div>
<span class="font-medium">{{ vm.avg_cpu|floatformat:1 }}</span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-warning"></div>
<span>Density</span>
</div>
<span class="font-medium">{{ vm.density|floatformat:1 }}/host</span>
</div>
</div>
{% endif %}
</div>
</div>
</section>
<!-- DETAILED OVERVIEW -->
<section class="grid grid-cols-1 lg:grid-cols-2 gap-4" aria-label="Resource allocation and flavors">
<!-- Resource Allocation -->
<div class="card bg-base-100 shadow-sm border-t-gradient-vtb" id="statsAllocationCard">
<div class="card-body p-5">
<h2 class="text-lg font-semibold mb-4 flex items-center gap-2">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 19v-6a2 2 0 00-2-2H5a2 2 0 00-2 2v6a2 2 0 002 2h2a2 2 0 002-2zm0 0V9a2 2 0 012-2h2a2 2 0 012 2v10m-6 0a2 2 0 002 2h2a2 2 0 002-2m0 0V5a2 2 0 012-2h2a2 2 0 012 2v14a2 2 0 01-2 2h-2a2 2 0 01-2-2z"/>
</svg>
Resource Allocation
</h2>
{% if skeleton %}
<div class="mb-4">
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">CPU Allocation</span>
<span class="font-medium animate-pulse" data-stats="vcpu.allocated_total">— / — vCPU</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-primary flex-1 animate-pulse" data-stats="vcpu.progress" value="0" max="100"></progress>
<span class="text-xs font-medium w-12 text-right animate-pulse" data-stats="vcpu.allocated_percentage">—%</span>
</div>
<div class="flex justify-between text-xs mt-1 animate-pulse" data-stats="vcpu.overcommit"></div>
</div>
<div>
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">RAM Allocation</span>
<span class="font-medium animate-pulse" data-stats="vram.allocated_total">— / — GB</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-secondary flex-1 animate-pulse" data-stats="vram.progress" value="0" max="100"></progress>
<span class="text-xs font-medium w-12 text-right animate-pulse" data-stats="vram.allocated_percentage">—%</span>
</div>
<div class="flex justify-between text-xs mt-1 animate-pulse" data-stats="vram.overcommit"></div>
</div>
{% else %}
<!-- CPU Allocation -->
<div class="mb-4">
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">CPU Allocation</span>
<span class="font-medium">{{ vcpu.allocated }} / {{ vcpu.total }} vCPU</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-primary flex-1" value="{{ vcpu.allocated_percentage }}" max="100"></progress>
<span class="text-xs font-medium w-12 text-right">{{ vcpu.allocated_percentage|floatformat:1 }}%</span>
</div>
<div class="flex justify-between text-xs mt-1">
<span class="text-base-content/60">overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max|floatformat:1 }}</span>
<span class="text-base-content/60">{{ vcpu.allocated_percentage|floatformat:1 }}% allocated</span>
</div>
</div>
<!-- RAM Allocation -->
<div>
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">RAM Allocation</span>
<span class="font-medium">{{ vram.allocated|convert_bytes }} / {{ vram.total|convert_bytes }} GB</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-secondary flex-1" value="{{ vram.allocated_percentage }}" max="100"></progress>
<span class="text-xs font-medium w-12 text-right">{{ vram.allocated_percentage|floatformat:1 }}%</span>
</div>
<div class="flex justify-between text-xs mt-1">
<span class="text-base-content/60">overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max|floatformat:1 }}</span>
<span class="text-base-content/60">{{ vram.allocated_percentage|floatformat:1 }}% allocated</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- Flavor Distribution -->
<div class="card bg-base-100 shadow-sm border-t-gradient-vtb" id="statsFlavorsCard">
<div class="card-body p-5">
<h2 class="text-lg font-semibold mb-4 flex items-center gap-2">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M11 3.055A9.001 9.001 0 1020.945 13H11V3.055z"/>
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M20.488 9H15V3.512A9.025 9.025 0 0120.488 9z"/>
</svg>
Top Flavors
</h2>
{% if skeleton %}
<div class="space-y-3">
<div class="bg-base-200/50 rounded-lg p-3">
<div class="flex justify-between items-center mb-1">
<span class="text-sm font-medium animate-pulse" data-stats="flavors.first_name"></span>
<span class="text-xs badge badge-primary animate-pulse" data-stats="flavors.first_count">— instances</span>
</div>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Share</span>
<span class="font-medium animate-pulse" data-stats="flavors.first_share">—%</span>
</div>
</div>
<div class="space-y-2">
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span class="animate-pulse" data-stats="flavors.second_name"></span>
</div>
<span class="text-xs badge badge-outline animate-pulse" data-stats="flavors.second_count"></span>
</div>
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span class="animate-pulse" data-stats="flavors.third_name"></span>
</div>
<span class="text-xs badge badge-outline animate-pulse" data-stats="flavors.third_count"></span>
</div>
</div>
</div>
{% else %}
<div class="space-y-3">
<!-- Most Common -->
<div class="bg-base-200/50 rounded-lg p-3">
<div class="flex justify-between items-center mb-1">
<span class="text-sm font-medium">{{ flavors.first_common_flavor.name }}</span>
<span class="text-xs badge badge-primary">{{ flavors.first_common_flavor.count }} instances</span>
</div>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Share</span>
<span class="font-medium">{{ flavors.first_common_flavor.count|div:vm.count|mul:100|floatformat:0 }}%</span>
</div>
</div>
<!-- Other Flavors -->
<div class="space-y-2">
{% if flavors.second_common_flavor %}
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span>{{ flavors.second_common_flavor.name }}</span>
</div>
<span class="text-xs badge badge-outline">{{ flavors.second_common_flavor.count }}</span>
</div>
{% endif %}
{% if flavors.third_common_flavor %}
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span>{{ flavors.third_common_flavor.name }}</span>
</div>
<span class="text-xs badge badge-outline">{{ flavors.third_common_flavor.count }}</span>
</div>
{% endif %}
</div>
</div>
{% endif %}
</div>
</div>
</section>
<!-- AUDIT CONTROL -->
<section aria-label="Audit analysis">
<div class="card bg-base-100 shadow-sm" id="auditSection">
<div class="card-body p-5">
<div class="flex flex-col sm:flex-row sm:items-center justify-between gap-3 mb-4">
<div>
<h2 class="text-lg font-semibold">Audit Analysis</h2>
<div class="text-base text-base-content/60 mt-0.5">Select an audit to analyze resource distribution</div>
</div>
<div class="flex items-center gap-2">
<span class="text-xs text-base-content/60" id="auditsCount">{% if skeleton %}Loading…{% else %}{{ audits|length }} available{% endif %}</span>
<div class="dropdown dropdown-end no-print">
<label tabindex="0" class="btn btn-xs btn-ghost">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 16h-1v-4h-1m1-4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z"/>
</svg>
Info
</label>
<div tabindex="0" class="dropdown-content z-[1] card card-compact w-64 p-2 shadow bg-base-100">
<div class="card-body">
<div class="text-xs space-y-1">
<div class="flex justify-between">
<span class="text-base-content/60">Strategy:</span>
<span id="previewStrategy">Balanced</span>
</div>
<div class="flex justify-between">
<span class="text-base-content/60">Scope:</span>
<span id="previewScope">Full Cluster</span>
</div>
<div class="flex justify-between">
<span class="text-base-content/60">CPU Weight:</span>
<span id="previewCpu">1.0</span>
</div>
<div class="flex justify-between">
<span class="text-base-content/60">RAM Weight:</span>
<span id="previewRam">1.0</span>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="flex flex-col md:flex-row gap-3">
<select id="auditSelector" class="select select-bordered select-sm flex-1" {% if skeleton %}disabled{% endif %}>
{% if skeleton %}
<option value="">Loading…</option>
{% else %}
{% for audit in audits %}
<option value="{{ audit.id }}"
data-cpu="{{ audit.cpu_weight }}"
data-ram="{{ audit.ram_weight }}"
data-scope="{{ audit.scope }}"
data-strategy="{{ audit.strategy }}"
data-goal="{{ audit.goal }}">
{{ audit.name }} ({{ audit.created_at|date:"M d" }})
</option>
{% endfor %}
{% endif %}
</select>
<button type="button" onclick="loadSelectedAudit()" class="btn btn-primary btn-sm gap-2 no-print">
<svg class="w-4 h-4 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 7l5 5m0 0l-5 5m5-5H6"/>
</svg>
Load Analysis
</button>
</div>
</div>
</div>
</section>
<!-- ANALYSIS VISUALIZATION -->
<section aria-label="CPU distribution chart">
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-5">
<h3 class="text-lg font-semibold mb-4">CPU Distribution (Current vs Projected)</h3>
<div class="h-48">
<canvas id="cpuDistributionChart"></canvas>
</div>
<div class="flex items-center justify-center gap-3 mt-3">
<div class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-success"></div>
<span class="text-success">Mean: <span id="currentCpuMean">0</span>%</span>
</div>
<div class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-error/60"></div>
<span class="text-error/60">±0.5σ: <span id="currentCpuStd">0</span>%</span>
</div>
</div>
</div>
</div>
</section>
<!-- MIGRATION ACTIONS -->
<section aria-label="Migration actions">
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-6">
<div class="flex items-center justify-between mb-4">
<h3 class="text-lg font-semibold">Migration Actions</h3>
<div class="badge badge-neutral badge-sm" id="migrationCount">Select audit</div>
</div>
<div class="overflow-x-auto">
<table class="table table-zebra table-sm">
<thead>
<tr class="bg-base-200">
<th class="text-xs font-medium">Instance</th>
<th class="text-xs font-medium">Source → Destination</th>
<th class="text-xs font-medium">Flavor</th>
<th class="text-xs font-medium">Impact</th>
</tr>
</thead>
<tbody id="migrationTableBody" class="text-sm">
<tr>
<td colspan="4" class="text-center py-6 text-base-content/60">
No audit selected. Load an audit to view migration recommendations.
</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
</section>
{% include "dashboard/_stats_cards.html" %}
{% include "dashboard/_allocation_flavors.html" %}
{% include "dashboard/_audit_section.html" %}
{% include "dashboard/_chart_migrations.html" %}
</div>
{% endblock %}
@@ -457,7 +23,7 @@
const SKELETON_MODE = {{ skeleton|yesno:"true,false" }};
const CURRENT_CLUSTER = {% if current_cluster %}{ "host_labels": {{ current_cluster.host_labels|safe }}, "cpu_current": {{ current_cluster.cpu_current|safe }} }{% else %}null{% endif %};
let auditData = {
window.auditData = {
{% if not skeleton %}
{% for audit in audits %}
"{{ audit.id }}": {
@@ -472,430 +38,9 @@
{% endfor %}
{% endif %}
};
// Mirror the selected audit's data-* metadata into the preview panel.
document.getElementById('auditSelector').addEventListener('change', function(e) {
    const selected = this.options[this.selectedIndex];
    if (!selected) return;
    // [target element id, dataset key, fallback shown when the attr is absent]
    const previewFields = [
        ['previewCpu', 'cpu', '1.0'],
        ['previewRam', 'ram', '1.0'],
        ['previewScope', 'scope', 'Full Cluster'],
        ['previewStrategy', 'strategy', 'Balanced']
    ];
    previewFields.forEach(function(spec) {
        document.getElementById(spec[0]).textContent = selected.dataset[spec[1]] || spec[2];
    });
});
let cpuDistributionChart = null;
// Write `text` into every stat slot bound to `key` and stop its skeleton pulse.
function setStat(key, text) {
    const nodes = document.querySelectorAll('[data-stats="' + key + '"]');
    for (const node of nodes) {
        node.textContent = text;
        node.classList.remove('animate-pulse');
    }
}
// Push a numeric value into every <progress> element bound to `key`;
// non-progress elements sharing the key are left untouched.
function setProgress(key, value) {
    for (const node of document.querySelectorAll('[data-stats="' + key + '"]')) {
        if (node.tagName !== 'PROGRESS') continue;
        node.value = value;
        node.classList.remove('animate-pulse');
    }
}
// Populate every dashboard stat card from the /api/stats/ payload and clear
// the skeleton pulse animation. Each sub-object (pcpu, pram, vm, vcpu, vram,
// flavors, region) is optional; missing values degrade to 0 or '—'.
// NOTE(review): relies on formatBytes() and getCSSVar() defined elsewhere
// (presumably static/js/dashboard.js) — confirm load order.
function renderStats(data) {
    if (!data) return;
    // Shorthand: first element bound to a stats key (used for one-off badges).
    var el = function(k) { return document.querySelector('[data-stats="' + k + '"]'); };
    var regionBadge = document.getElementById('regionBadge');
    if (regionBadge) regionBadge.textContent = data.region && data.region.name ? data.region.name : '—';
    // --- Physical CPU ---
    setStat('pcpu.usage', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1));
    setStat('pcpu.total', String((data.pcpu && data.pcpu.total) || 0));
    setStat('pcpu.used_percentage', Number((data.pcpu && data.pcpu.used_percentage) || 0).toFixed(1) + '%');
    setStat('pcpu.usage_val', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1) + ' CPU');
    setProgress('pcpu.progress', (data.pcpu && data.pcpu.used_percentage) || 0);
    setStat('pcpu.free', String((data.pcpu && data.pcpu.free) || 0));
    // --- Physical RAM (formatBytes presumably converts raw bytes to GB — confirm against API) ---
    var pramUsageGb = formatBytes(data.pram && data.pram.usage, 'GB');
    var pramTotalGb = formatBytes(data.pram && data.pram.total, 'GB');
    var pramFreeGb = formatBytes(data.pram && data.pram.free, 'GB');
    setStat('pram.usage_gb', pramUsageGb);
    setStat('pram.total_gb', pramTotalGb);
    setStat('pram.used_percentage', Number((data.pram && data.pram.used_percentage) || 0).toFixed(1) + '%');
    setStat('pram.usage_gb_val', pramUsageGb + ' GB');
    setProgress('pram.progress', (data.pram && data.pram.used_percentage) || 0);
    setStat('pram.free_gb', pramFreeGb + ' GB');
    // --- VM counts ---
    setStat('vm.active', String(data.vm && data.vm.active));
    setStat('vm.stopped', String(data.vm && data.vm.stopped));
    setStat('vm.count', String(data.vm && data.vm.count));
    setStat('flavors.first_name', data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.name : '—');
    setStat('vm.avg_cpu', Number((data.vm && data.vm.avg_cpu) || 0).toFixed(1));
    setStat('vm.density', Number((data.vm && data.vm.density) || 0).toFixed(1) + '/host');
    // --- Virtual CPU allocation + overcommit badge ---
    setStat('vcpu.allocated_total', ((data.vcpu && data.vcpu.allocated) || 0) + ' / ' + ((data.vcpu && data.vcpu.total) || 0) + ' vCPU');
    setProgress('vcpu.progress', (data.vcpu && data.vcpu.allocated_percentage) || 0);
    setStat('vcpu.allocated_percentage', Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '%');
    var vcpuOver = el('vcpu.overcommit');
    if (vcpuOver) {
        vcpuOver.textContent = 'overcommit: ' + Number((data.vcpu && data.vcpu.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vcpu && data.vcpu.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '% allocated';
        vcpuOver.classList.remove('animate-pulse');
    }
    // --- Virtual RAM allocation + overcommit badge ---
    var vramAllocGb = formatBytes(data.vram && data.vram.allocated, 'GB');
    var vramTotalGb = formatBytes(data.vram && data.vram.total, 'GB');
    setStat('vram.allocated_total', vramAllocGb + ' / ' + vramTotalGb + ' GB');
    setProgress('vram.progress', (data.vram && data.vram.allocated_percentage) || 0);
    setStat('vram.allocated_percentage', Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '%');
    var vramOver = el('vram.overcommit');
    if (vramOver) {
        vramOver.textContent = 'overcommit: ' + Number((data.vram && data.vram.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vram && data.vram.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '% allocated';
        vramOver.classList.remove('animate-pulse');
    }
    // --- Top-3 flavors; first flavor also gets its share of all VMs ---
    setStat('flavors.first_count', (data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0) + ' instances');
    var vmCount = data.vm && data.vm.count ? data.vm.count : 0;
    var firstCount = data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0;
    setStat('flavors.first_share', (vmCount ? Math.round(firstCount / vmCount * 100) : 0) + '%');
    setStat('flavors.second_name', data.flavors && data.flavors.second_common_flavor ? data.flavors.second_common_flavor.name : '—');
    setStat('flavors.second_count', data.flavors && data.flavors.second_common_flavor ? String(data.flavors.second_common_flavor.count) : '—');
    setStat('flavors.third_name', data.flavors && data.flavors.third_common_flavor ? data.flavors.third_common_flavor.name : '—');
    setStat('flavors.third_count', data.flavors && data.flavors.third_common_flavor ? String(data.flavors.third_common_flavor.count) : '—');
    // Sweep: stop the skeleton animation on every bound node, even ones not set above.
    document.querySelectorAll('[data-stats]').forEach(function(n) { n.classList.remove('animate-pulse'); });
}
// Rebuild the global auditData map and the audit <select> from the
// /api/audits/ payload, then auto-load the first audit.
function renderAudits(auditsList) {
    const counter = document.getElementById('auditsCount');
    const selector = document.getElementById('auditSelector');
    if (!auditsList || !auditsList.length) {
        if (counter) counter.textContent = '0 available';
        if (selector) { selector.disabled = false; selector.innerHTML = '<option value="">No audits</option>'; }
        return;
    }
    // Fields may arrive either JSON-encoded strings or plain arrays.
    const decode = function(v) { return typeof v === 'string' ? JSON.parse(v) : v; };
    auditData = {};
    for (const a of auditsList) {
        auditData[a.id] = {
            name: a.name,
            migrations: decode(a.migrations),
            hostData: {
                labels: decode(a.host_labels),
                current: decode(a.cpu_current),
                projected: decode(a.cpu_projected)
            }
        };
    }
    if (selector) {
        selector.disabled = false;
        selector.innerHTML = '';
        for (const audit of auditsList) {
            const opt = document.createElement('option');
            opt.value = audit.id;
            // Preview panel reads these data-* attrs on 'change'.
            opt.setAttribute('data-cpu', audit.cpu_weight || '1.0');
            opt.setAttribute('data-ram', audit.ram_weight || '1.0');
            opt.setAttribute('data-scope', audit.scope || 'Full Cluster');
            opt.setAttribute('data-strategy', audit.strategy || 'Balanced');
            opt.setAttribute('data-goal', audit.goal || '');
            const dateStr = audit.created_at ? new Date(audit.created_at).toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) : '';
            opt.textContent = audit.name + ' (' + dateStr + ')';
            selector.appendChild(opt);
        }
    }
    if (counter) counter.textContent = auditsList.length + ' available';
    if (auditsList.length > 0) {
        // Sync the preview panel, then render table + chart for the first audit.
        document.getElementById('auditSelector').dispatchEvent(new Event('change'));
        loadSelectedAudit();
    }
}
// Refresh the migration table and the CPU chart for the audit currently
// picked in the selector.
function loadSelectedAudit() {
    const selectedId = document.getElementById('auditSelector').value;
    updateMigrationTable(selectedId);
    updateCPUCharts(selectedId);
}
// Render the migration-recommendation table for one audit.
// SECURITY: instance/host/flavor names come from the API and are interpolated
// into innerHTML, so every field is HTML-escaped first to prevent markup or
// script injection via crafted instance names.
function updateMigrationTable(auditId) {
    const tbody = document.getElementById('migrationTableBody');
    const migrationCount = document.getElementById('migrationCount');
    const data = auditData[auditId];
    // Minimal HTML escaper for text interpolated into markup.
    const esc = function(value) {
        return String(value == null ? '' : value)
            .replace(/&/g, '&amp;')
            .replace(/</g, '&lt;')
            .replace(/>/g, '&gt;')
            .replace(/"/g, '&quot;')
            .replace(/'/g, '&#39;');
    };
    if (!data || !data.migrations || data.migrations.length === 0) {
        tbody.innerHTML = `
            <tr>
                <td colspan="4" class="text-center py-6 text-base-content/60">
                    No migration actions recommended
                </td>
            </tr>
        `;
        migrationCount.textContent = '0 actions';
        return;
    }
    let html = '';
    data.migrations.forEach(migration => {
        const impact = migration.impact || 'Low';
        // Badge color by qualitative impact; unknown values fall back to neutral.
        const impactClass = {
            'Low': 'badge-success',
            'Medium': 'badge-warning',
            'High': 'badge-error'
        }[impact] || 'badge-neutral';
        html += `
            <tr>
                <td class="font-medium">
                    <div>${esc(migration.instanceName)}</div>
                </td>
                <td>
                    <div class="flex items-center gap-2">
                        <span class="badge badge-outline badge-xs">${esc(migration.source)}</span>
                        <svg class="w-3 h-3 text-base-content/30" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                            <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 7l5 5m0 0l-5 5m5-5H6"/>
                        </svg>
                        <span class="badge badge-primary badge-outline badge-xs">${esc(migration.destination)}</span>
                    </div>
                </td>
                <td>
                    <code class="text-xs bg-base-200 px-2 py-1 rounded">${esc(migration.flavor)}</code>
                </td>
                <td>
                    <span class="badge ${impactClass} badge-xs">${esc(impact)}</span>
                </td>
            </tr>
        `;
    });
    tbody.innerHTML = html;
    migrationCount.textContent = `${data.migrations.length} action${data.migrations.length !== 1 ? 's' : ''}`;
}
// Update CPU chart (combined current vs projected)
// Rebuilds the grouped-bar chart of per-host CPU% (Current vs Projected) for
// one audit, plus the mean / ±0.5σ summary labels and annotation lines.
// Custom legend behavior: clicking a legend entry first animates that
// dataset's bars down to zero, then hides it (via the animation onComplete
// hook and the _hidingDataset / _cpuOriginalData state stashed on the chart).
function updateCPUCharts(auditId) {
    const data = auditData[auditId];
    if (!data || !data.hostData) return;
    const ctx = document.getElementById('cpuDistributionChart').getContext('2d');
    // Summary labels reflect the CURRENT distribution only.
    const currentStats = calculateStats(data.hostData.current);
    document.getElementById('currentCpuMean').textContent = currentStats.mean.toFixed(1);
    document.getElementById('currentCpuStd').textContent = (currentStats.std * 0.5).toFixed(1);
    // Full rebuild on every call (also picks up theme-variable changes).
    if (cpuDistributionChart) cpuDistributionChart.destroy();
    const colors = {
        primary: getCSSVar('--color-primary'),
        secondary: getCSSVar('--color-secondary'),
        accent: getCSSVar('--color-accent'),
        neutral: getCSSVar('--color-neutral'),
        info: getCSSVar('--color-info'),
        success: getCSSVar('--color-success'),
        warning: getCSSVar('--color-warning'),
        error: getCSSVar('--color-error')
    };
    const textColor = getCSSVar('--color-base-content');
    const gridColor = getCSSVar('--chart-grid-color') || textColor;
    cpuDistributionChart = new Chart(ctx, {
        type: 'bar',
        data: {
            labels: data.hostData.labels,
            datasets: [
                {
                    label: 'Current',
                    // '40' suffix = hex alpha appended to the CSS color value.
                    data: data.hostData.current.slice(),
                    backgroundColor: colors.info + '40',
                    borderColor: colors.info,
                    borderWidth: 1,
                    borderRadius: 3
                },
                {
                    label: 'Projected',
                    data: data.hostData.projected.slice(),
                    backgroundColor: colors.warning + '40',
                    borderColor: colors.warning,
                    borderWidth: 1,
                    borderRadius: 3
                }
            ]
        },
        options: {
            responsive: true,
            maintainAspectRatio: false,
            animation: {
                // Second phase of the legend hide: once the bars have animated
                // to zero, actually hide the dataset and restore its real data.
                onComplete: function() {
                    var chart = this;
                    // Chart.js version differences: `this` may be the chart or
                    // an animation context wrapping it — TODO confirm target version.
                    if (typeof chart.getDatasetMeta !== 'function') chart = chart.chart;
                    if (!chart || chart._hidingDataset === undefined) return;
                    var i = chart._hidingDataset;
                    chart.getDatasetMeta(i).hidden = true;
                    chart.data.datasets[i].data = chart._cpuOriginalData[i].slice();
                    delete chart._hidingDataset;
                    chart.update('none');
                }
            },
            plugins: {
                legend: {
                    display: true,
                    position: 'top',
                    align: 'center',
                    // First phase of the legend toggle: animate bars to/from zero
                    // instead of Chart.js's default instant show/hide.
                    onClick: function(e, legendItem, legend) {
                        const i = legendItem.datasetIndex;
                        const chart = legend.chart;
                        const len = chart.data.labels.length;
                        if (chart.isDatasetVisible(i)) {
                            // Flag consumed by animation.onComplete above.
                            chart._hidingDataset = i;
                            chart.data.datasets[i].data = Array(len).fill(0);
                            chart.update();
                        } else {
                            // Show again: start at zero, then animate up to the
                            // preserved original values.
                            chart.data.datasets[i].data = Array(len).fill(0);
                            chart.show(i);
                            chart.update('none');
                            chart.data.datasets[i].data = chart._cpuOriginalData[i].slice();
                            chart.update();
                        }
                    },
                    labels: {
                        usePointStyle: true,
                        pointStyle: 'rect',
                        boxWidth: 14,
                        boxHeight: 14,
                        padding: 12,
                        color: textColor,
                        // Custom labels so swatches use the dataset border color
                        // and text follows the current theme.
                        generateLabels: function(chart) {
                            const datasets = chart.data.datasets;
                            const labelColor = getCSSVar('--color-base-content');
                            return datasets.map(function(ds, i) {
                                return {
                                    text: ds.label,
                                    fillStyle: ds.borderColor,
                                    strokeStyle: ds.borderColor,
                                    lineWidth: 1,
                                    fontColor: labelColor,
                                    color: labelColor,
                                    hidden: !chart.isDatasetVisible(i),
                                    datasetIndex: i
                                };
                            });
                        }
                    }
                },
                tooltip: {
                    callbacks: {
                        label: (ctx) => `${ctx.dataset.label}: ${Number(ctx.parsed.y).toFixed(2)}% CPU`
                    }
                },
                // Horizontal reference lines: mean (solid) and mean ± 0.5σ (dashed).
                annotation: {
                    annotations: {
                        MeanLine: {
                            type: 'line',
                            yMin: currentStats.mean.toFixed(1),
                            yMax: currentStats.mean.toFixed(1),
                            borderColor: colors.success,
                            borderWidth: 2,
                            borderDash: []
                        },
                        upperStdLine: {
                            type: 'line',
                            yMin: (currentStats.mean + currentStats.std * 0.5).toFixed(1),
                            yMax: (currentStats.mean + currentStats.std * 0.5).toFixed(1),
                            borderColor: colors.error,
                            borderWidth: 1,
                            borderDash: [5, 5]
                        },
                        lowerStdLine: {
                            type: 'line',
                            // Clamp at 0 so the lower band never renders below the axis.
                            yMin: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0,
                            yMax: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0,
                            borderColor: colors.error,
                            borderWidth: 1,
                            borderDash: [5, 5]
                        }
                    }
                }
            },
            scales: {
                y: {
                    beginAtZero: true,
                    max: 100,
                    grid: { drawBorder: false, color: gridColor },
                    ticks: {
                        color: textColor,
                        callback: value => value + '%'
                    }
                },
                x: {
                    grid: { display: false },
                    ticks: {
                        display: false
                    },
                    barPercentage: 1,
                    categoryPercentage: 0.85
                }
            }
        }
    });
    // Pristine copies used by the legend hide/show animation to restore data.
    cpuDistributionChart._cpuOriginalData = [
        data.hostData.current.slice(),
        data.hostData.projected.slice()
    ];
}
// Utility functions
// Population mean and standard deviation of a numeric array.
// Returns { mean: 0, std: 0 } for a missing/empty array instead of NaN
// (division by zero), so chart labels and annotation lines render safely
// when an audit has no host samples.
function calculateStats(data) {
    if (!data || data.length === 0) return { mean: 0, std: 0 };
    const mean = data.reduce((a, b) => a + b, 0) / data.length;
    // Population variance (divide by N, not N-1): the hosts ARE the population.
    const variance = data.reduce((a, b) => a + Math.pow(b - mean, 2), 0) / data.length;
    const std = Math.sqrt(variance);
    return { mean, std };
}
// Bootstrap: in skeleton mode fetch stats + audits from the API; otherwise
// use the server-rendered auditData / CURRENT_CLUSTER already inlined above.
// With zero audits, the chart falls back to the live cluster snapshot
// (projected == current, so both bars coincide).
document.addEventListener('DOMContentLoaded', function() {
    if (SKELETON_MODE) {
        Promise.all([
            fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); }),
            fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); })
        ]).then(function(results) {
            renderStats(results[0]);
            renderAudits(results[1].audits);
            if (!results[1].audits || results[1].audits.length === 0) {
                // No audits: chart the current cluster state under a synthetic id.
                var cc = results[1].current_cluster;
                if (cc && cc.host_labels && cc.cpu_current && cc.host_labels.length) {
                    auditData["current"] = {
                        hostData: {
                            labels: cc.host_labels,
                            current: cc.cpu_current,
                            projected: cc.cpu_current
                        }
                    };
                    updateCPUCharts('current');
                }
            }
        }).catch(function(err) {
            // Promise.all is all-or-nothing: retry each endpoint independently
            // (best-effort, failures silently ignored) so one bad API doesn't
            // blank the whole dashboard.
            var msg = err.status ? 'Failed to load data (' + err.status + ')' : 'Failed to load data';
            var countEl = document.getElementById('auditsCount');
            if (countEl) countEl.textContent = msg;
            fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d) renderStats(d); });
            fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d && d.audits) renderAudits(d.audits); });
        });
    } else {
        // Server-rendered path: template tags resolve at render time.
        var initialAudit = "{% if audits %}{{ audits.0.id }}{% endif %}";
        if (initialAudit && auditData[initialAudit]) {
            document.getElementById('auditSelector').dispatchEvent(new Event('change'));
            loadSelectedAudit();
        } else if (!initialAudit && CURRENT_CLUSTER && CURRENT_CLUSTER.host_labels && CURRENT_CLUSTER.host_labels.length) {
            auditData["current"] = {
                hostData: {
                    labels: CURRENT_CLUSTER.host_labels,
                    current: CURRENT_CLUSTER.cpu_current,
                    projected: CURRENT_CLUSTER.cpu_current
                }
            };
            updateCPUCharts('current');
        }
    }
});
// Rebuild the CPU chart when the theme flips so it re-reads the new
// CSS color variables.
document.addEventListener('themechange', function() {
    if (!cpuDistributionChart) return;
    const selectedId = document.getElementById('auditSelector').value;
    cpuDistributionChart.destroy();
    cpuDistributionChart = null;
    if (selectedId) updateCPUCharts(selectedId);
});
var INITIAL_AUDIT_ID = "{% if audits %}{{ audits.0.id }}{% endif %}";
</script>
<script src="{% static 'js/dashboard.js' %}"></script>
{% endblock %}
{% block css %}
@@ -910,4 +55,4 @@
@apply px-1.5 py-0.5 text-xs;
}
</style>
{% endblock %}
{% endblock %}

View File

@@ -48,7 +48,7 @@ INSTALLED_APPS = [
PROMETHEUS_URL = "http://10.226.74.53:9090/"
PROMETHEUS_METRICS = {
"cpu_usage": "rate(libvirt_domain_info_cpu_time_seconds_total[300s])",
"ram_usage": "avg_over_time(libvirt_domain_info_memory_usage_bytes[300s]",
"ram_usage": "avg_over_time(libvirt_domain_info_memory_usage_bytes[300s])",
}
# Openstack cloud settings