Add API endpoints for stats and audits, implement data collection functions, and enhance index view with skeleton context
This commit is contained in:
@@ -3,6 +3,7 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.http import JsonResponse
|
||||
from django.shortcuts import render
|
||||
from dashboard.openstack_utils.connect import get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
@@ -129,15 +130,123 @@ def collect_context():
|
||||
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
|
||||
return context
|
||||
|
||||
|
||||
def collect_stats():
    """Assemble the dashboard stats payload (no audits).

    Returns a dict with "region", "pcpu", "vcpu", "pram", "vram", "vm" and
    "flavors" sections, derived from Prometheus metrics and the OpenStack
    flavor list. Audits are collected separately by collect_audits().
    """
    conn = get_connection()
    # NOTE(review): reads a private SDK attribute for the region name — confirm
    # there is no public accessor on the connection object.
    region = conn._compute_region
    flavor_data = get_flavor_list(connection=conn)
    m = _fetch_prometheus_metrics()

    # `or 1` guards the density division when Prometheus reports no hosts.
    hosts = m.get("hosts_total") or 1
    cpu_phys = m.get("pcpu_total", 0)
    cpu_used = m.get("pcpu_usage", 0)
    cpu_alloc = m.get("vcpu_allocated", 0)
    cpu_oc_max = m.get("vcpu_overcommit_max", 0)
    ram_phys = m.get("pram_total", 0)
    ram_used = m.get("pram_usage", 0)
    ram_alloc = m.get("vram_allocated", 0)
    ram_oc_max = m.get("vram_overcommit_max", 0)
    vm_total = m.get("vm_count", 0)
    vm_up = m.get("vm_active", 0)

    # Virtual capacity is physical capacity scaled by the overcommit ceiling.
    cpu_virt = cpu_phys * cpu_oc_max
    ram_virt = ram_phys * ram_oc_max

    def _pct(part, whole):
        # Percentage with a safe 0 fallback for a zero/missing denominator.
        return (part / whole * 100) if whole else 0

    def _ratio(part, whole):
        # Plain quotient with a safe 0 fallback for a zero/missing denominator.
        return (part / whole) if whole else 0

    return {
        "region": {"name": region, "hosts_total": hosts},
        "pcpu": {
            "total": cpu_phys,
            "usage": cpu_used,
            "free": cpu_phys - cpu_used,
            "used_percentage": _pct(cpu_used, cpu_phys),
        },
        "vcpu": {
            "total": cpu_virt,
            "allocated": cpu_alloc,
            "free": cpu_virt - cpu_alloc,
            "allocated_percentage": _pct(cpu_alloc, cpu_virt),
            "overcommit_ratio": _ratio(cpu_alloc, cpu_phys),
            "overcommit_max": cpu_oc_max,
        },
        "pram": {
            "total": ram_phys,
            "usage": ram_used,
            "free": ram_phys - ram_used,
            "used_percentage": _pct(ram_used, ram_phys),
        },
        "vram": {
            "total": ram_virt,
            "allocated": ram_alloc,
            "free": ram_virt - ram_alloc,
            "allocated_percentage": _pct(ram_alloc, ram_virt),
            "overcommit_ratio": _ratio(ram_alloc, ram_phys),
            "overcommit_max": ram_oc_max,
        },
        "vm": {
            "count": vm_total,
            "active": vm_up,
            "stopped": vm_total - vm_up,
            "avg_cpu": _ratio(cpu_alloc, vm_total),
            "avg_ram": _ratio(ram_alloc, vm_total),
            "density": _ratio(vm_total, hosts),
        },
        "flavors": flavor_data,
    }
|
||||
|
||||
|
||||
def collect_audits():
    """Return the audit list with per-audit fields JSON-serialized.

    The frontend embeds these values directly, so the structured fields are
    pre-encoded with json.dumps before the list is returned.
    """
    conn = get_connection()
    audit_rows = get_audits(connection=conn)
    # Same four fields, serialized in the same order as before.
    for row in audit_rows:
        for field in ("migrations", "host_labels", "cpu_current", "cpu_projected"):
            row[field] = json.dumps(row[field])
    return audit_rows
|
||||
|
||||
|
||||
def _skeleton_context():
|
||||
"""Minimal context for skeleton-only index render."""
|
||||
empty_flavors = {
|
||||
"first_common_flavor": {"name": "—", "count": 0},
|
||||
"second_common_flavor": None,
|
||||
"third_common_flavor": None,
|
||||
}
|
||||
return {
|
||||
"skeleton": True,
|
||||
"region": {"name": "—", "hosts_total": 0},
|
||||
"pcpu": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
|
||||
"pram": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
|
||||
"vcpu": {"total": 0, "allocated": 0, "free": 0, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 0},
|
||||
"vram": {"total": 0, "allocated": 0, "free": 0, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 0},
|
||||
"vm": {"count": 0, "active": 0, "stopped": 0, "avg_cpu": 0, "avg_ram": 0, "density": 0},
|
||||
"flavors": empty_flavors,
|
||||
"audits": [],
|
||||
}
|
||||
|
||||
|
||||
def index(request):
    """Render the dashboard page.

    When settings.USE_MOCK_DATA is truthy the full mock context is rendered;
    otherwise only a lightweight skeleton context is returned and the real
    data is loaded by the frontend from api_stats / api_audits.
    """
    if getattr(settings, "USE_MOCK_DATA", False):
        return render(request, "index.html", get_mock_context())
    return render(request, "index.html", _skeleton_context())
||||
def api_stats(request):
    """JSON endpoint returning the cached dashboard stats payload.

    The payload from collect_stats() is cached under "dashboard_stats" for
    settings.DASHBOARD_CACHE_TTL seconds (default 120) so repeated frontend
    polling does not re-query Prometheus/OpenStack on every request.

    Fix: the previous body still contained the pre-refactor code path
    (collect_context() + render of index.html) ahead of the JSON logic, so the
    endpoint returned the HTML page and the JsonResponse branch was
    unreachable dead code. Only the intended JSON path is kept.
    """
    cache_key = "dashboard_stats"
    cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
    data = cache.get(cache_key)
    if data is None:
        data = collect_stats()
        cache.set(cache_key, data, timeout=cache_ttl)
    return JsonResponse(data)
|
||||
|
||||
|
||||
def api_audits(request):
    """JSON endpoint returning the cached audit list.

    The list from collect_audits() is cached under "dashboard_audits" for
    settings.DASHBOARD_CACHE_TTL seconds (default 120) and wrapped in an
    {"audits": [...]} envelope for the frontend.
    """
    key = "dashboard_audits"
    ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
    audit_list = cache.get(key)
    if audit_list is None:
        audit_list = collect_audits()
        cache.set(key, audit_list, timeout=ttl)
    return JsonResponse({"audits": audit_list})
|
||||
Reference in New Issue
Block a user