develop #1
72
Dockerfile
72
Dockerfile
@@ -1,37 +1,37 @@
|
||||
FROM alpine:3 AS build
|
||||
|
||||
RUN apk update && \
|
||||
apk add --no-cache --virtual .build-deps \
|
||||
ca-certificates gcc postgresql-dev linux-headers musl-dev \
|
||||
libffi-dev jpeg-dev zlib-dev \
|
||||
git bash build-base python3-dev \
|
||||
dos2unix
|
||||
|
||||
RUN python3 -m venv /venv
|
||||
ENV PATH "/venv/bin:$PATH"
|
||||
COPY ./requirements.txt /
|
||||
RUN pip install -r /requirements.txt
|
||||
|
||||
COPY ./docker-entrypoint.sh /docker-entrypoint.sh
|
||||
RUN dos2unix /docker-entrypoint.sh && \
|
||||
chmod +x /docker-entrypoint.sh
|
||||
|
||||
|
||||
FROM alpine:3
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
ENV LC_ALL C.UTF-8
|
||||
|
||||
ENV PYTHONUNBUFFERED 1
|
||||
ENV PATH "/venv/bin:$PATH"
|
||||
|
||||
RUN apk add --no-cache --update python3 curl
|
||||
|
||||
COPY --from=build /venv /venv
|
||||
COPY --from=build /docker-entrypoint.sh /docker-entrypoint.sh
|
||||
|
||||
WORKDIR /app
|
||||
COPY . /app
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
# ---- Build stage: compile wheels inside a virtualenv ----
FROM alpine:3 AS build

# Build-time packages only, grouped as a virtual package (.build-deps);
# they never reach the runtime stage below.
# Fix: dropped the preceding `apk update` — it writes an index cache that
# `--no-cache` is meant to avoid; `--no-cache` fetches a fresh index itself.
RUN apk add --no-cache --virtual .build-deps \
        ca-certificates gcc postgresql-dev linux-headers musl-dev \
        libffi-dev jpeg-dev zlib-dev \
        git bash build-base python3-dev \
        dos2unix

RUN python3 -m venv /venv
ENV PATH="/venv/bin:$PATH"

COPY ./requirements.txt /
RUN pip install -r /requirements.txt

COPY ./docker-entrypoint.sh /docker-entrypoint.sh
# Normalize CRLF endings (Windows checkouts) and mark executable.
RUN dos2unix /docker-entrypoint.sh && \
    chmod +x /docker-entrypoint.sh


# ---- Runtime stage: minimal image with just Python + the venv ----
FROM alpine:3

# Modern `ENV key=value` form; the space-separated form is deprecated.
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8

# Unbuffered stdout/stderr so container logs appear immediately.
ENV PYTHONUNBUFFERED=1
ENV PATH="/venv/bin:$PATH"

RUN apk add --no-cache python3 curl

COPY --from=build /venv /venv
COPY --from=build /docker-entrypoint.sh /docker-entrypoint.sh

WORKDIR /app
COPY . /app

ENTRYPOINT ["/docker-entrypoint.sh"]
# NOTE(review): Django dev server — replace with gunicorn/uwsgi for production.
CMD ["python3", "manage.py", "runserver", "0.0.0.0:8000"]
|
||||
@@ -1,8 +1,8 @@
|
||||
from django.urls import path
|
||||
from . import views
|
||||
|
||||
urlpatterns = [
|
||||
path('', views.index, name='index'),
|
||||
path('api/stats/', views.api_stats),
|
||||
path('api/audits/', views.api_audits),
|
||||
from django.urls import path

from . import views


# URL routes for the dashboard app. The API endpoints are polled by the
# index page's JavaScript to fill in the skeleton-rendered dashboard.
urlpatterns = [
    path('', views.index, name='index'),
    # Named (backward-compatible addition) so templates/JS can reverse
    # these URLs instead of hard-coding the paths.
    path('api/stats/', views.api_stats, name='api_stats'),
    path('api/audits/', views.api_audits, name='api_audits'),
]
|
||||
@@ -1,252 +1,252 @@
|
||||
import json
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.http import JsonResponse
|
||||
from django.shortcuts import render
|
||||
from dashboard.openstack_utils.connect import get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
from dashboard.openstack_utils.audits import get_audits
|
||||
from dashboard.mock_data import get_mock_context
|
||||
|
||||
# Prometheus queries run in parallel (query_key -> query string)
|
||||
_PROMETHEUS_QUERIES = {
|
||||
"hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
|
||||
"pcpu_total": "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))",
|
||||
"pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
|
||||
"vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
|
||||
"vcpu_overcommit_max": "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})",
|
||||
"pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
|
||||
"pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
|
||||
"vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
|
||||
"vram_overcommit_max": "avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))",
|
||||
"vm_count": "sum(libvirt_domain_state_code)",
|
||||
"vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
|
||||
}
|
||||
|
||||
|
||||
def _fetch_prometheus_metrics():
|
||||
"""Run all Prometheus queries in parallel and return a dict of name -> value."""
|
||||
result = {}
|
||||
with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor:
|
||||
future_to_key = {
|
||||
executor.submit(query_prometheus, query=q): key
|
||||
for key, q in _PROMETHEUS_QUERIES.items()
|
||||
}
|
||||
for future in as_completed(future_to_key):
|
||||
key = future_to_key[future]
|
||||
try:
|
||||
raw = future.result()
|
||||
if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max"):
|
||||
result[key] = float(raw)
|
||||
else:
|
||||
result[key] = int(raw)
|
||||
except (ValueError, TypeError):
|
||||
result[key] = 0 if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max") else 0
|
||||
return result
|
||||
|
||||
|
||||
def collect_context():
|
||||
connection = get_connection()
|
||||
region_name = connection._compute_region
|
||||
flavors = get_flavor_list(connection=connection)
|
||||
audits = get_audits(connection=connection)
|
||||
|
||||
metrics = _fetch_prometheus_metrics()
|
||||
hosts_total = metrics.get("hosts_total") or 1
|
||||
pcpu_total = metrics.get("pcpu_total", 0)
|
||||
pcpu_usage = metrics.get("pcpu_usage", 0)
|
||||
vcpu_allocated = metrics.get("vcpu_allocated", 0)
|
||||
vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
|
||||
pram_total = metrics.get("pram_total", 0)
|
||||
pram_usage = metrics.get("pram_usage", 0)
|
||||
vram_allocated = metrics.get("vram_allocated", 0)
|
||||
vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
|
||||
vm_count = metrics.get("vm_count", 0)
|
||||
vm_active = metrics.get("vm_active", 0)
|
||||
|
||||
vcpu_total = pcpu_total * vcpu_overcommit_max
|
||||
vram_total = pram_total * vram_overcommit_max
|
||||
|
||||
context = {
|
||||
# <--- Region data --->
|
||||
"region": {
|
||||
"name": region_name,
|
||||
"hosts_total": hosts_total,
|
||||
},
|
||||
# <--- CPU data --->
|
||||
# pCPU data
|
||||
"pcpu": {
|
||||
"total": pcpu_total,
|
||||
"usage": pcpu_usage,
|
||||
"free": pcpu_total - pcpu_usage,
|
||||
"used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
|
||||
},
|
||||
# vCPU data
|
||||
"vcpu": {
|
||||
"total": vcpu_total,
|
||||
"allocated": vcpu_allocated,
|
||||
"free": vcpu_total - vcpu_allocated,
|
||||
"allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
|
||||
"overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
|
||||
"overcommit_max": vcpu_overcommit_max,
|
||||
},
|
||||
# <--- RAM data --->
|
||||
# pRAM data
|
||||
"pram": {
|
||||
"total": pram_total,
|
||||
"usage": pram_usage,
|
||||
"free": pram_total - pram_usage,
|
||||
"used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
|
||||
},
|
||||
# vRAM data
|
||||
"vram": {
|
||||
"total": vram_total,
|
||||
"allocated": vram_allocated,
|
||||
"free": vram_total - vram_allocated,
|
||||
"allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
|
||||
"overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
|
||||
"overcommit_max": vram_overcommit_max,
|
||||
},
|
||||
# <--- VM data --->
|
||||
"vm": {
|
||||
"count": vm_count,
|
||||
"active": vm_active,
|
||||
"stopped": vm_count - vm_active,
|
||||
"avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
|
||||
"avg_ram": vram_allocated / vm_count if vm_count else 0,
|
||||
"density": vm_count / hosts_total if hosts_total else 0,
|
||||
},
|
||||
"flavors": flavors,
|
||||
"audits": audits,
|
||||
}
|
||||
# Serialize audit list fields for JavaScript so cached context is render-ready
|
||||
for audit in context["audits"]:
|
||||
audit["migrations"] = json.dumps(audit["migrations"])
|
||||
audit["host_labels"] = json.dumps(audit["host_labels"])
|
||||
audit["cpu_current"] = json.dumps(audit["cpu_current"])
|
||||
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
|
||||
return context
|
||||
|
||||
|
||||
def collect_stats():
|
||||
"""Build stats dict: region, pcpu, pram, vcpu, vram, vm, flavors (no audits)."""
|
||||
connection = get_connection()
|
||||
region_name = connection._compute_region
|
||||
flavors = get_flavor_list(connection=connection)
|
||||
metrics = _fetch_prometheus_metrics()
|
||||
hosts_total = metrics.get("hosts_total") or 1
|
||||
pcpu_total = metrics.get("pcpu_total", 0)
|
||||
pcpu_usage = metrics.get("pcpu_usage", 0)
|
||||
vcpu_allocated = metrics.get("vcpu_allocated", 0)
|
||||
vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
|
||||
pram_total = metrics.get("pram_total", 0)
|
||||
pram_usage = metrics.get("pram_usage", 0)
|
||||
vram_allocated = metrics.get("vram_allocated", 0)
|
||||
vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
|
||||
vm_count = metrics.get("vm_count", 0)
|
||||
vm_active = metrics.get("vm_active", 0)
|
||||
vcpu_total = pcpu_total * vcpu_overcommit_max
|
||||
vram_total = pram_total * vram_overcommit_max
|
||||
return {
|
||||
"region": {"name": region_name, "hosts_total": hosts_total},
|
||||
"pcpu": {
|
||||
"total": pcpu_total,
|
||||
"usage": pcpu_usage,
|
||||
"free": pcpu_total - pcpu_usage,
|
||||
"used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
|
||||
},
|
||||
"vcpu": {
|
||||
"total": vcpu_total,
|
||||
"allocated": vcpu_allocated,
|
||||
"free": vcpu_total - vcpu_allocated,
|
||||
"allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
|
||||
"overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
|
||||
"overcommit_max": vcpu_overcommit_max,
|
||||
},
|
||||
"pram": {
|
||||
"total": pram_total,
|
||||
"usage": pram_usage,
|
||||
"free": pram_total - pram_usage,
|
||||
"used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
|
||||
},
|
||||
"vram": {
|
||||
"total": vram_total,
|
||||
"allocated": vram_allocated,
|
||||
"free": vram_total - vram_allocated,
|
||||
"allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
|
||||
"overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
|
||||
"overcommit_max": vram_overcommit_max,
|
||||
},
|
||||
"vm": {
|
||||
"count": vm_count,
|
||||
"active": vm_active,
|
||||
"stopped": vm_count - vm_active,
|
||||
"avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
|
||||
"avg_ram": vram_allocated / vm_count if vm_count else 0,
|
||||
"density": vm_count / hosts_total if hosts_total else 0,
|
||||
},
|
||||
"flavors": flavors,
|
||||
}
|
||||
|
||||
|
||||
def collect_audits():
|
||||
"""Build audits list with serialized fields for frontend."""
|
||||
connection = get_connection()
|
||||
audits = get_audits(connection=connection)
|
||||
for audit in audits:
|
||||
audit["migrations"] = json.dumps(audit["migrations"])
|
||||
audit["host_labels"] = json.dumps(audit["host_labels"])
|
||||
audit["cpu_current"] = json.dumps(audit["cpu_current"])
|
||||
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
|
||||
return audits
|
||||
|
||||
|
||||
def _skeleton_context():
|
||||
"""Minimal context for skeleton-only index render."""
|
||||
empty_flavors = {
|
||||
"first_common_flavor": {"name": "—", "count": 0},
|
||||
"second_common_flavor": None,
|
||||
"third_common_flavor": None,
|
||||
}
|
||||
return {
|
||||
"skeleton": True,
|
||||
"region": {"name": "—", "hosts_total": 0},
|
||||
"pcpu": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
|
||||
"pram": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
|
||||
"vcpu": {"total": 0, "allocated": 0, "free": 0, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 0},
|
||||
"vram": {"total": 0, "allocated": 0, "free": 0, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 0},
|
||||
"vm": {"count": 0, "active": 0, "stopped": 0, "avg_cpu": 0, "avg_ram": 0, "density": 0},
|
||||
"flavors": empty_flavors,
|
||||
"audits": [],
|
||||
}
|
||||
|
||||
|
||||
def index(request):
|
||||
if getattr(settings, "USE_MOCK_DATA", False):
|
||||
context = get_mock_context()
|
||||
return render(request, "index.html", context)
|
||||
context = _skeleton_context()
|
||||
return render(request, "index.html", context)
|
||||
|
||||
|
||||
def api_stats(request):
|
||||
cache_key = "dashboard_stats"
|
||||
cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
|
||||
data = cache.get(cache_key)
|
||||
if data is None:
|
||||
data = collect_stats()
|
||||
cache.set(cache_key, data, timeout=cache_ttl)
|
||||
return JsonResponse(data)
|
||||
|
||||
|
||||
def api_audits(request):
|
||||
cache_key = "dashboard_audits"
|
||||
cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
|
||||
audits = cache.get(cache_key)
|
||||
if audits is None:
|
||||
audits = collect_audits()
|
||||
cache.set(cache_key, audits, timeout=cache_ttl)
|
||||
import json
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.http import JsonResponse
|
||||
from django.shortcuts import render
|
||||
from dashboard.openstack_utils.connect import get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
from dashboard.openstack_utils.audits import get_audits
|
||||
from dashboard.mock_data import get_mock_context
|
||||
|
||||
# Prometheus queries run in parallel (query_key -> query string)
|
||||
_PROMETHEUS_QUERIES = {
|
||||
"hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
|
||||
"pcpu_total": "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))",
|
||||
"pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
|
||||
"vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
|
||||
"vcpu_overcommit_max": "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})",
|
||||
"pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
|
||||
"pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
|
||||
"vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
|
||||
"vram_overcommit_max": "avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))",
|
||||
"vm_count": "sum(libvirt_domain_state_code)",
|
||||
"vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
|
||||
}
|
||||
|
||||
|
||||
# Metric keys whose Prometheus results are fractional; all other metrics
# are coerced to int (see the try-branch below).
_FLOAT_METRICS = frozenset({"pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max"})


def _fetch_prometheus_metrics():
    """Run all Prometheus queries in parallel and return a dict of name -> value.

    Returns:
        dict: query key -> numeric value. Keys in ``_FLOAT_METRICS`` are
        floats, all others ints. A query whose result cannot be coerced
        yields a typed zero instead of raising.
    """
    result = {}
    with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor:
        future_to_key = {
            executor.submit(query_prometheus, query=q): key
            for key, q in _PROMETHEUS_QUERIES.items()
        }
        for future in as_completed(future_to_key):
            key = future_to_key[future]
            cast = float if key in _FLOAT_METRICS else int
            try:
                result[key] = cast(future.result())
            except (ValueError, TypeError):
                # Bug fix: the original fallback was `0 if key in (...) else 0`,
                # i.e. always int 0 regardless of branch. Keep the zero typed
                # per metric so float metrics stay floats.
                result[key] = cast(0)
    return result
|
||||
|
||||
|
||||
def collect_context():
    """Build the full template context: stats plus render-ready audits.

    Delegates to collect_stats() and collect_audits(), which together
    produce exactly this context; the original duplicated ~100 lines of
    metric extraction and percentage arithmetic here.

    NOTE(review): each helper opens its own OpenStack connection (two per
    call instead of one) — acceptable for a dashboard page load, but
    confirm get_connection() is cheap or cached.
    """
    context = collect_stats()
    # collect_audits() already json.dumps()-encodes the list fields, so the
    # returned context is render-ready for the frontend JavaScript.
    context["audits"] = collect_audits()
    return context
|
||||
|
||||
|
||||
def collect_stats():
    """Build stats dict: region, pcpu, pram, vcpu, vram, vm, flavors (no audits)."""
    conn = get_connection()
    m = _fetch_prometheus_metrics()

    def pct(part, whole):
        # Percentage with a divide-by-zero guard.
        return (part / whole * 100) if whole else 0

    def ratio(part, whole):
        # Plain quotient with a divide-by-zero guard.
        return (part / whole) if whole else 0

    hosts_total = m.get("hosts_total") or 1
    pcpu_total = m.get("pcpu_total", 0)
    pcpu_usage = m.get("pcpu_usage", 0)
    vcpu_allocated = m.get("vcpu_allocated", 0)
    vcpu_max = m.get("vcpu_overcommit_max", 0)
    pram_total = m.get("pram_total", 0)
    pram_usage = m.get("pram_usage", 0)
    vram_allocated = m.get("vram_allocated", 0)
    vram_max = m.get("vram_overcommit_max", 0)
    vm_count = m.get("vm_count", 0)
    vm_active = m.get("vm_active", 0)

    # Virtual capacity = physical capacity scaled by the overcommit ratio.
    vcpu_total = pcpu_total * vcpu_max
    vram_total = pram_total * vram_max

    return {
        "region": {"name": conn._compute_region, "hosts_total": hosts_total},
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_usage,
            "free": pcpu_total - pcpu_usage,
            "used_percentage": pct(pcpu_usage, pcpu_total),
        },
        "vcpu": {
            "total": vcpu_total,
            "allocated": vcpu_allocated,
            "free": vcpu_total - vcpu_allocated,
            "allocated_percentage": pct(vcpu_allocated, vcpu_total),
            "overcommit_ratio": ratio(vcpu_allocated, pcpu_total),
            "overcommit_max": vcpu_max,
        },
        "pram": {
            "total": pram_total,
            "usage": pram_usage,
            "free": pram_total - pram_usage,
            "used_percentage": pct(pram_usage, pram_total),
        },
        "vram": {
            "total": vram_total,
            "allocated": vram_allocated,
            "free": vram_total - vram_allocated,
            "allocated_percentage": pct(vram_allocated, vram_total),
            "overcommit_ratio": ratio(vram_allocated, pram_total),
            "overcommit_max": vram_max,
        },
        "vm": {
            "count": vm_count,
            "active": vm_active,
            "stopped": vm_count - vm_active,
            "avg_cpu": ratio(vcpu_allocated, vm_count),
            "avg_ram": ratio(vram_allocated, vm_count),
            "density": ratio(vm_count, hosts_total),
        },
        "flavors": get_flavor_list(connection=conn),
    }
|
||||
|
||||
|
||||
# Audit fields that must be JSON-encoded for the frontend charts.
_AUDIT_JSON_FIELDS = ("migrations", "host_labels", "cpu_current", "cpu_projected")


def collect_audits():
    """Build audits list with serialized fields for frontend."""
    audits = get_audits(connection=get_connection())
    for audit in audits:
        # Encode in place so the cached list is render-ready.
        for field in _AUDIT_JSON_FIELDS:
            audit[field] = json.dumps(audit[field])
    return audits
|
||||
|
||||
|
||||
def _skeleton_context():
|
||||
"""Minimal context for skeleton-only index render."""
|
||||
empty_flavors = {
|
||||
"first_common_flavor": {"name": "—", "count": 0},
|
||||
"second_common_flavor": None,
|
||||
"third_common_flavor": None,
|
||||
}
|
||||
return {
|
||||
"skeleton": True,
|
||||
"region": {"name": "—", "hosts_total": 0},
|
||||
"pcpu": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
|
||||
"pram": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
|
||||
"vcpu": {"total": 0, "allocated": 0, "free": 0, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 0},
|
||||
"vram": {"total": 0, "allocated": 0, "free": 0, "allocated_percentage": 0, "overcommit_ratio": 0, "overcommit_max": 0},
|
||||
"vm": {"count": 0, "active": 0, "stopped": 0, "avg_cpu": 0, "avg_ram": 0, "density": 0},
|
||||
"flavors": empty_flavors,
|
||||
"audits": [],
|
||||
}
|
||||
|
||||
|
||||
def index(request):
    """Render the dashboard shell; real data is fetched later via the API views."""
    use_mock = getattr(settings, "USE_MOCK_DATA", False)
    context = get_mock_context() if use_mock else _skeleton_context()
    return render(request, "index.html", context)
|
||||
|
||||
|
||||
def api_stats(request):
    """Return the stats payload as JSON, cached for DASHBOARD_CACHE_TTL seconds."""
    ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
    data = cache.get("dashboard_stats")
    if data is None:
        # Cache miss: rebuild and store for subsequent requests.
        data = collect_stats()
        cache.set("dashboard_stats", data, timeout=ttl)
    return JsonResponse(data)
|
||||
|
||||
|
||||
def api_audits(request):
    """Return the audits payload as JSON, cached for DASHBOARD_CACHE_TTL seconds."""
    ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
    audits = cache.get("dashboard_audits")
    if audits is None:
        # Cache miss: rebuild and store for subsequent requests.
        audits = collect_audits()
        cache.set("dashboard_audits", audits, timeout=ttl)
    return JsonResponse({"audits": audits})
|
||||
@@ -1,33 +1,33 @@
|
||||
asgiref==3.11.0
|
||||
certifi==2025.11.12
|
||||
cffi==2.0.0
|
||||
charset-normalizer==3.4.4
|
||||
cryptography==46.0.3
|
||||
decorator==5.2.1
|
||||
Django==5.2.8
|
||||
dogpile.cache==1.5.0
|
||||
idna==3.11
|
||||
iso8601==2.1.0
|
||||
jmespath==1.0.1
|
||||
jsonpatch==1.33
|
||||
jsonpointer==3.0.0
|
||||
keystoneauth1==5.12.0
|
||||
numpy==2.3.5
|
||||
openstacksdk==4.8.0
|
||||
os-service-types==1.8.2
|
||||
pandas==2.3.3
|
||||
pbr==7.0.3
|
||||
platformdirs==4.5.0
|
||||
psutil==7.1.3
|
||||
pycparser==2.23
|
||||
python-dateutil==2.9.0.post0
|
||||
pytz==2025.2
|
||||
PyYAML==6.0.3
|
||||
requests==2.32.5
|
||||
requestsexceptions==1.4.0
|
||||
six==1.17.0
|
||||
sqlparse==0.5.4
|
||||
stevedore==5.6.0
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.2
|
||||
urllib3==2.5.0
|
||||
asgiref==3.11.0
|
||||
certifi==2025.11.12
|
||||
cffi==2.0.0
|
||||
charset-normalizer==3.4.4
|
||||
cryptography==46.0.3
|
||||
decorator==5.2.1
|
||||
Django==5.2.8
|
||||
dogpile.cache==1.5.0
|
||||
idna==3.11
|
||||
iso8601==2.1.0
|
||||
jmespath==1.0.1
|
||||
jsonpatch==1.33
|
||||
jsonpointer==3.0.0
|
||||
keystoneauth1==5.12.0
|
||||
numpy==2.3.5
|
||||
openstacksdk==4.8.0
|
||||
os-service-types==1.8.2
|
||||
pandas==2.3.3
|
||||
pbr==7.0.3
|
||||
platformdirs==4.5.0
|
||||
psutil==7.1.3
|
||||
pycparser==2.23
|
||||
python-dateutil==2.9.0.post0
|
||||
pytz==2025.2
|
||||
PyYAML==6.0.3
|
||||
requests==2.32.5
|
||||
requestsexceptions==1.4.0
|
||||
six==1.17.0
|
||||
sqlparse==0.5.4
|
||||
stevedore==5.6.0
|
||||
typing_extensions==4.15.0
|
||||
tzdata==2025.2
|
||||
urllib3==2.5.0
|
||||
|
||||
@@ -1,76 +1,76 @@
|
||||
{% load static %}
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" data-theme="light">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{% block title %}SWatcher{% endblock %}</title>
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||
<link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,opsz,wght@0,9..40,100..1000;1,9..40,100..1000&display=swap" rel="stylesheet">
|
||||
<link rel="stylesheet" href="{% static 'css/output.css' %}">
|
||||
{% block imports %}
|
||||
{% endblock %}
|
||||
{% block css %}
|
||||
{% endblock %}
|
||||
</head>
|
||||
<body>
|
||||
<!-- Navbar -->
|
||||
<div class="navbar bg-base-100 shadow-lg border-b border-base-200 sticky top-0 z-10">
|
||||
<div class="navbar-start">
|
||||
<a class="btn btn-ghost text-xl" href="{% url 'index' %}">SWatcher</a>
|
||||
</div>
|
||||
<div class="navbar-end">
|
||||
<div class="px-1 flex items-center gap-3 pr-10">
|
||||
<button type="button" class="btn btn-ghost btn-sm no-print" onclick="window.print()" title="Save as PDF" aria-label="Save as PDF">
|
||||
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24" aria-hidden="true">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 10v6m0 0l-3-3m3 3l3-3m2 8H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"/>
|
||||
</svg>
|
||||
Save as PDF
|
||||
</button>
|
||||
<span id="regionBadge" class="badge badge-primary badge-lg">{{ region.name }}</span>
|
||||
<label class="swap swap-rotate theme-toggle no-print">
|
||||
<input type="checkbox" class="theme-controller" value="dark" />
|
||||
<svg class="swap-off fill-current w-6 h-6" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
|
||||
<path d="M5.64,17l-.71.71a1,1,0,0,0,0,1.41,1,1,0,0,0,1.41,0l.71-.71A1,1,0,0,0,5.64,17ZM5,12a1,1,0,0,0-1-1H3a1,1,0,0,0,0,2H4A1,1,0,0,0,5,12Zm7-7a1,1,0,0,0,1-1V3a1,1,0,0,0-2,0V4A1,1,0,0,0,12,5ZM5.64,7.05a1,1,0,0,0,.7.29,1,1,0,0,0,.71-.29,1,1,0,0,0,0-1.41l-.71-.71A1,1,0,0,0,4.93,6.34Zm12,.29a1,1,0,0,0,.7-.29l.71-.71a1,1,0,1,0-1.41-1.41L17,5.64a1,1,0,0,0,0,1.41A1,1,0,0,0,17.66,7.34ZM21,11H20a1,1,0,0,0,0,2h1a1,1,0,0,0,0-2Zm-9,8a1,1,0,0,0-1,1v1a1,1,0,0,0,2,0V20A1,1,0,0,0,12,19ZM18.36,17A1,1,0,0,0,17,18.36l.71.71a1,1,0,0,0,1.41,0,1,1,0,0,0,0-1.41ZM12,6.5A5.5,5.5,0,1,0,17.5,12,5.51,5.51,0,0,0,12,6.5Zm0,9A3.5,3.5,0,1,1,15.5,12,3.5,3.5,0,0,1,12,15.5Z"/>
|
||||
</svg>
|
||||
<svg class="swap-on fill-current w-6 h-6" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
|
||||
<path d="M21.64,13a1,1,0,0,0-1.05-.14,8.05,8.05,0,0,1-3.37.73A8.15,8.15,0,0,1,9.08,5.49a8.59,8.59,0,0,1,.25-2A1,1,0,0,0,8,2.36,10.14,10.14,0,1,0,22,14.05,1,1,0,0,0,21.64,13Zm-9.5,6.69A8.14,8.14,0,0,1,7.08,5.22v.27A10.15,10.15,0,0,0,17.22,15.63a9.79,9.79,0,0,0,2.1-.22A8.11,8.11,0,0,1,12.14,19.73Z"/>
|
||||
</svg>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Main Content -->
|
||||
<main class="container mx-auto px-4 py-8 min-h-screen">
|
||||
<p class="print-only text-lg font-semibold mb-4">Dashboard report</p>
|
||||
{% block content %}
|
||||
{% endblock %}
|
||||
</main>
|
||||
|
||||
<script>
|
||||
// Function to apply theme
|
||||
function applyTheme(theme) {
|
||||
document.documentElement.setAttribute('data-theme', theme);
|
||||
const checkbox = document.querySelector('.theme-controller');
|
||||
checkbox.checked = (theme === 'dark');
|
||||
|
||||
document.dispatchEvent(new Event("themechange"));
|
||||
}
|
||||
|
||||
// Load saved theme from localStorage
|
||||
const savedTheme = localStorage.getItem('theme') || 'light';
|
||||
applyTheme(savedTheme);
|
||||
|
||||
// Listen for toggle changes
|
||||
document.querySelector('.theme-controller').addEventListener('change', function() {
|
||||
const newTheme = this.checked ? 'dark' : 'light';
|
||||
applyTheme(newTheme);
|
||||
localStorage.setItem('theme', newTheme);
|
||||
});
|
||||
</script>
|
||||
{% block script %}
|
||||
{% endblock %}
|
||||
</body>
|
||||
{% load static %}
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" data-theme="light">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{% block title %}SWatcher{% endblock %}</title>
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||
<link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,opsz,wght@0,9..40,100..1000;1,9..40,100..1000&display=swap" rel="stylesheet">
|
||||
<link rel="stylesheet" href="{% static 'css/output.css' %}">
|
||||
{% block imports %}
|
||||
{% endblock %}
|
||||
{% block css %}
|
||||
{% endblock %}
|
||||
</head>
|
||||
<body>
|
||||
<!-- Navbar -->
|
||||
<div class="navbar bg-base-100 shadow-lg border-b border-base-200 sticky top-0 z-10">
|
||||
<div class="navbar-start">
|
||||
<a class="btn btn-ghost text-xl" href="{% url 'index' %}">SWatcher</a>
|
||||
</div>
|
||||
<div class="navbar-end">
|
||||
<div class="px-1 flex items-center gap-3 pr-10">
|
||||
<button type="button" class="btn btn-ghost btn-sm no-print" onclick="window.print()" title="Save as PDF" aria-label="Save as PDF">
|
||||
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24" aria-hidden="true">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 10v6m0 0l-3-3m3 3l3-3m2 8H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"/>
|
||||
</svg>
|
||||
Save as PDF
|
||||
</button>
|
||||
<span id="regionBadge" class="badge badge-primary badge-lg">{{ region.name }}</span>
|
||||
<label class="swap swap-rotate theme-toggle no-print">
|
||||
<input type="checkbox" class="theme-controller" value="dark" />
|
||||
<svg class="swap-off fill-current w-6 h-6" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
|
||||
<path d="M5.64,17l-.71.71a1,1,0,0,0,0,1.41,1,1,0,0,0,1.41,0l.71-.71A1,1,0,0,0,5.64,17ZM5,12a1,1,0,0,0-1-1H3a1,1,0,0,0,0,2H4A1,1,0,0,0,5,12Zm7-7a1,1,0,0,0,1-1V3a1,1,0,0,0-2,0V4A1,1,0,0,0,12,5ZM5.64,7.05a1,1,0,0,0,.7.29,1,1,0,0,0,.71-.29,1,1,0,0,0,0-1.41l-.71-.71A1,1,0,0,0,4.93,6.34Zm12,.29a1,1,0,0,0,.7-.29l.71-.71a1,1,0,1,0-1.41-1.41L17,5.64a1,1,0,0,0,0,1.41A1,1,0,0,0,17.66,7.34ZM21,11H20a1,1,0,0,0,0,2h1a1,1,0,0,0,0-2Zm-9,8a1,1,0,0,0-1,1v1a1,1,0,0,0,2,0V20A1,1,0,0,0,12,19ZM18.36,17A1,1,0,0,0,17,18.36l.71.71a1,1,0,0,0,1.41,0,1,1,0,0,0,0-1.41ZM12,6.5A5.5,5.5,0,1,0,17.5,12,5.51,5.51,0,0,0,12,6.5Zm0,9A3.5,3.5,0,1,1,15.5,12,3.5,3.5,0,0,1,12,15.5Z"/>
|
||||
</svg>
|
||||
<svg class="swap-on fill-current w-6 h-6" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
|
||||
<path d="M21.64,13a1,1,0,0,0-1.05-.14,8.05,8.05,0,0,1-3.37.73A8.15,8.15,0,0,1,9.08,5.49a8.59,8.59,0,0,1,.25-2A1,1,0,0,0,8,2.36,10.14,10.14,0,1,0,22,14.05,1,1,0,0,0,21.64,13Zm-9.5,6.69A8.14,8.14,0,0,1,7.08,5.22v.27A10.15,10.15,0,0,0,17.22,15.63a9.79,9.79,0,0,0,2.1-.22A8.11,8.11,0,0,1,12.14,19.73Z"/>
|
||||
</svg>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Main Content -->
|
||||
<main class="container mx-auto px-4 py-8 min-h-screen">
|
||||
<p class="print-only text-lg font-semibold mb-4">Dashboard report</p>
|
||||
{% block content %}
|
||||
{% endblock %}
|
||||
</main>
|
||||
|
||||
<script>
|
||||
// Function to apply theme
|
||||
function applyTheme(theme) {
|
||||
document.documentElement.setAttribute('data-theme', theme);
|
||||
const checkbox = document.querySelector('.theme-controller');
|
||||
checkbox.checked = (theme === 'dark');
|
||||
|
||||
document.dispatchEvent(new Event("themechange"));
|
||||
}
|
||||
|
||||
// Load saved theme from localStorage
|
||||
const savedTheme = localStorage.getItem('theme') || 'light';
|
||||
applyTheme(savedTheme);
|
||||
|
||||
// Listen for toggle changes
|
||||
document.querySelector('.theme-controller').addEventListener('change', function() {
|
||||
const newTheme = this.checked ? 'dark' : 'light';
|
||||
applyTheme(newTheme);
|
||||
localStorage.setItem('theme', newTheme);
|
||||
});
|
||||
</script>
|
||||
{% block script %}
|
||||
{% endblock %}
|
||||
</body>
|
||||
</html>
|
||||
Reference in New Issue
Block a user