Refactor Docker setup and add mock data support
- Updated .dockerignore and .gitignore for better file management. - Introduced .env.example for environment variable configuration. - Added docker-compose.dev.yml for development with mock data and live reload. - Enhanced Dockerfile to include necessary dependencies and entrypoint script. - Created mock_data.py to provide sample data for testing without OpenStack/Prometheus. - Added unit tests for template filters in dashboard. - Cleaned up various files for consistency and improved readability.
This commit is contained in:
115
dashboard/mock_data.py
Normal file
115
dashboard/mock_data.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus)."""
|
||||
import json
|
||||
|
||||
|
||||
def get_mock_context():
    """Build a render-ready context mirroring collect_context(), from canned data.

    Used when USE_MOCK_DATA is enabled so the dashboard renders without
    OpenStack or Prometheus. The audit chart fields (migrations, host_labels,
    cpu_current, cpu_projected) are JSON strings, ready for inline JS.
    """
    # Fake-cluster capacity constants.
    hosts_total, vm_count, vm_active = 6, 24, 22
    pcpu_total, pcpu_usage = 48, 12.5
    vcpu_allocated, vcpu_overcommit_max = 96, 2.0
    gib = 1024 ** 3
    pram_total, pram_usage = 256 * gib, 120 * gib  # bytes
    vram_allocated, vram_overcommit_max = 192 * gib, 1.5

    # Virtual ceilings derived from the overcommit ratios.
    vcpu_total = pcpu_total * vcpu_overcommit_max
    vram_total = pram_total * vram_overcommit_max

    # Two sample audits with serialized fields for JS.
    host_labels = ["compute-0", "compute-1", "compute-2", "compute-3", "compute-4", "compute-5"]
    cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 48.5]
    cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0]

    def _audit(uid, name, created, strategy, goal, migrations, projected):
        # One fake Watcher audit; shared fields are filled in here.
        return {
            "id": uid,
            "name": name,
            "created_at": created,
            "strategy": strategy,
            "goal": goal,
            "type": "ONESHOT",
            "scope": "Full Cluster",
            "cpu_weight": "1.0",
            "ram_weight": "1.0",
            "migrations": json.dumps(migrations),
            "host_labels": json.dumps(host_labels),
            "cpu_current": json.dumps(cpu_current),
            "cpu_projected": json.dumps(projected),
        }

    audits = [
        _audit(
            "mock-audit-uuid-1",
            "Mock audit (balanced)",
            "2025-02-01T10:00:00",
            "Balanced",
            "BALANCED",
            [
                {
                    "instanceName": "instance-1",
                    "source": "compute-0",
                    "destination": "compute-3",
                    "flavor": "m1.small",
                    "impact": "Low",
                }
            ],
            cpu_projected,
        ),
        _audit(
            "mock-audit-uuid-2",
            "Mock audit (workload consolidation)",
            "2025-02-02T14:30:00",
            "Workload consolidation",
            "WORKLOAD_CONSOLIDATION",
            [],
            [40.0, 42.0, 50.0, 43.0, 36.0, 45.0],
        ),
    ]

    return {
        "region": {
            "name": "mock-region",
            "hosts_total": hosts_total,
        },
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_usage,
            "free": pcpu_total - pcpu_usage,
            "used_percentage": pcpu_usage / pcpu_total * 100,
        },
        "vcpu": {
            "total": int(vcpu_total),
            "allocated": vcpu_allocated,
            "free": int(vcpu_total) - vcpu_allocated,
            "allocated_percentage": vcpu_allocated / vcpu_total * 100,
            "overcommit_ratio": vcpu_allocated / pcpu_total,
            "overcommit_max": vcpu_overcommit_max,
        },
        "pram": {
            "total": pram_total,
            "usage": pram_usage,
            "free": pram_total - pram_usage,
            "used_percentage": pram_usage / pram_total * 100,
        },
        "vram": {
            "total": vram_total,
            "allocated": vram_allocated,
            "free": vram_total - vram_allocated,
            "allocated_percentage": vram_allocated / vram_total * 100,
            "overcommit_ratio": vram_allocated / pram_total,
            "overcommit_max": vram_overcommit_max,
        },
        "vm": {
            "count": vm_count,
            "active": vm_active,
            "stopped": vm_count - vm_active,
            "avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
            "avg_ram": vram_allocated / vm_count if vm_count else 0,
            "density": vm_count / hosts_total,
        },
        "flavors": {
            "first_common_flavor": {"name": "m1.small", "count": 12},
            "second_common_flavor": {"name": "m1.medium", "count": 8},
            "third_common_flavor": {"name": "m1.large", "count": 4},
        },
        "audits": audits,
    }
|
||||
@@ -1,124 +1,126 @@
|
||||
import pandas
|
||||
|
||||
from copy import copy
|
||||
|
||||
from openstack.connection import Connection
|
||||
|
||||
from watcher_visio.settings import WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME, PROMETHEUS_METRICS
|
||||
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
|
||||
def convert_cpu_data(data: list):
    """Flatten Prometheus range results into mean CPU usage per host.

    Each input entry carries "metric" labels (host, instanceName) and
    "values" as [timestamp, value] pairs. Per timestamp the usage of a
    host's instances is summed, then averaged over time per host.

    Returns a pandas.DataFrame with "host" and "cpu_usage" columns.
    """
    records = [
        {
            "timestamp": int(ts),
            "host": series["metric"]["host"],
            "cpu_usage": float(value),
            "instance": series["metric"]["instanceName"],
        }
        for series in data
        for ts, value in series["values"]
    ]

    frame = pandas.DataFrame(records)
    frame["timestamp"] = pandas.to_datetime(frame["timestamp"], unit="s")

    # Sum across instances per (host, timestamp), then average over time.
    per_host_totals = frame.groupby(["host", "timestamp"])["cpu_usage"].sum()
    return per_host_totals.groupby("host").mean().reset_index()
|
||||
|
||||
def get_audits(connection: Connection) -> list[dict] | None:
    """Summarize PENDING Watcher audits for the dashboard.

    For every PENDING audit that already has an action plan, fetches the
    plan's actions, converts them into a migration list, and computes the
    current vs. projected per-host CPU usage from Prometheus metrics.

    Args:
        connection: Authenticated OpenStack connection.

    Returns:
        List of audit dicts (id, name, strategy, migrations, CPU series, ...).

    Raises:
        HTTP errors via raise_for_status() when a Watcher API call fails.
    """
    session = connection.session

    watcher_endpoint = connection.endpoint_for(
        service_type=WATCHER_ENDPOINT_NAME,
        interface=WATCHER_INTERFACE_NAME
    )

    # Collect instances prometheus metrics
    cpu_data = query_prometheus(PROMETHEUS_METRICS['cpu_usage'])
    cpu_metrics = convert_cpu_data(data=cpu_data)

    # Fetch audit list. Parse the JSON body -- the Response object itself
    # is not a list of audits (the old code discarded .json() and iterated
    # the Response).
    audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
    audits_resp.raise_for_status()
    audits = audits_resp.json().get('audits') or []

    # Fetch action plan list
    actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
    actionplans_resp.raise_for_status()
    action_plans = actionplans_resp.json().get('action_plans') or []

    # Filtering audits by PENDING state
    pending_audits = [audit for audit in audits if audit['state'] == "PENDING"]

    result = []
    for item in pending_audits:
        audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['uuid']}")
        audit_resp.raise_for_status()
        audit = audit_resp.json()

        actionplan = next(
            (plan for plan in action_plans if plan.get("audit_uuid") == audit['uuid']),
            None,
        )
        if actionplan is None:
            # No plan computed for this audit yet: skip instead of crashing
            # on actionplan['uuid'].
            continue

        actions_resp = session.get(
            f"{watcher_endpoint}/v1/actions/?action_plan_uuid={actionplan['uuid']}"
        )
        actions_resp.raise_for_status()
        actions = actions_resp.json().get('actions') or []

        migrations = []
        mapping = {}
        for action in actions:
            action_detail_resp = session.get(
                f"{watcher_endpoint}/v1/actions/{action['uuid']}"
            )
            action_detail_resp.raise_for_status()
            # Use the detail response's parameters consistently (the list
            # entry and the detail were previously mixed).
            params = action_detail_resp.json()['input_parameters']

            server = connection.get_server_by_id(params['resource_id'])
            # instance name -> destination host, used to re-home metrics below.
            mapping[params['resource_name']] = params['destination_node']

            migrations.append({
                "instanceName": params['resource_name'],
                "source": params['source_node'],
                "destination": params['destination_node'],
                "flavor": server.flavor.name,
                "impact": 'Low'
            })

        # Re-home instance metrics onto their migration destinations. The
        # metric dicts are copied first: a shallow copy(cpu_data) shares the
        # dicts and would corrupt cpu_data (and cpu_metrics) across audits.
        projected_cpu_data = [
            {**entry, "metric": dict(entry["metric"])} for entry in cpu_data
        ]
        for entry in projected_cpu_data:
            if (instance := entry['metric']['instanceName']) in mapping:
                entry['metric']['host'] = mapping[instance]

        projected_cpu_metrics = convert_cpu_data(projected_cpu_data)

        result.append({
            "id": audit['uuid'],
            "name": audit['name'],
            "created_at": audit['created_at'],
            "strategy": audit['strategy_name'],
            "goal": audit['goal_name'],
            "type": audit['audit_type'],
            "scope": audit['scope'],
            "cpu_weight": audit['parameters'].get('weights', {}).get('instance_cpu_usage_weight', "none"),
            "ram_weight": audit['parameters'].get('weights', {}).get('instance_ram_usage_weight', "none"),
            "migrations": migrations,
            "host_labels": cpu_metrics['host'].to_list(),
            "cpu_current": cpu_metrics['cpu_usage'].to_list(),
            "cpu_projected": projected_cpu_metrics['cpu_usage'].to_list(),
        })

    return result
|
||||
import pandas
|
||||
|
||||
from copy import copy
|
||||
|
||||
from openstack.connection import Connection
|
||||
|
||||
from watcher_visio.settings import WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME, PROMETHEUS_METRICS
|
||||
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
|
||||
def convert_cpu_data(data: list):
    """Collapse Prometheus range-query results into mean CPU usage per host.

    Args:
        data: Prometheus matrix results; each entry has "metric" labels
            (with "host" and "instanceName") and "values" as
            [timestamp, value] pairs.

    Returns:
        pandas.DataFrame with columns "host" and "cpu_usage": the
        per-timestamp sum over a host's instances, averaged over time.
    """
    metrics = []

    # Flatten every (series, sample) pair into one flat record list.
    for entry in data:
        for t, val in entry["values"]:
            metrics.append({
                "timestamp": int(t),
                "host": entry["metric"]["host"],
                "cpu_usage": float(val),
                "instance": entry["metric"]["instanceName"]
            })

    df_cpu = pandas.DataFrame(metrics)
    df_cpu["timestamp"] = pandas.to_datetime(df_cpu["timestamp"], unit="s")

    # Aggregate CPU usage per host
    return (
        df_cpu.groupby(["host", "timestamp"])["cpu_usage"].sum()
        .groupby("host").mean()
        .reset_index()
    )
|
||||
|
||||
def get_audits(connection: Connection) -> list[dict] | None:
    """Summarize Watcher audits whose action plan is RECOMMENDED.

    For every RECOMMENDED action plan, fetches its audit and actions, turns
    the actions into a migration list, and computes current vs. projected
    per-host CPU usage from Prometheus metrics.

    Args:
        connection: Authenticated OpenStack connection.

    Returns:
        List of audit dicts (id, name, strategy, migrations, CPU series, ...).

    Raises:
        HTTP errors via raise_for_status() when a Watcher API call fails.
    """
    session = connection.session

    watcher_endpoint = connection.endpoint_for(
        service_type=WATCHER_ENDPOINT_NAME,
        interface=WATCHER_INTERFACE_NAME
    )

    # Collect instances prometheus metrics
    cpu_data = query_prometheus(PROMETHEUS_METRICS['cpu_usage'])
    cpu_metrics = convert_cpu_data(data=cpu_data)

    # Fetch action plan list. (Audit details are fetched per plan below, so
    # the previously unused bulk /v1/audits call is dropped.)
    actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
    actionplans_resp.raise_for_status()
    action_plans = actionplans_resp.json().get('action_plans') or []

    # Keep only plans Watcher has marked RECOMMENDED (ready for review).
    recommended_plans = [plan for plan in action_plans if plan['state'] == "RECOMMENDED"]

    result = []
    for plan in recommended_plans:
        audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{plan['audit_uuid']}")
        audit_resp.raise_for_status()
        audit = audit_resp.json()

        # We already hold the plan for this audit; no need to look it up again.
        actions_resp = session.get(
            f"{watcher_endpoint}/v1/actions/?action_plan_uuid={plan['uuid']}"
        )
        actions_resp.raise_for_status()
        actions = actions_resp.json().get('actions') or []

        migrations = []
        mapping = {}
        for action in actions:
            action_detail_resp = session.get(
                f"{watcher_endpoint}/v1/actions/{action['uuid']}"
            )
            action_detail_resp.raise_for_status()
            params = action_detail_resp.json()['input_parameters']

            server = connection.get_server_by_id(params['resource_id'])
            # instance name -> destination host, used to re-home metrics below.
            mapping[params['resource_name']] = params['destination_node']

            migrations.append({
                "instanceName": params['resource_name'],
                "source": params['source_node'],
                "destination": params['destination_node'],
                "flavor": server.flavor.name,
                "impact": 'Low'
            })

        # Re-home instance metrics onto their migration destinations. The
        # metric dicts are copied first: the previous shallow copy(cpu_data)
        # shared the dicts, so the projection mutated cpu_data (and thus the
        # "current" metrics) across iterations.
        projected_cpu_data = [
            {**entry, "metric": dict(entry["metric"])} for entry in cpu_data
        ]
        for entry in projected_cpu_data:
            if (instance := entry['metric']['instanceName']) in mapping:
                entry['metric']['host'] = mapping[instance]

        projected_cpu_metrics = convert_cpu_data(projected_cpu_data)

        result.append({
            "id": audit['uuid'],
            "name": audit['name'],
            "created_at": audit['created_at'],
            "strategy": audit['strategy_name'],
            "goal": audit['goal_name'],
            "type": audit['audit_type'],
            "scope": audit['scope'],
            "cpu_weight": audit['parameters'].get('weights', {}).get('instance_cpu_usage_weight', "none"),
            "ram_weight": audit['parameters'].get('weights', {}).get('instance_ram_usage_weight', "none"),
            "migrations": migrations,
            "host_labels": cpu_metrics['host'].to_list(),
            "cpu_current": cpu_metrics['cpu_usage'].to_list(),
            "cpu_projected": projected_cpu_metrics['cpu_usage'].to_list(),
        })

    return result
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import openstack
|
||||
from openstack.connection import Connection
|
||||
|
||||
from watcher_visio.settings import OPENSTACK_CLOUD, OPENSTACK_REGION_NAME
|
||||
|
||||
def get_connection() -> Connection:
    """Open an OpenStack SDK connection for the configured cloud and region."""
    connection = openstack.connect(cloud=OPENSTACK_CLOUD, region_name=OPENSTACK_REGION_NAME)
    return connection
|
||||
import openstack
|
||||
from openstack.connection import Connection
|
||||
|
||||
from watcher_visio.settings import OPENSTACK_CLOUD, OPENSTACK_REGION_NAME
|
||||
|
||||
def get_connection() -> Connection:
    """Create an OpenStack SDK connection for the configured cloud/region."""
    return openstack.connect(
        cloud=OPENSTACK_CLOUD,
        region_name=OPENSTACK_REGION_NAME,
    )
|
||||
|
||||
@@ -1,20 +1,23 @@
|
||||
from openstack.connection import Connection
|
||||
|
||||
from collections import Counter
|
||||
|
||||
def get_flavor_list(connection: Connection) -> dict:
    """Return up to the three most-used flavors, keyed first/second/third_common_flavor.

    Each entry is {"name": <flavor id>, "count": <server count>}. Slots
    beyond the number of distinct flavors are simply absent.
    """
    servers = list(connection.compute.servers(all_projects=True))
    usage = Counter(
        server.flavor['id'] for server in servers if 'id' in server.flavor
    ).most_common()

    # zip stops at the shorter sequence, so missing ranks are omitted.
    ranked = {}
    for position, (flavor_id, count) in zip(("first", "second", "third"), usage):
        ranked[f"{position}_common_flavor"] = {"name": flavor_id, "count": count}

    return ranked
|
||||
from openstack.connection import Connection
|
||||
|
||||
from collections import Counter
|
||||
|
||||
def get_flavor_list(connection: Connection) -> dict:
    """Return the three most-used flavors as first/second/third_common_flavor.

    Each entry is {"name": <flavor id>, "count": <server count>}. Slots with
    no flavor get an em-dash placeholder so templates always see all three
    keys.
    """
    servers = list(connection.compute.servers(all_projects=True))
    # NOTE(review): "name" carries the flavor *id* from server.flavor —
    # confirm templates expect ids rather than display names.
    flavor_ids = [s.flavor['id'] for s in servers if 'id' in s.flavor]
    flavor_count = Counter(flavor_ids).most_common()

    flavors = list(flavor_count)

    result = {}
    placeholder = {"name": "—", "count": 0}
    for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]:
        if len(flavors) > idx:
            result[f"{prefix}_common_flavor"] = {
                "name": flavors[idx][0],
                "count": flavors[idx][1]
            }
        else:
            # Missing rank: fall back to the shared placeholder entry.
            result[f"{prefix}_common_flavor"] = placeholder

    return result
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
import requests
|
||||
|
||||
from watcher_visio.settings import PROMETHEUS_URL
|
||||
|
||||
def query_prometheus(query: str) -> str | list[dict]:
    """Run an instant query against Prometheus' /api/v1/query endpoint.

    Args:
        query: PromQL expression.

    Returns:
        The raw result list when the query matches multiple series,
        otherwise the single sample's value as a string.

    Raises:
        requests.HTTPError: on a non-2xx response.
    """
    url = f"{PROMETHEUS_URL}/api/v1/query"
    params = {
        "query": query,
    }
    response = requests.get(url=url, params=params)
    response.raise_for_status()
    result = response.json()["data"]["result"]
    if len(result) > 1:
        # Multiple series: hand back the raw result entries. (The previous
        # branches were inverted and also indexed a nonexistent "values"
        # key for instant queries, which only carry "value".)
        return result
    # Single series: [timestamp, value] -> the value string.
    return result[0]["value"][1]
|
||||
import requests
|
||||
|
||||
from watcher_visio.settings import PROMETHEUS_URL
|
||||
|
||||
def query_prometheus(query: str) -> str | list[dict]:
    """Run an instant query against Prometheus' /api/v1/query endpoint.

    Returns the raw result list when the query matches multiple series,
    otherwise the single sample's value as a string.

    NOTE(review): an empty result raises IndexError on result[0] — confirm
    callers only issue queries guaranteed to match at least one series.
    """
    url = f"{PROMETHEUS_URL}/api/v1/query"
    params = {
        "query": query,
    }
    response = requests.get(url=url, params=params)
    response.raise_for_status()
    result = response.json()["data"]["result"]
    if len(result) > 1:
        # Multiple series: hand back the raw Prometheus result entries.
        return result
    else:
        # Single series: [timestamp, value] -> the value string.
        return result[0]["value"][1]
|
||||
|
||||
@@ -1,56 +1,56 @@
|
||||
from django import template
|
||||
|
||||
register = template.Library()
|
||||
|
||||
@register.filter
def div(a, b):
    """Template filter: float division a / b; returns 0 on any bad input."""
    try:
        return float(a) / float(b)
    except (TypeError, ValueError, ZeroDivisionError):
        # Templates must never raise: swallow only the expected failures
        # (non-numeric operands, b == 0) instead of a bare except.
        return 0
|
||||
|
||||
@register.filter
def mul(a, b):
    """Template filter: float product a * b; returns 0 on non-numeric input."""
    try:
        return float(a) * float(b)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only bad operands are expected here.
        return 0
|
||||
|
||||
@register.filter
def sub(a, b):
    """Template filter: float difference a - b; returns 0 on non-numeric input."""
    try:
        return float(a) - float(b)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only bad operands are expected here.
        return 0
|
||||
|
||||
@register.filter
def convert_bytes(bytes_value, target_unit='GB'):
    """
    Convert bytes to a specific unit, rounded to one decimal place.

    Args:
        bytes_value: Size in bytes
        target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB');
            case-insensitive, unknown units fall back to 'MB'

    Returns:
        Float value in target unit (0.0 for non-numeric input)
    """
    try:
        bytes_value = float(bytes_value)
    except (ValueError, TypeError):
        return 0.0

    conversion_factors = {
        'B': 1,
        'KB': 1024,
        'MB': 1024 ** 2,
        'GB': 1024 ** 3,
        'TB': 1024 ** 4,
    }

    target_unit = target_unit.upper()
    if target_unit not in conversion_factors:
        # Unknown unit: degrade gracefully rather than raising in a template.
        target_unit = 'MB'

    return round(bytes_value / conversion_factors[target_unit], 1)
|
||||
from django import template
|
||||
|
||||
register = template.Library()
|
||||
|
||||
@register.filter
def div(a, b):
    """Template filter: float division a / b; returns 0 on any bad input."""
    try:
        return float(a) / float(b)
    except (TypeError, ValueError, ZeroDivisionError):
        # Templates must never raise: swallow only the expected failures
        # (non-numeric operands, b == 0) instead of a bare except.
        return 0
|
||||
|
||||
@register.filter
def mul(a, b):
    """Template filter: float product a * b; returns 0 on non-numeric input."""
    try:
        return float(a) * float(b)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only bad operands are expected here.
        return 0
|
||||
|
||||
@register.filter
def sub(a, b):
    """Template filter: float difference a - b; returns 0 on non-numeric input."""
    try:
        return float(a) - float(b)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only bad operands are expected here.
        return 0
|
||||
|
||||
@register.filter
def convert_bytes(bytes_value, target_unit='GB'):
    """
    Convert bytes to a specific unit, rounded to one decimal place.

    Args:
        bytes_value: Size in bytes
        target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB');
            case-insensitive, unknown units fall back to 'MB'

    Returns:
        Float value in target unit (0.0 for non-numeric input)
    """
    try:
        bytes_value = float(bytes_value)
    except (ValueError, TypeError):
        return 0.0

    conversion_factors = {
        'B': 1,
        'KB': 1024,
        'MB': 1024 ** 2,
        'GB': 1024 ** 3,
        'TB': 1024 ** 4,
    }

    target_unit = target_unit.upper()
    if target_unit not in conversion_factors:
        # Unknown unit: degrade gracefully rather than raising in a template.
        target_unit = 'MB'

    return round(bytes_value / conversion_factors[target_unit], 1)
|
||||
|
||||
1
dashboard/tests/__init__.py
Normal file
1
dashboard/tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Dashboard test package
|
||||
103
dashboard/tests/test_mathfilters.py
Normal file
103
dashboard/tests/test_mathfilters.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""Tests for dashboard.templatetags.mathfilters."""
|
||||
from django.test import TestCase
|
||||
from django.template import Template, Context
|
||||
|
||||
from dashboard.templatetags.mathfilters import div, mul, sub, convert_bytes
|
||||
|
||||
|
||||
class DivFilterTest(TestCase):
    """Tests for the div template filter."""

    def test_div_normal(self):
        # Happy path: plain float division.
        self.assertEqual(div(10, 2), 5.0)
        self.assertEqual(div(10.0, 4), 2.5)

    def test_div_by_zero(self):
        # Division by zero is swallowed and reported as 0.
        self.assertEqual(div(10, 0), 0)

    def test_div_non_numeric(self):
        # Non-numeric operands (including None) also fall back to 0.
        self.assertEqual(div("x", 2), 0)
        self.assertEqual(div(10, "y"), 0)
        self.assertEqual(div(None, 2), 0)
|
||||
|
||||
|
||||
class MulFilterTest(TestCase):
    """Tests for the mul template filter."""

    def test_mul_normal(self):
        # Happy path: plain float multiplication.
        self.assertEqual(mul(3, 4), 12.0)
        self.assertEqual(mul(2.5, 4), 10.0)

    def test_mul_non_numeric(self):
        # Non-numeric operands (including None) fall back to 0.
        self.assertEqual(mul("a", 2), 0)
        self.assertEqual(mul(2, None), 0)
|
||||
|
||||
|
||||
class SubFilterTest(TestCase):
    """Tests for the sub template filter."""

    def test_sub_normal(self):
        # Happy path: plain float subtraction.
        self.assertEqual(sub(10, 3), 7.0)
        self.assertEqual(sub(5.5, 2), 3.5)

    def test_sub_non_numeric(self):
        # Non-numeric operands fall back to 0.
        self.assertEqual(sub("x", 1), 0)
        self.assertEqual(sub(5, "y"), 0)
|
||||
|
||||
|
||||
class ConvertBytesFilterTest(TestCase):
    """Tests for the convert_bytes template filter."""

    def test_convert_to_B(self):
        self.assertEqual(convert_bytes(1024, "B"), 1024.0)

    def test_convert_to_KB(self):
        self.assertEqual(convert_bytes(2048, "KB"), 2.0)

    def test_convert_to_MB(self):
        self.assertEqual(convert_bytes(1024 * 1024 * 3, "MB"), 3.0)

    def test_convert_to_GB(self):
        self.assertEqual(convert_bytes(1024 ** 3 * 5, "GB"), 5.0)

    def test_convert_to_TB(self):
        self.assertEqual(convert_bytes(1024 ** 4, "TB"), 1.0)

    def test_convert_default_GB(self):
        # No unit argument: the filter defaults to GB.
        self.assertEqual(convert_bytes(1024 ** 3 * 2), 2.0)

    def test_convert_invalid_unit_fallback_to_MB(self):
        # Unknown units silently fall back to MB.
        self.assertEqual(convert_bytes(1024 * 1024, "invalid"), 1.0)
        self.assertEqual(convert_bytes(1024 * 1024, "xyz"), 1.0)

    def test_convert_non_numeric_returns_zero(self):
        self.assertEqual(convert_bytes("abc"), 0.0)
        self.assertEqual(convert_bytes(None), 0.0)

    def test_convert_rounds_to_one_decimal(self):
        # 1500 MiB = 1.46... GiB and 1536 MiB = 1.5 GiB; both round to 1.5.
        self.assertEqual(convert_bytes(1500 * 1024 * 1024, "GB"), 1.5)
        self.assertEqual(convert_bytes(1536 * 1024 * 1024, "GB"), 1.5)

    def test_convert_case_insensitive_unit(self):
        self.assertEqual(convert_bytes(1024 ** 3, "gb"), 1.0)
        self.assertEqual(convert_bytes(1024 ** 3, "GB"), 1.0)
|
||||
|
||||
|
||||
class MathfiltersTemplateIntegrationTest(TestCase):
    """Test filters via template rendering (the {% load mathfilters %} path)."""

    def test_div_in_template(self):
        t = Template("{% load mathfilters %}{{ a|div:b }}")
        self.assertEqual(t.render(Context({"a": 10, "b": 2})), "5.0")

    def test_mul_in_template(self):
        t = Template("{% load mathfilters %}{{ a|mul:b }}")
        self.assertEqual(t.render(Context({"a": 3, "b": 4})), "12.0")

    def test_sub_in_template(self):
        t = Template("{% load mathfilters %}{{ a|sub:b }}")
        self.assertEqual(t.render(Context({"a": 10, "b": 3})), "7.0")

    def test_convert_bytes_in_template(self):
        # Filter argument syntax: literal unit passed as |convert_bytes:'GB'.
        t = Template("{% load mathfilters %}{{ bytes|convert_bytes:'GB' }}")
        self.assertEqual(t.render(Context({"bytes": 1024 ** 3 * 2})), "2.0")
|
||||
107
dashboard/tests/test_mock_data.py
Normal file
107
dashboard/tests/test_mock_data.py
Normal file
@@ -0,0 +1,107 @@
|
||||
"""Tests for dashboard.mock_data."""
|
||||
import json
|
||||
|
||||
from django.test import TestCase
|
||||
|
||||
from dashboard.mock_data import get_mock_context
|
||||
|
||||
|
||||
class GetMockContextTest(TestCase):
    """Tests for get_mock_context()."""

    def test_returns_all_top_level_keys(self):
        ctx = get_mock_context()
        # Must match the sections collect_context() produces.
        expected_keys = {"region", "pcpu", "vcpu", "pram", "vram", "vm", "flavors", "audits"}
        self.assertEqual(set(ctx.keys()), expected_keys)

    def test_region_structure(self):
        ctx = get_mock_context()
        region = ctx["region"]
        self.assertIn("name", region)
        self.assertIn("hosts_total", region)
        self.assertEqual(region["name"], "mock-region")
        self.assertEqual(region["hosts_total"], 6)

    def test_pcpu_structure_and_types(self):
        ctx = get_mock_context()
        pcpu = ctx["pcpu"]
        self.assertEqual(pcpu["total"], 48)
        self.assertEqual(pcpu["usage"], 12.5)
        self.assertEqual(pcpu["free"], 48 - 12.5)
        self.assertIsInstance(pcpu["used_percentage"], (int, float))

    def test_vcpu_structure(self):
        ctx = get_mock_context()
        vcpu = ctx["vcpu"]
        self.assertIn("total", vcpu)
        self.assertIn("allocated", vcpu)
        self.assertIn("free", vcpu)
        self.assertIn("allocated_percentage", vcpu)
        self.assertIn("overcommit_ratio", vcpu)
        self.assertIn("overcommit_max", vcpu)
        self.assertEqual(vcpu["overcommit_max"], 2.0)

    def test_pram_vram_structure(self):
        ctx = get_mock_context()
        pram = ctx["pram"]
        vram = ctx["vram"]
        self.assertIn("total", pram)
        self.assertIn("usage", pram)
        self.assertIn("free", pram)
        self.assertIn("used_percentage", pram)
        self.assertIn("total", vram)
        self.assertIn("allocated", vram)
        self.assertIn("overcommit_max", vram)

    def test_vm_structure(self):
        ctx = get_mock_context()
        vm = ctx["vm"]
        self.assertEqual(vm["count"], 24)
        self.assertEqual(vm["active"], 22)
        # stopped is derived: count - active.
        self.assertEqual(vm["stopped"], 2)
        self.assertIn("avg_cpu", vm)
        self.assertIn("avg_ram", vm)
        self.assertIn("density", vm)

    def test_flavors_structure(self):
        ctx = get_mock_context()
        flavors = ctx["flavors"]
        for key in ("first_common_flavor", "second_common_flavor", "third_common_flavor"):
            self.assertIn(key, flavors)
            self.assertIn("name", flavors[key])
            self.assertIn("count", flavors[key])
        self.assertEqual(flavors["first_common_flavor"]["name"], "m1.small")
        self.assertEqual(flavors["first_common_flavor"]["count"], 12)

    def test_audits_serialized_fields(self):
        ctx = get_mock_context()
        self.assertIsInstance(ctx["audits"], list)
        self.assertGreaterEqual(len(ctx["audits"]), 1)
        for audit in ctx["audits"]:
            self.assertIn("migrations", audit)
            self.assertIn("host_labels", audit)
            self.assertIn("cpu_current", audit)
            self.assertIn("cpu_projected", audit)
            # These must be JSON strings (render-ready for JS)
            self.assertIsInstance(audit["migrations"], str)
            self.assertIsInstance(audit["host_labels"], str)
            self.assertIsInstance(audit["cpu_current"], str)
            self.assertIsInstance(audit["cpu_projected"], str)
            # Must be valid JSON
            json.loads(audit["migrations"])
            json.loads(audit["host_labels"])
            json.loads(audit["cpu_current"])
            json.loads(audit["cpu_projected"])

    def test_audits_metadata_fields(self):
        ctx = get_mock_context()
        audit = ctx["audits"][0]
        self.assertIn("id", audit)
        self.assertIn("name", audit)
        self.assertIn("created_at", audit)
        self.assertIn("strategy", audit)
        self.assertIn("goal", audit)
        self.assertIn("type", audit)
        self.assertIn("scope", audit)
        self.assertIn("cpu_weight", audit)
        self.assertIn("ram_weight", audit)
|
||||
@@ -1,6 +1,6 @@
|
||||
from django.urls import path
|
||||
from . import views
|
||||
|
||||
urlpatterns = [
|
||||
path('', views.index, name='index'),
|
||||
from django.urls import path
from . import views

# Dashboard URL routes: a single index view renders the whole dashboard.
urlpatterns = [
    path('', views.index, name='index'),
]
|
||||
@@ -1,332 +1,143 @@
|
||||
import json
|
||||
|
||||
from django.shortcuts import render
|
||||
from dashboard.openstack_utils.connect import get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
from dashboard.openstack_utils.audits import get_audits
|
||||
|
||||
def collect_context():
    """Collect the live dashboard context from OpenStack and Prometheus.

    Returns:
        Context dict with region, pcpu/vcpu, pram/vram, vm, flavors and
        audits sections consumed by the dashboard template.

    Raises:
        HTTP errors from query_prometheus()/Watcher calls on backend failure.
    """
    connection = get_connection()
    region_name = connection._compute_region
    flavors = get_flavor_list(connection=connection)
    audits = get_audits(connection=connection)
    hosts_total = int(
        query_prometheus(
            query="count(node_exporter_build_info{job='node_exporter_compute'})"
        )
    )
    pcpu_total = int(
        query_prometheus(
            query="sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))"
        )
    )
    pcpu_usage = float(
        query_prometheus(
            query="sum(node_load5{job='node_exporter_compute'})"
        )
    )
    vcpu_allocated = int(
        query_prometheus(
            query="sum(libvirt_domain_info_virtual_cpus)"
        )
    )
    vcpu_overcommit_max = float(
        query_prometheus(
            query="avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
        )
    )
    pram_total = int(
        query_prometheus(
            query="sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})"  # memory in bytes
        )
    )
    pram_usage = int(
        query_prometheus(
            query="sum(node_memory_Active_bytes{job='node_exporter_compute'})"
        )
    )
    vram_allocated = int(
        query_prometheus(
            query="sum(libvirt_domain_info_maximum_memory_bytes)"
        )
    )
    vram_overcommit_max = float(
        query_prometheus(
            query="avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
        )
    )
    vm_count = int(
        query_prometheus(
            query="sum(libvirt_domain_state_code)"
        )
    )
    vm_active = int(
        query_prometheus(
            query="sum(libvirt_domain_state_code{stateDesc='the domain is running'})"
        )
    )

    # Virtual capacity ceilings derived from placement overcommit ratios.
    vcpu_total = pcpu_total * vcpu_overcommit_max
    vram_total = pram_total * vram_overcommit_max

    context = {
        # <--- Region data --->
        "region": {
            "name": region_name,
            # Use the measured host count instead of the hard-coded 6.
            "hosts_total": hosts_total,
        },
        # <--- CPU data --->
        # pCPU data
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_usage,
            "free": pcpu_total - pcpu_usage,
            "used_percentage": pcpu_usage / pcpu_total * 100,
        },
        # vCPU data
        "vcpu": {
            "total": vcpu_total,
            "allocated": vcpu_allocated,
            "free": vcpu_total - vcpu_allocated,
            "allocated_percentage": vcpu_allocated / vcpu_total * 100,
            "overcommit_ratio": vcpu_allocated / pcpu_total,
            "overcommit_max": vcpu_overcommit_max,
        },
        # <--- RAM data --->
        # pRAM data
        "pram": {
            "total": pram_total,
            "usage": pram_usage,
            "free": pram_total - pram_usage,
            "used_percentage": pram_usage / pram_total * 100,
        },
        # vRAM data
        "vram": {
            "total": vram_total,
            "allocated": vram_allocated,
            "free": vram_total - vram_allocated,
            "allocated_percentage": vram_allocated / vram_total * 100,
            "overcommit_ratio": vram_allocated / pram_total,
            "overcommit_max": vram_overcommit_max,
        },
        # <--- VM data --->
        "vm": {
            "count": vm_count,
            "active": vm_active,
            "stopped": vm_count - vm_active,
            # Guard against an empty cluster reporting zero VMs/hosts.
            "avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
            "avg_ram": vram_allocated / vm_count if vm_count else 0,
            "density": vm_count / hosts_total if hosts_total else 0,
        },
        "flavors": flavors,
        "audits": audits,
    }
    return context
||||
|
||||
def index(request):
    """Render the dashboard index page from hard-coded demo data.

    No OpenStack or Prometheus backend is contacted: every metric, flavor
    and audit below is a static sample whose structure mirrors the real
    context built by the live collector.
    """
    hosts_total = 6
    pcpu_total = 672
    pcpu_usage = 39.2
    vcpu_total = 3360
    vcpu_allocated = 98
    vcpu_overcommit_max = 5
    pram_total = 562500000000
    pram_usage = 4325000000
    vram_total = 489375000000
    vram_allocated = 5625000000
    vram_overcommit_max = 0.87
    vm_count = 120
    vm_active = 90
    context = {
        # <--- Region data --->
        "region": {
            "name": "ct3k1ldt",
            # Use the variable so the sample host count has one edit point.
            "hosts_total": hosts_total,
        },
        # <--- CPU data --->
        # pCPU data
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_usage,
            "free": pcpu_total - pcpu_usage,
            "used_percentage": pcpu_usage / pcpu_total * 100,
        },
        # vCPU data
        "vcpu": {
            "total": vcpu_total,
            "allocated": vcpu_allocated,
            "free": vcpu_total - vcpu_allocated,
            "allocated_percentage": vcpu_allocated / vcpu_total * 100,
            "overcommit_ratio": vcpu_allocated / pcpu_total,
            "overcommit_max": vcpu_overcommit_max,
        },
        # <--- RAM data --->
        # pRAM data
        "pram": {
            "total": pram_total,
            "usage": pram_usage,
            "free": pram_total - pram_usage,
            "used_percentage": pram_usage / pram_total * 100,
        },
        # vRAM data
        "vram": {
            "total": vram_total,
            "allocated": vram_allocated,
            "free": vram_total - vram_allocated,
            "allocated_percentage": vram_allocated / vram_total * 100,
            "overcommit_ratio": vram_allocated / pram_total,
            "overcommit_max": vram_overcommit_max,
        },
        # <--- VM data --->
        "vm": {
            "count": vm_count,
            "active": vm_active,
            "stopped": vm_count - vm_active,
            "avg_cpu": vcpu_allocated / vm_count,
            "avg_ram": vram_allocated / vm_count,
            "density": vm_count / hosts_total,
        },
        "flavors": {
            'first_common_flavor': {
                'name': 'm1.medium',
                'count': 18
            },
            'second_common_flavor': {
                'name': 'm1.small',
                'count': 12
            },
            'third_common_flavor': {
                'name': 'm1.large',
                'count': 8
            },
        },

        # Audit data
        'audits': [
            {
                'id': 'audit_001',
                'name': 'Weekly Optimization',
                'created_at': '2024-01-15',
                'cpu_weight': 1.2,
                'ram_weight': 0.6,
                'scope': 'Full Cluster',
                'strategy': 'Load Balancing',
                'goal': 'Optimize CPU distribution across all hosts',
                'migrations': [
                    {
                        'instanceName': 'web-server-01',
                        'source': 'compute-02',
                        'destination': 'compute-05',
                        'flavor': 'm1.medium',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'db-replica-03',
                        'source': 'compute-01',
                        'destination': 'compute-04',
                        'flavor': 'm1.large',
                        'impact': 'Medium'
                    },
                    {
                        'instanceName': 'api-gateway',
                        'source': 'compute-03',
                        'destination': 'compute-06',
                        'flavor': 'm1.small',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'cache-node-02',
                        'source': 'compute-01',
                        'destination': 'compute-07',
                        'flavor': 'm1.small',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'monitoring-server',
                        'source': 'compute-04',
                        'destination': 'compute-02',
                        'flavor': 'm1.medium',
                        'impact': 'Low'
                    }
                ],
                'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
                'cpu_current': [78, 65, 42, 89, 34, 56, 71],
                'cpu_projected': [65, 58, 45, 72, 48, 61, 68]
            },
            {
                'id': 'audit_002',
                'name': 'Emergency Rebalance',
                'created_at': '2024-01-14',
                'cpu_weight': 1.0,
                'ram_weight': 1.0,
                'scope': 'Overloaded Hosts',
                'strategy': 'Hotspot Reduction',
                'goal': 'Reduce load on compute-01 and compute-04',
                'migrations': [
                    {
                        'instanceName': 'app-server-02',
                        'source': 'compute-01',
                        'destination': 'compute-06',
                        'flavor': 'm1.medium',
                        'impact': 'Medium'
                    },
                    {
                        'instanceName': 'file-server-01',
                        'source': 'compute-04',
                        'destination': 'compute-07',
                        'flavor': 'm1.large',
                        'impact': 'High'
                    }
                ],
                'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
                'cpu_current': [92, 65, 42, 85, 34, 56, 71],
                'cpu_projected': [72, 65, 42, 65, 34, 66, 81]
            },
            {
                'id': 'audit_003',
                'name': 'Pre-Maintenance Planning',
                'created_at': '2024-01-10',
                'cpu_weight': 0.8,
                'ram_weight': 1.5,
                'scope': 'Maintenance Zone',
                'strategy': 'Evacuation',
                'goal': 'Empty compute-03 for maintenance',
                'migrations': [
                    {
                        'instanceName': 'test-vm-01',
                        'source': 'compute-03',
                        'destination': 'compute-02',
                        'flavor': 'm1.small',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'dev-server',
                        'source': 'compute-03',
                        'destination': 'compute-05',
                        'flavor': 'm1.medium',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'staging-db',
                        'source': 'compute-03',
                        'destination': 'compute-07',
                        'flavor': 'm1.large',
                        'impact': 'High'
                    }
                ],
                'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
                'cpu_current': [78, 65, 56, 89, 34, 56, 71],
                'cpu_projected': [78, 75, 0, 89, 54, 56, 81]
            }
        ]
    }

    # Serialize lists for JavaScript
    for audit in context['audits']:
        audit['migrations'] = json.dumps(audit['migrations'])
        audit['host_labels'] = json.dumps(audit['host_labels'])
        audit['cpu_current'] = json.dumps(audit['cpu_current'])
        audit['cpu_projected'] = json.dumps(audit['cpu_projected'])

    return render(request, 'index.html', context)
|
||||
import json
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.shortcuts import render
|
||||
from dashboard.openstack_utils.connect import get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
from dashboard.openstack_utils.audits import get_audits
|
||||
from dashboard.mock_data import get_mock_context
|
||||
|
||||
# Prometheus queries run in parallel (query_key -> query string).
# Keys feed the metric names consumed by collect_context(); the values are
# PromQL over node_exporter, libvirt-exporter and openstack-exporter series.
_PROMETHEUS_QUERIES = {
    # Compute hosts reporting a node_exporter build.
    "hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
    # Physical CPU count: one idle-mode series exists per (host, cpu).
    "pcpu_total": "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))",
    # Summed node_load5 across compute hosts, used as the pCPU usage figure.
    "pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
    # Virtual CPUs defined across all libvirt domains.
    "vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
    # Placement VCPU allocation ratio, averaged — the overcommit ceiling.
    "vcpu_overcommit_max": "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})",
    # Physical RAM (bytes) across compute hosts.
    "pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
    # Actively used RAM (bytes) across compute hosts.
    "pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
    # Maximum memory (bytes) configured across all libvirt domains.
    "vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
    # Placement MEMORY_MB allocation ratio, smoothed over 5m then averaged.
    "vram_overcommit_max": "avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))",
    # NOTE(review): this sums the state *codes*, not the number of domains —
    # verify it really equals the VM count (count() may be intended).
    "vm_count": "sum(libvirt_domain_state_code)",
    # Domains whose state description reports running.
    "vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
}
|
||||
|
||||
|
||||
def _fetch_prometheus_metrics():
    """Run all Prometheus queries in parallel and return a dict of name -> value.

    Float-valued metrics (load average and the overcommit ratios) are parsed
    as float; everything else as int. A value that fails to parse falls back
    to zero of the matching type.
    """
    # Metrics whose values are fractional; all others are integral counts.
    float_keys = ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max")
    result = {}
    with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor:
        future_to_key = {
            executor.submit(query_prometheus, query=q): key
            for key, q in _PROMETHEUS_QUERIES.items()
        }
        for future in as_completed(future_to_key):
            key = future_to_key[future]
            try:
                raw = future.result()
                result[key] = float(raw) if key in float_keys else int(raw)
            except (ValueError, TypeError):
                # Parse failure: fall back to a zero of the same numeric type
                # as the success path (the original fallback's conditional had
                # identical branches and always produced int 0).
                # NOTE(review): exceptions raised by the query itself (e.g.
                # network errors re-raised by future.result()) still propagate
                # — confirm that is intended.
                result[key] = 0.0 if key in float_keys else 0
    return result
|
||||
|
||||
|
||||
def collect_context():
    """Assemble the render-ready dashboard context from live backends.

    Pulls region, flavor and audit data from OpenStack and capacity metrics
    from Prometheus, then builds the nested dict consumed by index.html.
    Audit list fields are JSON-serialized here so a cached copy of the
    context can be rendered without further processing.
    """

    def _ratio(numerator, denominator):
        # Division guarded against a zero/missing denominator.
        return numerator / denominator if denominator else 0

    def _percent(numerator, denominator):
        # Guarded percentage; yields 0 when the denominator is falsy.
        return _ratio(numerator, denominator) * 100

    conn = get_connection()
    # NOTE(review): _compute_region is a private attribute of the connection
    # object — confirm no public accessor exists.
    region_name = conn._compute_region
    flavors = get_flavor_list(connection=conn)
    audits = get_audits(connection=conn)

    metrics = _fetch_prometheus_metrics()
    # Treat a missing/zero host count as 1 so the density division is safe.
    hosts_total = metrics.get("hosts_total") or 1
    pcpu_total = metrics.get("pcpu_total", 0)
    pcpu_usage = metrics.get("pcpu_usage", 0)
    vcpu_allocated = metrics.get("vcpu_allocated", 0)
    vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
    pram_total = metrics.get("pram_total", 0)
    pram_usage = metrics.get("pram_usage", 0)
    vram_allocated = metrics.get("vram_allocated", 0)
    vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
    vm_count = metrics.get("vm_count", 0)
    vm_active = metrics.get("vm_active", 0)

    # Virtual capacity = physical capacity scaled by the overcommit ceiling.
    vcpu_total = pcpu_total * vcpu_overcommit_max
    vram_total = pram_total * vram_overcommit_max

    context = {
        # <--- Region data --->
        "region": {
            "name": region_name,
            "hosts_total": hosts_total,
        },
        # <--- CPU data --->
        # pCPU data
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_usage,
            "free": pcpu_total - pcpu_usage,
            "used_percentage": _percent(pcpu_usage, pcpu_total),
        },
        # vCPU data
        "vcpu": {
            "total": vcpu_total,
            "allocated": vcpu_allocated,
            "free": vcpu_total - vcpu_allocated,
            "allocated_percentage": _percent(vcpu_allocated, vcpu_total),
            "overcommit_ratio": _ratio(vcpu_allocated, pcpu_total),
            "overcommit_max": vcpu_overcommit_max,
        },
        # <--- RAM data --->
        # pRAM data
        "pram": {
            "total": pram_total,
            "usage": pram_usage,
            "free": pram_total - pram_usage,
            "used_percentage": _percent(pram_usage, pram_total),
        },
        # vRAM data
        "vram": {
            "total": vram_total,
            "allocated": vram_allocated,
            "free": vram_total - vram_allocated,
            "allocated_percentage": _percent(vram_allocated, vram_total),
            "overcommit_ratio": _ratio(vram_allocated, pram_total),
            "overcommit_max": vram_overcommit_max,
        },
        # <--- VM data --->
        "vm": {
            "count": vm_count,
            "active": vm_active,
            "stopped": vm_count - vm_active,
            "avg_cpu": _ratio(vcpu_allocated, vm_count),
            "avg_ram": _ratio(vram_allocated, vm_count),
            "density": _ratio(vm_count, hosts_total),
        },
        "flavors": flavors,
        "audits": audits,
    }

    # Serialize audit list fields for JavaScript so cached context is render-ready.
    for audit in context["audits"]:
        for field in ("migrations", "host_labels", "cpu_current", "cpu_projected"):
            audit[field] = json.dumps(audit[field])
    return context
|
||||
|
||||
def index(request):
    """Dashboard landing page.

    With USE_MOCK_DATA enabled, renders canned sample data and never touches
    OpenStack/Prometheus. Otherwise serves the live context, memoized in the
    Django cache for DASHBOARD_CACHE_TTL seconds (default 120).
    """
    if getattr(settings, "USE_MOCK_DATA", False):
        # Mock mode short-circuits every backend call.
        return render(request, "index.html", get_mock_context())

    key = "dashboard_context"
    ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
    ctx = cache.get(key)
    if ctx is None:
        # Cache miss: collect from the backends and memoize the result.
        ctx = collect_context()
        cache.set(key, ctx, timeout=ttl)
    return render(request, "index.html", ctx)
|
||||
Reference in New Issue
Block a user