Refactor Docker setup and add mock data support
- Updated .dockerignore and .gitignore for better file management. - Introduced .env.example for environment variable configuration. - Added docker-compose.dev.yml for development with mock data and live reload. - Enhanced Dockerfile to include necessary dependencies and entrypoint script. - Created mock_data.py to provide sample data for testing without OpenStack/Prometheus. - Added unit tests for template filters in dashboard. - Cleaned up various files for consistency and improved readability.
This commit is contained in:
@@ -1,332 +1,143 @@
|
||||
import json
|
||||
|
||||
from django.shortcuts import render
|
||||
from dashboard.openstack_utils.connect import get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
from dashboard.openstack_utils.audits import get_audits
|
||||
|
||||
def collect_context():
    """Collect the live dashboard context from OpenStack and Prometheus.

    Returns a dict with region, pCPU/vCPU, pRAM/vRAM, VM, flavor and audit
    data, shaped for the ``index.html`` template.
    """
    connection = get_connection()
    # NOTE(review): reads a private SDK attribute for the region name —
    # confirm there is no public accessor.
    region_name = connection._compute_region
    flavors = get_flavor_list(connection=connection)
    audits = get_audits(connection=connection)

    hosts_total = int(
        query_prometheus(
            query="count(node_exporter_build_info{job='node_exporter_compute'})"
        )
    )
    pcpu_total = int(
        query_prometheus(
            query="sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))"
        )
    )
    pcpu_usage = float(
        query_prometheus(
            query="sum(node_load5{job='node_exporter_compute'})"
        )
    )
    vcpu_allocated = int(
        query_prometheus(
            query="sum(libvirt_domain_info_virtual_cpus)"
        )
    )
    vcpu_overcommit_max = float(
        query_prometheus(
            query="avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
        )
    )
    pram_total = int(
        query_prometheus(
            query="sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})"  # memory in bytes
        )
    )
    pram_usage = int(
        query_prometheus(
            query="sum(node_memory_Active_bytes{job='node_exporter_compute'})"
        )
    )
    vram_allocated = int(
        query_prometheus(
            query="sum(libvirt_domain_info_maximum_memory_bytes)"
        )
    )
    vram_overcommit_max = float(
        query_prometheus(
            query="avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
        )
    )
    vm_count = int(
        query_prometheus(
            query="sum(libvirt_domain_state_code)"
        )
    )
    vm_active = int(
        query_prometheus(
            query="sum(libvirt_domain_state_code{stateDesc='the domain is running'})"
        )
    )

    # Virtual capacity is physical capacity scaled by the overcommit ratio.
    vcpu_total = pcpu_total * vcpu_overcommit_max
    vram_total = pram_total * vram_overcommit_max

    context = {
        # <--- Region data --->
        "region": {
            "name": region_name,
            # BUG FIX: was hard-coded to 6; use the value queried above.
            "hosts_total": hosts_total,
        },
        # <--- CPU data --->
        # pCPU data
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_usage,
            "free": pcpu_total - pcpu_usage,
            # Guard every ratio against a zero denominator so an empty or
            # unreachable Prometheus result degrades to 0 instead of raising.
            "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
        },
        # vCPU data
        "vcpu": {
            "total": vcpu_total,
            "allocated": vcpu_allocated,
            "free": vcpu_total - vcpu_allocated,
            "allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
            "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
            "overcommit_max": vcpu_overcommit_max,
        },
        # <--- RAM data --->
        # pRAM data
        "pram": {
            "total": pram_total,
            "usage": pram_usage,
            "free": pram_total - pram_usage,
            "used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
        },
        # vRAM data
        "vram": {
            "total": vram_total,
            "allocated": vram_allocated,
            "free": vram_total - vram_allocated,
            "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
            "overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
            "overcommit_max": vram_overcommit_max,
        },
        # <--- VM data --->
        "vm": {
            "count": vm_count,
            "active": vm_active,
            "stopped": vm_count - vm_active,
            "avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
            "avg_ram": vram_allocated / vm_count if vm_count else 0,
            "density": vm_count / hosts_total if hosts_total else 0,
        },
        "flavors": flavors,
        "audits": audits,
    }
    return context
|
||||
|
||||
def index(request):
    """Render the dashboard with hard-coded mock data.

    Serves a fixed, self-consistent sample context (region, CPU/RAM, VMs,
    flavors, audits) so the dashboard can be developed without a live
    OpenStack/Prometheus backend.
    """
    # Mock metric values; the derived fields below are computed from these
    # so the displayed numbers stay internally consistent.
    hosts_total = 6
    pcpu_total = 672
    pcpu_usage = 39.2
    vcpu_total = 3360
    vcpu_allocated = 98
    vcpu_overcommit_max = 5
    # RAM values are in bytes.
    pram_total = 562500000000
    pram_usage = 4325000000
    vram_total = 489375000000
    vram_allocated = 5625000000
    vram_overcommit_max = 0.87
    vm_count = 120
    vm_active = 90
    context = {
        # <--- Region data --->
        "region": {
            "name": "ct3k1ldt",
            "hosts_total": 6,
        },
        # <--- CPU data --->
        # pCPU data
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_usage,
            "free": pcpu_total - pcpu_usage,
            "used_percentage": pcpu_usage / pcpu_total * 100,
        },
        # vCPU data
        "vcpu": {
            "total": vcpu_total,
            "allocated": vcpu_allocated,
            "free": vcpu_total - vcpu_allocated,
            "allocated_percentage": vcpu_allocated / vcpu_total * 100,
            "overcommit_ratio": vcpu_allocated / pcpu_total,
            "overcommit_max": vcpu_overcommit_max,
        },
        # <--- RAM data --->
        # pRAM data
        "pram" : {
            "total": pram_total,
            "usage": pram_usage,
            "free": pram_total - pram_usage,
            "used_percentage": pram_usage / pram_total * 100,
        },
        # vRAM data
        "vram": {
            "total": vram_total,
            "allocated": vram_allocated,
            "free": vram_total - vram_allocated,
            "allocated_percentage": vram_allocated / vram_total * 100,
            "overcommit_ratio": vram_allocated / pram_total,
            "overcommit_max": vram_overcommit_max,
        },
        # <--- VM data --->
        "vm": {
            "count": vm_count,
            "active": vm_active,
            "stopped": vm_count - vm_active,
            "avg_cpu": vcpu_allocated / vm_count,
            "avg_ram": vram_allocated / vm_count,
            "density": vm_count / hosts_total,
        },
        # Top-3 most used flavors, ordered first/second/third.
        "flavors": {
            'first_common_flavor': {
                'name': 'm1.medium',
                'count': 18
            },
            'second_common_flavor': {
                'name': 'm1.small',
                'count': 12
            },
            'third_common_flavor': {
                'name': 'm1.large',
                'count': 8
            },
        },

        # Audit data
        'audits': [
            {
                'id': 'audit_001',
                'name': 'Weekly Optimization',
                'created_at': '2024-01-15',
                'cpu_weight': 1.2,
                'ram_weight': 0.6,
                'scope': 'Full Cluster',
                'strategy': 'Load Balancing',
                'goal': 'Optimize CPU distribution across all hosts',
                'migrations': [
                    {
                        'instanceName': 'web-server-01',
                        'source': 'compute-02',
                        'destination': 'compute-05',
                        'flavor': 'm1.medium',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'db-replica-03',
                        'source': 'compute-01',
                        'destination': 'compute-04',
                        'flavor': 'm1.large',
                        'impact': 'Medium'
                    },
                    {
                        'instanceName': 'api-gateway',
                        'source': 'compute-03',
                        'destination': 'compute-06',
                        'flavor': 'm1.small',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'cache-node-02',
                        'source': 'compute-01',
                        'destination': 'compute-07',
                        'flavor': 'm1.small',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'monitoring-server',
                        'source': 'compute-04',
                        'destination': 'compute-02',
                        'flavor': 'm1.medium',
                        'impact': 'Low'
                    }
                ],
                # cpu_current/cpu_projected are per-host percentages aligned
                # index-for-index with host_labels.
                'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
                'cpu_current': [78, 65, 42, 89, 34, 56, 71],
                'cpu_projected': [65, 58, 45, 72, 48, 61, 68]
            },
            {
                'id': 'audit_002',
                'name': 'Emergency Rebalance',
                'created_at': '2024-01-14',
                'cpu_weight': 1.0,
                'ram_weight': 1.0,
                'scope': 'Overloaded Hosts',
                'strategy': 'Hotspot Reduction',
                'goal': 'Reduce load on compute-01 and compute-04',
                'migrations': [
                    {
                        'instanceName': 'app-server-02',
                        'source': 'compute-01',
                        'destination': 'compute-06',
                        'flavor': 'm1.medium',
                        'impact': 'Medium'
                    },
                    {
                        'instanceName': 'file-server-01',
                        'source': 'compute-04',
                        'destination': 'compute-07',
                        'flavor': 'm1.large',
                        'impact': 'High'
                    }
                ],
                'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
                'cpu_current': [92, 65, 42, 85, 34, 56, 71],
                'cpu_projected': [72, 65, 42, 65, 34, 66, 81]
            },
            {
                'id': 'audit_003',
                'name': 'Pre-Maintenance Planning',
                'created_at': '2024-01-10',
                'cpu_weight': 0.8,
                'ram_weight': 1.5,
                'scope': 'Maintenance Zone',
                'strategy': 'Evacuation',
                'goal': 'Empty compute-03 for maintenance',
                'migrations': [
                    {
                        'instanceName': 'test-vm-01',
                        'source': 'compute-03',
                        'destination': 'compute-02',
                        'flavor': 'm1.small',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'dev-server',
                        'source': 'compute-03',
                        'destination': 'compute-05',
                        'flavor': 'm1.medium',
                        'impact': 'Low'
                    },
                    {
                        'instanceName': 'staging-db',
                        'source': 'compute-03',
                        'destination': 'compute-07',
                        'flavor': 'm1.large',
                        'impact': 'High'
                    }
                ],
                'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
                'cpu_current': [78, 65, 56, 89, 34, 56, 71],
                'cpu_projected': [78, 75, 0, 89, 54, 56, 81]
            }
        ]
    }

    # Serialize lists for JavaScript
    # (templates embed these as JSON strings, so dump them in place).
    for audit in context['audits']:
        audit['migrations'] = json.dumps(audit['migrations'])
        audit['host_labels'] = json.dumps(audit['host_labels'])
        audit['cpu_current'] = json.dumps(audit['cpu_current'])
        audit['cpu_projected'] = json.dumps(audit['cpu_projected'])

    return render(request, 'index.html', context)
|
||||
import json
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.shortcuts import render
|
||||
from dashboard.openstack_utils.connect import get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
from dashboard.openstack_utils.audits import get_audits
|
||||
from dashboard.mock_data import get_mock_context
|
||||
|
||||
# Prometheus queries run in parallel (query_key -> query string).
# pram_*/vram_* results are in bytes (node_memory_* / *_memory_bytes
# metrics); pcpu_usage is a summed 5-minute load average (node_load5);
# the *_overcommit_max entries are placement allocation ratios.
_PROMETHEUS_QUERIES = {
    "hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
    "pcpu_total": "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))",
    "pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
    "vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
    "vcpu_overcommit_max": "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})",
    "pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
    "pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
    "vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
    "vram_overcommit_max": "avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))",
    "vm_count": "sum(libvirt_domain_state_code)",
    "vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
}
|
||||
|
||||
|
||||
# Metric keys whose values are fractional; every other metric is an
# integer count. Defined once so parsing and fallback stay in sync.
_FLOAT_KEYS = frozenset({"pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max"})


def _fetch_prometheus_metrics():
    """Run all Prometheus queries in parallel and return a dict of name -> value.

    Returns:
        dict: ``{query_key: value}`` for every entry of
        ``_PROMETHEUS_QUERIES``; float for keys in ``_FLOAT_KEYS``,
        int otherwise. Unparseable results fall back to ``0.0``/``0``.
    """
    result = {}
    with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor:
        future_to_key = {
            executor.submit(query_prometheus, query=q): key
            for key, q in _PROMETHEUS_QUERIES.items()
        }
        for future in as_completed(future_to_key):
            key = future_to_key[future]
            is_float = key in _FLOAT_KEYS
            try:
                raw = future.result()
                result[key] = float(raw) if is_float else int(raw)
            except (ValueError, TypeError):
                # BUG FIX: the previous fallback ternary returned int 0 on
                # both branches; degrade to a zero of the correct type.
                result[key] = 0.0 if is_float else 0
    return result
|
||||
|
||||
|
||||
def collect_context():
    """Collect the live dashboard context from OpenStack and Prometheus.

    Fetches flavors/audits via the OpenStack connection and all Prometheus
    metrics in parallel, then builds the template context. Audit list
    fields are JSON-serialized before returning so a cached copy of the
    context is directly renderable.

    Returns:
        dict: context for the ``index.html`` template.
    """
    connection = get_connection()
    # NOTE(review): reads a private SDK attribute for the region name —
    # confirm there is no public accessor.
    region_name = connection._compute_region
    flavors = get_flavor_list(connection=connection)
    audits = get_audits(connection=connection)

    metrics = _fetch_prometheus_metrics()
    # `or 1` (not a default of 1) also covers a returned 0, keeping the
    # density division below safe.
    hosts_total = metrics.get("hosts_total") or 1
    pcpu_total = metrics.get("pcpu_total", 0)
    pcpu_usage = metrics.get("pcpu_usage", 0)
    vcpu_allocated = metrics.get("vcpu_allocated", 0)
    vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
    pram_total = metrics.get("pram_total", 0)
    pram_usage = metrics.get("pram_usage", 0)
    vram_allocated = metrics.get("vram_allocated", 0)
    vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
    vm_count = metrics.get("vm_count", 0)
    vm_active = metrics.get("vm_active", 0)

    # Virtual capacity is physical capacity scaled by the overcommit ratio.
    vcpu_total = pcpu_total * vcpu_overcommit_max
    vram_total = pram_total * vram_overcommit_max

    context = {
        # <--- Region data --->
        "region": {
            "name": region_name,
            "hosts_total": hosts_total,
        },
        # <--- CPU data --->
        # pCPU data
        "pcpu": {
            "total": pcpu_total,
            "usage": pcpu_usage,
            "free": pcpu_total - pcpu_usage,
            # All ratios guard against a zero denominator so missing
            # metrics degrade to 0 instead of raising.
            "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
        },
        # vCPU data
        "vcpu": {
            "total": vcpu_total,
            "allocated": vcpu_allocated,
            "free": vcpu_total - vcpu_allocated,
            "allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
            "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
            "overcommit_max": vcpu_overcommit_max,
        },
        # <--- RAM data --->
        # pRAM data (bytes)
        "pram": {
            "total": pram_total,
            "usage": pram_usage,
            "free": pram_total - pram_usage,
            "used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
        },
        # vRAM data (bytes)
        "vram": {
            "total": vram_total,
            "allocated": vram_allocated,
            "free": vram_total - vram_allocated,
            "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
            "overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
            "overcommit_max": vram_overcommit_max,
        },
        # <--- VM data --->
        "vm": {
            "count": vm_count,
            "active": vm_active,
            "stopped": vm_count - vm_active,
            "avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
            "avg_ram": vram_allocated / vm_count if vm_count else 0,
            "density": vm_count / hosts_total if hosts_total else 0,
        },
        "flavors": flavors,
        "audits": audits,
    }
    # Serialize audit list fields for JavaScript so cached context is render-ready
    for audit in context["audits"]:
        audit["migrations"] = json.dumps(audit["migrations"])
        audit["host_labels"] = json.dumps(audit["host_labels"])
        audit["cpu_current"] = json.dumps(audit["cpu_current"])
        audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
    return context
|
||||
|
||||
def index(request):
    """Render the dashboard, serving mock data or a cached live context.

    With ``settings.USE_MOCK_DATA`` enabled the view returns canned data
    and never touches OpenStack/Prometheus. Otherwise the collected
    context is cached for ``settings.DASHBOARD_CACHE_TTL`` seconds
    (default 120) under a single well-known key.
    """
    # Mock mode: local development without any backend infrastructure.
    if getattr(settings, "USE_MOCK_DATA", False):
        return render(request, "index.html", get_mock_context())

    # Live mode: fetch-or-compute the context through the cache in one step.
    context = cache.get_or_set(
        "dashboard_context",
        collect_context,  # only called on a cache miss
        timeout=getattr(settings, "DASHBOARD_CACHE_TTL", 120),
    )
    return render(request, "index.html", context)
|
||||
Reference in New Issue
Block a user