Refactor Docker setup and add mock data support

- Updated .dockerignore and .gitignore for better file management.
- Introduced .env.example for environment variable configuration.
- Added docker-compose.dev.yml for development with mock data and live reload.
- Enhanced Dockerfile to include necessary dependencies and entrypoint script.
- Created mock_data.py to provide sample data for testing without OpenStack/Prometheus.
- Added unit tests for template filters in dashboard.
- Cleaned up various files for consistency and improved readability.
This commit is contained in:
2026-02-06 16:12:21 +03:00
parent d197d1e5e2
commit 57a2933f28
33 changed files with 3319 additions and 3050 deletions

View File

@@ -12,7 +12,6 @@ env/
.idea .idea
*.log *.log
*.sqlite3 *.sqlite3
static/
media/ media/
node_modules/ node_modules/
npm-debug.log* npm-debug.log*
@@ -21,3 +20,4 @@ yarn-error.log*
Dockerfile Dockerfile
docker-compose.yml docker-compose.yml
README.md README.md
clouds.yaml

9
.env.example Normal file
View File

@@ -0,0 +1,9 @@
# Optional: copy to .env and set for your environment.
# For docker-compose, add to docker-compose.yml: env_file: [.env]
# PYTHONUNBUFFERED=1
# USE_MOCK_DATA=false
# PROMETHEUS_URL=http://127.0.0.1:1234/
# OPENSTACK_CLOUD=distlab
# OPENSTACK_REGION_NAME=cl2k1distlab
# SECRET_KEY=your-secret-key

2
.gitignore vendored
View File

@@ -24,3 +24,5 @@ static/css/tailwindcss
# NodeJS # NodeJS
node_modules node_modules
clouds.yaml

View File

@@ -4,13 +4,18 @@ RUN apk update && \
apk add --no-cache --virtual .build-deps \ apk add --no-cache --virtual .build-deps \
ca-certificates gcc postgresql-dev linux-headers musl-dev \ ca-certificates gcc postgresql-dev linux-headers musl-dev \
libffi-dev jpeg-dev zlib-dev \ libffi-dev jpeg-dev zlib-dev \
git bash build-base python3-dev git bash build-base python3-dev \
dos2unix
RUN python3 -m venv /venv RUN python3 -m venv /venv
ENV PATH "/venv/bin:$PATH" ENV PATH "/venv/bin:$PATH"
COPY ./requirements.txt / COPY ./requirements.txt /
RUN pip install -r /requirements.txt RUN pip install -r /requirements.txt
COPY ./docker-entrypoint.sh /docker-entrypoint.sh
RUN dos2unix /docker-entrypoint.sh && \
chmod +x /docker-entrypoint.sh
FROM alpine:3 FROM alpine:3
@@ -20,13 +25,13 @@ ENV LC_ALL C.UTF-8
ENV PYTHONUNBUFFERED 1 ENV PYTHONUNBUFFERED 1
ENV PATH "/venv/bin:$PATH" ENV PATH "/venv/bin:$PATH"
RUN apk add --no-cache --update python3 RUN apk add --no-cache --update python3 curl
COPY --from=build /venv /venv COPY --from=build /venv /venv
COPY --from=build /docker-entrypoint.sh /docker-entrypoint.sh
RUN mkdir /app
WORKDIR /app WORKDIR /app
COPY ./ / COPY . /app
ENTRYPOINT ["/docker-entrypoint.sh"] ENTRYPOINT ["/docker-entrypoint.sh"]
CMD [ "python", "manage.py", "runserver", "0.0.0.0:8000" ] CMD ["python3", "manage.py", "runserver", "0.0.0.0:8000"]

115
dashboard/mock_data.py Normal file
View File

@@ -0,0 +1,115 @@
"""Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus)."""
import json
def get_mock_context():
    """Build a render-ready context mirroring collect_context() output.

    Every number here is a static sample value; the list-valued audit fields
    (migrations, host labels, CPU series) are pre-serialized to JSON strings
    so templates can embed them directly into JavaScript.
    """
    as_json = json.dumps  # shorthand for serializing list fields

    # Cluster-wide sample figures.
    host_count = 6
    phys_cpus = 48
    phys_cpu_load = 12.5
    virt_cpus_allocated = 96
    cpu_overcommit = 2.0
    phys_ram = 256 * 1024**3  # 256 GB in bytes
    phys_ram_used = 120 * 1024**3
    virt_ram_allocated = 192 * 1024**3
    ram_overcommit = 1.5
    total_vms = 24
    running_vms = 22
    virt_cpu_capacity = phys_cpus * cpu_overcommit
    virt_ram_capacity = phys_ram * ram_overcommit

    # Chart data shared by both sample audits.
    host_labels = ["compute-0", "compute-1", "compute-2", "compute-3", "compute-4", "compute-5"]
    cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 48.5]
    cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0]

    # Two sample audits with serialized fields for JS.
    audits = [
        {
            "id": "mock-audit-uuid-1",
            "name": "Mock audit (balanced)",
            "created_at": "2025-02-01T10:00:00",
            "strategy": "Balanced",
            "goal": "BALANCED",
            "type": "ONESHOT",
            "scope": "Full Cluster",
            "cpu_weight": "1.0",
            "ram_weight": "1.0",
            "migrations": as_json([
                {
                    "instanceName": "instance-1",
                    "source": "compute-0",
                    "destination": "compute-3",
                    "flavor": "m1.small",
                    "impact": "Low",
                }
            ]),
            "host_labels": as_json(host_labels),
            "cpu_current": as_json(cpu_current),
            "cpu_projected": as_json(cpu_projected),
        },
        {
            "id": "mock-audit-uuid-2",
            "name": "Mock audit (workload consolidation)",
            "created_at": "2025-02-02T14:30:00",
            "strategy": "Workload consolidation",
            "goal": "WORKLOAD_CONSOLIDATION",
            "type": "ONESHOT",
            "scope": "Full Cluster",
            "cpu_weight": "1.0",
            "ram_weight": "1.0",
            "migrations": as_json([]),
            "host_labels": as_json(host_labels),
            "cpu_current": as_json(cpu_current),
            "cpu_projected": as_json([40.0, 42.0, 50.0, 43.0, 36.0, 45.0]),
        },
    ]

    return {
        # <--- Region data --->
        "region": {
            "name": "mock-region",
            "hosts_total": host_count,
        },
        # Physical CPU data.
        "pcpu": {
            "total": phys_cpus,
            "usage": phys_cpu_load,
            "free": phys_cpus - phys_cpu_load,
            "used_percentage": phys_cpu_load / phys_cpus * 100,
        },
        # Virtual CPU data (capacity derived from the overcommit ratio).
        "vcpu": {
            "total": int(virt_cpu_capacity),
            "allocated": virt_cpus_allocated,
            "free": int(virt_cpu_capacity) - virt_cpus_allocated,
            "allocated_percentage": virt_cpus_allocated / virt_cpu_capacity * 100,
            "overcommit_ratio": virt_cpus_allocated / phys_cpus,
            "overcommit_max": cpu_overcommit,
        },
        # Physical RAM data (all values in bytes).
        "pram": {
            "total": phys_ram,
            "usage": phys_ram_used,
            "free": phys_ram - phys_ram_used,
            "used_percentage": phys_ram_used / phys_ram * 100,
        },
        # Virtual RAM data.
        "vram": {
            "total": virt_ram_capacity,
            "allocated": virt_ram_allocated,
            "free": virt_ram_capacity - virt_ram_allocated,
            "allocated_percentage": virt_ram_allocated / virt_ram_capacity * 100,
            "overcommit_ratio": virt_ram_allocated / phys_ram,
            "overcommit_max": ram_overcommit,
        },
        # VM counters and derived averages (guarded against zero VMs).
        "vm": {
            "count": total_vms,
            "active": running_vms,
            "stopped": total_vms - running_vms,
            "avg_cpu": virt_cpus_allocated / total_vms if total_vms else 0,
            "avg_ram": virt_ram_allocated / total_vms if total_vms else 0,
            "density": total_vms / host_count,
        },
        "flavors": {
            "first_common_flavor": {"name": "m1.small", "count": 12},
            "second_common_flavor": {"name": "m1.medium", "count": 8},
            "third_common_flavor": {"name": "m1.large", "count": 4},
        },
        "audits": audits,
    }

View File

@@ -48,35 +48,37 @@ def get_audits(connection: Connection) -> list[dict] | None:
f"{watcher_endpoint}/v1/audits" f"{watcher_endpoint}/v1/audits"
) )
audits_resp.raise_for_status() audits_resp.raise_for_status()
audits_resp.json().get('audits') audits_resp = audits_resp.json().get('audits') or []
# Fetch action plan list # Fetch action plan list
actionplans_resp = session.get( actionplans_resp = session.get(
f"{watcher_endpoint}/v1/action_plans" f"{watcher_endpoint}/v1/action_plans"
) )
actionplans_resp.raise_for_status() actionplans_resp.raise_for_status()
actionplans_resp.json().get('action_plans') actionplans_resp = actionplans_resp.json().get('action_plans') or []
# Filtering audits by PENDING state # Filtering action plans by RECOMMENDED state
pending_audits = [audit for audit in audits_resp if audit['state'] == "PENDING"] pending_audits = [plan for plan in actionplans_resp if plan['state'] == "RECOMMENDED"]
result = [] result = []
for item in pending_audits: for item in pending_audits:
projected_cpu_data = copy(cpu_data) projected_cpu_data = copy(cpu_data)
audit_resp = session.get( audit_resp = session.get(
f"{watcher_endpoint}/v1/audits/{item['uuid']}" f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}"
) )
audit_resp.raise_for_status() audit_resp.raise_for_status()
audit_resp = audit_resp.json() audit_resp = audit_resp.json()
actionplan = next(filter(lambda x: x.get("audit_uuid") == audit_resp['uuid'], actionplans_resp), None) actionplan = next(filter(lambda x: x.get("audit_uuid") == audit_resp['uuid'], actionplans_resp), None)
if actionplan is None:
continue
actions_resp = session.get( actions_resp = session.get(
f"{watcher_endpoint}/v1/actions/?action_plan_uuid={actionplan['uuid']}" f"{watcher_endpoint}/v1/actions/?action_plan_uuid={actionplan['uuid']}"
) )
actions_resp.raise_for_status() actions_resp.raise_for_status()
actions_resp = actions_resp.json().get('actions') actions_resp = actions_resp.json().get('actions') or []
migrations = [] migrations = []
mapping = {} mapping = {}
@@ -87,14 +89,14 @@ def get_audits(connection: Connection) -> list[dict] | None:
action_resp.raise_for_status() action_resp.raise_for_status()
action_resp = action_resp.json() action_resp = action_resp.json()
server = connection.get_server_by_id(action['input_parameters']['resource_id']) server = connection.get_server_by_id(action_resp['input_parameters']['resource_id'])
params = action_resp['input_parameters'] params = action_resp['input_parameters']
mapping[params['resource_name']] = params['destination_node'] mapping[params['resource_name']] = params['destination_node']
migrations.append({ migrations.append({
"instanceName": action['input_parameters']['resource_name'], "instanceName": action_resp['input_parameters']['resource_name'],
"source": action['input_parameters']['source_node'], "source": action_resp['input_parameters']['source_node'],
"destination": action['input_parameters']['destination_node'], "destination": action_resp['input_parameters']['destination_node'],
"flavor": server.flavor.name, "flavor": server.flavor.name,
"impact": 'Low' "impact": 'Low'
}) })

View File

@@ -10,11 +10,14 @@ def get_flavor_list(connection: Connection) -> dict:
flavors = list(flavor_count) flavors = list(flavor_count)
result = {} result = {}
placeholder = {"name": "", "count": 0}
for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]: for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]:
if len(flavors) > idx: if len(flavors) > idx:
result[f"{prefix}_common_flavor"] = { result[f"{prefix}_common_flavor"] = {
"name": flavors[idx][0], "name": flavors[idx][0],
"count": flavors[idx][1] "count": flavors[idx][1]
} }
else:
result[f"{prefix}_common_flavor"] = placeholder
return result return result

View File

@@ -11,6 +11,6 @@ def query_prometheus(query: str) -> str | list[str]:
response.raise_for_status() response.raise_for_status()
result = response.json()["data"]["result"] result = response.json()["data"]["result"]
if len(result) > 1: if len(result) > 1:
return result[0]["value"][1] return result
else: else:
return result[0]["values"] return result[0]["value"][1]

View File

@@ -0,0 +1 @@
# Dashboard test package

View File

@@ -0,0 +1,103 @@
"""Tests for dashboard.templatetags.mathfilters."""
from django.test import TestCase
from django.template import Template, Context
from dashboard.templatetags.mathfilters import div, mul, sub, convert_bytes
class DivFilterTest(TestCase):
    """Tests for the div template filter."""

    def test_div_normal(self):
        # Ordinary division always yields a float result.
        for numerator, denominator, expected in ((10, 2, 5.0), (10.0, 4, 2.5)):
            self.assertEqual(div(numerator, denominator), expected)

    def test_div_by_zero(self):
        # Division by zero is swallowed and reported as 0.
        self.assertEqual(div(10, 0), 0)

    def test_div_non_numeric(self):
        # Non-numeric operands also fall back to 0.
        for bad_args in (("x", 2), (10, "y"), (None, 2)):
            self.assertEqual(div(*bad_args), 0)
class MulFilterTest(TestCase):
    """Tests for the mul template filter."""

    def test_mul_normal(self):
        # Ordinary multiplication yields a float result.
        for left, right, expected in ((3, 4, 12.0), (2.5, 4, 10.0)):
            self.assertEqual(mul(left, right), expected)

    def test_mul_non_numeric(self):
        # Non-numeric operands fall back to 0.
        for bad_args in (("a", 2), (2, None)):
            self.assertEqual(mul(*bad_args), 0)
class SubFilterTest(TestCase):
    """Tests for the sub template filter."""

    def test_sub_normal(self):
        # Ordinary subtraction yields a float result.
        for left, right, expected in ((10, 3, 7.0), (5.5, 2, 3.5)):
            self.assertEqual(sub(left, right), expected)

    def test_sub_non_numeric(self):
        # Non-numeric operands fall back to 0.
        for bad_args in (("x", 1), (5, "y")):
            self.assertEqual(sub(*bad_args), 0)
class ConvertBytesFilterTest(TestCase):
    """Tests for the convert_bytes template filter."""

    def test_convert_to_B(self):
        self.assertEqual(convert_bytes(1024, "B"), 1024.0)

    def test_convert_to_KB(self):
        self.assertEqual(convert_bytes(2 * 1024, "KB"), 2.0)

    def test_convert_to_MB(self):
        self.assertEqual(convert_bytes(3 * 1024 ** 2, "MB"), 3.0)

    def test_convert_to_GB(self):
        self.assertEqual(convert_bytes(5 * 1024 ** 3, "GB"), 5.0)

    def test_convert_to_TB(self):
        self.assertEqual(convert_bytes(1024 ** 4, "TB"), 1.0)

    def test_convert_default_GB(self):
        # With no unit argument the filter defaults to GB.
        self.assertEqual(convert_bytes(2 * 1024 ** 3), 2.0)

    def test_convert_invalid_unit_fallback_to_MB(self):
        # Unknown unit strings fall back to MB.
        for bogus_unit in ("invalid", "xyz"):
            self.assertEqual(convert_bytes(1024 ** 2, bogus_unit), 1.0)

    def test_convert_non_numeric_returns_zero(self):
        for bad_value in ("abc", None):
            self.assertEqual(convert_bytes(bad_value), 0.0)

    def test_convert_rounds_to_one_decimal(self):
        # Both 1500 MiB (~1.46 GiB) and 1536 MiB (1.5 GiB) round to 1.5.
        self.assertEqual(convert_bytes(1500 * 1024 ** 2, "GB"), 1.5)
        self.assertEqual(convert_bytes(1536 * 1024 ** 2, "GB"), 1.5)

    def test_convert_case_insensitive_unit(self):
        for unit in ("gb", "GB"):
            self.assertEqual(convert_bytes(1024 ** 3, unit), 1.0)
class MathfiltersTemplateIntegrationTest(TestCase):
    """Test filters via template rendering."""

    @staticmethod
    def _render(snippet, **ctx):
        # Render a template snippet with the mathfilters library loaded.
        return Template("{% load mathfilters %}" + snippet).render(Context(ctx))

    def test_div_in_template(self):
        self.assertEqual(self._render("{{ a|div:b }}", a=10, b=2), "5.0")

    def test_mul_in_template(self):
        self.assertEqual(self._render("{{ a|mul:b }}", a=3, b=4), "12.0")

    def test_sub_in_template(self):
        self.assertEqual(self._render("{{ a|sub:b }}", a=10, b=3), "7.0")

    def test_convert_bytes_in_template(self):
        self.assertEqual(self._render("{{ bytes|convert_bytes:'GB' }}", bytes=2 * 1024 ** 3), "2.0")

View File

@@ -0,0 +1,107 @@
"""Tests for dashboard.mock_data."""
import json
from django.test import TestCase
from dashboard.mock_data import get_mock_context
class GetMockContextTest(TestCase):
    """Tests for get_mock_context()."""

    # Audit fields that must arrive as JSON strings, render-ready for JS.
    SERIALIZED_FIELDS = ("migrations", "host_labels", "cpu_current", "cpu_projected")

    def setUp(self):
        # Fresh mock context for every test.
        self.ctx = get_mock_context()

    def test_returns_all_top_level_keys(self):
        self.assertEqual(
            set(self.ctx.keys()),
            {"region", "pcpu", "vcpu", "pram", "vram", "vm", "flavors", "audits"},
        )

    def test_region_structure(self):
        region = self.ctx["region"]
        for key in ("name", "hosts_total"):
            self.assertIn(key, region)
        self.assertEqual(region["name"], "mock-region")
        self.assertEqual(region["hosts_total"], 6)

    def test_pcpu_structure_and_types(self):
        pcpu = self.ctx["pcpu"]
        self.assertEqual(pcpu["total"], 48)
        self.assertEqual(pcpu["usage"], 12.5)
        self.assertEqual(pcpu["free"], 48 - 12.5)
        self.assertIsInstance(pcpu["used_percentage"], (int, float))

    def test_vcpu_structure(self):
        vcpu = self.ctx["vcpu"]
        expected_keys = ("total", "allocated", "free", "allocated_percentage",
                         "overcommit_ratio", "overcommit_max")
        for key in expected_keys:
            self.assertIn(key, vcpu)
        self.assertEqual(vcpu["overcommit_max"], 2.0)

    def test_pram_vram_structure(self):
        pram = self.ctx["pram"]
        vram = self.ctx["vram"]
        for key in ("total", "usage", "free", "used_percentage"):
            self.assertIn(key, pram)
        for key in ("total", "allocated", "overcommit_max"):
            self.assertIn(key, vram)

    def test_vm_structure(self):
        vm = self.ctx["vm"]
        self.assertEqual(vm["count"], 24)
        self.assertEqual(vm["active"], 22)
        self.assertEqual(vm["stopped"], 2)
        for key in ("avg_cpu", "avg_ram", "density"):
            self.assertIn(key, vm)

    def test_flavors_structure(self):
        flavors = self.ctx["flavors"]
        for key in ("first_common_flavor", "second_common_flavor", "third_common_flavor"):
            self.assertIn(key, flavors)
            self.assertIn("name", flavors[key])
            self.assertIn("count", flavors[key])
        self.assertEqual(flavors["first_common_flavor"]["name"], "m1.small")
        self.assertEqual(flavors["first_common_flavor"]["count"], 12)

    def test_audits_serialized_fields(self):
        audits = self.ctx["audits"]
        self.assertIsInstance(audits, list)
        self.assertGreaterEqual(len(audits), 1)
        for audit in audits:
            for field in self.SERIALIZED_FIELDS:
                self.assertIn(field, audit)
                # Must be a JSON string (render-ready for JS) ...
                self.assertIsInstance(audit[field], str)
                # ... and must parse as valid JSON.
                json.loads(audit[field])

    def test_audits_metadata_fields(self):
        audit = self.ctx["audits"][0]
        metadata_keys = ("id", "name", "created_at", "strategy", "goal",
                         "type", "scope", "cpu_weight", "ram_weight")
        for key in metadata_keys:
            self.assertIn(key, audit)

View File

@@ -1,71 +1,70 @@
import json import json
from concurrent.futures import ThreadPoolExecutor, as_completed
from django.conf import settings
from django.core.cache import cache
from django.shortcuts import render from django.shortcuts import render
from dashboard.openstack_utils.connect import get_connection from dashboard.openstack_utils.connect import get_connection
from dashboard.openstack_utils.flavor import get_flavor_list from dashboard.openstack_utils.flavor import get_flavor_list
from dashboard.prometheus_utils.query import query_prometheus from dashboard.prometheus_utils.query import query_prometheus
from dashboard.openstack_utils.audits import get_audits from dashboard.openstack_utils.audits import get_audits
from dashboard.mock_data import get_mock_context
# Prometheus queries run in parallel (query_key -> query string)
_PROMETHEUS_QUERIES = {
"hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
"pcpu_total": "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))",
"pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
"vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
"vcpu_overcommit_max": "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})",
"pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
"pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
"vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
"vram_overcommit_max": "avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))",
"vm_count": "sum(libvirt_domain_state_code)",
"vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
}
def _fetch_prometheus_metrics():
"""Run all Prometheus queries in parallel and return a dict of name -> value."""
result = {}
with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor:
future_to_key = {
executor.submit(query_prometheus, query=q): key
for key, q in _PROMETHEUS_QUERIES.items()
}
for future in as_completed(future_to_key):
key = future_to_key[future]
try:
raw = future.result()
if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max"):
result[key] = float(raw)
else:
result[key] = int(raw)
except (ValueError, TypeError):
result[key] = 0 if key in ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max") else 0
return result
def collect_context(): def collect_context():
connection = get_connection() connection = get_connection()
region_name = connection._compute_region region_name = connection._compute_region
flavors = get_flavor_list(connection=connection) flavors = get_flavor_list(connection=connection)
audits = get_audits(connection=connection) audits = get_audits(connection=connection)
hosts_total = int(
query_prometheus( metrics = _fetch_prometheus_metrics()
query="count(node_exporter_build_info{job='node_exporter_compute'})" hosts_total = metrics.get("hosts_total") or 1
) pcpu_total = metrics.get("pcpu_total", 0)
) pcpu_usage = metrics.get("pcpu_usage", 0)
pcpu_total = int( vcpu_allocated = metrics.get("vcpu_allocated", 0)
query_prometheus( vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
query="sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))" pram_total = metrics.get("pram_total", 0)
) pram_usage = metrics.get("pram_usage", 0)
) vram_allocated = metrics.get("vram_allocated", 0)
pcpu_usage = float( vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
query_prometheus( vm_count = metrics.get("vm_count", 0)
query="sum(node_load5{job='node_exporter_compute'})" vm_active = metrics.get("vm_active", 0)
)
)
vcpu_allocated = int(
query_prometheus(
query="sum(libvirt_domain_info_virtual_cpus)"
)
)
vcpu_overcommit_max = float(
query_prometheus(
query="avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
)
)
pram_total = int(
query_prometheus(
query="sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})" # memory in bytes
)
)
pram_usage = int (
query_prometheus(
query="sum(node_memory_Active_bytes{job='node_exporter_compute'})"
)
)
vram_allocated = int(
query_prometheus(
query="sum(libvirt_domain_info_maximum_memory_bytes)"
)
)
vram_overcommit_max = float(
query_prometheus(
query="avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
)
)
vm_count = int(
query_prometheus(
query="sum(libvirt_domain_state_code)"
)
)
vm_active = int(
query_prometheus(
query="sum(libvirt_domain_state_code{stateDesc='the domain is running'})"
)
)
vcpu_total = pcpu_total * vcpu_overcommit_max vcpu_total = pcpu_total * vcpu_overcommit_max
vram_total = pram_total * vram_overcommit_max vram_total = pram_total * vram_overcommit_max
@@ -74,7 +73,7 @@ def collect_context():
# <--- Region data ---> # <--- Region data --->
"region": { "region": {
"name": region_name, "name": region_name,
"hosts_total": 6, "hosts_total": hosts_total,
}, },
# <--- CPU data ---> # <--- CPU data --->
# pCPU data # pCPU data
@@ -82,32 +81,32 @@ def collect_context():
"total": pcpu_total, "total": pcpu_total,
"usage": pcpu_usage, "usage": pcpu_usage,
"free": pcpu_total - pcpu_usage, "free": pcpu_total - pcpu_usage,
"used_percentage": pcpu_usage / pcpu_total * 100, "used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
}, },
# vCPU data # vCPU data
"vcpu": { "vcpu": {
"total": vcpu_total, "total": vcpu_total,
"allocated": vcpu_allocated, "allocated": vcpu_allocated,
"free": vcpu_total - vcpu_allocated, "free": vcpu_total - vcpu_allocated,
"allocated_percentage": vcpu_allocated / vcpu_total * 100, "allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
"overcommit_ratio": vcpu_allocated / pcpu_total, "overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
"overcommit_max": vcpu_overcommit_max, "overcommit_max": vcpu_overcommit_max,
}, },
# <--- RAM data ---> # <--- RAM data --->
# pRAM data # pRAM data
"pram" : { "pram": {
"total": pram_total, "total": pram_total,
"usage": pram_usage, "usage": pram_usage,
"free": pram_total - pram_usage, "free": pram_total - pram_usage,
"used_percentage": pram_usage / pram_total * 100, "used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
}, },
# vRAM data # vRAM data
"vram": { "vram": {
"total": vram_total, "total": vram_total,
"allocated": vram_allocated, "allocated": vram_allocated,
"free": vram_total - vram_allocated, "free": vram_total - vram_allocated,
"allocated_percentage": vram_allocated / vram_total * 100, "allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
"overcommit_ratio": vram_allocated / pram_total, "overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
"overcommit_max": vram_overcommit_max, "overcommit_max": vram_overcommit_max,
}, },
# <--- VM data ---> # <--- VM data --->
@@ -115,218 +114,30 @@ def collect_context():
"count": vm_count, "count": vm_count,
"active": vm_active, "active": vm_active,
"stopped": vm_count - vm_active, "stopped": vm_count - vm_active,
"avg_cpu": vcpu_allocated / vm_count, "avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
"avg_ram": vram_allocated / vm_count, "avg_ram": vram_allocated / vm_count if vm_count else 0,
"density": vm_count / hosts_total, "density": vm_count / hosts_total if hosts_total else 0,
}, },
"flavors": flavors, "flavors": flavors,
"audits": audits, "audits": audits,
} }
# Serialize audit list fields for JavaScript so cached context is render-ready
for audit in context["audits"]:
audit["migrations"] = json.dumps(audit["migrations"])
audit["host_labels"] = json.dumps(audit["host_labels"])
audit["cpu_current"] = json.dumps(audit["cpu_current"])
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
return context return context
def index(request): def index(request):
hosts_total = 6 if getattr(settings, "USE_MOCK_DATA", False):
pcpu_total = 672 context = get_mock_context()
pcpu_usage = 39.2 return render(request, "index.html", context)
vcpu_total = 3360
vcpu_allocated = 98
vcpu_overcommit_max = 5
pram_total = 562500000000
pram_usage = 4325000000
vram_total = 489375000000
vram_allocated = 5625000000
vram_overcommit_max = 0.87
vm_count = 120
vm_active = 90
context = {
# <--- Region data --->
"region": {
"name": "ct3k1ldt",
"hosts_total": 6,
},
# <--- CPU data --->
# pCPU data
"pcpu": {
"total": pcpu_total,
"usage": pcpu_usage,
"free": pcpu_total - pcpu_usage,
"used_percentage": pcpu_usage / pcpu_total * 100,
},
# vCPU data
"vcpu": {
"total": vcpu_total,
"allocated": vcpu_allocated,
"free": vcpu_total - vcpu_allocated,
"allocated_percentage": vcpu_allocated / vcpu_total * 100,
"overcommit_ratio": vcpu_allocated / pcpu_total,
"overcommit_max": vcpu_overcommit_max,
},
# <--- RAM data --->
# pRAM data
"pram" : {
"total": pram_total,
"usage": pram_usage,
"free": pram_total - pram_usage,
"used_percentage": pram_usage / pram_total * 100,
},
# vRAM data
"vram": {
"total": vram_total,
"allocated": vram_allocated,
"free": vram_total - vram_allocated,
"allocated_percentage": vram_allocated / vram_total * 100,
"overcommit_ratio": vram_allocated / pram_total,
"overcommit_max": vram_overcommit_max,
},
# <--- VM data --->
"vm": {
"count": vm_count,
"active": vm_active,
"stopped": vm_count - vm_active,
"avg_cpu": vcpu_allocated / vm_count,
"avg_ram": vram_allocated / vm_count,
"density": vm_count / hosts_total,
},
"flavors": {
'first_common_flavor': {
'name': 'm1.medium',
'count': 18
},
'second_common_flavor': {
'name': 'm1.small',
'count': 12
},
'third_common_flavor': {
'name': 'm1.large',
'count': 8
},
},
# Audit data cache_key = "dashboard_context"
'audits': [ cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
{ context = cache.get(cache_key)
'id': 'audit_001', if context is None:
'name': 'Weekly Optimization', context = collect_context()
'created_at': '2024-01-15', cache.set(cache_key, context, timeout=cache_ttl)
'cpu_weight': 1.2, return render(request, "index.html", context)
'ram_weight': 0.6,
'scope': 'Full Cluster',
'strategy': 'Load Balancing',
'goal': 'Optimize CPU distribution across all hosts',
'migrations': [
{
'instanceName': 'web-server-01',
'source': 'compute-02',
'destination': 'compute-05',
'flavor': 'm1.medium',
'impact': 'Low'
},
{
'instanceName': 'db-replica-03',
'source': 'compute-01',
'destination': 'compute-04',
'flavor': 'm1.large',
'impact': 'Medium'
},
{
'instanceName': 'api-gateway',
'source': 'compute-03',
'destination': 'compute-06',
'flavor': 'm1.small',
'impact': 'Low'
},
{
'instanceName': 'cache-node-02',
'source': 'compute-01',
'destination': 'compute-07',
'flavor': 'm1.small',
'impact': 'Low'
},
{
'instanceName': 'monitoring-server',
'source': 'compute-04',
'destination': 'compute-02',
'flavor': 'm1.medium',
'impact': 'Low'
}
],
'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
'cpu_current': [78, 65, 42, 89, 34, 56, 71],
'cpu_projected': [65, 58, 45, 72, 48, 61, 68]
},
{
'id': 'audit_002',
'name': 'Emergency Rebalance',
'created_at': '2024-01-14',
'cpu_weight': 1.0,
'ram_weight': 1.0,
'scope': 'Overloaded Hosts',
'strategy': 'Hotspot Reduction',
'goal': 'Reduce load on compute-01 and compute-04',
'migrations': [
{
'instanceName': 'app-server-02',
'source': 'compute-01',
'destination': 'compute-06',
'flavor': 'm1.medium',
'impact': 'Medium'
},
{
'instanceName': 'file-server-01',
'source': 'compute-04',
'destination': 'compute-07',
'flavor': 'm1.large',
'impact': 'High'
}
],
'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
'cpu_current': [92, 65, 42, 85, 34, 56, 71],
'cpu_projected': [72, 65, 42, 65, 34, 66, 81]
},
{
'id': 'audit_003',
'name': 'Pre-Maintenance Planning',
'created_at': '2024-01-10',
'cpu_weight': 0.8,
'ram_weight': 1.5,
'scope': 'Maintenance Zone',
'strategy': 'Evacuation',
'goal': 'Empty compute-03 for maintenance',
'migrations': [
{
'instanceName': 'test-vm-01',
'source': 'compute-03',
'destination': 'compute-02',
'flavor': 'm1.small',
'impact': 'Low'
},
{
'instanceName': 'dev-server',
'source': 'compute-03',
'destination': 'compute-05',
'flavor': 'm1.medium',
'impact': 'Low'
},
{
'instanceName': 'staging-db',
'source': 'compute-03',
'destination': 'compute-07',
'flavor': 'm1.large',
'impact': 'High'
}
],
'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
'cpu_current': [78, 65, 56, 89, 34, 56, 71],
'cpu_projected': [78, 75, 0, 89, 54, 56, 81]
}
]
}
# Serialize lists for JavaScript
for audit in context['audits']:
audit['migrations'] = json.dumps(audit['migrations'])
audit['host_labels'] = json.dumps(audit['host_labels'])
audit['cpu_current'] = json.dumps(audit['cpu_current'])
audit['cpu_projected'] = json.dumps(audit['cpu_projected'])
return render(request, 'index.html', context)

22
docker-compose.dev.yml Normal file
View File

@@ -0,0 +1,22 @@
# Development override: use with
# docker compose -f docker-compose.yml -f docker-compose.dev.yml up --build
#
# Uses mock data (no OpenStack/Prometheus), mounts code for live reload.
services:
watcher-visio:
build:
context: .
dockerfile: Dockerfile
volumes:
- .:/app
environment:
- USE_MOCK_DATA=true
- DEBUG=true
- PYTHONUNBUFFERED=1
ports:
- "8000:8000"
# Optional: skip entrypoint migrations on every start for faster restart
# command: ["python3", "manage.py", "runserver", "0.0.0.0:8000"]
stdin_open: true
tty: true

View File

@@ -1,8 +1,24 @@
# Base compose: production-like run.
# For development with mock data and live reload use:
# docker compose -f docker-compose.yml -f docker-compose.dev.yml up --build
services: services:
watcher-visio: watcher-visio:
build: . image: watcher-visio:latest
build:
context: .
dockerfile: Dockerfile
container_name: watcher-visio
ports: ports:
- "8000:8000" - "8000:8000"
volumes: environment:
- ./:/app - PYTHONUNBUFFERED=1
# Override via environment or env_file (e.g. env_file: .env):
# PROMETHEUS_URL, OPENSTACK_CLOUD, OPENSTACK_REGION_NAME, SECRET_KEY
healthcheck:
test: ["CMD", "curl", "-f", "http://127.0.0.1:8000/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 15s
restart: unless-stopped restart: unless-stopped

View File

@@ -1,12 +1,11 @@
#!/bin/sh #!/bin/sh
set -e set -e
echo "Applying database migrations..." echo "Applying database migrations..."
python manage.py migrate --noinput python3 manage.py migrate --noinput
echo "Collecting static files..." echo "Collecting static files..."
python manage.py collectstatic --noinput python3 manage.py collectstatic --noinput
echo "Starting Django application..." echo "Starting Django application..."
exec "$@" exec "$@"

5
package-lock.json generated
View File

@@ -713,7 +713,6 @@
} }
], ],
"license": "MIT", "license": "MIT",
"peer": true,
"dependencies": { "dependencies": {
"baseline-browser-mapping": "^2.8.25", "baseline-browser-mapping": "^2.8.25",
"caniuse-lite": "^1.0.30001754", "caniuse-lite": "^1.0.30001754",
@@ -1254,7 +1253,6 @@
} }
], ],
"license": "MIT", "license": "MIT",
"peer": true,
"dependencies": { "dependencies": {
"nanoid": "^3.3.11", "nanoid": "^3.3.11",
"picocolors": "^1.1.1", "picocolors": "^1.1.1",
@@ -1298,8 +1296,7 @@
"version": "4.1.17", "version": "4.1.17",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.17.tgz", "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.17.tgz",
"integrity": "sha512-j9Ee2YjuQqYT9bbRTfTZht9W/ytp5H+jJpZKiYdP/bpnXARAuELt9ofP0lPnmHjbga7SNQIxdTAXCmtKVYjN+Q==", "integrity": "sha512-j9Ee2YjuQqYT9bbRTfTZht9W/ytp5H+jJpZKiYdP/bpnXARAuELt9ofP0lPnmHjbga7SNQIxdTAXCmtKVYjN+Q==",
"license": "MIT", "license": "MIT"
"peer": true
}, },
"node_modules/tapable": { "node_modules/tapable": {
"version": "2.3.0", "version": "2.3.0",

View File

@@ -10,7 +10,7 @@
--color-base-200: oklch(98% 0 0); --color-base-200: oklch(98% 0 0);
--color-base-300: oklch(95% 0 0); --color-base-300: oklch(95% 0 0);
--color-base-content: oklch(21% 0.006 285.885); --color-base-content: oklch(21% 0.006 285.885);
--color-primary: #09418E; --color-primary: #0A2896;
--color-primary-content: oklch(93% 0.034 272.788); --color-primary-content: oklch(93% 0.034 272.788);
--color-secondary: #428BCA; --color-secondary: #428BCA;
--color-secondary-content: oklch(100% 0 0); --color-secondary-content: oklch(100% 0 0);

View File

@@ -5,6 +5,7 @@
<script src="{% static 'js/utils.js' %}"></script> <script src="{% static 'js/utils.js' %}"></script>
<script src="{% static 'js/chart.js' %}"></script> <script src="{% static 'js/chart.js' %}"></script>
<script src="{% static 'js/chartjs-plugin-datalabels.min.js' %}"></script> <script src="{% static 'js/chartjs-plugin-datalabels.min.js' %}"></script>
<script src="{% static 'js/chartjs-plugin-annotation.min.js' %}"></script>
{% endblock %} {% endblock %}
{% block content %} {% block content %}
@@ -18,14 +19,14 @@
<div class="flex items-center justify-between mb-3"> <div class="flex items-center justify-between mb-3">
<div> <div>
<h3 class="text-sm font-medium text-base-content/70">CPU Utilization</h3> <h3 class="text-sm font-medium text-base-content/70">CPU Utilization</h3>
<div class="text-xs text-base-content/40 mt-0.5">{{ pcpu.usage }} / {{ pcpu.total }} CPU</div> <div class="text-xs text-base-content/40 mt-0.5">{{ pcpu.usage|floatformat:1 }} / {{ pcpu.total }} CPU</div>
</div> </div>
<div class="text-lg font-bold text-primary">{{ pcpu.used_percentage|floatformat:1 }}%</div> <div class="text-lg font-bold text-primary">{{ pcpu.used_percentage|floatformat:1 }}%</div>
</div> </div>
<div class="space-y-2"> <div class="space-y-2">
<div class="flex justify-between text-xs"> <div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span> <span class="text-base-content/60">Used</span>
<span class="font-medium">{{ pcpu.usage }} CPU</span> <span class="font-medium">{{ pcpu.usage|floatformat:1 }} CPU</span>
</div> </div>
<progress class="progress progress-primary w-full" value="{{ pcpu.used_percentage }}" max="100"></progress> <progress class="progress progress-primary w-full" value="{{ pcpu.used_percentage }}" max="100"></progress>
<div class="flex justify-between text-xs"> <div class="flex justify-between text-xs">
@@ -120,7 +121,7 @@
<span class="text-xs font-medium w-12 text-right">{{ vcpu.allocated_percentage|floatformat:1 }}%</span> <span class="text-xs font-medium w-12 text-right">{{ vcpu.allocated_percentage|floatformat:1 }}%</span>
</div> </div>
<div class="flex justify-between text-xs mt-1"> <div class="flex justify-between text-xs mt-1">
<span class="text-base-content/50">overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max }}</span> <span class="text-base-content/50">overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max|floatformat:1 }}</span>
<span class="text-base-content/50">{{ vcpu.allocated_percentage|floatformat:1 }}% allocated</span> <span class="text-base-content/50">{{ vcpu.allocated_percentage|floatformat:1 }}% allocated</span>
</div> </div>
</div> </div>
@@ -136,7 +137,7 @@
<span class="text-xs font-medium w-12 text-right">{{ vram.allocated_percentage|floatformat:1 }}%</span> <span class="text-xs font-medium w-12 text-right">{{ vram.allocated_percentage|floatformat:1 }}%</span>
</div> </div>
<div class="flex justify-between text-xs mt-1"> <div class="flex justify-between text-xs mt-1">
<span class="text-base-content/50">overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max }}</span> <span class="text-base-content/50">overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max|floatformat:1 }}</span>
<span class="text-base-content/50">{{ vram.allocated_percentage|floatformat:1 }}% allocated</span> <span class="text-base-content/50">{{ vram.allocated_percentage|floatformat:1 }}% allocated</span>
</div> </div>
</div> </div>
@@ -271,6 +272,10 @@
<div class="w-3 h-0.5 bg-success"></div> <div class="w-3 h-0.5 bg-success"></div>
<span class="text-success">Mean: <span id="currentCpuMean">0</span>%</span> <span class="text-success">Mean: <span id="currentCpuMean">0</span>%</span>
</div> </div>
<div class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-error/60"></div>
<span class="text-error/60">±0.5σ: <span id="currentCpuStd">0</span>%</span>
</div>
</div> </div>
</div> </div>
</div> </div>
@@ -289,7 +294,7 @@
</div> </div>
<div class="flex items-center gap-1 text-xs"> <div class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-error/60"></div> <div class="w-3 h-0.5 bg-error/60"></div>
<span class="text-error/60">±1σ: <span id="projectedCpuStd">0</span>%</span> <span class="text-error/60">±0.5σ: <span id="projectedCpuStd">0</span>%</span>
</div> </div>
</div> </div>
</div> </div>
@@ -435,7 +440,8 @@
// Update stats displays // Update stats displays
document.getElementById('currentCpuMean').textContent = currentStats.mean.toFixed(1); document.getElementById('currentCpuMean').textContent = currentStats.mean.toFixed(1);
document.getElementById('projectedCpuMean').textContent = projectedStats.mean.toFixed(1); document.getElementById('projectedCpuMean').textContent = projectedStats.mean.toFixed(1);
document.getElementById('projectedCpuStd').textContent = projectedStats.std.toFixed(1); document.getElementById('projectedCpuStd').textContent = (currentStats.std * 0.5).toFixed(1);
document.getElementById('currentCpuStd').textContent = (currentStats.std * 0.5).toFixed(1);
// Destroy existing charts // Destroy existing charts
if (cpuHostChart) cpuHostChart.destroy(); if (cpuHostChart) cpuHostChart.destroy();
@@ -474,7 +480,35 @@
legend: { display: false }, legend: { display: false },
tooltip: { tooltip: {
callbacks: { callbacks: {
label: (ctx) => `${ctx.parsed.y}% CPU` label: (ctx) => `${Number(ctx.parsed.y).toFixed(2)}% CPU`
}
},
annotation: {
annotations: {
MeanLine: {
type: 'line',
yMin: currentStats.mean.toFixed(1),
yMax: currentStats.mean.toFixed(1),
borderColor: colors.success,
borderWidth: 2,
borderDash: []
},
upperStdLine: {
type: 'line',
yMin: (currentStats.mean + currentStats.std * 0.5).toFixed(1),
yMax: (currentStats.mean + currentStats.std * 0.5).toFixed(1),
borderColor: colors.error,
borderWidth: 1,
borderDash: [5, 5]
},
lowerStdLine: {
type: 'line',
yMin: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0,
yMax: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0,
borderColor: colors.error,
borderWidth: 1,
borderDash: [5, 5]
}
} }
} }
}, },
@@ -482,7 +516,9 @@
y: { y: {
beginAtZero: true, beginAtZero: true,
max: 100, max: 100,
grid: { drawBorder: false }, grid: {
drawBorder: false,
},
ticks: { ticks: {
callback: value => value + '%' callback: value => value + '%'
} }
@@ -518,7 +554,35 @@
legend: { display: false }, legend: { display: false },
tooltip: { tooltip: {
callbacks: { callbacks: {
label: (ctx) => `${ctx.parsed.y}% CPU` label: (ctx) => `${Number(ctx.parsed.y).toFixed(2)}% CPU`
}
},
annotation: {
annotations: {
MeanLine: {
type: 'line',
yMin: projectedStats.mean.toFixed(1),
yMax: projectedStats.mean.toFixed(1),
borderColor: colors.success,
borderWidth: 2,
borderDash: []
},
upperStdLine: {
type: 'line',
yMin: (currentStats.mean + currentStats.std * 0.5).toFixed(1),
yMax: (currentStats.mean + currentStats.std * 0.5).toFixed(1),
borderColor: colors.error,
borderWidth: 1,
borderDash: [5, 5]
},
lowerStdLine: {
type: 'line',
yMin: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0,
yMax: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0,
borderColor: colors.error,
borderWidth: 1,
borderDash: [5, 5]
}
} }
} }
}, },
@@ -552,7 +616,7 @@
// Initialize // Initialize
document.addEventListener('DOMContentLoaded', () => { document.addEventListener('DOMContentLoaded', () => {
const initialAudit = "{{ audits.0.id|default:'' }}"; const initialAudit = "{% if audits %}{{ audits.0.id }}{% endif %}";
if (initialAudit && auditData[initialAudit]) { if (initialAudit && auditData[initialAudit]) {
document.getElementById('auditSelector').dispatchEvent(new Event('change')); document.getElementById('auditSelector').dispatchEvent(new Event('change'));
loadSelectedAudit(); loadSelectedAudit();

View File

@@ -10,11 +10,15 @@ For the full list of settings and their values, see
https://docs.djangoproject.com/en/5.2/ref/settings/ https://docs.djangoproject.com/en/5.2/ref/settings/
""" """
import os
from pathlib import Path from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'. # Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent BASE_DIR = Path(__file__).resolve().parent.parent
# Use mock data when no OpenStack/Prometheus access (e.g. local dev)
USE_MOCK_DATA = os.environ.get("USE_MOCK_DATA", "false").lower() in ("1", "true", "yes")
# Quick-start development settings - unsuitable for production # Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/5.2/howto/deployment/checklist/ # See https://docs.djangoproject.com/en/5.2/howto/deployment/checklist/
@@ -25,7 +29,7 @@ SECRET_KEY = 'django-insecure-747*14ir*49hoo6c2225)kxr%4^am0ub_s-m^_7i4cctu)v$g8
# SECURITY WARNING: don't run with debug turned on in production! # SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True DEBUG = True
ALLOWED_HOSTS = [] ALLOWED_HOSTS = ['*']
# Application definition # Application definition
@@ -41,15 +45,15 @@ INSTALLED_APPS = [
] ]
# Prometheus settings (environment override recommended) # Prometheus settings (environment override recommended)
PROMETHEUS_URL = "http://localhost:9090" PROMETHEUS_URL = "http://10.226.74.53:9090/"
PROMETHEUS_METRICS = { PROMETHEUS_METRICS = {
"cpu_usage": "", "cpu_usage": "rate(libvirt_domain_info_cpu_time_seconds_total)[300s]",
"ram_usage": "" "ram_usage": "avg_over_time(libvirt_domain_info_memory_usage_bytes[300s]"
} }
# Openstack cloud settings # Openstack cloud settings
OPENSTACK_REGION_NAME = "default" OPENSTACK_REGION_NAME = "cl2k1distlab"
OPENSTACK_CLOUD = "default" OPENSTACK_CLOUD = "distlab"
# Openstack watcher endoint settings # Openstack watcher endoint settings
WATCHER_ENDPOINT_NAME = "infra-optim" WATCHER_ENDPOINT_NAME = "infra-optim"
@@ -142,3 +146,12 @@ STATIC_ROOT = BASE_DIR / "staticfiles"
# https://docs.djangoproject.com/en/5.2/ref/settings/#default-auto-field # https://docs.djangoproject.com/en/5.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Dashboard cache (reduces load on OpenStack/Prometheus and allows concurrent users)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'watcher-visio-dashboard',
}
}
DASHBOARD_CACHE_TTL = 120 # seconds