Merge pull request 'develop' (#1) from develop into main
Some checks failed
CI / ci (push) Successful in 14s
Docker build and release / build-and-push (push) Failing after 8s
Docker build and release / release (push) Has been skipped

Reviewed-on: #1
This commit is contained in:
2026-02-07 18:06:50 +03:00
53 changed files with 4959 additions and 2409 deletions

View File

@@ -12,7 +12,6 @@ env/
.idea
*.log
*.sqlite3
static/
media/
node_modules/
npm-debug.log*
@@ -20,4 +19,12 @@ yarn-debug.log*
yarn-error.log*
Dockerfile
docker-compose.yml
docker-compose.dev.yml
README.md
*.md
clouds.yaml
.env.example
tailwind.config.js
package.json
package-lock.json
dashboard/tests/

9
.env.example Normal file
View File

@@ -0,0 +1,9 @@
# Optional: copy to .env and set for your environment.
# For docker-compose, add to docker-compose.yml: env_file: [.env]
# PYTHONUNBUFFERED=1
# USE_MOCK_DATA=false
# PROMETHEUS_URL=http://127.0.0.1:1234/
# OPENSTACK_CLOUD=distlab
# OPENSTACK_REGION_NAME=cl2k1distlab
# SECRET_KEY=your-secret-key

36
.gitea/workflows/ci.yml Normal file
View File

@@ -0,0 +1,36 @@
---
# Continuous integration: lint, unit tests (against mock data), security scan.
name: CI
on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]
jobs:
  ci:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: https://gitea.com/actions/checkout@v4
      - name: Set up Python
        uses: https://gitea.com/actions/setup-python@v5
        with:
          # Quoted so YAML does not parse the version as the float 3.12.
          python-version: "3.12"
      - name: Install dependencies
        run: pip install -r requirements.txt
      - name: Install lint and security tools
        run: pip install ruff bandit
      - name: Lint with Ruff
        run: ruff check dashboard watcher_visio
      - name: Run tests
        env:
          # Tests use mock data; no OpenStack/Prometheus is available in CI.
          USE_MOCK_DATA: "true"
        run: python manage.py test dashboard
      - name: Security check with Bandit
        # -ll: report only findings of medium severity and above.
        run: bandit -r dashboard watcher_visio -ll

View File

@@ -0,0 +1,75 @@
---
# Build and push the Docker image to the Gitea registry, then publish a
# release with generated notes. Runs only on pushes to main.
name: Docker build and release
on:
  push:
    branches: [main]
jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    outputs:
      # Date + short-SHA tag, consumed by the release job below.
      tag: ${{ steps.meta.outputs.tag }}
    steps:
      - name: Checkout
        uses: https://gitea.com/actions/checkout@v4
        with:
          # Full history so tags and log are available.
          fetch-depth: 0
      - name: Set release tag
        id: meta
        run: |
          echo "tag=v$(date +%Y%m%d)-${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
      - name: Extract registry host
        id: registry
        run: |
          # Strip the scheme from the server URL to get the registry host.
          echo "host=${GITHUB_SERVER_URL#https://}" >> $GITHUB_OUTPUT
      - name: Log in to Gitea Container Registry
        run: |
          echo "${{ secrets.REGISTRY_TOKEN }}" | docker login ${{ steps.registry.outputs.host }} -u ${{ github.actor }} --password-stdin
      - name: Build and push
        # NOTE(review): docker image names must be all-lowercase — confirm
        # github.repository contains no uppercase characters on this instance.
        run: |
          IMAGE="${{ steps.registry.outputs.host }}/${{ github.repository }}"
          TAG="${{ steps.meta.outputs.tag }}"
          docker build -t "$IMAGE:$TAG" -t "$IMAGE:latest" .
          docker push "$IMAGE:$TAG"
          docker push "$IMAGE:latest"
  release:
    runs-on: ubuntu-latest
    needs: build-and-push
    permissions:
      contents: write
    steps:
      - name: Checkout
        uses: https://gitea.com/actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Generate release notes
        id: notes
        run: |
          # Changes since the previous tag; full log on the first release.
          PREV=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
          if [ -z "$PREV" ]; then
            echo "## Changes" > release_notes.md
            echo "" >> release_notes.md
            git log --pretty=format:"- %s (%h)" >> release_notes.md || echo "- Initial release" >> release_notes.md
          else
            echo "## Changes since $PREV" > release_notes.md
            echo "" >> release_notes.md
            git log "$PREV"..HEAD --pretty=format:"- %s (%h)" >> release_notes.md
          fi
      - name: Create release
        uses: https://gitea.com/actions/gitea-release-action@v1
        with:
          token: ${{ secrets.REGISTRY_TOKEN }}
          tag_name: ${{ needs.build-and-push.outputs.tag }}
          body_path: release_notes.md
          target_commitish: ${{ github.sha }}

4
.gitignore vendored
View File

@@ -2,6 +2,8 @@
*.pyc
__pycache__/
venv/
.venv/
.env
db.sqlite3
@@ -24,3 +26,5 @@ static/css/tailwindcss
# NodeJS
node_modules
clouds.yaml

View File

@@ -1,32 +1,42 @@
FROM alpine:3 AS build
FROM alpine:3.21 AS build
RUN apk update && \
apk add --no-cache --virtual .build-deps \
ca-certificates gcc postgresql-dev linux-headers musl-dev \
libffi-dev jpeg-dev zlib-dev \
git bash build-base python3-dev
build-base python3-dev dos2unix
RUN python3 -m venv /venv
ENV PATH "/venv/bin:$PATH"
ENV PATH="/venv/bin:$PATH"
COPY ./requirements.txt /
RUN pip install -r /requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip \
pip install -r /requirements.txt
COPY ./docker-entrypoint.sh /docker-entrypoint.sh
RUN dos2unix /docker-entrypoint.sh && \
chmod +x /docker-entrypoint.sh
FROM alpine:3
FROM alpine:3.21
ENV LANG C.UTF-8
ENV LC_ALL C.UTF-8
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
ENV PYTHONUNBUFFERED 1
ENV PATH "/venv/bin:$PATH"
ENV PYTHONUNBUFFERED=1
ENV PATH="/venv/bin:$PATH"
RUN apk add --no-cache --update python3
RUN apk add --no-cache --update python3 curl
COPY --from=build /venv /venv
COPY --from=build /docker-entrypoint.sh /docker-entrypoint.sh
RUN mkdir /app
WORKDIR /app
COPY ./ /
COPY . /app
ENV DJANGO_SETTINGS_MODULE=watcher_visio.settings
RUN python3 manage.py collectstatic --noinput
RUN adduser -D -g "" app && chown -R app:app /app
USER app
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD [ "python", "manage.py", "runserver", "0.0.0.0:8000" ]
CMD ["python3", "manage.py", "runserver", "0.0.0.0:8080"]

171
README.md
View File

@@ -0,0 +1,171 @@
# watcher-visio
Web dashboard (**SWatcher**) for monitoring an OpenStack cluster and visualising OpenStack Watcher audits: region and host counts, physical/virtual CPU and RAM usage, VM stats, top flavors, and audit recommendations (migrations) with CPU load charts per host.
Data sources:
- **OpenStack** (SDK, `clouds.yaml`): compute region, servers, flavors; Watcher API for audits, action plans, and actions.
- **Prometheus**: node_exporter, libvirt, and placement metrics for pCPU/vCPU, pRAM/vRAM, and VM counts.
---
## Configuration
### Environment variables
Copy [.env.example](.env.example) to `.env` and set as needed. For Docker Compose you can use `env_file: [.env]` in `docker-compose.yml`.
| Variable | Description |
|----------|-------------|
| `PROMETHEUS_URL` | Prometheus base URL (e.g. `http://10.0.0.1:9090/`). |
| `OPENSTACK_CLOUD` | Cloud name from `clouds.yaml` (e.g. `distlab`). |
| `OPENSTACK_REGION_NAME` | OpenStack region (e.g. `cl2k1distlab`). |
| `USE_MOCK_DATA` | Set to `true`/`1`/`yes` to serve mock data (no OpenStack/Prometheus). Useful for local/dev. |
| `SECRET_KEY` | Django secret key; override in production. |
Defaults for Prometheus and OpenStack are in [watcher_visio/settings.py](watcher_visio/settings.py).
### OpenStack (`clouds.yaml`)
Authentication uses OpenStack's standard `clouds.yaml`. The cloud name must match `OPENSTACK_CLOUD`. Place `clouds.yaml` in the project root (or standard OpenStack config location). **Do not commit real credentials;** use a local or CI-specific file and keep production secrets out of the repo.
---
## Running locally
1. Create a virtualenv and install dependencies:
```bash
python -m venv .venv
source .venv/bin/activate # or .venv\Scripts\activate on Windows
pip install -r requirements.txt
```
2. Optionally build frontend CSS (see [Frontend build](#frontend-build)).
3. Configure `clouds.yaml` and environment (e.g. `.env` or export `PROMETHEUS_URL`, `OPENSTACK_CLOUD`, `OPENSTACK_REGION_NAME`). For development without OpenStack/Prometheus, set `USE_MOCK_DATA=true`.
4. Run migrations and start the server:
```bash
python manage.py migrate
python manage.py runserver
```
Open http://127.0.0.1:8000/ (or the port shown). With `USE_MOCK_DATA=true`, the dashboard is filled with mock data; otherwise the page loads a skeleton and fetches data from the API.
---
## Running with Docker
**Production-like** (built image, no volume mount):
```bash
docker compose up --build
```
App is available at http://localhost:8080. Healthcheck hits `GET /`.
**Development** (mounted code, mock data, no OpenStack/Prometheus):
```bash
docker compose -f docker-compose.yml -f docker-compose.dev.yml up --build
```
Uses `USE_MOCK_DATA=true` and mounts the project directory for live code changes. Build CSS before building the image so `static/css/output.css` is present, or run `npm run build` locally before `docker compose ... up --build`.
---
## Frontend build
CSS is built with Tailwind and DaisyUI ([package.json](package.json)).
- Install: `npm install`
- One-off build: `npm run build`
- Watch: `npm run dev`
Source: [static/css/main.css](static/css/main.css). Output: `static/css/output.css`. For Docker, run `npm run build` before building the image so the image includes `output.css`.
---
## API
| Endpoint | Description |
|----------|-------------|
| `GET /` | Dashboard page. With `USE_MOCK_DATA=true`, rendered with mock context; otherwise skeleton, with data loaded via the API. |
| `GET /api/stats/` | JSON: region, pCPU/vCPU, pRAM/vRAM, VM stats, top flavors. Cached for `DASHBOARD_CACHE_TTL` seconds (see settings). |
| `GET /api/audits/` | JSON: `{ "audits": [ ... ] }` — list of Watcher audits with migrations and chart data (host labels, cpu_current, cpu_projected). Same cache TTL. |
---
## Repository structure
| Path | Description |
|------|-------------|
| `watcher_visio/` | Django project: settings, root URL config, WSGI/ASGI. |
| `dashboard/` | Main app: views (index, api_stats, api_audits), `openstack_utils` (connect, flavor, audits), `prometheus_utils` (query), `mock_data`, `templatetags` (mathfilters), tests. |
| `templates/`, `static/` | HTML templates and static assets (Tailwind output, Chart.js, etc.). |
| `clouds.yaml` | OpenStack config (do not commit production secrets). |
| [Dockerfile](Dockerfile), [docker-entrypoint.sh](docker-entrypoint.sh) | Image build and entrypoint (migrate then run server). |
| [docker-compose.yml](docker-compose.yml), [docker-compose.dev.yml](docker-compose.dev.yml) | Compose: base (prod-like) and dev override (mount + mock). |
---
## Architecture
```mermaid
flowchart LR
subgraph sources [Data sources]
OS[OpenStack SDK]
Prom[Prometheus]
Watcher[Watcher API]
end
subgraph app [Django]
Views[views]
Cache[(Cache)]
end
subgraph out [Output]
HTML[HTML]
API[JSON API]
end
subgraph frontend [Frontend]
Chart[Chart.js]
end
OS --> Views
Prom --> Views
Watcher --> Views
Views --> Cache
Cache --> Views
Views --> HTML
Views --> API
HTML --> Chart
API --> Chart
```
OpenStack (region, servers, flavors), Prometheus (metrics), and the Watcher API (audits, action plans, actions) are queried in Django views; results are cached. The dashboard page is either rendered with mock/skeleton data or loads stats and audits via `/api/stats/` and `/api/audits/`; Chart.js draws the CPU and other charts.
---
## Running tests
From the project root (with Django and dependencies installed, e.g. in a virtualenv):
```bash
python manage.py test dashboard
```
Run a specific test module:
```bash
python manage.py test dashboard.tests.test_mathfilters
```
### Running tests in Docker
Use the **dev** compose file so the project directory is mounted; the container will then run tests against your current code (no image rebuild needed):
```bash
docker compose -f docker-compose.yml -f docker-compose.dev.yml run --rm watcher-visio python3 manage.py test dashboard
```
If you run tests with only the base compose (`docker compose run --rm watcher-visio ...`), the container uses the code baked into the image at build time. After code or test changes, either rebuild the image or use the dev override above so tests see the latest files.

View File

122
dashboard/mock_data.py Normal file
View File

@@ -0,0 +1,122 @@
"""Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus)."""
import json
def get_mock_context():
"""Return a context dict with the same structure as collect_context(), render-ready."""
hosts_total = 6
pcpu_total = 48
pcpu_usage = 12.5
vcpu_allocated = 96
vcpu_overcommit_max = 2.0
pram_total = 256 * 1024**3 # 256 GB in bytes
pram_usage = 120 * 1024**3
vram_allocated = 192 * 1024**3
vram_overcommit_max = 1.5
vm_count = 24
vm_active = 22
vcpu_total = pcpu_total * vcpu_overcommit_max
vram_total = pram_total * vram_overcommit_max
# Two sample audits with serialized fields for JS
host_labels = ["compute-0", "compute-1", "compute-2", "compute-3", "compute-4", "compute-5"]
cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 48.5]
cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0]
audits = [
{
"id": "mock-audit-uuid-1",
"name": "Mock audit (balanced)",
"created_at": "2025-02-01T10:00:00",
"strategy": "Balanced",
"goal": "BALANCED",
"type": "ONESHOT",
"scope": "Full Cluster",
"cpu_weight": "1.0",
"ram_weight": "1.0",
"migrations": json.dumps(
[
{
"instanceName": "instance-1",
"source": "compute-0",
"destination": "compute-3",
"flavor": "m1.small",
"impact": "Low",
}
]
),
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
"cpu_projected": json.dumps(cpu_projected),
},
{
"id": "mock-audit-uuid-2",
"name": "Mock audit (workload consolidation)",
"created_at": "2025-02-02T14:30:00",
"strategy": "Workload consolidation",
"goal": "WORKLOAD_CONSOLIDATION",
"type": "ONESHOT",
"scope": "Full Cluster",
"cpu_weight": "1.0",
"ram_weight": "1.0",
"migrations": json.dumps([]),
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
"cpu_projected": json.dumps([40.0, 42.0, 50.0, 43.0, 36.0, 45.0]),
},
]
return {
"region": {
"name": "mock-region",
"hosts_total": hosts_total,
},
"pcpu": {
"total": pcpu_total,
"usage": pcpu_usage,
"free": pcpu_total - pcpu_usage,
"used_percentage": pcpu_usage / pcpu_total * 100,
},
"vcpu": {
"total": int(vcpu_total),
"allocated": vcpu_allocated,
"free": int(vcpu_total) - vcpu_allocated,
"allocated_percentage": vcpu_allocated / vcpu_total * 100,
"overcommit_ratio": vcpu_allocated / pcpu_total,
"overcommit_max": vcpu_overcommit_max,
},
"pram": {
"total": pram_total,
"usage": pram_usage,
"free": pram_total - pram_usage,
"used_percentage": pram_usage / pram_total * 100,
},
"vram": {
"total": vram_total,
"allocated": vram_allocated,
"free": vram_total - vram_allocated,
"allocated_percentage": vram_allocated / vram_total * 100,
"overcommit_ratio": vram_allocated / pram_total,
"overcommit_max": vram_overcommit_max,
},
"vm": {
"count": vm_count,
"active": vm_active,
"stopped": vm_count - vm_active,
"avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
"avg_ram": vram_allocated / vm_count if vm_count else 0,
"density": vm_count / hosts_total,
},
"flavors": {
"first_common_flavor": {"name": "m1.small", "count": 12},
"second_common_flavor": {"name": "m1.medium", "count": 8},
"third_common_flavor": {"name": "m1.large", "count": 4},
},
"audits": audits,
"current_cluster": {
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
},
}

View File

@@ -1,124 +1,146 @@
import pandas
from copy import copy
import pandas
from openstack.connection import Connection
from watcher_visio.settings import WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME, PROMETHEUS_METRICS
from watcher_visio.settings import PROMETHEUS_METRICS, WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME
from dashboard.prometheus_utils.query import query_prometheus
def convert_cpu_data(data: list):
metrics = []
if not data:
return pandas.DataFrame(columns=["host", "cpu_usage"])
for entry in data:
for t, val in entry["values"]:
metrics.append({
metrics.append(
{
"timestamp": int(t),
"host": entry["metric"]["host"],
"cpu_usage": float(val),
"instance": entry["metric"]["instanceName"]
})
"instance": entry["metric"]["instanceName"],
}
)
df_cpu = pandas.DataFrame(metrics)
df_cpu["timestamp"] = pandas.to_datetime(df_cpu["timestamp"], unit="s")
# Aggregate CPU usage per host
return (
df_cpu.groupby(["host", "timestamp"])["cpu_usage"].sum()
.groupby("host").mean()
df_cpu.groupby(["host", "timestamp"])["cpu_usage"]
.sum()
.groupby("host")
.mean()
.reset_index()
)
def get_current_cluster_cpu(connection: Connection) -> dict:
"""Return current per-host CPU state for the cluster (no Watcher dependency)."""
cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
cpu_metrics = convert_cpu_data(data=cpu_data)
if cpu_metrics.empty:
return {"host_labels": [], "cpu_current": []}
return {
"host_labels": cpu_metrics["host"].to_list(),
"cpu_current": cpu_metrics["cpu_usage"].to_list(),
}
def get_audits(connection: Connection) -> list[dict] | None:
session = connection.session
watcher_endpoint = connection.endpoint_for(
service_type=WATCHER_ENDPOINT_NAME,
interface=WATCHER_INTERFACE_NAME
service_type=WATCHER_ENDPOINT_NAME, interface=WATCHER_INTERFACE_NAME
)
# Collect instances prometheus metrics
cpu_data = query_prometheus(PROMETHEUS_METRICS['cpu_usage'])
cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
cpu_metrics = convert_cpu_data(data=cpu_data)
# Fetch audit list
audits_resp = session.get(
f"{watcher_endpoint}/v1/audits"
)
audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
audits_resp.raise_for_status()
audits_resp.json().get('audits')
audits_resp = audits_resp.json().get("audits") or []
# Fetch action plan list
actionplans_resp = session.get(
f"{watcher_endpoint}/v1/action_plans"
)
actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
actionplans_resp.raise_for_status()
actionplans_resp.json().get('action_plans')
actionplans_resp = actionplans_resp.json().get("action_plans") or []
# Filtering audits by PENDING state
pending_audits = [audit for audit in audits_resp if audit['state'] == "PENDING"]
pending_audits = [plan for plan in actionplans_resp if plan["state"] == "RECOMMENDED"]
result = []
for item in pending_audits:
projected_cpu_data = copy(cpu_data)
audit_resp = session.get(
f"{watcher_endpoint}/v1/audits/{item['uuid']}"
)
audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}")
audit_resp.raise_for_status()
audit_resp = audit_resp.json()
actionplan = next(filter(lambda x: x.get("audit_uuid") == audit_resp['uuid'], actionplans_resp), None)
actionplan = next(
filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], actionplans_resp), None
)
if actionplan is None:
continue
actions_resp = session.get(
f"{watcher_endpoint}/v1/actions/?action_plan_uuid={actionplan['uuid']}"
)
actions_resp.raise_for_status()
actions_resp = actions_resp.json().get('actions')
actions_resp = actions_resp.json().get("actions") or []
migrations = []
mapping = {}
for action in actions_resp:
action_resp = session.get(
f"{watcher_endpoint}/v1/actions/{action['uuid']}"
)
action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
action_resp.raise_for_status()
action_resp = action_resp.json()
server = connection.get_server_by_id(action['input_parameters']['resource_id'])
params = action_resp['input_parameters']
mapping[params['resource_name']] = params['destination_node']
server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"])
params = action_resp["input_parameters"]
mapping[params["resource_name"]] = params["destination_node"]
migrations.append({
"instanceName": action['input_parameters']['resource_name'],
"source": action['input_parameters']['source_node'],
"destination": action['input_parameters']['destination_node'],
migrations.append(
{
"instanceName": action_resp["input_parameters"]["resource_name"],
"source": action_resp["input_parameters"]["source_node"],
"destination": action_resp["input_parameters"]["destination_node"],
"flavor": server.flavor.name,
"impact": 'Low'
})
"impact": "Low",
}
)
for entry in projected_cpu_data:
if (instance := entry['metric']['instanceName']) in mapping:
entry['metric']['host'] = mapping[instance]
if (instance := entry["metric"]["instanceName"]) in mapping:
entry["metric"]["host"] = mapping[instance]
projected_cpu_metrics = convert_cpu_data(projected_cpu_data)
result.append({
"id": audit_resp['uuid'],
"name": audit_resp['name'],
"created_at": audit_resp['created_at'],
"strategy": audit_resp['strategy_name'],
"goal": audit_resp['goal_name'],
"type": audit_resp['audit_type'],
"scope": audit_resp['scope'],
"cpu_weight": audit_resp['parameters'].get('weights', {}).get('instance_cpu_usage_weight', "none"),
"ram_weight": audit_resp['parameters'].get('weights', {}).get('instance_ram_usage_weight', "none"),
result.append(
{
"id": audit_resp["uuid"],
"name": audit_resp["name"],
"created_at": audit_resp["created_at"],
"strategy": audit_resp["strategy_name"],
"goal": audit_resp["goal_name"],
"type": audit_resp["audit_type"],
"scope": audit_resp["scope"],
"cpu_weight": audit_resp["parameters"]
.get("weights", {})
.get("instance_cpu_usage_weight", "none"),
"ram_weight": audit_resp["parameters"]
.get("weights", {})
.get("instance_ram_usage_weight", "none"),
"migrations": migrations,
"host_labels": cpu_metrics['host'].to_list(),
"cpu_current": cpu_metrics['cpu_usage'].to_list(),
"cpu_projected": projected_cpu_metrics['cpu_usage'].to_list(),
})
"host_labels": cpu_metrics["host"].to_list(),
"cpu_current": cpu_metrics["cpu_usage"].to_list(),
"cpu_projected": projected_cpu_metrics["cpu_usage"].to_list(),
}
)
return result

View File

@@ -1,8 +1,22 @@
import openstack
from openstack.connection import Connection
from watcher_visio.settings import OPENSTACK_CLOUD, OPENSTACK_REGION_NAME
def check_openstack() -> dict:
    """
    Lightweight check that OpenStack is reachable (connection only).

    Returns {"status": "ok"} or {"status": "error", "message": "..."}.
    """
    try:
        conn = openstack.connect(cloud=OPENSTACK_CLOUD, region_name=OPENSTACK_REGION_NAME)
        if conn is None:
            return {"status": "error", "message": "No connection"}
        # Fix: release the SDK session instead of leaking one per health check.
        conn.close()
        return {"status": "ok"}
    except Exception as e:
        # Broad on purpose: any SDK/auth failure degrades to a status payload.
        return {"status": "error", "message": str(e) or "Connection failed"}
def get_connection() -> Connection:
connection = openstack.connect(cloud=OPENSTACK_CLOUD, region_name=OPENSTACK_REGION_NAME)
return connection

View File

@@ -1,20 +1,21 @@
from collections import Counter
from openstack.connection import Connection
from collections import Counter
def get_flavor_list(connection: Connection) -> dict:
servers = list(connection.compute.servers(all_projects=True))
flavor_ids = [s.flavor['id'] for s in servers if 'id' in s.flavor]
flavor_ids = [s.flavor["id"] for s in servers if "id" in s.flavor]
flavor_count = Counter(flavor_ids).most_common()
flavors = list(flavor_count)
result = {}
placeholder = {"name": "", "count": 0}
for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]:
if len(flavors) > idx:
result[f"{prefix}_common_flavor"] = {
"name": flavors[idx][0],
"count": flavors[idx][1]
}
result[f"{prefix}_common_flavor"] = {"name": flavors[idx][0], "count": flavors[idx][1]}
else:
result[f"{prefix}_common_flavor"] = placeholder
return result

View File

@@ -1,16 +1,38 @@
import requests
from watcher_visio.settings import PROMETHEUS_URL
# Timeout for lightweight health check (seconds)
CHECK_TIMEOUT = 5


def check_prometheus() -> dict:
    """
    Lightweight check that Prometheus is reachable.

    Returns {"status": "ok"} or {"status": "error", "message": "..."}.
    """
    endpoint = f"{PROMETHEUS_URL.rstrip('/')}/api/v1/query"
    try:
        # A constant query ("1") is the cheapest way to exercise the API.
        resp = requests.get(endpoint, params={"query": "1"}, timeout=CHECK_TIMEOUT)
        resp.raise_for_status()
        payload = resp.json()
        if "data" in payload and "result" in payload["data"]:
            return {"status": "ok"}
        return {"status": "error", "message": "Invalid response"}
    except requests.RequestException as e:
        return {"status": "error", "message": str(e) or "Connection failed"}
    except (ValueError, KeyError) as e:
        # resp.json() raises ValueError on a non-JSON body.
        return {"status": "error", "message": str(e) or "Invalid response"}
def query_prometheus(query: str) -> str | list[str]:
url = f"{PROMETHEUS_URL}/api/v1/query"
params = {
"query": query,
}
response = requests.get(url=url, params=params)
response = requests.get(url=url, params=params, timeout=CHECK_TIMEOUT)
response.raise_for_status()
result = response.json()["data"]["result"]
if len(result) > 1:
return result[0]["value"][1]
return result
else:
return result[0]["values"]
return result[0]["value"][1]

View File

@@ -2,29 +2,33 @@ from django import template
register = template.Library()
@register.filter
def div(a, b):
try:
return float(a) / float(b)
except:
except (TypeError, ValueError, ZeroDivisionError):
return 0
@register.filter
def mul(a, b):
try:
return float(a) * float(b)
except:
except (TypeError, ValueError):
return 0
@register.filter
def sub(a, b):
try:
return float(a) - float(b)
except:
except (TypeError, ValueError):
return 0
@register.filter
def convert_bytes(bytes_value, target_unit='GB'):
def convert_bytes(bytes_value, target_unit="GB"):
"""
Convert bytes to specific unit
@@ -41,16 +45,16 @@ def convert_bytes(bytes_value, target_unit='GB'):
except (ValueError, TypeError):
return 0.0
conversion_factors = {
'B': 1,
'KB': 1024,
'MB': 1024 * 1024,
'GB': 1024 * 1024 * 1024,
'TB': 1024 * 1024 * 1024 * 1024,
"B": 1,
"KB": 1024,
"MB": 1024 * 1024,
"GB": 1024 * 1024 * 1024,
"TB": 1024 * 1024 * 1024 * 1024,
}
target_unit = target_unit.upper()
if target_unit not in conversion_factors:
target_unit = 'MB'
target_unit = "MB"
result = bytes_value / conversion_factors[target_unit]
return round(result, 1)

View File

@@ -0,0 +1 @@
# Dashboard test package

View File

@@ -0,0 +1,77 @@
"""Tests for dashboard.openstack_utils.audits."""
from unittest.mock import MagicMock, patch
from django.test import TestCase
from dashboard.openstack_utils.audits import convert_cpu_data, get_current_cluster_cpu
class ConvertCpuDataTest(TestCase):
"""Tests for convert_cpu_data."""
def test_aggregates_cpu_usage_per_host(self):
data = [
{
"metric": {"host": "compute-0", "instanceName": "inst1"},
"values": [[1000, "10.0"], [1001, "20.0"]],
},
{
"metric": {"host": "compute-0", "instanceName": "inst2"},
"values": [[1000, "5.0"]],
},
{
"metric": {"host": "compute-1", "instanceName": "inst3"},
"values": [[1000, "30.0"]],
},
]
result = convert_cpu_data(data)
self.assertIn("host", result.columns)
self.assertIn("cpu_usage", result.columns)
hosts = result["host"].tolist()
self.assertEqual(len(hosts), 2)
self.assertIn("compute-0", hosts)
self.assertIn("compute-1", hosts)
# compute-0: (10+20)/2 for ts 1000 and 5 for ts 1000 -> groupby host,timestamp sum
# -> then groupby host mean
# For compute-0: two timestamps 1000 (10+5=15) and 1001 (20).
# Mean over timestamps = (15+20)/2 = 17.5
# For compute-1: one value 30
by_host = result.set_index("host")["cpu_usage"]
self.assertAlmostEqual(by_host["compute-0"], 17.5)
self.assertAlmostEqual(by_host["compute-1"], 30.0)
def test_empty_data_returns_empty_dataframe_with_columns(self):
result = convert_cpu_data([])
self.assertIn("host", result.columns)
self.assertIn("cpu_usage", result.columns)
self.assertEqual(len(result), 0)
class GetCurrentClusterCpuTest(TestCase):
"""Tests for get_current_cluster_cpu."""
@patch("dashboard.openstack_utils.audits.query_prometheus")
def test_returns_empty_lists_when_no_data(self, mock_query):
mock_query.return_value = []
conn = MagicMock()
result = get_current_cluster_cpu(conn)
self.assertEqual(result["host_labels"], [])
self.assertEqual(result["cpu_current"], [])
@patch("dashboard.openstack_utils.audits.convert_cpu_data")
@patch("dashboard.openstack_utils.audits.query_prometheus")
def test_returns_host_labels_and_cpu_current(self, mock_query, mock_convert):
import pandas as pd
mock_query.return_value = [{"metric": {"host": "h0"}, "values": [[0, "1.0"]]}]
mock_convert.return_value = pd.DataFrame(
{
"host": ["compute-0", "compute-1"],
"cpu_usage": [25.0, 35.0],
}
)
conn = MagicMock()
result = get_current_cluster_cpu(conn)
self.assertEqual(result["host_labels"], ["compute-0", "compute-1"])
self.assertEqual(result["cpu_current"], [25.0, 35.0])

View File

@@ -0,0 +1,65 @@
"""Tests for dashboard.openstack_utils.flavor."""
from unittest.mock import MagicMock
from django.test import TestCase
from dashboard.openstack_utils.flavor import get_flavor_list
def make_mock_server(flavor_id):
"""Create a mock server object with flavor['id']."""
s = MagicMock()
s.flavor = {"id": flavor_id}
return s
class GetFlavorListTest(TestCase):
"""Tests for get_flavor_list."""
def test_returns_first_second_third_common_flavor_keys(self):
mock_conn = MagicMock()
mock_conn.compute.servers.return_value = [
make_mock_server("m1.small"),
make_mock_server("m1.small"),
make_mock_server("m1.medium"),
]
result = get_flavor_list(connection=mock_conn)
self.assertIn("first_common_flavor", result)
self.assertIn("second_common_flavor", result)
self.assertIn("third_common_flavor", result)
def test_most_common_flavor_first(self):
mock_conn = MagicMock()
mock_conn.compute.servers.return_value = [
make_mock_server("m1.large"),
make_mock_server("m1.small"),
make_mock_server("m1.small"),
make_mock_server("m1.small"),
]
result = get_flavor_list(connection=mock_conn)
self.assertEqual(result["first_common_flavor"]["name"], "m1.small")
self.assertEqual(result["first_common_flavor"]["count"], 3)
self.assertEqual(result["second_common_flavor"]["name"], "m1.large")
self.assertEqual(result["second_common_flavor"]["count"], 1)
self.assertEqual(result["third_common_flavor"]["name"], "")
self.assertEqual(result["third_common_flavor"]["count"], 0)
def test_empty_servers_uses_placeholder_for_all(self):
mock_conn = MagicMock()
mock_conn.compute.servers.return_value = []
result = get_flavor_list(connection=mock_conn)
placeholder = {"name": "", "count": 0}
self.assertEqual(result["first_common_flavor"], placeholder)
self.assertEqual(result["second_common_flavor"], placeholder)
self.assertEqual(result["third_common_flavor"], placeholder)
def test_skips_servers_without_flavor_id(self):
mock_conn = MagicMock()
s_with_id = make_mock_server("m1.small")
s_without = MagicMock()
s_without.flavor = {} # no 'id'
mock_conn.compute.servers.return_value = [s_with_id, s_without]
result = get_flavor_list(connection=mock_conn)
self.assertEqual(result["first_common_flavor"]["name"], "m1.small")
self.assertEqual(result["first_common_flavor"]["count"], 1)

View File

@@ -0,0 +1,104 @@
"""Tests for dashboard.templatetags.mathfilters."""
from django.template import Context, Template
from django.test import TestCase
from dashboard.templatetags.mathfilters import convert_bytes, div, mul, sub
class DivFilterTest(TestCase):
"""Tests for the div template filter."""
def test_div_normal(self):
self.assertEqual(div(10, 2), 5.0)
self.assertEqual(div(10.0, 4), 2.5)
def test_div_by_zero(self):
self.assertEqual(div(10, 0), 0)
def test_div_non_numeric(self):
self.assertEqual(div("x", 2), 0)
self.assertEqual(div(10, "y"), 0)
self.assertEqual(div(None, 2), 0)
class MulFilterTest(TestCase):
"""Tests for the mul template filter."""
def test_mul_normal(self):
self.assertEqual(mul(3, 4), 12.0)
self.assertEqual(mul(2.5, 4), 10.0)
def test_mul_non_numeric(self):
self.assertEqual(mul("a", 2), 0)
self.assertEqual(mul(2, None), 0)
class SubFilterTest(TestCase):
"""Tests for the sub template filter."""
def test_sub_normal(self):
self.assertEqual(sub(10, 3), 7.0)
self.assertEqual(sub(5.5, 2), 3.5)
def test_sub_non_numeric(self):
self.assertEqual(sub("x", 1), 0)
self.assertEqual(sub(5, "y"), 0)
class ConvertBytesFilterTest(TestCase):
"""Tests for the convert_bytes template filter."""
def test_convert_to_b(self):
self.assertEqual(convert_bytes(1024, "B"), 1024.0)
def test_convert_to_kb(self):
self.assertEqual(convert_bytes(2048, "KB"), 2.0)
def test_convert_to_mb(self):
self.assertEqual(convert_bytes(1024 * 1024 * 3, "MB"), 3.0)
def test_convert_to_gb(self):
self.assertEqual(convert_bytes(1024**3 * 5, "GB"), 5.0)
def test_convert_to_tb(self):
self.assertEqual(convert_bytes(1024**4, "TB"), 1.0)
def test_convert_default_gb(self):
self.assertEqual(convert_bytes(1024**3 * 2), 2.0)
def test_convert_invalid_unit_fallback_to_mb(self):
self.assertEqual(convert_bytes(1024 * 1024, "invalid"), 1.0)
self.assertEqual(convert_bytes(1024 * 1024, "xyz"), 1.0)
def test_convert_non_numeric_returns_zero(self):
self.assertEqual(convert_bytes("abc"), 0.0)
self.assertEqual(convert_bytes(None), 0.0)
def test_convert_rounds_to_one_decimal(self):
self.assertEqual(convert_bytes(1500 * 1024 * 1024, "GB"), 1.5)
self.assertEqual(convert_bytes(1536 * 1024 * 1024, "GB"), 1.5)
def test_convert_case_insensitive_unit(self):
self.assertEqual(convert_bytes(1024**3, "gb"), 1.0)
self.assertEqual(convert_bytes(1024**3, "GB"), 1.0)
class MathfiltersTemplateIntegrationTest(TestCase):
    """Test filters via template rendering."""

    @staticmethod
    def _render(snippet, ctx):
        # Render the snippet with the mathfilters library loaded.
        return Template("{% load mathfilters %}" + snippet).render(Context(ctx))

    def test_div_in_template(self):
        """div renders the float quotient."""
        self.assertEqual(self._render("{{ a|div:b }}", {"a": 10, "b": 2}), "5.0")

    def test_mul_in_template(self):
        """mul renders the float product."""
        self.assertEqual(self._render("{{ a|mul:b }}", {"a": 3, "b": 4}), "12.0")

    def test_sub_in_template(self):
        """sub renders the float difference."""
        self.assertEqual(self._render("{{ a|sub:b }}", {"a": 10, "b": 3}), "7.0")

    def test_convert_bytes_in_template(self):
        """convert_bytes renders the unit-converted value."""
        self.assertEqual(
            self._render("{{ bytes|convert_bytes:'GB' }}", {"bytes": 2 * 1024**3}),
            "2.0",
        )

View File

@@ -0,0 +1,118 @@
"""Tests for dashboard.mock_data."""
import json
from django.test import TestCase
from dashboard.mock_data import get_mock_context
class GetMockContextTest(TestCase):
    """Tests for get_mock_context()."""

    def test_returns_all_top_level_keys(self):
        """The mock context exposes exactly the expected top-level keys."""
        ctx = get_mock_context()
        expected_keys = {
            "region",
            "pcpu",
            "vcpu",
            "pram",
            "vram",
            "vm",
            "flavors",
            "audits",
            "current_cluster",
        }
        self.assertEqual(set(ctx.keys()), expected_keys)

    def test_region_structure(self):
        """The region block carries the fixed mock name and host count."""
        ctx = get_mock_context()
        region = ctx["region"]
        self.assertIn("name", region)
        self.assertIn("hosts_total", region)
        self.assertEqual(region["name"], "mock-region")
        self.assertEqual(region["hosts_total"], 6)

    def test_pcpu_structure_and_types(self):
        """pCPU totals/usage match the fixed mock numbers; percentage is numeric."""
        ctx = get_mock_context()
        pcpu = ctx["pcpu"]
        self.assertEqual(pcpu["total"], 48)
        self.assertEqual(pcpu["usage"], 12.5)
        self.assertEqual(pcpu["free"], 48 - 12.5)
        self.assertIsInstance(pcpu["used_percentage"], (int, float))

    def test_vcpu_structure(self):
        """The vCPU block has allocation/overcommit keys; max ratio is 2.0."""
        ctx = get_mock_context()
        vcpu = ctx["vcpu"]
        self.assertIn("total", vcpu)
        self.assertIn("allocated", vcpu)
        self.assertIn("free", vcpu)
        self.assertIn("allocated_percentage", vcpu)
        self.assertIn("overcommit_ratio", vcpu)
        self.assertIn("overcommit_max", vcpu)
        self.assertEqual(vcpu["overcommit_max"], 2.0)

    def test_pram_vram_structure(self):
        """Physical and virtual RAM blocks expose their expected keys."""
        ctx = get_mock_context()
        pram = ctx["pram"]
        vram = ctx["vram"]
        self.assertIn("total", pram)
        self.assertIn("usage", pram)
        self.assertIn("free", pram)
        self.assertIn("used_percentage", pram)
        self.assertIn("total", vram)
        self.assertIn("allocated", vram)
        self.assertIn("overcommit_max", vram)

    def test_vm_structure(self):
        """The VM block carries fixed counts plus average and density keys."""
        ctx = get_mock_context()
        vm = ctx["vm"]
        self.assertEqual(vm["count"], 24)
        self.assertEqual(vm["active"], 22)
        self.assertEqual(vm["stopped"], 2)
        self.assertIn("avg_cpu", vm)
        self.assertIn("avg_ram", vm)
        self.assertIn("density", vm)

    def test_flavors_structure(self):
        """All three flavor slots carry name/count; the top one is m1.small x 12."""
        ctx = get_mock_context()
        flavors = ctx["flavors"]
        for key in ("first_common_flavor", "second_common_flavor", "third_common_flavor"):
            self.assertIn(key, flavors)
            self.assertIn("name", flavors[key])
            self.assertIn("count", flavors[key])
        self.assertEqual(flavors["first_common_flavor"]["name"], "m1.small")
        self.assertEqual(flavors["first_common_flavor"]["count"], 12)

    def test_audits_serialized_fields(self):
        """Audit chart fields are valid JSON strings, render-ready for the JS frontend."""
        ctx = get_mock_context()
        self.assertIsInstance(ctx["audits"], list)
        self.assertGreaterEqual(len(ctx["audits"]), 1)
        for audit in ctx["audits"]:
            self.assertIn("migrations", audit)
            self.assertIn("host_labels", audit)
            self.assertIn("cpu_current", audit)
            self.assertIn("cpu_projected", audit)
            # These must be JSON strings (render-ready for JS)
            self.assertIsInstance(audit["migrations"], str)
            self.assertIsInstance(audit["host_labels"], str)
            self.assertIsInstance(audit["cpu_current"], str)
            self.assertIsInstance(audit["cpu_projected"], str)
            # Must be valid JSON
            json.loads(audit["migrations"])
            json.loads(audit["host_labels"])
            json.loads(audit["cpu_current"])
            json.loads(audit["cpu_projected"])

    def test_audits_metadata_fields(self):
        """The first mock audit carries all descriptive metadata keys."""
        ctx = get_mock_context()
        audit = ctx["audits"][0]
        self.assertIn("id", audit)
        self.assertIn("name", audit)
        self.assertIn("created_at", audit)
        self.assertIn("strategy", audit)
        self.assertIn("goal", audit)
        self.assertIn("type", audit)
        self.assertIn("scope", audit)
        self.assertIn("cpu_weight", audit)
        self.assertIn("ram_weight", audit)

View File

@@ -0,0 +1,57 @@
"""Tests for dashboard.prometheus_utils.query."""
from unittest.mock import MagicMock, patch
from django.test import TestCase
from dashboard.prometheus_utils.query import query_prometheus
class QueryPrometheusTest(TestCase):
    """Tests for query_prometheus."""

    @patch("dashboard.prometheus_utils.query.requests.get")
    def test_single_result_returns_value_string(self, mock_get):
        """A single-sample result collapses to its raw string value."""
        mock_response = MagicMock()
        # Prometheus instant-query shape: value is [timestamp, value-string].
        mock_response.json.return_value = {"data": {"result": [{"value": ["1234567890", "42"]}]}}
        mock_response.raise_for_status = MagicMock()
        mock_get.return_value = mock_response
        result = query_prometheus("some_query")
        self.assertEqual(result, "42")
        mock_response.raise_for_status.assert_called_once()
        mock_get.assert_called_once()
        # The PromQL expression must be sent as the `query` request parameter.
        call_kw = mock_get.call_args
        self.assertIn("params", call_kw.kwargs)
        self.assertEqual(call_kw.kwargs["params"]["query"], "some_query")

    @patch("dashboard.prometheus_utils.query.requests.get")
    def test_multiple_results_returns_full_result_list(self, mock_get):
        """A vector result (multiple samples) is returned unmodified as a list."""
        mock_response = MagicMock()
        result_list = [
            {"metric": {"host": "h1"}, "value": ["1", "10"]},
            {"metric": {"host": "h2"}, "value": ["1", "20"]},
        ]
        mock_response.json.return_value = {"data": {"result": result_list}}
        mock_response.raise_for_status = MagicMock()
        mock_get.return_value = mock_response
        result = query_prometheus("vector_query")
        self.assertEqual(result, result_list)
        self.assertEqual(len(result), 2)

    @patch("dashboard.prometheus_utils.query.requests.get")
    def test_uses_prometheus_url_from_settings(self, mock_get):
        """The HTTP request targets the Prometheus instant-query API endpoint."""
        mock_response = MagicMock()
        mock_response.json.return_value = {"data": {"result": [{"value": ["0", "1"]}]}}
        mock_response.raise_for_status = MagicMock()
        mock_get.return_value = mock_response
        query_prometheus("test")
        mock_get.assert_called_once()
        # URL may be passed positionally or as a keyword; accept either.
        args, kwargs = mock_get.call_args
        url = args[0] if args else kwargs.get("url", "")
        self.assertIn("/api/v1/query", url)

View File

@@ -0,0 +1,420 @@
"""Tests for dashboard.views."""
import json
from unittest.mock import MagicMock, patch
from django.core.cache import cache
from django.test import RequestFactory, TestCase
from dashboard.views import (
api_audits,
api_source_status,
api_stats,
collect_context,
index,
)
def _minimal_render_context(region_name="test", first_flavor_name="f1", vm_count=1):
"""Context with all keys the index.html template expects."""
return {
"region": {"name": region_name, "hosts_total": 1},
"pcpu": {"total": 1, "usage": 0, "free": 1, "used_percentage": 0},
"vcpu": {
"total": 2,
"allocated": 1,
"free": 1,
"allocated_percentage": 50,
"overcommit_ratio": 1,
"overcommit_max": 2,
},
"pram": {"total": 1024**3, "usage": 0, "free": 1024**3, "used_percentage": 0},
"vram": {
"total": 1024**3,
"allocated": 0,
"free": 1024**3,
"allocated_percentage": 0,
"overcommit_ratio": 0,
"overcommit_max": 1,
},
"vm": {
"count": vm_count,
"active": vm_count,
"stopped": 0,
"avg_cpu": 1,
"avg_ram": 0,
"density": float(vm_count),
},
"flavors": {
"first_common_flavor": {"name": first_flavor_name, "count": vm_count},
"second_common_flavor": {"name": "", "count": 0},
"third_common_flavor": {"name": "", "count": 0},
},
"audits": [],
}
class IndexViewTest(TestCase):
    """Tests for the index view."""

    def setUp(self):
        # Build plain WSGI requests without routing through middleware.
        self.factory = RequestFactory()

    @patch("dashboard.views.settings")
    def test_index_use_mock_data_returns_200_and_mock_context(self, mock_settings):
        """With USE_MOCK_DATA the page renders mock region/flavor content."""
        mock_settings.USE_MOCK_DATA = True
        mock_settings.DASHBOARD_CACHE_TTL = 120
        request = self.factory.get("/")
        response = index(request)
        self.assertEqual(response.status_code, 200)
        # Mock context contains mock-region and flavors; render uses index.html
        content = response.content.decode()
        self.assertIn("mock-region", content)
        self.assertIn("m1.small", content)

    @patch("dashboard.views.collect_context")
    @patch("dashboard.views.settings")
    def test_index_without_mock_returns_skeleton_and_does_not_call_collect_context(
        self, mock_settings, mock_collect_context
    ):
        """Without mock data the view renders the skeleton and defers data loading."""
        mock_settings.USE_MOCK_DATA = False
        request = self.factory.get("/")
        response = index(request)
        self.assertEqual(response.status_code, 200)
        mock_collect_context.assert_not_called()
        content = response.content.decode()
        self.assertIn('data-dashboard="skeleton"', content)
        # NOTE(review): asserting the empty string is vacuous (always true);
        # the intended marker text may have been lost - confirm and restore it.
        self.assertIn("", content)
class CollectContextTest(TestCase):
    """Tests for collect_context with mocked dependencies."""

    def _make_mock_connection(self, region_name="test-region"):
        # Minimal stand-in for an OpenStack connection; only _compute_region is read.
        conn = MagicMock()
        conn._compute_region = region_name
        return conn

    # Decorators apply bottom-up, so the parameter order mirrors the reversed list.
    @patch("dashboard.views.get_current_cluster_cpu")
    @patch("dashboard.views._fetch_prometheus_metrics")
    @patch("dashboard.views.get_audits")
    @patch("dashboard.views.get_flavor_list")
    @patch("dashboard.views.get_connection")
    def test_collect_context_structure_and_calculation(
        self,
        mock_get_connection,
        mock_get_flavor_list,
        mock_get_audits,
        mock_fetch_metrics,
        mock_get_current_cluster_cpu,
    ):
        """collect_context merges OpenStack + Prometheus data and serializes audit fields."""
        mock_get_connection.return_value = self._make_mock_connection("my-region")
        mock_get_current_cluster_cpu.return_value = {
            "host_labels": ["h0", "h1"],
            "cpu_current": [30.0, 40.0],
        }
        mock_get_flavor_list.return_value = {
            "first_common_flavor": {"name": "m1.small", "count": 5},
            "second_common_flavor": {"name": "", "count": 0},
            "third_common_flavor": {"name": "", "count": 0},
        }
        mock_get_audits.return_value = [
            {
                "migrations": [],
                "host_labels": ["h0", "h1"],
                "cpu_current": [30.0, 40.0],
                "cpu_projected": [35.0, 35.0],
            }
        ]
        mock_fetch_metrics.return_value = {
            "hosts_total": 2,
            "pcpu_total": 8,
            "pcpu_usage": 2.5,
            "vcpu_allocated": 16,
            "vcpu_overcommit_max": 2.0,
            "pram_total": 32 * 1024**3,
            "pram_usage": 8 * 1024**3,
            "vram_allocated": 24 * 1024**3,
            "vram_overcommit_max": 1.5,
            "vm_count": 4,
            "vm_active": 4,
        }
        context = collect_context()
        self.assertEqual(context["region"]["name"], "my-region")
        self.assertEqual(context["region"]["hosts_total"], 2)
        self.assertEqual(context["pcpu"]["total"], 8)
        self.assertEqual(context["pcpu"]["usage"], 2.5)
        self.assertEqual(context["vcpu"]["total"], 8 * 2.0)  # pcpu_total * vcpu_overcommit_max
        self.assertEqual(context["vcpu"]["allocated"], 16)
        self.assertEqual(context["vram"]["total"], 32 * 1024**3 * 1.5)
        self.assertEqual(context["flavors"]["first_common_flavor"]["name"], "m1.small")
        self.assertEqual(len(context["audits"]), 1)
        # Serialized for JS
        import json
        self.assertIsInstance(context["audits"][0]["migrations"], str)
        self.assertEqual(json.loads(context["audits"][0]["host_labels"]), ["h0", "h1"])
        self.assertIn("current_cluster", context)
        self.assertEqual(json.loads(context["current_cluster"]["host_labels"]), ["h0", "h1"])
        self.assertEqual(json.loads(context["current_cluster"]["cpu_current"]), [30.0, 40.0])
class ApiStatsTest(TestCase):
    """Tests for api_stats view."""

    def setUp(self):
        # Build plain WSGI requests without routing through middleware.
        self.factory = RequestFactory()

    # Decorators apply bottom-up, so the parameter order mirrors the reversed list.
    @patch("dashboard.views._fetch_prometheus_metrics")
    @patch("dashboard.views.get_flavor_list")
    @patch("dashboard.views.get_connection")
    def test_api_stats_returns_json_with_expected_keys(
        self, mock_get_connection, mock_get_flavor_list, mock_fetch_metrics
    ):
        """A cold cache triggers collection and returns the full stats payload."""
        conn = MagicMock()
        conn._compute_region = "api-region"
        mock_get_connection.return_value = conn
        mock_get_flavor_list.return_value = {
            "first_common_flavor": {"name": "m1.small", "count": 3},
            "second_common_flavor": {"name": "", "count": 0},
            "third_common_flavor": {"name": "", "count": 0},
        }
        mock_fetch_metrics.return_value = {
            "hosts_total": 2,
            "pcpu_total": 4,
            "pcpu_usage": 1.0,
            "vcpu_allocated": 8,
            "vcpu_overcommit_max": 2.0,
            "pram_total": 16 * 1024**3,
            "pram_usage": 4 * 1024**3,
            "vram_allocated": 12 * 1024**3,
            "vram_overcommit_max": 1.5,
            "vm_count": 2,
            "vm_active": 2,
        }
        # Ensure a cold cache so the view actually collects.
        cache.clear()
        request = self.factory.get("/api/stats/")
        with patch("dashboard.views.settings") as mock_settings:
            mock_settings.DASHBOARD_CACHE_TTL = 120
            response = api_stats(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        data = json.loads(response.content)
        self.assertEqual(data["region"]["name"], "api-region")
        self.assertEqual(data["region"]["hosts_total"], 2)
        self.assertIn("pcpu", data)
        self.assertIn("pram", data)
        self.assertIn("vcpu", data)
        self.assertIn("vram", data)
        self.assertIn("vm", data)
        self.assertIn("flavors", data)
        self.assertEqual(data["flavors"]["first_common_flavor"]["name"], "m1.small")

    @patch("dashboard.views.collect_stats")
    @patch("dashboard.views.settings")
    def test_api_stats_uses_cache(self, mock_settings, mock_collect_stats):
        """A warm cache entry is served directly; collect_stats is never called."""
        mock_settings.DASHBOARD_CACHE_TTL = 120
        cached = {
            "region": {"name": "cached", "hosts_total": 1},
            "pcpu": {},
            "pram": {},
            "vcpu": {},
            "vram": {},
            "vm": {},
            "flavors": {},
        }
        cache.clear()
        cache.set("dashboard_stats", cached, timeout=120)
        request = self.factory.get("/api/stats/")
        response = api_stats(request)
        mock_collect_stats.assert_not_called()
        self.assertEqual(json.loads(response.content)["region"]["name"], "cached")
class ApiAuditsTest(TestCase):
    """Tests for api_audits view."""

    def setUp(self):
        # Build plain WSGI requests without routing through middleware.
        self.factory = RequestFactory()

    # Decorators apply bottom-up, so the parameter order mirrors the reversed list.
    @patch("dashboard.views.get_current_cluster_cpu")
    @patch("dashboard.views.get_audits")
    @patch("dashboard.views.get_connection")
    def test_api_audits_returns_json_audits_list(
        self, mock_get_connection, mock_get_audits, mock_get_current_cluster_cpu
    ):
        """A cold cache collects audits; list fields come back JSON-serialized."""
        mock_get_connection.return_value = MagicMock()
        mock_get_audits.return_value = [
            {
                "id": "audit-1",
                "name": "Test Audit",
                "created_at": "2025-02-01T10:00:00",
                "strategy": "Balanced",
                "goal": "BALANCED",
                "scope": "Full Cluster",
                "cpu_weight": "1.0",
                "ram_weight": "1.0",
                "migrations": [
                    {
                        "instanceName": "i1",
                        "source": "h0",
                        "destination": "h1",
                        "flavor": "m1.small",
                        "impact": "Low",
                    }
                ],
                "host_labels": ["h0", "h1"],
                "cpu_current": [30.0, 40.0],
                "cpu_projected": [35.0, 35.0],
            }
        ]
        mock_get_current_cluster_cpu.return_value = {
            "host_labels": ["h0", "h1"],
            "cpu_current": [30.0, 40.0],
        }
        # Ensure a cold cache so the view actually collects.
        cache.clear()
        request = self.factory.get("/api/audits/")
        with patch("dashboard.views.settings") as mock_settings:
            mock_settings.DASHBOARD_CACHE_TTL = 120
            response = api_audits(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        data = json.loads(response.content)
        self.assertIn("audits", data)
        self.assertEqual(len(data["audits"]), 1)
        self.assertEqual(data["audits"][0]["name"], "Test Audit")
        # Audit list fields are serialized to JSON strings for the frontend...
        self.assertIsInstance(data["audits"][0]["migrations"], str)
        self.assertIsInstance(data["audits"][0]["host_labels"], str)
        # ...while current_cluster fields stay as plain lists.
        self.assertIn("current_cluster", data)
        self.assertEqual(data["current_cluster"]["host_labels"], ["h0", "h1"])
        self.assertEqual(data["current_cluster"]["cpu_current"], [30.0, 40.0])

    @patch("dashboard.views.get_current_cluster_cpu")
    @patch("dashboard.views.collect_audits")
    @patch("dashboard.views.settings")
    def test_api_audits_uses_cache(
        self, mock_settings, mock_collect_audits, mock_get_current_cluster_cpu
    ):
        """Warm cache entries for audits and cluster are served without collection."""
        mock_settings.DASHBOARD_CACHE_TTL = 120
        cached_audits = [
            {
                "id": "cached-1",
                "name": "Cached Audit",
                "migrations": "[]",
                "host_labels": "[]",
                "cpu_current": "[]",
                "cpu_projected": "[]",
            }
        ]
        cached_cluster = {"host_labels": ["cached-h0"], "cpu_current": [10.0]}
        cache.clear()
        cache.set("dashboard_audits", cached_audits, timeout=120)
        cache.set("dashboard_current_cluster", cached_cluster, timeout=120)
        request = self.factory.get("/api/audits/")
        response = api_audits(request)
        mock_collect_audits.assert_not_called()
        mock_get_current_cluster_cpu.assert_not_called()
        data = json.loads(response.content)
        self.assertEqual(data["audits"][0]["name"], "Cached Audit")
        self.assertEqual(data["current_cluster"], cached_cluster)
class ApiSourceStatusTest(TestCase):
    """Tests for api_source_status view."""

    def setUp(self):
        # Build plain WSGI requests without routing through middleware.
        self.factory = RequestFactory()

    @patch("dashboard.views.settings")
    def test_api_source_status_mock_returns_mock_status(self, mock_settings):
        """With USE_MOCK_DATA both sources report status 'mock'."""
        mock_settings.USE_MOCK_DATA = True
        mock_settings.SOURCE_STATUS_CACHE_TTL = 30
        request = self.factory.get("/api/source-status/")
        response = api_source_status(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/json")
        data = json.loads(response.content)
        self.assertEqual(data["prometheus"]["status"], "mock")
        self.assertEqual(data["openstack"]["status"], "mock")

    # Decorators apply bottom-up, so the parameter order mirrors the reversed list.
    @patch("dashboard.views.check_openstack")
    @patch("dashboard.views.check_prometheus")
    @patch("dashboard.views.settings")
    def test_api_source_status_both_ok_returns_ok(
        self, mock_settings, mock_check_prometheus, mock_check_openstack
    ):
        """Both health checks passing yields 'ok' for both sources."""
        mock_settings.USE_MOCK_DATA = False
        mock_settings.SOURCE_STATUS_CACHE_TTL = 30
        mock_check_prometheus.return_value = {"status": "ok"}
        mock_check_openstack.return_value = {"status": "ok"}
        cache.clear()
        request = self.factory.get("/api/source-status/")
        response = api_source_status(request)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data["prometheus"]["status"], "ok")
        self.assertEqual(data["openstack"]["status"], "ok")
        mock_check_prometheus.assert_called_once()
        mock_check_openstack.assert_called_once()

    @patch("dashboard.views.check_openstack")
    @patch("dashboard.views.check_prometheus")
    @patch("dashboard.views.settings")
    def test_api_source_status_prometheus_error_returns_error_message(
        self, mock_settings, mock_check_prometheus, mock_check_openstack
    ):
        """A failing Prometheus check surfaces its error message verbatim."""
        mock_settings.USE_MOCK_DATA = False
        mock_settings.SOURCE_STATUS_CACHE_TTL = 30
        mock_check_prometheus.return_value = {"status": "error", "message": "Connection refused"}
        mock_check_openstack.return_value = {"status": "ok"}
        cache.clear()
        request = self.factory.get("/api/source-status/")
        response = api_source_status(request)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data["prometheus"]["status"], "error")
        self.assertEqual(data["prometheus"]["message"], "Connection refused")
        self.assertEqual(data["openstack"]["status"], "ok")

    @patch("dashboard.views.check_openstack")
    @patch("dashboard.views.check_prometheus")
    @patch("dashboard.views.settings")
    def test_api_source_status_openstack_error_returns_error_message(
        self, mock_settings, mock_check_prometheus, mock_check_openstack
    ):
        """A failing OpenStack check surfaces its error message verbatim."""
        mock_settings.USE_MOCK_DATA = False
        mock_settings.SOURCE_STATUS_CACHE_TTL = 30
        mock_check_prometheus.return_value = {"status": "ok"}
        mock_check_openstack.return_value = {"status": "error", "message": "Auth failed"}
        cache.clear()
        request = self.factory.get("/api/source-status/")
        response = api_source_status(request)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data["prometheus"]["status"], "ok")
        self.assertEqual(data["openstack"]["status"], "error")
        self.assertEqual(data["openstack"]["message"], "Auth failed")

    @patch("dashboard.views.check_openstack")
    @patch("dashboard.views.check_prometheus")
    @patch("dashboard.views.settings")
    def test_api_source_status_uses_cache(
        self, mock_settings, mock_check_prometheus, mock_check_openstack
    ):
        """A warm cache entry is served; neither health check runs."""
        mock_settings.USE_MOCK_DATA = False
        mock_settings.SOURCE_STATUS_CACHE_TTL = 30
        cache.clear()
        cached = {
            "prometheus": {"status": "ok"},
            "openstack": {"status": "ok"},
        }
        cache.set("dashboard_source_status", cached, timeout=30)
        request = self.factory.get("/api/source-status/")
        response = api_source_status(request)
        mock_check_prometheus.assert_not_called()
        mock_check_openstack.assert_not_called()
        data = json.loads(response.content)
        self.assertEqual(data["prometheus"]["status"], "ok")
        self.assertEqual(data["openstack"]["status"], "ok")

View File

@@ -1,6 +1,10 @@
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path("", views.index, name="index"),
path("api/stats/", views.api_stats),
path("api/audits/", views.api_audits),
path("api/source-status/", views.api_source_status),
]

View File

@@ -1,71 +1,82 @@
import json
from concurrent.futures import ThreadPoolExecutor, as_completed
from django.conf import settings
from django.core.cache import cache
from django.http import JsonResponse
from django.shortcuts import render
from dashboard.openstack_utils.connect import get_connection
from dashboard.mock_data import get_mock_context
from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu
from dashboard.openstack_utils.connect import check_openstack, get_connection
from dashboard.openstack_utils.flavor import get_flavor_list
from dashboard.prometheus_utils.query import query_prometheus
from dashboard.openstack_utils.audits import get_audits
from dashboard.prometheus_utils.query import check_prometheus, query_prometheus
# Prometheus queries run in parallel (query_key -> query string)
_PROMETHEUS_QUERIES = {
    "hosts_total": "count(node_exporter_build_info{job='node_exporter_compute'})",
    # Physical CPU count: idle-mode series exist once per CPU per host.
    "pcpu_total": (
        "sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) "
        "without (cpu,mode))"
    ),
    "pcpu_usage": "sum(node_load5{job='node_exporter_compute'})",
    "vcpu_allocated": "sum(libvirt_domain_info_virtual_cpus)",
    "vcpu_overcommit_max": (
        "avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
    ),
    "pram_total": "sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})",
    "pram_usage": "sum(node_memory_Active_bytes{job='node_exporter_compute'})",
    "vram_allocated": "sum(libvirt_domain_info_maximum_memory_bytes)",
    "vram_overcommit_max": (
        "avg(avg_over_time("
        "openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
    ),
    # NOTE(review): summing raw state codes counts VMs correctly only if the
    # running-state code equals 1; confirm against the libvirt exporter's codes.
    "vm_count": "sum(libvirt_domain_state_code)",
    "vm_active": "sum(libvirt_domain_state_code{stateDesc='the domain is running'})",
}
def _fetch_prometheus_metrics():
    """Run all Prometheus queries in parallel and return a dict of name -> value.

    Returns:
        dict: query key -> numeric value. Load/ratio metrics are coerced to
        float, everything else to int. A query whose result cannot be coerced
        falls back to 0.
    """
    # Keys whose values are fractional (load average, overcommit ratios).
    float_keys = ("pcpu_usage", "vcpu_overcommit_max", "vram_overcommit_max")
    result = {}
    with ThreadPoolExecutor(max_workers=len(_PROMETHEUS_QUERIES)) as executor:
        future_to_key = {
            executor.submit(query_prometheus, query=q): key
            for key, q in _PROMETHEUS_QUERIES.items()
        }
        for future in as_completed(future_to_key):
            key = future_to_key[future]
            try:
                raw = future.result()
                result[key] = float(raw) if key in float_keys else int(raw)
            except (ValueError, TypeError):
                # Non-scalar or empty result; default to 0. (The original
                # branched on float vs int keys here but produced 0 either
                # way - collapsed to a single fallback.)
                # NOTE(review): network errors raised by query_prometheus are
                # not caught and will propagate - confirm that is intended.
                result[key] = 0
    return result
def collect_context():
connection = get_connection()
region_name = connection._compute_region
flavors = get_flavor_list(connection=connection)
audits = get_audits(connection=connection)
hosts_total = int(
query_prometheus(
query="count(node_exporter_build_info{job='node_exporter_compute'})"
)
)
pcpu_total = int(
query_prometheus(
query="sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode))"
)
)
pcpu_usage = float(
query_prometheus(
query="sum(node_load5{job='node_exporter_compute'})"
)
)
vcpu_allocated = int(
query_prometheus(
query="sum(libvirt_domain_info_virtual_cpus)"
)
)
vcpu_overcommit_max = float(
query_prometheus(
query="avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
)
)
pram_total = int(
query_prometheus(
query="sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})" # memory in bytes
)
)
pram_usage = int (
query_prometheus(
query="sum(node_memory_Active_bytes{job='node_exporter_compute'})"
)
)
vram_allocated = int(
query_prometheus(
query="sum(libvirt_domain_info_maximum_memory_bytes)"
)
)
vram_overcommit_max = float(
query_prometheus(
query="avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
)
)
vm_count = int(
query_prometheus(
query="sum(libvirt_domain_state_code)"
)
)
vm_active = int(
query_prometheus(
query="sum(libvirt_domain_state_code{stateDesc='the domain is running'})"
)
)
metrics = _fetch_prometheus_metrics()
hosts_total = metrics.get("hosts_total") or 1
pcpu_total = metrics.get("pcpu_total", 0)
pcpu_usage = metrics.get("pcpu_usage", 0)
vcpu_allocated = metrics.get("vcpu_allocated", 0)
vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
pram_total = metrics.get("pram_total", 0)
pram_usage = metrics.get("pram_usage", 0)
vram_allocated = metrics.get("vram_allocated", 0)
vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
vm_count = metrics.get("vm_count", 0)
vm_active = metrics.get("vm_active", 0)
vcpu_total = pcpu_total * vcpu_overcommit_max
vram_total = pram_total * vram_overcommit_max
@@ -74,7 +85,7 @@ def collect_context():
# <--- Region data --->
"region": {
"name": region_name,
"hosts_total": 6,
"hosts_total": hosts_total,
},
# <--- CPU data --->
# pCPU data
@@ -82,15 +93,15 @@ def collect_context():
"total": pcpu_total,
"usage": pcpu_usage,
"free": pcpu_total - pcpu_usage,
"used_percentage": pcpu_usage / pcpu_total * 100,
"used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
},
# vCPU data
"vcpu": {
"total": vcpu_total,
"allocated": vcpu_allocated,
"free": vcpu_total - vcpu_allocated,
"allocated_percentage": vcpu_allocated / vcpu_total * 100,
"overcommit_ratio": vcpu_allocated / pcpu_total,
"allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
"overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
"overcommit_max": vcpu_overcommit_max,
},
# <--- RAM data --->
@@ -99,15 +110,15 @@ def collect_context():
"total": pram_total,
"usage": pram_usage,
"free": pram_total - pram_usage,
"used_percentage": pram_usage / pram_total * 100,
"used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
},
# vRAM data
"vram": {
"total": vram_total,
"allocated": vram_allocated,
"free": vram_total - vram_allocated,
"allocated_percentage": vram_allocated / vram_total * 100,
"overcommit_ratio": vram_allocated / pram_total,
"allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
"overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
"overcommit_max": vram_overcommit_max,
},
# <--- VM data --->
@@ -115,218 +126,189 @@ def collect_context():
"count": vm_count,
"active": vm_active,
"stopped": vm_count - vm_active,
"avg_cpu": vcpu_allocated / vm_count,
"avg_ram": vram_allocated / vm_count,
"density": vm_count / hosts_total,
"avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
"avg_ram": vram_allocated / vm_count if vm_count else 0,
"density": vm_count / hosts_total if hosts_total else 0,
},
"flavors": flavors,
"audits": audits,
}
current_cluster = get_current_cluster_cpu(connection)
context["current_cluster"] = {
"host_labels": json.dumps(current_cluster["host_labels"]),
"cpu_current": json.dumps(current_cluster["cpu_current"]),
}
# Serialize audit list fields for JavaScript so cached context is render-ready
for audit in context["audits"]:
audit["migrations"] = json.dumps(audit["migrations"])
audit["host_labels"] = json.dumps(audit["host_labels"])
audit["cpu_current"] = json.dumps(audit["cpu_current"])
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
return context
def index(request):
hosts_total = 6
pcpu_total = 672
pcpu_usage = 39.2
vcpu_total = 3360
vcpu_allocated = 98
vcpu_overcommit_max = 5
pram_total = 562500000000
pram_usage = 4325000000
vram_total = 489375000000
vram_allocated = 5625000000
vram_overcommit_max = 0.87
vm_count = 120
vm_active = 90
context = {
# <--- Region data --->
"region": {
"name": "ct3k1ldt",
"hosts_total": 6,
},
# <--- CPU data --->
# pCPU data
def collect_stats():
"""Build stats dict: region, pcpu, pram, vcpu, vram, vm, flavors (no audits)."""
connection = get_connection()
region_name = connection._compute_region
flavors = get_flavor_list(connection=connection)
metrics = _fetch_prometheus_metrics()
hosts_total = metrics.get("hosts_total") or 1
pcpu_total = metrics.get("pcpu_total", 0)
pcpu_usage = metrics.get("pcpu_usage", 0)
vcpu_allocated = metrics.get("vcpu_allocated", 0)
vcpu_overcommit_max = metrics.get("vcpu_overcommit_max", 0)
pram_total = metrics.get("pram_total", 0)
pram_usage = metrics.get("pram_usage", 0)
vram_allocated = metrics.get("vram_allocated", 0)
vram_overcommit_max = metrics.get("vram_overcommit_max", 0)
vm_count = metrics.get("vm_count", 0)
vm_active = metrics.get("vm_active", 0)
vcpu_total = pcpu_total * vcpu_overcommit_max
vram_total = pram_total * vram_overcommit_max
return {
"region": {"name": region_name, "hosts_total": hosts_total},
"pcpu": {
"total": pcpu_total,
"usage": pcpu_usage,
"free": pcpu_total - pcpu_usage,
"used_percentage": pcpu_usage / pcpu_total * 100,
"used_percentage": (pcpu_usage / pcpu_total * 100) if pcpu_total else 0,
},
# vCPU data
"vcpu": {
"total": vcpu_total,
"allocated": vcpu_allocated,
"free": vcpu_total - vcpu_allocated,
"allocated_percentage": vcpu_allocated / vcpu_total * 100,
"overcommit_ratio": vcpu_allocated / pcpu_total,
"allocated_percentage": (vcpu_allocated / vcpu_total * 100) if vcpu_total else 0,
"overcommit_ratio": (vcpu_allocated / pcpu_total) if pcpu_total else 0,
"overcommit_max": vcpu_overcommit_max,
},
# <--- RAM data --->
# pRAM data
"pram": {
"total": pram_total,
"usage": pram_usage,
"free": pram_total - pram_usage,
"used_percentage": pram_usage / pram_total * 100,
"used_percentage": (pram_usage / pram_total * 100) if pram_total else 0,
},
# vRAM data
"vram": {
"total": vram_total,
"allocated": vram_allocated,
"free": vram_total - vram_allocated,
"allocated_percentage": vram_allocated / vram_total * 100,
"overcommit_ratio": vram_allocated / pram_total,
"allocated_percentage": (vram_allocated / vram_total * 100) if vram_total else 0,
"overcommit_ratio": (vram_allocated / pram_total) if pram_total else 0,
"overcommit_max": vram_overcommit_max,
},
# <--- VM data --->
"vm": {
"count": vm_count,
"active": vm_active,
"stopped": vm_count - vm_active,
"avg_cpu": vcpu_allocated / vm_count,
"avg_ram": vram_allocated / vm_count,
"density": vm_count / hosts_total,
"avg_cpu": vcpu_allocated / vm_count if vm_count else 0,
"avg_ram": vram_allocated / vm_count if vm_count else 0,
"density": vm_count / hosts_total if hosts_total else 0,
},
"flavors": {
'first_common_flavor': {
'name': 'm1.medium',
'count': 18
},
'second_common_flavor': {
'name': 'm1.small',
'count': 12
},
'third_common_flavor': {
'name': 'm1.large',
'count': 8
},
},
# Audit data
'audits': [
{
'id': 'audit_001',
'name': 'Weekly Optimization',
'created_at': '2024-01-15',
'cpu_weight': 1.2,
'ram_weight': 0.6,
'scope': 'Full Cluster',
'strategy': 'Load Balancing',
'goal': 'Optimize CPU distribution across all hosts',
'migrations': [
{
'instanceName': 'web-server-01',
'source': 'compute-02',
'destination': 'compute-05',
'flavor': 'm1.medium',
'impact': 'Low'
},
{
'instanceName': 'db-replica-03',
'source': 'compute-01',
'destination': 'compute-04',
'flavor': 'm1.large',
'impact': 'Medium'
},
{
'instanceName': 'api-gateway',
'source': 'compute-03',
'destination': 'compute-06',
'flavor': 'm1.small',
'impact': 'Low'
},
{
'instanceName': 'cache-node-02',
'source': 'compute-01',
'destination': 'compute-07',
'flavor': 'm1.small',
'impact': 'Low'
},
{
'instanceName': 'monitoring-server',
'source': 'compute-04',
'destination': 'compute-02',
'flavor': 'm1.medium',
'impact': 'Low'
}
],
'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
'cpu_current': [78, 65, 42, 89, 34, 56, 71],
'cpu_projected': [65, 58, 45, 72, 48, 61, 68]
},
{
'id': 'audit_002',
'name': 'Emergency Rebalance',
'created_at': '2024-01-14',
'cpu_weight': 1.0,
'ram_weight': 1.0,
'scope': 'Overloaded Hosts',
'strategy': 'Hotspot Reduction',
'goal': 'Reduce load on compute-01 and compute-04',
'migrations': [
{
'instanceName': 'app-server-02',
'source': 'compute-01',
'destination': 'compute-06',
'flavor': 'm1.medium',
'impact': 'Medium'
},
{
'instanceName': 'file-server-01',
'source': 'compute-04',
'destination': 'compute-07',
'flavor': 'm1.large',
'impact': 'High'
}
],
'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
'cpu_current': [92, 65, 42, 85, 34, 56, 71],
'cpu_projected': [72, 65, 42, 65, 34, 66, 81]
},
{
'id': 'audit_003',
'name': 'Pre-Maintenance Planning',
'created_at': '2024-01-10',
'cpu_weight': 0.8,
'ram_weight': 1.5,
'scope': 'Maintenance Zone',
'strategy': 'Evacuation',
'goal': 'Empty compute-03 for maintenance',
'migrations': [
{
'instanceName': 'test-vm-01',
'source': 'compute-03',
'destination': 'compute-02',
'flavor': 'm1.small',
'impact': 'Low'
},
{
'instanceName': 'dev-server',
'source': 'compute-03',
'destination': 'compute-05',
'flavor': 'm1.medium',
'impact': 'Low'
},
{
'instanceName': 'staging-db',
'source': 'compute-03',
'destination': 'compute-07',
'flavor': 'm1.large',
'impact': 'High'
}
],
'host_labels': ['compute-01', 'compute-02', 'compute-03', 'compute-04', 'compute-05', 'compute-06', 'compute-07'],
'cpu_current': [78, 65, 56, 89, 34, 56, 71],
'cpu_projected': [78, 75, 0, 89, 54, 56, 81]
}
]
"flavors": flavors,
}
# Serialize lists for JavaScript
for audit in context['audits']:
audit['migrations'] = json.dumps(audit['migrations'])
audit['host_labels'] = json.dumps(audit['host_labels'])
audit['cpu_current'] = json.dumps(audit['cpu_current'])
audit['cpu_projected'] = json.dumps(audit['cpu_projected'])
return render(request, 'index.html', context)
def collect_audits():
    """Fetch audit records and JSON-encode their list fields for the frontend.

    The dashboard templates embed these fields directly into JavaScript,
    so each list is serialized to a JSON string before returning.
    """
    conn = get_connection()
    audit_list = get_audits(connection=conn)
    # Fields consumed by frontend charts as JSON strings.
    serialized_fields = ("migrations", "host_labels", "cpu_current", "cpu_projected")
    for entry in audit_list:
        for field in serialized_fields:
            entry[field] = json.dumps(entry[field])
    return audit_list
def _skeleton_context():
"""Minimal context for skeleton-only index render."""
empty_flavors = {
"first_common_flavor": {"name": "", "count": 0},
"second_common_flavor": None,
"third_common_flavor": None,
}
return {
"skeleton": True,
"region": {"name": "", "hosts_total": 0},
"pcpu": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
"pram": {"total": 0, "usage": 0, "free": 0, "used_percentage": 0},
"vcpu": {
"total": 0,
"allocated": 0,
"free": 0,
"allocated_percentage": 0,
"overcommit_ratio": 0,
"overcommit_max": 0,
},
"vram": {
"total": 0,
"allocated": 0,
"free": 0,
"allocated_percentage": 0,
"overcommit_ratio": 0,
"overcommit_max": 0,
},
"vm": {"count": 0, "active": 0, "stopped": 0, "avg_cpu": 0, "avg_ram": 0, "density": 0},
"flavors": empty_flavors,
"audits": [],
"current_cluster": {
"host_labels": "[]",
"cpu_current": "[]",
},
}
def index(request):
    """Render the dashboard page.

    With USE_MOCK_DATA enabled the full mock context is rendered server-side;
    otherwise a skeleton context is used and real data arrives via the
    /api/stats and /api/audits endpoints.
    """
    use_mock = getattr(settings, "USE_MOCK_DATA", False)
    context = get_mock_context() if use_mock else _skeleton_context()
    return render(request, "index.html", context)
def api_stats(request):
    """JSON endpoint returning dashboard statistics, with server-side caching.

    The payload is cached for DASHBOARD_CACHE_TTL seconds (default 120) so
    repeated polling does not hammer Prometheus/OpenStack on every request.
    """
    cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
    # get_or_set is the idiomatic replacement for the manual
    # get / compute / set pattern; collect_stats runs only on a cache miss.
    data = cache.get_or_set("dashboard_stats", collect_stats, timeout=cache_ttl)
    return JsonResponse(data)
def api_audits(request):
    """JSON endpoint returning audit history plus the current cluster CPU snapshot.

    The two payloads are cached under separate keys (both for
    DASHBOARD_CACHE_TTL seconds, default 120) because they come from
    different backends and may expire independently.
    """
    cache_ttl = getattr(settings, "DASHBOARD_CACHE_TTL", 120)
    # Audits: recomputed only on a cache miss.
    audits = cache.get_or_set("dashboard_audits", collect_audits, timeout=cache_ttl)

    def _load_current_cluster():
        # Deferred so an OpenStack connection is only opened on a cache miss.
        return get_current_cluster_cpu(get_connection())

    current_cluster = cache.get_or_set(
        "dashboard_current_cluster", _load_current_cluster, timeout=cache_ttl
    )
    return JsonResponse({"audits": audits, "current_cluster": current_cluster})
def api_source_status(request):
    """Return status of Prometheus and OpenStack data sources (ok / error / mock).

    With USE_MOCK_DATA enabled, both sources report "mock" without any probe.
    Otherwise both backends are checked, and the combined result is cached for
    SOURCE_STATUS_CACHE_TTL seconds (default 30) to bound probe frequency.
    """
    if getattr(settings, "USE_MOCK_DATA", False):
        return JsonResponse(
            {
                "prometheus": {"status": "mock"},
                "openstack": {"status": "mock"},
            }
        )
    cache_ttl = getattr(settings, "SOURCE_STATUS_CACHE_TTL", 30)

    def _probe_sources():
        # Both probes run together on a cache miss so the two statuses
        # always reflect the same point in time.
        return {
            "prometheus": check_prometheus(),
            "openstack": check_openstack(),
        }

    # Same get_or_set idiom as the other api_* endpoints in this module.
    data = cache.get_or_set("dashboard_source_status", _probe_sources, timeout=cache_ttl)
    return JsonResponse(data)

22
docker-compose.dev.yml Normal file
View File

@@ -0,0 +1,22 @@
# Development override: use with
# docker compose -f docker-compose.yml -f docker-compose.dev.yml up --build
#
# Uses mock data (no OpenStack/Prometheus), mounts code for live reload.
services:
  watcher-visio:
    build:
      context: .
      dockerfile: Dockerfile
    # Bind-mount the source tree so code changes are picked up without rebuilds.
    volumes:
      - .:/app
    # Mock mode: no Prometheus/OpenStack credentials needed for local work.
    environment:
      - USE_MOCK_DATA=true
      - DEBUG=true
      - PYTHONUNBUFFERED=1
    ports:
      - "8080:8080"
    # Optional: skip entrypoint migrations on every start for faster restart
    # command: ["python3", "manage.py", "runserver", "0.0.0.0:8080"]
    # Keep a TTY attached so interactive debuggers (e.g. pdb) work in the container.
    stdin_open: true
    tty: true

View File

@@ -1,8 +1,24 @@
# Base compose: production-like run.
# For development with mock data and live reload use:
# docker compose -f docker-compose.yml -f docker-compose.dev.yml up --build
services:
watcher-visio:
build: .
image: watcher-visio:latest
build:
context: .
dockerfile: Dockerfile
container_name: watcher-visio
ports:
- "8000:8000"
volumes:
- ./:/app
- "8080:8080"
environment:
- PYTHONUNBUFFERED=1
# Override via environment or env_file (e.g. env_file: .env):
# PROMETHEUS_URL, OPENSTACK_CLOUD, OPENSTACK_REGION_NAME, SECRET_KEY
healthcheck:
test: ["CMD", "curl", "-f", "http://127.0.0.1:8080/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 15s
restart: unless-stopped

View File

@@ -1,12 +1,8 @@
#!/bin/sh
set -e
echo "Applying database migrations..."
python manage.py migrate --noinput
echo "Collecting static files..."
python manage.py collectstatic --noinput
python3 manage.py migrate --noinput
echo "Starting Django application..."
exec "$@"

View File

@@ -1,12 +1,13 @@
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'watcher_visio.settings')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "watcher_visio.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
@@ -18,5 +19,5 @@ def main():
execute_from_command_line(sys.argv)
if __name__ == '__main__':
if __name__ == "__main__":
main()

15
package-lock.json generated
View File

@@ -12,6 +12,7 @@
"@tailwindcss/cli": "^4.1.17"
},
"devDependencies": {
"@fontsource/dm-sans": "^5.2.8",
"@tailwindcss/typography": "^0.5.19",
"autoprefixer": "^10.4.22",
"daisyui": "^5.5.5",
@@ -19,6 +20,15 @@
"tailwindcss": "^4.1.17"
}
},
"node_modules/@fontsource/dm-sans": {
"version": "5.2.8",
"resolved": "https://registry.npmjs.org/@fontsource/dm-sans/-/dm-sans-5.2.8.tgz",
"integrity": "sha512-tlovG42m9ESG28WiHpLq3F5umAlm64rv0RkqTbYowRn70e9OlRr5a3yTJhrhrY+k5lftR/OFJjPzOLQzk8EfCA==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/ayuhito"
}
},
"node_modules/@jridgewell/gen-mapping": {
"version": "0.3.13",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
@@ -713,7 +723,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.8.25",
"caniuse-lite": "^1.0.30001754",
@@ -1254,7 +1263,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"nanoid": "^3.3.11",
"picocolors": "^1.1.1",
@@ -1298,8 +1306,7 @@
"version": "4.1.17",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.17.tgz",
"integrity": "sha512-j9Ee2YjuQqYT9bbRTfTZht9W/ytp5H+jJpZKiYdP/bpnXARAuELt9ofP0lPnmHjbga7SNQIxdTAXCmtKVYjN+Q==",
"license": "MIT",
"peer": true
"license": "MIT"
},
"node_modules/tapable": {
"version": "2.3.0",

View File

@@ -16,6 +16,7 @@
"license": "ISC",
"type": "commonjs",
"devDependencies": {
"@fontsource/dm-sans": "^5.2.8",
"@tailwindcss/typography": "^0.5.19",
"autoprefixer": "^10.4.22",
"daisyui": "^5.5.5",

15
pyproject.toml Normal file
View File

@@ -0,0 +1,15 @@
[project]
name = "watcher-visio"
version = "0.1.0"
description = "Watcher Visio dashboard"
readme = "README.md"
requires-python = ">=3.12"
[tool.ruff]
line-length = 100
target-version = "py312"
# Limit linting to the app and project packages (matches the CI invocation).
src = ["dashboard", "watcher_visio"]
[tool.ruff.lint]
# E/W: pycodestyle, F: pyflakes, I: import sorting (isort), N: pep8-naming.
select = ["E", "F", "I", "N", "W"]
ignore = []

View File

@@ -1,21 +1,83 @@
/* DM Sans local webfonts (no external requests) */
@font-face {
font-family: 'DM Sans';
font-style: normal;
font-display: swap;
font-weight: 400;
src: url('../fonts/dm-sans-latin-400-normal.woff2') format('woff2');
}
@font-face {
font-family: 'DM Sans';
font-style: normal;
font-display: swap;
font-weight: 500;
src: url('../fonts/dm-sans-latin-500-normal.woff2') format('woff2');
}
@font-face {
font-family: 'DM Sans';
font-style: normal;
font-display: swap;
font-weight: 600;
src: url('../fonts/dm-sans-latin-600-normal.woff2') format('woff2');
}
@font-face {
font-family: 'DM Sans';
font-style: normal;
font-display: swap;
font-weight: 700;
src: url('../fonts/dm-sans-latin-700-normal.woff2') format('woff2');
}
@font-face {
font-family: 'DM Sans';
font-style: italic;
font-display: swap;
font-weight: 400;
src: url('../fonts/dm-sans-latin-400-italic.woff2') format('woff2');
}
@font-face {
font-family: 'DM Sans';
font-style: italic;
font-display: swap;
font-weight: 500;
src: url('../fonts/dm-sans-latin-500-italic.woff2') format('woff2');
}
@font-face {
font-family: 'DM Sans';
font-style: italic;
font-display: swap;
font-weight: 600;
src: url('../fonts/dm-sans-latin-600-italic.woff2') format('woff2');
}
@font-face {
font-family: 'DM Sans';
font-style: italic;
font-display: swap;
font-weight: 700;
src: url('../fonts/dm-sans-latin-700-italic.woff2') format('woff2');
}
@import "tailwindcss";
@plugin "daisyui";
@theme {
--font-sans: "DM Sans", ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
}
@plugin "daisyui/theme" {
name: "light";
default: true;
prefersdark: false;
color-scheme: "light";
--color-base-100: oklch(100% 0 0);
--color-base-200: oklch(98% 0 0);
--color-base-300: oklch(95% 0 0);
--color-base-content: oklch(21% 0.006 285.885);
--color-primary: #09418E;
--color-primary-content: oklch(93% 0.034 272.788);
--color-secondary: #428BCA;
--color-secondary-content: oklch(100% 0 0);
--color-accent: #A492FF;
--color-accent-content: oklch(21% 0.006 285.885);
--color-base-100: oklch(100% 0.004 260);
--color-base-200: oklch(98% 0.004 260);
--color-base-300: oklch(95% 0.004 260);
--color-base-content: oklch(22% 0.02 260);
--color-primary: #0a2973;
--color-primary-content: #ffffff;
--color-secondary: #009fdf;
--color-secondary-content: #ffffff;
--color-accent: #009fdf;
--color-accent-content: #ffffff;
--color-neutral: #333333;
--color-neutral-content: oklch(92% 0.004 286.32);
--color-info: oklch(74% 0.16 232.661);
@@ -34,6 +96,170 @@
--border: 1px;
--depth: 1;
--noise: 0;
--chart-grid-color: color-mix(in oklch, var(--color-base-content) 22%, transparent);
}
@plugin "daisyui/theme" {
name: "dark";
default: false;
prefersdark: true;
color-scheme: "dark";
--color-base-100: oklch(22% 0.014 260);
--color-base-200: oklch(19% 0.012 260);
--color-base-300: oklch(16% 0.01 260);
--color-base-content: oklch(96% 0.01 260);
/* Lighter primary/secondary for readability on dark background (cards, progress, %) */
--color-primary: oklch(65% 0.2 260);
--color-primary-content: #ffffff;
--color-secondary: oklch(72% 0.16 230);
--color-secondary-content: #ffffff;
--color-accent: oklch(72% 0.16 230);
--color-accent-content: #ffffff;
--color-neutral: oklch(28% 0.02 260);
--color-neutral-content: oklch(92% 0.004 286.32);
--color-info: oklch(74% 0.16 232.661);
--color-info-content: oklch(29% 0.066 243.157);
--color-success: oklch(76% 0.177 163.223);
--color-success-content: oklch(37% 0.077 168.94);
--color-warning: oklch(82% 0.189 84.429);
--color-warning-content: oklch(41% 0.112 45.904);
--color-error: oklch(71% 0.194 13.428);
--color-error-content: oklch(27% 0.105 12.094);
--radius-selector: 0.5rem;
--radius-field: 0.25rem;
--radius-box: 0.5rem;
--size-selector: 0.25rem;
--size-field: 0.25rem;
--border: 1px;
--depth: 1;
--noise: 0;
--chart-grid-color: color-mix(in oklch, var(--color-base-content) 22%, transparent);
}
/* VTB gradient (both themes) */
:root {
--gradient-vtb: linear-gradient(135deg, #0a2973 0%, #009fdf 100%);
}
/* Gradient top border for cards */
.border-t-gradient-vtb {
position: relative;
}
.border-t-gradient-vtb::before {
content: "";
position: absolute;
top: 0;
left: 0;
right: 0;
height: 4px;
background: var(--gradient-vtb);
border-radius: var(--radius-box) var(--radius-box) 0 0;
}
/* Gradient button (e.g. Load Analysis) */
.btn-gradient-vtb {
background: var(--gradient-vtb);
color: #ffffff;
border: none;
}
.btn-gradient-vtb:hover {
opacity: 0.9;
color: #ffffff;
}
/* Main content area: distinct background so cards (base-100) stand out */
[data-theme=light] main {
background-color: var(--color-base-200);
}
[data-theme=dark] main {
background-color: var(--color-base-200);
}
/* Dark theme: improve contrast for muted text and controls */
[data-theme=dark] .text-base-content\/60 {
color: color-mix(in oklch, var(--color-base-content) 88%, transparent);
}
[data-theme=dark] .text-base-content\/70 {
color: color-mix(in oklch, var(--color-base-content) 92%, transparent);
}
[data-theme=dark] .text-base-content\/30 {
color: color-mix(in oklch, var(--color-base-content) 55%, transparent);
}
[data-theme=dark] .badge-outline {
border-color: color-mix(in oklch, var(--color-base-content) 75%, transparent);
color: color-mix(in oklch, var(--color-base-content) 90%, transparent);
}
[data-theme=dark] .badge-neutral {
background-color: var(--color-base-300);
color: var(--color-base-content);
border-color: transparent;
}
[data-theme=dark] .btn-ghost {
color: color-mix(in oklch, var(--color-base-content) 90%, transparent);
}
[data-theme=dark] .btn-ghost:hover {
color: var(--color-base-content);
background-color: color-mix(in oklch, var(--color-base-content) 12%, transparent);
}
/* Dark theme: better contrast for CPU chart stats (Mean, ±0.5σ) */
[data-theme=dark] section[aria-label="CPU distribution chart"] .text-success {
color: oklch(85% 0.16 163);
}
[data-theme=dark] section[aria-label="CPU distribution chart"] .text-error\/60 {
color: oklch(82% 0.18 13);
}
/* Accessibility: ensure focus ring is visible on key controls */
.btn:focus-visible,
.select:focus-within,
label.swap:focus-within:not(.theme-toggle) {
outline: 2px solid var(--color-primary);
outline-offset: 2px;
}
/* Light theme: slightly stronger card shadow for depth */
[data-theme=light] .card.shadow-sm {
box-shadow: 0 1px 3px 0 rgb(0 0 0 / 0.08), 0 1px 2px -1px rgb(0 0 0 / 0.06);
}
[data-theme=light] .card.shadow-sm:hover {
box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.08), 0 2px 4px -2px rgb(0 0 0 / 0.06);
}
@source "../../templates";
/* --- Print (Save as PDF) --- */
@media print {
.no-print {
display: none !important;
}
.print-only {
display: block !important;
}
/* Keep card backgrounds and colors when printing */
.card,
main,
.badge,
.progress {
-webkit-print-color-adjust: exact;
print-color-adjust: exact;
}
/* Avoid breaking cards and sections across pages */
.card {
break-inside: avoid;
page-break-inside: avoid;
}
section {
break-inside: avoid;
page-break-inside: avoid;
}
/* Reduce top padding so content starts higher */
main {
padding-top: 0.5rem !important;
}
}
@media screen {
.print-only {
display: none !important;
}
}

BIN
static/favicon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long

111
static/js/export-pdf.js Normal file
View File

@@ -0,0 +1,111 @@
/**
 * Export dashboard as PDF by capturing a screenshot of #dashboard-content
 * and assembling it into a multi-page PDF (html2canvas + jsPDF).
 *
 * Side effects: temporarily disables #pdf-export-btn and hides the
 * "Audit analysis" section during capture; both are restored afterwards.
 */
function exportDashboardToPdf() {
var el = document.getElementById('dashboard-content');
if (!el) {
if (typeof console !== 'undefined' && console.warn) {
console.warn('export-pdf: #dashboard-content not found');
}
return;
}
// Disable the trigger button and show progress text; originalText is
// restored in every exit path after this point.
var btn = document.getElementById('pdf-export-btn');
var originalText = btn ? btn.innerHTML : '';
if (btn) {
btn.disabled = true;
btn.innerHTML = 'Generating PDF…';
}
// Region name from the navbar badge, used in the PDF header line.
var regionEl = document.getElementById('regionBadge');
var region = regionEl ? (regionEl.textContent || '').trim() : '';
// Bail out early (restoring the button) if either library failed to load.
if (typeof html2canvas === 'undefined' || typeof jspdf === 'undefined') {
if (btn) {
btn.disabled = false;
btn.innerHTML = originalText;
}
alert('PDF export requires html2canvas and jsPDF. Please refresh the page.');
return;
}
// UMD builds expose the constructor as jspdf.jsPDF; fall back to the
// global itself for builds that export the constructor directly.
var JsPDFConstructor = (typeof jspdf !== 'undefined' && jspdf.jsPDF) ? jspdf.jsPDF : jspdf;
// Hide the audit section for the capture; its previous inline display
// value is saved so restoreAuditSection() can put it back exactly.
var auditSection = el.querySelector('section[aria-label="Audit analysis"]');
var auditSectionDisplay = '';
if (auditSection) {
auditSectionDisplay = auditSection.style.display;
auditSection.style.display = 'none';
}
function restoreAuditSection() {
if (auditSection) {
auditSection.style.display = auditSectionDisplay;
}
}
// scale: 2 doubles capture resolution for sharper PDF output.
html2canvas(el, {
scale: 2,
useCORS: true,
allowTaint: true,
logging: false
}).then(function(canvas) {
var imgW = canvas.width;
var imgH = canvas.height;
var dataUrl = canvas.toDataURL('image/png');
// A4 portrait layout in millimetres.
var doc = new JsPDFConstructor({ orientation: 'portrait', unit: 'mm', format: 'a4' });
var pageW = 210;
var pageH = 297;
var margin = 10;
var contentW = pageW - 2 * margin;
// First page reserves headerH mm for the title/region lines.
var headerH = 14;
var firstPageImgTop = margin + headerH;
var firstPageImgH = pageH - firstPageImgTop - margin;
var otherPageImgH = pageH - 2 * margin;
// Scale the capture to the content width, preserving aspect ratio.
var imgWmm = contentW;
var imgHmm = contentW * (imgH / imgW);
doc.setFontSize(14);
doc.text('Dashboard report', margin, margin + 6);
doc.setFontSize(10);
doc.text(region ? 'Region: ' + region : '', margin, margin + 12);
var shown = 0;
var totalH = imgHmm;
var pageNum = 0;
var imgYmm = firstPageImgTop;
// Paginate by drawing the full image on each page, shifted upward by the
// height already shown. NOTE(review): this relies on jsPDF/viewers clipping
// the drawn image to the page bounds — confirm output in target PDF viewers.
while (shown < totalH) {
if (pageNum > 0) {
doc.addPage();
imgYmm = margin;
}
var sliceH = pageNum === 0 ? firstPageImgH : otherPageImgH;
var yOffset = -shown;
doc.addImage(dataUrl, 'PNG', margin, imgYmm + yOffset, imgWmm, imgHmm);
shown += sliceH;
pageNum += 1;
}
doc.save('dashboard-report.pdf');
// Success path: restore hidden section and re-enable the button.
restoreAuditSection();
if (btn) {
btn.disabled = false;
btn.innerHTML = originalText;
}
}).catch(function (err) {
if (typeof console !== 'undefined' && console.error) {
console.error('export-pdf:', err);
}
// Failure path: same cleanup as success, plus a user-visible alert.
restoreAuditSection();
if (btn) {
btn.disabled = false;
btn.innerHTML = originalText;
}
alert('Failed to generate PDF. Please try again.');
});
}

10
static/js/html2canvas-pro.min.js vendored Normal file

File diff suppressed because one or more lines are too long

397
static/js/jspdf.umd.min.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -1,3 +1,13 @@
// Format bytes to GB (matches Django convert_bytes filter default)
function formatBytes(bytes, targetUnit = 'GB') {
  // Null/undefined or non-numeric input renders as the string '0'.
  if (bytes == null || isNaN(Number(bytes))) return '0';
  const value = Number(bytes);
  const KIB = 1024;
  const unitFactors = {
    B: 1,
    KB: KIB,
    MB: KIB * KIB,
    GB: KIB * KIB * KIB,
    TB: KIB * KIB * KIB * KIB,
  };
  const normalizedUnit = (targetUnit || 'GB').toUpperCase();
  // Unknown units fall back to GB, mirroring the Django filter's default.
  const divisor = unitFactors[normalizedUnit] || unitFactors.GB;
  return (value / divisor).toFixed(1);
}
// Color utilities
const getCSSVar = (varName) => {
return getComputedStyle(document.documentElement).getPropertyValue(varName).trim();

View File

@@ -5,7 +5,10 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{% block title %}SWatcher{% endblock %}</title>
<link rel="icon" href="{% static 'favicon.ico' %}" type="image/x-icon">
<link rel="stylesheet" href="{% static 'css/output.css' %}">
<script src="{% static 'js/html2canvas-pro.min.js' %}"></script>
<script src="{% static 'js/jspdf.umd.min.js' %}"></script>
{% block imports %}
{% endblock %}
{% block css %}
@@ -13,16 +16,24 @@
</head>
<body>
<!-- Navbar -->
<div class="navbar bg-base-100 shadow-lg">
<div class="navbar bg-base-100 shadow-lg border-b border-base-200 sticky top-0 z-10">
<div class="navbar-start">
<a class="btn btn-ghost text-xl" href="{% url 'index' %}">SWatcher</a>
</div>
<div class="navbar-center hidden lg:flex">
</div>
<div class="navbar-end">
<div class="px-1 flex gap-3 pr-10">
<span class="badge badge-primary badge-lg">{{ region.name }}</span>
<label class="swap swap-rotate">
<div class="px-1 flex items-center gap-3 pr-10">
<button type="button" id="pdf-export-btn" class="btn btn-ghost btn-sm no-print" onclick="exportDashboardToPdf()" title="Save as PDF" aria-label="Save as PDF">
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 10v6m0 0l-3-3m3 3l3-3m2 8H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"/>
</svg>
Save as PDF
</button>
<span id="regionBadge" class="badge badge-primary badge-lg">{{ region.name }}</span>
<div id="source-status" class="flex items-center gap-2 no-print" aria-label="Data source status">
<span id="source-status-prometheus" class="badge badge-ghost badge-sm" title="Prometheus">Prometheus: …</span>
<span id="source-status-openstack" class="badge badge-ghost badge-sm" title="OpenStack">OpenStack: …</span>
</div>
<label class="swap swap-rotate theme-toggle no-print">
<input type="checkbox" class="theme-controller" value="dark" />
<svg class="swap-off fill-current w-6 h-6" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
<path d="M5.64,17l-.71.71a1,1,0,0,0,0,1.41,1,1,0,0,0,1.41,0l.71-.71A1,1,0,0,0,5.64,17ZM5,12a1,1,0,0,0-1-1H3a1,1,0,0,0,0,2H4A1,1,0,0,0,5,12Zm7-7a1,1,0,0,0,1-1V3a1,1,0,0,0-2,0V4A1,1,0,0,0,12,5ZM5.64,7.05a1,1,0,0,0,.7.29,1,1,0,0,0,.71-.29,1,1,0,0,0,0-1.41l-.71-.71A1,1,0,0,0,4.93,6.34Zm12,.29a1,1,0,0,0,.7-.29l.71-.71a1,1,0,1,0-1.41-1.41L17,5.64a1,1,0,0,0,0,1.41A1,1,0,0,0,17.66,7.34ZM21,11H20a1,1,0,0,0,0,2h1a1,1,0,0,0,0-2Zm-9,8a1,1,0,0,0-1,1v1a1,1,0,0,0,2,0V20A1,1,0,0,0,12,19ZM18.36,17A1,1,0,0,0,17,18.36l.71.71a1,1,0,0,0,1.41,0,1,1,0,0,0,0-1.41ZM12,6.5A5.5,5.5,0,1,0,17.5,12,5.51,5.51,0,0,0,12,6.5Zm0,9A3.5,3.5,0,1,1,15.5,12,3.5,3.5,0,0,1,12,15.5Z"/>
@@ -37,6 +48,7 @@
<!-- Main Content -->
<main class="container mx-auto px-4 py-8 min-h-screen">
<p class="print-only text-lg font-semibold mb-4">Dashboard report</p>
{% block content %}
{% endblock %}
</main>
@@ -62,6 +74,47 @@
localStorage.setItem('theme', newTheme);
});
</script>
<script src="{% static 'js/export-pdf.js' %}"></script>
<script>
(function() {
function updateSourceStatus(el, label, data) {
if (!el) return;
var status = data && data.status;
var msg = data && data.message;
var title = msg ? (label + ': ' + msg) : label;
el.setAttribute('title', title);
el.setAttribute('aria-label', title);
if (status === 'ok') {
el.textContent = label + ': OK';
el.classList.remove('badge-error', 'badge-warning');
el.classList.add('badge-success');
} else if (status === 'error') {
el.textContent = label + ': Error';
el.classList.remove('badge-success', 'badge-warning');
el.classList.add('badge-error');
} else if (status === 'mock') {
el.textContent = label + ': Mock';
el.classList.remove('badge-error', 'badge-success');
el.classList.add('badge-warning');
} else {
el.textContent = label + ': …';
el.classList.remove('badge-success', 'badge-error', 'badge-warning');
}
}
document.addEventListener('DOMContentLoaded', function() {
var promEl = document.getElementById('source-status-prometheus');
var osEl = document.getElementById('source-status-openstack');
if (!promEl || !osEl) return;
fetch('/api/source-status/').then(function(r) { return r.ok ? r.json() : {}; }).then(function(data) {
updateSourceStatus(promEl, 'Prometheus', data.prometheus);
updateSourceStatus(osEl, 'OpenStack', data.openstack);
}).catch(function() {
updateSourceStatus(promEl, 'Prometheus', { status: 'error', message: 'Failed to fetch status' });
updateSourceStatus(osEl, 'OpenStack', { status: 'error', message: 'Failed to fetch status' });
});
});
})();
</script>
{% block script %}
{% endblock %}
</body>

View File

@@ -5,27 +5,48 @@
<script src="{% static 'js/utils.js' %}"></script>
<script src="{% static 'js/chart.js' %}"></script>
<script src="{% static 'js/chartjs-plugin-datalabels.min.js' %}"></script>
<script src="{% static 'js/chartjs-plugin-annotation.min.js' %}"></script>
{% endblock %}
{% block content %}
<!-- MAIN DASHBOARD -->
<div class="p-4 space-y-4">
<div id="dashboard-content" class="p-4 space-y-8" {% if skeleton %}data-dashboard="skeleton"{% endif %}>
<!-- QUICK STATS ROW -->
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4">
<section class="grid grid-cols-1 lg:grid-cols-3 gap-4" aria-label="Quick stats">
<!-- CPU Utilization -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow">
<div class="card-body p-4">
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsPcpuCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">CPU Utilization</h3>
<div class="text-xs text-base-content/40 mt-0.5">{{ pcpu.usage }} / {{ pcpu.total }} CPU</div>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="pcpu.usage"></span> / <span data-stats="pcpu.total"></span> CPU</div>
</div>
<div class="text-lg font-bold text-primary">{{ pcpu.used_percentage|floatformat:1 }}%</div>
<div class="text-xl font-bold text-primary animate-pulse" data-stats="pcpu.used_percentage">%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium">{{ pcpu.usage }} CPU</span>
<span class="font-medium animate-pulse" data-stats="pcpu.usage_val"></span>
</div>
<progress class="progress progress-primary w-full animate-pulse" data-stats="pcpu.progress" value="0" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium animate-pulse" data-stats="pcpu.free"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">CPU Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ pcpu.usage|floatformat:1 }} / {{ pcpu.total }} CPU</div>
</div>
<div class="text-xl font-bold text-primary">{{ pcpu.used_percentage|floatformat:1 }}%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium">{{ pcpu.usage|floatformat:1 }} CPU</span>
</div>
<progress class="progress progress-primary w-full" value="{{ pcpu.used_percentage }}" max="100"></progress>
<div class="flex justify-between text-xs">
@@ -33,18 +54,39 @@
<span class="font-medium">{{ pcpu.free }} CPU</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- RAM Utilization -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow">
<div class="card-body p-4">
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsPramCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">RAM Utilization</h3>
<div class="text-xs text-base-content/40 mt-0.5">{{ pram.usage|convert_bytes }} / {{ pram.total|convert_bytes }} GB</div>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="pram.usage_gb"></span> / <span data-stats="pram.total_gb"></span> GB</div>
</div>
<div class="text-lg font-bold text-secondary">{{ pram.used_percentage|floatformat:1 }}%</div>
<div class="text-xl font-bold text-secondary animate-pulse" data-stats="pram.used_percentage">%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Used</span>
<span class="font-medium animate-pulse" data-stats="pram.usage_gb_val"></span>
</div>
<progress class="progress progress-secondary w-full animate-pulse" data-stats="pram.progress" value="0" max="100"></progress>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Free</span>
<span class="font-medium animate-pulse" data-stats="pram.free_gb"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">RAM Utilization</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ pram.usage|convert_bytes }} / {{ pram.total|convert_bytes }} GB</div>
</div>
<div class="text-xl font-bold text-secondary">{{ pram.used_percentage|floatformat:1 }}%</div>
</div>
<div class="space-y-2">
<div class="flex justify-between text-xs">
@@ -57,18 +99,51 @@
<span class="font-medium">{{ pram.free|convert_bytes }} GB</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- Instance Summary -->
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow">
<div class="card-body p-4">
<div class="card bg-base-100 shadow-sm hover:shadow transition-shadow" id="statsVmCard">
<div class="card-body p-5">
{% if skeleton %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">Instances</h3>
<div class="text-xs text-base-content/40 mt-0.5">{{ vm.active }} active / {{ vm.stopped }} stopped</div>
<div class="text-xs text-base-content/60 mt-0.5 animate-pulse"><span data-stats="vm.active"></span> active / <span data-stats="vm.stopped"></span> stopped</div>
</div>
<div class="text-lg font-bold text-accent">{{ vm.count }}</div>
<div class="text-xl font-bold text-accent animate-pulse" data-stats="vm.count"></div>
</div>
<div class="space-y-3">
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-success"></div>
<span>Most Used Flavor</span>
</div>
<span class="font-medium animate-pulse" data-stats="flavors.first_name"></span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-info"></div>
<span>Avg. vCPU/VM</span>
</div>
<span class="font-medium animate-pulse" data-stats="vm.avg_cpu"></span>
</div>
<div class="flex justify-between items-center text-xs">
<div class="flex items-center gap-2">
<div class="w-2 h-2 rounded-full bg-warning"></div>
<span>Density</span>
</div>
<span class="font-medium animate-pulse" data-stats="vm.density"></span>
</div>
</div>
{% else %}
<div class="flex items-center justify-between mb-3">
<div>
<h3 class="text-sm font-medium text-base-content/70">Instances</h3>
<div class="text-xs text-base-content/60 mt-0.5">{{ vm.active }} active / {{ vm.stopped }} stopped</div>
</div>
<div class="text-xl font-bold text-accent">{{ vm.count }}</div>
</div>
<div class="space-y-3">
<div class="flex justify-between items-center text-xs">
@@ -93,22 +168,46 @@
<span class="font-medium">{{ vm.density|floatformat:1 }}/host</span>
</div>
</div>
{% endif %}
</div>
</div>
</div>
</section>
<!-- DETAILED OVERVIEW -->
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<section class="grid grid-cols-1 lg:grid-cols-2 gap-4" aria-label="Resource allocation and flavors">
<!-- Resource Allocation -->
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-4">
<h3 class="text-sm font-semibold mb-4 flex items-center gap-2">
<div class="card bg-base-100 shadow-sm border-t-gradient-vtb" id="statsAllocationCard">
<div class="card-body p-5">
<h2 class="text-lg font-semibold mb-4 flex items-center gap-2">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 19v-6a2 2 0 00-2-2H5a2 2 0 00-2 2v6a2 2 0 002 2h2a2 2 0 002-2zm0 0V9a2 2 0 012-2h2a2 2 0 012 2v10m-6 0a2 2 0 002 2h2a2 2 0 002-2m0 0V5a2 2 0 012-2h2a2 2 0 012 2v14a2 2 0 01-2 2h-2a2 2 0 01-2-2z"/>
</svg>
Resource Allocation
</h3>
</h2>
{% if skeleton %}
<div class="mb-4">
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">CPU Allocation</span>
<span class="font-medium animate-pulse" data-stats="vcpu.allocated_total">— / — vCPU</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-primary flex-1 animate-pulse" data-stats="vcpu.progress" value="0" max="100"></progress>
<span class="text-xs font-medium w-12 text-right animate-pulse" data-stats="vcpu.allocated_percentage">—%</span>
</div>
<div class="flex justify-between text-xs mt-1 animate-pulse" data-stats="vcpu.overcommit"></div>
</div>
<div>
<div class="flex justify-between text-xs mb-1">
<span class="text-base-content/70">RAM Allocation</span>
<span class="font-medium animate-pulse" data-stats="vram.allocated_total">— / — GB</span>
</div>
<div class="flex items-center gap-2">
<progress class="progress progress-secondary flex-1 animate-pulse" data-stats="vram.progress" value="0" max="100"></progress>
<span class="text-xs font-medium w-12 text-right animate-pulse" data-stats="vram.allocated_percentage">—%</span>
</div>
<div class="flex justify-between text-xs mt-1 animate-pulse" data-stats="vram.overcommit"></div>
</div>
{% else %}
<!-- CPU Allocation -->
<div class="mb-4">
<div class="flex justify-between text-xs mb-1">
@@ -120,8 +219,8 @@
<span class="text-xs font-medium w-12 text-right">{{ vcpu.allocated_percentage|floatformat:1 }}%</span>
</div>
<div class="flex justify-between text-xs mt-1">
<span class="text-base-content/50">overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max }}</span>
<span class="text-base-content/50">{{ vcpu.allocated_percentage|floatformat:1 }}% allocated</span>
<span class="text-base-content/60">overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max|floatformat:1 }}</span>
<span class="text-base-content/60">{{ vcpu.allocated_percentage|floatformat:1 }}% allocated</span>
</div>
</div>
@@ -136,24 +235,54 @@
<span class="text-xs font-medium w-12 text-right">{{ vram.allocated_percentage|floatformat:1 }}%</span>
</div>
<div class="flex justify-between text-xs mt-1">
<span class="text-base-content/50">overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max }}</span>
<span class="text-base-content/50">{{ vram.allocated_percentage|floatformat:1 }}% allocated</span>
<span class="text-base-content/60">overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max|floatformat:1 }}</span>
<span class="text-base-content/60">{{ vram.allocated_percentage|floatformat:1 }}% allocated</span>
</div>
</div>
{% endif %}
</div>
</div>
<!-- Flavor Distribution -->
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-4">
<h3 class="text-sm font-semibold mb-4 flex items-center gap-2">
<div class="card bg-base-100 shadow-sm border-t-gradient-vtb" id="statsFlavorsCard">
<div class="card-body p-5">
<h2 class="text-lg font-semibold mb-4 flex items-center gap-2">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M11 3.055A9.001 9.001 0 1020.945 13H11V3.055z"/>
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M20.488 9H15V3.512A9.025 9.025 0 0120.488 9z"/>
</svg>
Top Flavors
</h3>
</h2>
{% if skeleton %}
<div class="space-y-3">
<div class="bg-base-200/50 rounded-lg p-3">
<div class="flex justify-between items-center mb-1">
<span class="text-sm font-medium animate-pulse" data-stats="flavors.first_name"></span>
<span class="text-xs badge badge-primary animate-pulse" data-stats="flavors.first_count">— instances</span>
</div>
<div class="flex justify-between text-xs">
<span class="text-base-content/60">Share</span>
<span class="font-medium animate-pulse" data-stats="flavors.first_share">—%</span>
</div>
</div>
<div class="space-y-2">
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span class="animate-pulse" data-stats="flavors.second_name"></span>
</div>
<span class="text-xs badge badge-outline animate-pulse" data-stats="flavors.second_count"></span>
</div>
<div class="flex justify-between items-center text-sm">
<div class="flex items-center gap-2">
<div class="w-1.5 h-1.5 rounded-full bg-base-content/30"></div>
<span class="animate-pulse" data-stats="flavors.third_name"></span>
</div>
<span class="text-xs badge badge-outline animate-pulse" data-stats="flavors.third_count"></span>
</div>
</div>
</div>
{% else %}
<div class="space-y-3">
<!-- Most Common -->
<div class="bg-base-200/50 rounded-lg p-3">
@@ -190,21 +319,23 @@
{% endif %}
</div>
</div>
{% endif %}
</div>
</div>
</div>
</section>
<!-- AUDIT CONTROL -->
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-4">
<section aria-label="Audit analysis">
<div class="card bg-base-100 shadow-sm" id="auditSection">
<div class="card-body p-5">
<div class="flex flex-col sm:flex-row sm:items-center justify-between gap-3 mb-4">
<div>
<h3 class="text-sm font-semibold">Audit Analysis</h3>
<div class="text-xs text-base-content/50 mt-0.5">Select an audit to analyze resource distribution</div>
<h2 class="text-lg font-semibold">Audit Analysis</h2>
<div class="text-base text-base-content/60 mt-0.5">Select an audit to analyze resource distribution</div>
</div>
<div class="flex items-center gap-2">
<span class="text-xs text-base-content/50">{{ audits|length }} available</span>
<div class="dropdown dropdown-end">
<span class="text-xs text-base-content/60" id="auditsCount">{% if skeleton %}Loading…{% else %}{{ audits|length }} available{% endif %}</span>
<div class="dropdown dropdown-end no-print">
<label tabindex="0" class="btn btn-xs btn-ghost">
<svg class="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 16h-1v-4h-1m1-4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z"/>
@@ -238,7 +369,10 @@
</div>
<div class="flex flex-col md:flex-row gap-3">
<select id="auditSelector" class="select select-bordered select-sm flex-1">
<select id="auditSelector" class="select select-bordered select-sm flex-1" {% if skeleton %}disabled{% endif %}>
{% if skeleton %}
<option value="">Loading…</option>
{% else %}
{% for audit in audits %}
<option value="{{ audit.id }}"
data-cpu="{{ audit.cpu_weight }}"
@@ -249,58 +383,47 @@
{{ audit.name }} ({{ audit.created_at|date:"M d" }})
</option>
{% endfor %}
{% endif %}
</select>
<button onclick="loadSelectedAudit()" class="btn btn-primary btn-sm">
<button type="button" onclick="loadSelectedAudit()" class="btn btn-primary btn-sm gap-2 no-print">
<svg class="w-4 h-4 shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24" aria-hidden="true">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 7l5 5m0 0l-5 5m5-5H6"/>
</svg>
Load Analysis
</button>
</div>
</div>
</div>
</section>
<!-- ANALYSIS VISUALIZATION -->
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<!-- Current State -->
<section aria-label="CPU distribution chart">
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-4">
<h3 class="text-sm font-semibold mb-4">Current CPU Distribution</h3>
<div class="card-body p-5">
<h3 class="text-lg font-semibold mb-4">CPU Distribution (Current vs Projected)</h3>
<div class="h-48">
<canvas id="cpuHostChart"></canvas>
<canvas id="cpuDistributionChart"></canvas>
</div>
<div class="flex items-center justify-center gap-3 mt-3">
<div class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-success"></div>
<span class="text-success">Mean: <span id="currentCpuMean">0</span>%</span>
</div>
</div>
</div>
</div>
<!-- Projected State -->
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-4">
<h3 class="text-sm font-semibold mb-4">Projected CPU Distribution</h3>
<div class="h-48">
<canvas id="cpuProjectedChart"></canvas>
</div>
<div class="flex items-center justify-center gap-4 mt-3">
<div class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-success"></div>
<span class="text-success">Mean: <span id="projectedCpuMean">0</span>%</span>
</div>
<div class="flex items-center gap-1 text-xs">
<div class="w-3 h-0.5 bg-error/60"></div>
<span class="text-error/60">±1σ: <span id="projectedCpuStd">0</span>%</span>
</div>
<span class="text-error/60">±0.5σ: <span id="currentCpuStd">0</span>%</span>
</div>
</div>
</div>
</div>
</section>
<!-- MIGRATION ACTIONS -->
<section aria-label="Migration actions">
<div class="card bg-base-100 shadow-sm">
<div class="card-body p-4">
<div class="card-body p-6">
<div class="flex items-center justify-between mb-4">
<h3 class="text-sm font-semibold">Migration Actions</h3>
<h3 class="text-lg font-semibold">Migration Actions</h3>
<div class="badge badge-neutral badge-sm" id="migrationCount">Select audit</div>
</div>
@@ -316,7 +439,7 @@
</thead>
<tbody id="migrationTableBody" class="text-sm">
<tr>
<td colspan="4" class="text-center py-6 text-base-content/40">
<td colspan="4" class="text-center py-6 text-base-content/60">
No audit selected. Load an audit to view migration recommendations.
</td>
</tr>
@@ -325,22 +448,17 @@
</div>
</div>
</div>
</section>
</div>
{% endblock %}
{% block script %}
<script>
// Update audit preview
document.getElementById('auditSelector').addEventListener('change', function(e) {
const option = this.options[this.selectedIndex];
document.getElementById('previewCpu').textContent = option.dataset.cpu || '1.0';
document.getElementById('previewRam').textContent = option.dataset.ram || '1.0';
document.getElementById('previewScope').textContent = option.dataset.scope || 'Full Cluster';
document.getElementById('previewStrategy').textContent = option.dataset.strategy || 'Balanced';
});
const SKELETON_MODE = {{ skeleton|yesno:"true,false" }};
const CURRENT_CLUSTER = {% if current_cluster %}{ "host_labels": {{ current_cluster.host_labels|safe }}, "cpu_current": {{ current_cluster.cpu_current|safe }} }{% else %}null{% endif %};
// Audit data
const auditData = {
let auditData = {
{% if not skeleton %}
{% for audit in audits %}
"{{ audit.id }}": {
name: "{{ audit.name }}",
@@ -352,11 +470,134 @@
}
}{% if not forloop.last %},{% endif %}
{% endfor %}
{% endif %}
};
// Chart instances
let cpuHostChart = null;
let cpuProjectedChart = null;
// Keep the audit preview fields in sync with the currently selected
// <option>'s data-* attributes, falling back to display defaults when an
// attribute is absent. Bails out when the select has no options yet.
document.getElementById('auditSelector').addEventListener('change', function(e) {
    const selected = this.options[this.selectedIndex];
    if (!selected) return;
    const preview = {
        previewCpu: selected.dataset.cpu || '1.0',
        previewRam: selected.dataset.ram || '1.0',
        previewScope: selected.dataset.scope || 'Full Cluster',
        previewStrategy: selected.dataset.strategy || 'Balanced'
    };
    Object.keys(preview).forEach(function(id) {
        document.getElementById(id).textContent = preview[id];
    });
});
let cpuDistributionChart = null;
// Write `text` into every element tagged with data-stats=key and clear its
// skeleton-loading pulse animation.
function setStat(key, text) {
    const nodes = document.querySelectorAll('[data-stats="' + key + '"]');
    for (const node of nodes) {
        node.textContent = text;
        node.classList.remove('animate-pulse');
    }
}
// Set the value of every <progress> element tagged with data-stats=key and
// stop its skeleton pulse. Non-progress elements sharing the key are left
// untouched (their text is handled by setStat).
function setProgress(key, value) {
    const nodes = document.querySelectorAll('[data-stats="' + key + '"]');
    for (const node of nodes) {
        if (node.tagName !== 'PROGRESS') continue;
        node.value = value;
        node.classList.remove('animate-pulse');
    }
}
// Fill every data-stats widget on the page from the /api/stats/ payload,
// then clear any remaining skeleton pulse placeholders. Numeric accesses
// are guarded (`a && a.b`) so a partial payload renders zeros/dashes
// instead of throwing.
// NOTE(review): depends on a global formatBytes(bytes, 'GB') helper defined
// elsewhere in this file — presumably it returns a bare number string,
// since the ' GB' suffix is appended here; confirm.
function renderStats(data) {
if (!data) return;
// Shorthand for the FIRST element carrying a given data-stats key — used
// for the single overcommit rows, which receive composed text.
var el = function(k) { return document.querySelector('[data-stats="' + k + '"]'); };
var regionBadge = document.getElementById('regionBadge');
if (regionBadge) regionBadge.textContent = data.region && data.region.name ? data.region.name : '—';
// Physical CPU section.
setStat('pcpu.usage', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1));
setStat('pcpu.total', String((data.pcpu && data.pcpu.total) || 0));
setStat('pcpu.used_percentage', Number((data.pcpu && data.pcpu.used_percentage) || 0).toFixed(1) + '%');
setStat('pcpu.usage_val', Number((data.pcpu && data.pcpu.usage) || 0).toFixed(1) + ' CPU');
setProgress('pcpu.progress', (data.pcpu && data.pcpu.used_percentage) || 0);
setStat('pcpu.free', String((data.pcpu && data.pcpu.free) || 0));
// Physical RAM section — formatBytes converts the raw values to GB text.
var pramUsageGb = formatBytes(data.pram && data.pram.usage, 'GB');
var pramTotalGb = formatBytes(data.pram && data.pram.total, 'GB');
var pramFreeGb = formatBytes(data.pram && data.pram.free, 'GB');
setStat('pram.usage_gb', pramUsageGb);
setStat('pram.total_gb', pramTotalGb);
setStat('pram.used_percentage', Number((data.pram && data.pram.used_percentage) || 0).toFixed(1) + '%');
setStat('pram.usage_gb_val', pramUsageGb + ' GB');
setProgress('pram.progress', (data.pram && data.pram.used_percentage) || 0);
setStat('pram.free_gb', pramFreeGb + ' GB');
// VM counters.
// NOTE(review): unlike the guarded numeric stats above, a missing data.vm
// renders the literal string "undefined"/"false" here — confirm the API
// always includes vm counts.
setStat('vm.active', String(data.vm && data.vm.active));
setStat('vm.stopped', String(data.vm && data.vm.stopped));
setStat('vm.count', String(data.vm && data.vm.count));
setStat('flavors.first_name', data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.name : '—');
setStat('vm.avg_cpu', Number((data.vm && data.vm.avg_cpu) || 0).toFixed(1));
setStat('vm.density', Number((data.vm && data.vm.density) || 0).toFixed(1) + '/host');
// Virtual CPU allocation (with overcommit summary as composed text).
setStat('vcpu.allocated_total', ((data.vcpu && data.vcpu.allocated) || 0) + ' / ' + ((data.vcpu && data.vcpu.total) || 0) + ' vCPU');
setProgress('vcpu.progress', (data.vcpu && data.vcpu.allocated_percentage) || 0);
setStat('vcpu.allocated_percentage', Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '%');
var vcpuOver = el('vcpu.overcommit');
if (vcpuOver) {
vcpuOver.textContent = 'overcommit: ' + Number((data.vcpu && data.vcpu.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vcpu && data.vcpu.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vcpu && data.vcpu.allocated_percentage) || 0).toFixed(1) + '% allocated';
vcpuOver.classList.remove('animate-pulse');
}
// Virtual RAM allocation (mirrors the vCPU section).
var vramAllocGb = formatBytes(data.vram && data.vram.allocated, 'GB');
var vramTotalGb = formatBytes(data.vram && data.vram.total, 'GB');
setStat('vram.allocated_total', vramAllocGb + ' / ' + vramTotalGb + ' GB');
setProgress('vram.progress', (data.vram && data.vram.allocated_percentage) || 0);
setStat('vram.allocated_percentage', Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '%');
var vramOver = el('vram.overcommit');
if (vramOver) {
vramOver.textContent = 'overcommit: ' + Number((data.vram && data.vram.overcommit_ratio) || 0).toFixed(1) + ' / ' + Number((data.vram && data.vram.overcommit_max) || 0).toFixed(1) + ' — ' + Number((data.vram && data.vram.allocated_percentage) || 0).toFixed(1) + '% allocated';
vramOver.classList.remove('animate-pulse');
}
// Top-flavors card: counts plus the leading flavor's share of all VMs.
setStat('flavors.first_count', (data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0) + ' instances');
var vmCount = data.vm && data.vm.count ? data.vm.count : 0;
var firstCount = data.flavors && data.flavors.first_common_flavor ? data.flavors.first_common_flavor.count : 0;
setStat('flavors.first_share', (vmCount ? Math.round(firstCount / vmCount * 100) : 0) + '%');
setStat('flavors.second_name', data.flavors && data.flavors.second_common_flavor ? data.flavors.second_common_flavor.name : '—');
setStat('flavors.second_count', data.flavors && data.flavors.second_common_flavor ? String(data.flavors.second_common_flavor.count) : '—');
setStat('flavors.third_name', data.flavors && data.flavors.third_common_flavor ? data.flavors.third_common_flavor.name : '—');
setStat('flavors.third_count', data.flavors && data.flavors.third_common_flavor ? String(data.flavors.third_common_flavor.count) : '—');
// Safety net: stop the pulse on any placeholder not covered above.
document.querySelectorAll('[data-stats]').forEach(function(n) { n.classList.remove('animate-pulse'); });
}
// Rebuild the global auditData map and the #auditSelector <option> list
// from the /api/audits/ response, then auto-load the first audit so the
// chart and migration table populate without user action.
// Array/object fields (migrations, host_labels, cpu_current, cpu_projected)
// may arrive either as JSON strings or as already-parsed values; both forms
// are accepted here.
function renderAudits(auditsList) {
// Empty or missing list: show a "No audits" placeholder and bail.
if (!auditsList || !auditsList.length) {
var countEl = document.getElementById('auditsCount');
if (countEl) countEl.textContent = '0 available';
var sel = document.getElementById('auditSelector');
if (sel) { sel.disabled = false; sel.innerHTML = '<option value="">No audits</option>'; }
return;
}
// Replace (not merge) the server-rendered audit map.
auditData = {};
auditsList.forEach(function(a) {
auditData[a.id] = {
name: a.name,
migrations: typeof a.migrations === 'string' ? JSON.parse(a.migrations) : a.migrations,
hostData: {
labels: typeof a.host_labels === 'string' ? JSON.parse(a.host_labels) : a.host_labels,
current: typeof a.cpu_current === 'string' ? JSON.parse(a.cpu_current) : a.cpu_current,
projected: typeof a.cpu_projected === 'string' ? JSON.parse(a.cpu_projected) : a.cpu_projected
}
};
});
// Rebuild the selector options; data-* attributes feed the preview fields
// read by the 'change' listener.
var sel = document.getElementById('auditSelector');
if (sel) {
sel.disabled = false;
sel.innerHTML = '';
auditsList.forEach(function(audit) {
var opt = document.createElement('option');
opt.value = audit.id;
opt.setAttribute('data-cpu', audit.cpu_weight || '1.0');
opt.setAttribute('data-ram', audit.ram_weight || '1.0');
opt.setAttribute('data-scope', audit.scope || 'Full Cluster');
opt.setAttribute('data-strategy', audit.strategy || 'Balanced');
opt.setAttribute('data-goal', audit.goal || '');
var dateStr = audit.created_at ? new Date(audit.created_at).toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) : '';
opt.textContent = audit.name + ' (' + dateStr + ')';
sel.appendChild(opt);
});
}
var countEl = document.getElementById('auditsCount');
if (countEl) countEl.textContent = auditsList.length + ' available';
// Fire the preview update and load the first audit's analysis.
if (auditsList.length > 0) {
document.getElementById('auditSelector').dispatchEvent(new Event('change'));
loadSelectedAudit();
}
}
// Load selected audit
function loadSelectedAudit() {
@@ -374,7 +615,7 @@
if (!data || !data.migrations || data.migrations.length === 0) {
tbody.innerHTML = `
<tr>
<td colspan="4" class="text-center py-6 text-base-content/40">
<td colspan="4" class="text-center py-6 text-base-content/60">
No migration actions recommended
</td>
</tr>
@@ -420,105 +661,148 @@
migrationCount.textContent = `${data.migrations.length} action${data.migrations.length !== 1 ? 's' : ''}`;
}
// Update CPU charts
// Update CPU chart (combined current vs projected)
function updateCPUCharts(auditId) {
const data = auditData[auditId];
if (!data || !data.hostData) return;
const currentCtx = document.getElementById('cpuHostChart').getContext('2d');
const projectedCtx = document.getElementById('cpuProjectedChart').getContext('2d');
const ctx = document.getElementById('cpuDistributionChart').getContext('2d');
// Calculate statistics
const currentStats = calculateStats(data.hostData.current);
const projectedStats = calculateStats(data.hostData.projected);
// Update stats displays
document.getElementById('currentCpuMean').textContent = currentStats.mean.toFixed(1);
document.getElementById('projectedCpuMean').textContent = projectedStats.mean.toFixed(1);
document.getElementById('projectedCpuStd').textContent = projectedStats.std.toFixed(1);
document.getElementById('currentCpuStd').textContent = (currentStats.std * 0.5).toFixed(1);
// Destroy existing charts
if (cpuHostChart) cpuHostChart.destroy();
if (cpuProjectedChart) cpuProjectedChart.destroy();
if (cpuDistributionChart) cpuDistributionChart.destroy();
// Chart colors
const colors = {
primary: getCSSVar('--color-primary'),
secondary: getCSSVar('--color-secondary'),
accent: getCSSVar('--color-accent'),
neutral: getCSSVar('-color-neutral'),
neutral: getCSSVar('--color-neutral'),
info: getCSSVar('--color-info'),
success: getCSSVar('--color-success'),
warning: getCSSVar('--color-warning'),
error: getCSSVar('--color-error')
};
const textColor = getCSSVar('--color-base-content');
const gridColor = getCSSVar('--chart-grid-color') || textColor;
// Create current CPU chart
cpuHostChart = new Chart(currentCtx, {
cpuDistributionChart = new Chart(ctx, {
type: 'bar',
data: {
labels: data.hostData.labels,
datasets: [{
label: 'CPU %',
data: data.hostData.current,
datasets: [
{
label: 'Current',
data: data.hostData.current.slice(),
backgroundColor: colors.info + '40',
borderColor: colors.info,
borderWidth: 1,
borderRadius: 3
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
plugins: {
legend: { display: false },
tooltip: {
callbacks: {
label: (ctx) => `${ctx.parsed.y}% CPU`
}
}
},
scales: {
y: {
beginAtZero: true,
max: 100,
grid: { drawBorder: false },
ticks: {
callback: value => value + '%'
}
},
x: {
grid: { display: false },
ticks: {
maxRotation: 45
}
}
}
}
});
// Create projected CPU chart
cpuProjectedChart = new Chart(projectedCtx, {
type: 'bar',
data: {
labels: data.hostData.labels,
datasets: [{
label: 'Projected CPU %',
data: data.hostData.projected,
{
label: 'Projected',
data: data.hostData.projected.slice(),
backgroundColor: colors.warning + '40',
borderColor: colors.warning,
borderWidth: 1,
borderRadius: 3
}]
}
]
},
options: {
responsive: true,
maintainAspectRatio: false,
animation: {
onComplete: function() {
var chart = this;
if (typeof chart.getDatasetMeta !== 'function') chart = chart.chart;
if (!chart || chart._hidingDataset === undefined) return;
var i = chart._hidingDataset;
chart.getDatasetMeta(i).hidden = true;
chart.data.datasets[i].data = chart._cpuOriginalData[i].slice();
delete chart._hidingDataset;
chart.update('none');
}
},
plugins: {
legend: { display: false },
legend: {
display: true,
position: 'top',
align: 'center',
onClick: function(e, legendItem, legend) {
const i = legendItem.datasetIndex;
const chart = legend.chart;
const len = chart.data.labels.length;
if (chart.isDatasetVisible(i)) {
chart._hidingDataset = i;
chart.data.datasets[i].data = Array(len).fill(0);
chart.update();
} else {
chart.data.datasets[i].data = Array(len).fill(0);
chart.show(i);
chart.update('none');
chart.data.datasets[i].data = chart._cpuOriginalData[i].slice();
chart.update();
}
},
labels: {
usePointStyle: true,
pointStyle: 'rect',
boxWidth: 14,
boxHeight: 14,
padding: 12,
color: textColor,
generateLabels: function(chart) {
const datasets = chart.data.datasets;
const labelColor = getCSSVar('--color-base-content');
return datasets.map(function(ds, i) {
return {
text: ds.label,
fillStyle: ds.borderColor,
strokeStyle: ds.borderColor,
lineWidth: 1,
fontColor: labelColor,
color: labelColor,
hidden: !chart.isDatasetVisible(i),
datasetIndex: i
};
});
}
}
},
tooltip: {
callbacks: {
label: (ctx) => `${ctx.parsed.y}% CPU`
label: (ctx) => `${ctx.dataset.label}: ${Number(ctx.parsed.y).toFixed(2)}% CPU`
}
},
annotation: {
annotations: {
MeanLine: {
type: 'line',
yMin: currentStats.mean.toFixed(1),
yMax: currentStats.mean.toFixed(1),
borderColor: colors.success,
borderWidth: 2,
borderDash: []
},
upperStdLine: {
type: 'line',
yMin: (currentStats.mean + currentStats.std * 0.5).toFixed(1),
yMax: (currentStats.mean + currentStats.std * 0.5).toFixed(1),
borderColor: colors.error,
borderWidth: 1,
borderDash: [5, 5]
},
lowerStdLine: {
type: 'line',
yMin: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0,
yMax: currentStats.mean > currentStats.std * 0.5 ? (currentStats.mean - currentStats.std * 0.5).toFixed(1) : 0,
borderColor: colors.error,
borderWidth: 1,
borderDash: [5, 5]
}
}
}
},
@@ -526,20 +810,27 @@
y: {
beginAtZero: true,
max: 100,
grid: { drawBorder: false },
grid: { drawBorder: false, color: gridColor },
ticks: {
color: textColor,
callback: value => value + '%'
}
},
x: {
grid: { display: false },
ticks: {
maxRotation: 45
}
display: false
},
barPercentage: 1,
categoryPercentage: 0.85
}
}
}
});
cpuDistributionChart._cpuOriginalData = [
data.hostData.current.slice(),
data.hostData.projected.slice()
];
}
// Utility functions
@@ -550,12 +841,58 @@
return { mean, std };
}
// Initialize
document.addEventListener('DOMContentLoaded', () => {
const initialAudit = "{{ audits.0.id|default:'' }}";
// Page bootstrap. In skeleton mode the page was rendered empty and all data
// is fetched client-side; otherwise Django already rendered the audits
// inline and we only trigger the initial chart load.
document.addEventListener('DOMContentLoaded', function() {
if (SKELETON_MODE) {
// Fetch stats and audits in parallel; a non-OK response rejects the pair.
Promise.all([
fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); }),
fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : Promise.reject(r); })
]).then(function(results) {
renderStats(results[0]);
renderAudits(results[1].audits);
// No audits: fall back to charting the live cluster state, using the
// current CPU series for both datasets (i.e. a no-op "projection").
if (!results[1].audits || results[1].audits.length === 0) {
var cc = results[1].current_cluster;
if (cc && cc.host_labels && cc.cpu_current && cc.host_labels.length) {
auditData["current"] = {
hostData: {
labels: cc.host_labels,
current: cc.cpu_current,
projected: cc.cpu_current
}
};
updateCPUCharts('current');
}
}
}).catch(function(err) {
// One endpoint failed: surface the status, then retry each endpoint
// independently so a partial outage still populates what it can.
var msg = err.status ? 'Failed to load data (' + err.status + ')' : 'Failed to load data';
var countEl = document.getElementById('auditsCount');
if (countEl) countEl.textContent = msg;
fetch('/api/stats/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d) renderStats(d); });
fetch('/api/audits/').then(function(r) { return r.ok ? r.json() : null; }).then(function(d) { if (d && d.audits) renderAudits(d.audits); });
});
} else {
// Server-rendered path: the first audit id (if any) is baked in by the
// Django template at render time.
var initialAudit = "{% if audits %}{{ audits.0.id }}{% endif %}";
if (initialAudit && auditData[initialAudit]) {
document.getElementById('auditSelector').dispatchEvent(new Event('change'));
loadSelectedAudit();
} else if (!initialAudit && CURRENT_CLUSTER && CURRENT_CLUSTER.host_labels && CURRENT_CLUSTER.host_labels.length) {
// Same no-audit fallback as above, fed from the inlined cluster data.
auditData["current"] = {
hostData: {
labels: CURRENT_CLUSTER.host_labels,
current: CURRENT_CLUSTER.cpu_current,
projected: CURRENT_CLUSTER.cpu_current
}
};
updateCPUCharts('current');
}
}
});
// Rebuild the CPU chart whenever the theme toggles: chart colors are
// resolved from CSS variables at construction time, so the existing chart
// instance must be destroyed and recreated to pick up the new palette.
document.addEventListener('themechange', function() {
    if (!cpuDistributionChart) return;
    const selectedAudit = document.getElementById('auditSelector').value;
    cpuDistributionChart.destroy();
    cpuDistributionChart = null;
    if (selectedAudit) updateCPUCharts(selectedAudit);
});
</script>
@@ -564,10 +901,10 @@
{% block css %}
<style>
.progress {
@apply h-1.5;
@apply h-2 rounded-full;
}
.table td, .table th {
@apply px-4 py-2;
@apply px-4 py-3;
}
.badge-xs {
@apply px-1.5 py-0.5 text-xs;

View File

@@ -11,6 +11,6 @@ import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'watcher_visio.settings')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "watcher_visio.settings")
application = get_asgi_application()

View File

@@ -10,88 +10,95 @@ For the full list of settings and their values, see
https://docs.djangoproject.com/en/5.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Use mock data when no OpenStack/Prometheus access (e.g. local dev)
USE_MOCK_DATA = os.environ.get("USE_MOCK_DATA", "false").lower() in ("1", "true", "yes")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/5.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-747*14ir*49hoo6c2225)kxr%4^am0ub_s-m^_7i4cctu)v$g8'
SECRET_KEY = "django-insecure-747*14ir*49hoo6c2225)kxr%4^am0ub_s-m^_7i4cctu)v$g8"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dashboard',
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"dashboard",
]
# Prometheus settings (environment override recommended)
PROMETHEUS_URL = "http://localhost:9090"
PROMETHEUS_URL = "http://10.226.74.53:9090/"
# PromQL expressions the dashboard uses to read per-domain load from the
# Prometheus libvirt exporter. Both are evaluated over a 5-minute window.
PROMETHEUS_METRICS = {
    # CPU: per-second rate of the cumulative cpu-time counter. The range
    # selector must be inside the rate() argument — rate() takes a range
    # vector, so `rate(metric)[300s]` is invalid PromQL.
    "cpu_usage": "rate(libvirt_domain_info_cpu_time_seconds_total[300s])",
    # RAM: average memory usage over the window (closing paren was missing).
    "ram_usage": "avg_over_time(libvirt_domain_info_memory_usage_bytes[300s])",
}
# Openstack cloud settings
OPENSTACK_REGION_NAME = "default"
OPENSTACK_CLOUD = "default"
OPENSTACK_REGION_NAME = "cl2k1distlab"
OPENSTACK_CLOUD = "distlab"
# Openstack watcher endpoint settings
WATCHER_ENDPOINT_NAME = "infra-optim"
WATCHER_INTERFACE_NAME = "public"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = 'watcher_visio.urls'
# COOP ignored on non-HTTPS / non-localhost; disable to avoid console warning
SECURE_CROSS_ORIGIN_OPENER_POLICY = None
ROOT_URLCONF = "watcher_visio.urls"
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [BASE_DIR / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = 'watcher_visio.wsgi.application'
WSGI_APPLICATION = "watcher_visio.wsgi.application"
# Database
# https://docs.djangoproject.com/en/5.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
@@ -101,16 +108,16 @@ DATABASES = {
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
@@ -118,9 +125,9 @@ AUTH_PASSWORD_VALIDATORS = [
# Internationalization
# https://docs.djangoproject.com/en/5.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = "en-us"
TIME_ZONE = 'UTC'
TIME_ZONE = "UTC"
USE_I18N = True
@@ -130,7 +137,7 @@ USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/5.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_URL = "/static/"
STATICFILES_DIRS = [
BASE_DIR / "static",
@@ -141,4 +148,14 @@ STATIC_ROOT = BASE_DIR / "staticfiles"
# Default primary key field type
# https://docs.djangoproject.com/en/5.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Dashboard cache (reduces load on OpenStack/Prometheus and allows concurrent users)
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "watcher-visio-dashboard",
}
}
DASHBOARD_CACHE_TTL = 120 # seconds
SOURCE_STATUS_CACHE_TTL = 30 # seconds (lightweight source-status checks)

View File

@@ -14,10 +14,17 @@ Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from django.urls import include, path
from django.views.generic import RedirectView
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('dashboard.urls')),
path("admin/", admin.site.urls),
path(
"favicon.ico",
RedirectView.as_view(url=settings.STATIC_URL + "favicon.ico", permanent=False),
),
path("", include("dashboard.urls")),
]

View File

@@ -11,6 +11,6 @@ import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'watcher_visio.settings')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "watcher_visio.settings")
application = get_wsgi_application()