Refactor dashboard data serialization and mock context for improved clarity
Some checks failed
CI / ci (push) Has been cancelled

- Introduced `serialize_audit_for_response` and `serialize_current_cluster_for_template` functions to handle JSON serialization of audit and cluster data, enhancing data consistency for API responses and template rendering.
- Updated `get_mock_context` in `mock_data.py` to utilize the new serialization functions, simplifying the mock data structure and improving readability.
- Refactored `collect_context` and `collect_audits` in `views.py` to leverage the new serialization methods, ensuring a cleaner and more maintainable codebase.
- Added unit tests for the new serialization functions to ensure correctness and reliability of data formatting.
This commit is contained in:
2026-02-12 20:10:09 +03:00
parent 76eae52d2a
commit 656a6bfac4
8 changed files with 313 additions and 90 deletions

View File

@@ -1,6 +1,9 @@
"""Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus)."""
import json
from dashboard.serializers import (
serialize_audit_for_response,
serialize_current_cluster_for_template,
)
def get_mock_context():
@@ -38,7 +41,7 @@ def get_mock_context():
cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 32.1, 36.4, 29.2, 42.2, 41.3, 28.3, 33.3]
cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0, 43.0, 43.0, 44.0, 48.0, 47.0, 49.0]
audits = [
audits_raw = [
{
"id": "mock-audit-uuid-1",
"name": "Mock audit (balanced)",
@@ -49,20 +52,18 @@ def get_mock_context():
"scope": "Full Cluster",
"cpu_weight": "1.0",
"ram_weight": "1.0",
"migrations": json.dumps(
[
{
"instanceName": "instance-1",
"source": "compute-0",
"destination": "compute-3",
"flavor": "m1.small",
"impact": "Low",
}
]
),
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
"cpu_projected": json.dumps(cpu_projected),
"migrations": [
{
"instanceName": "instance-1",
"source": "compute-0",
"destination": "compute-3",
"flavor": "m1.small",
"impact": "Low",
}
],
"host_labels": host_labels,
"cpu_current": cpu_current,
"cpu_projected": cpu_projected,
},
{
"id": "mock-audit-uuid-2",
@@ -74,12 +75,13 @@ def get_mock_context():
"scope": "Full Cluster",
"cpu_weight": "1.0",
"ram_weight": "1.0",
"migrations": json.dumps([]),
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
"cpu_projected": json.dumps([40.0, 42.0, 50.0, 43.0, 36.0, 45.0]),
"migrations": [],
"host_labels": host_labels,
"cpu_current": cpu_current,
"cpu_projected": [40.0, 42.0, 50.0, 43.0, 36.0, 45.0],
},
]
audits = [serialize_audit_for_response(a) for a in audits_raw]
return {
"region": {
@@ -128,8 +130,7 @@ def get_mock_context():
"third_common_flavor": {"name": "m1.large", "count": 4},
},
"audits": audits,
"current_cluster": {
"host_labels": json.dumps(host_labels),
"cpu_current": json.dumps(cpu_current),
},
"current_cluster": serialize_current_cluster_for_template(
{"host_labels": host_labels, "cpu_current": cpu_current}
),
}

View File

@@ -49,41 +49,81 @@ def get_current_cluster_cpu(connection: Connection) -> dict:
}
def _fetch_audits_and_action_plans(session, watcher_endpoint):
"""GET audits and action_plans from Watcher API. Returns (audits_list, action_plans_list)."""
audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
audits_resp.raise_for_status()
audits_list = audits_resp.json().get("audits") or []
actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
actionplans_resp.raise_for_status()
action_plans_list = actionplans_resp.json().get("action_plans") or []
return audits_list, action_plans_list
def _fetch_migrations_for_audit(
connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp
):
"""
Fetch action details for the given action plan and build migrations list and
instance->destination mapping. Returns (migrations, mapping).
"""
migrations = []
mapping = {}
for action in actions_resp:
action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
action_resp.raise_for_status()
action_resp = action_resp.json()
server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"])
params = action_resp["input_parameters"]
mapping[params["resource_name"]] = params["destination_node"]
migrations.append(
{
"instanceName": params["resource_name"],
"source": params["source_node"],
"destination": params["destination_node"],
"flavor": server.flavor.name,
"impact": "Low",
}
)
return migrations, mapping
def _build_projected_cpu_metrics(cpu_data, mapping):
    """Return projected CPU metrics after applying instance relocations.

    For every entry whose ``metric["instanceName"]`` appears in *mapping*,
    the host is replaced with the mapped destination, then the result is
    aggregated with ``convert_cpu_data`` (host, cpu_usage).

    Builds brand-new entry/metric dicts instead of mutating the input:
    the previous implementation used ``copy(cpu_data)`` — a shallow copy —
    so assigning ``entry["metric"]["host"]`` mutated the metric dicts
    shared with the caller's ``cpu_data`` and corrupted the projections of
    every subsequent audit in the caller's loop.
    """
    projected_cpu_data = []
    for entry in cpu_data:
        metric = dict(entry["metric"])
        if (instance := metric["instanceName"]) in mapping:
            metric["host"] = mapping[instance]
        projected_entry = dict(entry)
        projected_entry["metric"] = metric
        projected_cpu_data.append(projected_entry)
    return convert_cpu_data(projected_cpu_data)
def get_audits(connection: Connection) -> list[dict] | None:
session = connection.session
watcher_endpoint = connection.endpoint_for(
service_type=WATCHER_ENDPOINT_NAME, interface=WATCHER_INTERFACE_NAME
)
# Collect instances prometheus metrics
cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
cpu_metrics = convert_cpu_data(data=cpu_data)
# Fetch audit list
audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
audits_resp.raise_for_status()
audits_resp = audits_resp.json().get("audits") or []
# Fetch action plan list
actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
actionplans_resp.raise_for_status()
actionplans_resp = actionplans_resp.json().get("action_plans") or []
# Filtering audits by PENDING state
pending_audits = [plan for plan in actionplans_resp if plan["state"] == "RECOMMENDED"]
_, action_plans_list = _fetch_audits_and_action_plans(session, watcher_endpoint)
pending_audits = [plan for plan in action_plans_list if plan["state"] == "RECOMMENDED"]
result = []
for item in pending_audits:
projected_cpu_data = copy(cpu_data)
audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}")
audit_resp.raise_for_status()
audit_resp = audit_resp.json()
actionplan = next(
filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], actionplans_resp), None
filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], action_plans_list), None
)
if actionplan is None:
continue
@@ -94,32 +134,10 @@ def get_audits(connection: Connection) -> list[dict] | None:
actions_resp.raise_for_status()
actions_resp = actions_resp.json().get("actions") or []
migrations = []
mapping = {}
for action in actions_resp:
action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
action_resp.raise_for_status()
action_resp = action_resp.json()
server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"])
params = action_resp["input_parameters"]
mapping[params["resource_name"]] = params["destination_node"]
migrations.append(
{
"instanceName": action_resp["input_parameters"]["resource_name"],
"source": action_resp["input_parameters"]["source_node"],
"destination": action_resp["input_parameters"]["destination_node"],
"flavor": server.flavor.name,
"impact": "Low",
}
)
for entry in projected_cpu_data:
if (instance := entry["metric"]["instanceName"]) in mapping:
entry["metric"]["host"] = mapping[instance]
projected_cpu_metrics = convert_cpu_data(projected_cpu_data)
migrations, mapping = _fetch_migrations_for_audit(
connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp
)
projected_cpu_metrics = _build_projected_cpu_metrics(cpu_data, mapping)
result.append(
{

32
dashboard/serializers.py Normal file
View File

@@ -0,0 +1,32 @@
"""Serialization helpers for dashboard context and API responses."""
import json
def _ensure_json_str(value):
"""Return value as JSON string; if already a string, return as-is."""
return value if isinstance(value, str) else json.dumps(value)
def serialize_audit_for_response(audit: dict) -> dict:
    """Return a shallow copy of *audit* ready for template/API embedding.

    The migrations, host_labels, cpu_current and cpu_projected fields are
    converted to JSON strings; values that are already strings are kept
    as-is. All other keys pass through unchanged.
    """
    serialized = dict(audit)
    for field in ("migrations", "host_labels", "cpu_current", "cpu_projected"):
        serialized[field] = _ensure_json_str(audit.get(field))
    return serialized
def serialize_current_cluster_for_template(current_cluster: dict) -> dict:
    """Return current_cluster with JSON-string fields for page embedding.

    host_labels and cpu_current are JSON-encoded (unless already strings)
    so they can be embedded directly in the index template.
    """
    return {
        field: _ensure_json_str(current_cluster.get(field))
        for field in ("host_labels", "cpu_current")
    }

View File

@@ -0,0 +1,57 @@
"""Tests for dashboard.serializers."""
import json
from django.test import TestCase
from dashboard.serializers import (
serialize_audit_for_response,
serialize_current_cluster_for_template,
)
class SerializeAuditForResponseTest(TestCase):
    """Tests for serialize_audit_for_response."""

    # The four fields that must come back as JSON strings.
    JSON_FIELDS = ("migrations", "host_labels", "cpu_current", "cpu_projected")

    def test_serializes_list_fields_to_json_strings(self):
        audit = {
            "id": "audit-1",
            "name": "Test",
            "migrations": [{"instanceName": "i1", "source": "h1", "destination": "h2"}],
            "host_labels": ["h1", "h2"],
            "cpu_current": [10.0, 20.0],
            "cpu_projected": [15.0, 25.0],
        }
        result = serialize_audit_for_response(audit)
        # Scalar fields pass through untouched.
        self.assertEqual(result["id"], "audit-1")
        self.assertEqual(result["name"], "Test")
        # List fields become JSON strings that round-trip to the originals.
        for field in self.JSON_FIELDS:
            self.assertEqual(json.loads(result[field]), audit[field])

    def test_leaves_already_serialized_strings_unchanged(self):
        audit = {
            "id": "a",
            "migrations": "[1,2]",
            "host_labels": "[]",
            "cpu_current": "[0]",
            "cpu_projected": "[0]",
        }
        result = serialize_audit_for_response(audit)
        # Already-string fields are returned byte-for-byte.
        for field in self.JSON_FIELDS:
            self.assertEqual(result[field], audit[field])
class SerializeCurrentClusterForTemplateTest(TestCase):
    """Tests for serialize_current_cluster_for_template."""

    def test_serializes_lists_to_json_strings(self):
        cluster = {"host_labels": ["c0", "c1"], "cpu_current": [30.0, 40.0]}
        result = serialize_current_cluster_for_template(cluster)
        # Both fields become JSON strings that round-trip to the originals.
        for field in ("host_labels", "cpu_current"):
            self.assertEqual(json.loads(result[field]), cluster[field])

    def test_leaves_already_serialized_strings_unchanged(self):
        cluster = {"host_labels": "[]", "cpu_current": "[]"}
        result = serialize_current_cluster_for_template(cluster)
        self.assertEqual(result["host_labels"], "[]")
        self.assertEqual(result["cpu_current"], "[]")

View File

@@ -1,5 +1,3 @@
import json
from django.conf import settings
from django.core.cache import cache
from django.http import JsonResponse
@@ -10,6 +8,10 @@ from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu
from dashboard.openstack_utils.connect import check_openstack, get_connection
from dashboard.openstack_utils.flavor import get_flavor_list
from dashboard.prometheus_utils.query import check_prometheus, fetch_dashboard_metrics
from dashboard.serializers import (
serialize_audit_for_response,
serialize_current_cluster_for_template,
)
from dashboard.stats import (
CACHE_KEY_AUDITS,
CACHE_KEY_CURRENT_CLUSTER,
@@ -44,17 +46,9 @@ def collect_context():
audits = get_audits(connection=connection)
metrics = fetch_dashboard_metrics()
context = build_stats(metrics, region_name, flavors)
context["audits"] = audits
context["audits"] = [serialize_audit_for_response(a) for a in audits]
current_cluster = get_current_cluster_cpu(connection)
context["current_cluster"] = {
"host_labels": json.dumps(current_cluster["host_labels"]),
"cpu_current": json.dumps(current_cluster["cpu_current"]),
}
for audit in context["audits"]:
audit["migrations"] = json.dumps(audit["migrations"])
audit["host_labels"] = json.dumps(audit["host_labels"])
audit["cpu_current"] = json.dumps(audit["cpu_current"])
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
context["current_cluster"] = serialize_current_cluster_for_template(current_cluster)
return context
@@ -71,12 +65,7 @@ def collect_audits():
"""Build audits list with serialized fields for frontend."""
connection = get_connection()
audits = get_audits(connection=connection)
for audit in audits:
audit["migrations"] = json.dumps(audit["migrations"])
audit["host_labels"] = json.dumps(audit["host_labels"])
audit["cpu_current"] = json.dumps(audit["cpu_current"])
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
return audits
return [serialize_audit_for_response(a) for a in audits]
def _skeleton_context():

80
docs/api_context.md Normal file
View File

@@ -0,0 +1,80 @@
# Dashboard API and context contract
This document describes the structure of data passed to the index template and returned by the dashboard API endpoints. Cache keys are defined in `dashboard/stats.py`.
## Index page context (server-rendered)
When the index is rendered with full data (e.g. `USE_MOCK_DATA=True` or after JS loads from API), the template receives a context with these top-level keys:
| Key | Description |
|-----|-------------|
| `region` | `{ "name": str, "hosts_total": int }` |
| `pcpu` | Physical CPU: `total`, `usage`, `free`, `used_percentage` |
| `vcpu` | Virtual CPU: `total`, `allocated`, `free`, `allocated_percentage`, `overcommit_ratio`, `overcommit_max` |
| `pram` | Physical RAM (bytes): `total`, `usage`, `free`, `used_percentage` |
| `vram` | Virtual RAM (bytes): `total`, `allocated`, `free`, `allocated_percentage`, `overcommit_ratio`, `overcommit_max` |
| `vm` | VMs: `count`, `active`, `stopped`, `avg_cpu`, `avg_ram`, `density` |
| `flavors` | `first_common_flavor`, `second_common_flavor`, `third_common_flavor` — each `{ "name": str, "count": int }` or `None`. The `name` may be a human-readable flavor name or a flavor UUID depending on OpenStack. |
| `audits` | List of audit objects (see below). For template, `migrations`, `host_labels`, `cpu_current`, `cpu_projected` are JSON strings. |
| `current_cluster` | `{ "host_labels": str (JSON array), "cpu_current": str (JSON array) }` for embedding in the page. |
| `skeleton` | Optional boolean; when true, stats placeholders are shown and data is loaded via API. |
## Single audit object (for template / API response)
When serialized for the template or for `api/audits`, each audit has:
| Field | Type | Description |
|-------|------|-------------|
| `id` | str | Audit UUID |
| `name` | str | Audit name |
| `created_at` | str | ISO 8601 datetime |
| `strategy` | str | Strategy name |
| `goal` | str | Goal name |
| `type` | str | e.g. `ONESHOT` |
| `scope` | str | e.g. `Full Cluster` |
| `cpu_weight` | str | Weight parameter |
| `ram_weight` | str | Weight parameter |
| `migrations` | str (template) / list (API raw) | JSON string of migration list, or list of `{ instanceName, source, destination, flavor, impact }` |
| `host_labels` | str (template) / list (API raw) | JSON string of host names, or list |
| `cpu_current` | str (template) / list (API raw) | JSON string of CPU usage per host, or list of numbers |
| `cpu_projected` | str (template) / list (API raw) | JSON string of projected CPU per host, or list of numbers |
For the **index template**, `migrations`, `host_labels`, `cpu_current`, and `cpu_projected` are always JSON strings so they can be embedded in the page. For **api/audits**, `audits` are returned with these four fields as JSON strings (same as template). The **current_cluster** in the API response uses raw lists (see below).
## GET /api/stats/
Returns a JSON object with the same keys as the index context, **excluding** `audits`, `current_cluster`, and `skeleton`: `region`, `pcpu`, `vcpu`, `pram`, `vram`, `vm`, `flavors`. All numeric values are numbers; sizes are in bytes where applicable.
## GET /api/audits/
Returns:
```json
{
"audits": [ /* list of audit objects with migrations, host_labels, cpu_current, cpu_projected as JSON strings */ ],
"current_cluster": {
"host_labels": [ "compute-0", "compute-1", ... ],
"cpu_current": [ 30.5, 42.1, ... ]
}
}
```
Here `audits` use the same serialized form as the template (JSON strings for list fields). The `current_cluster` is returned with **raw lists** (not JSON strings) so the frontend can use them directly without parsing.
## GET /api/source-status/
Returns:
```json
{
"prometheus": { "status": "ok" | "error" | "mock", "message"?: "..." },
"openstack": { "status": "ok" | "error" | "mock", "message"?: "..." }
}
```
## Cache keys (dashboard/stats.py)
- `CACHE_KEY_STATS` — stats for `/api/stats/`
- `CACHE_KEY_AUDITS` — serialized audits list
- `CACHE_KEY_CURRENT_CLUSTER` — raw current_cluster (host_labels, cpu_current lists)
- `CACHE_KEY_SOURCE_STATUS` — source status result

View File

@@ -1,11 +1,29 @@
/**
* Dashboard logic: stats rendering, audit selector, CPU chart, migration table.
* Expects globals: SKELETON_MODE, CURRENT_CLUSTER, auditData (set by index.html).
* Depends on: utils.js (formatBytes, getCSSVar, calculateStats)
*
* Expected globals (set by index.html / inline script):
* - SKELETON_MODE (boolean): whether to fetch data from API instead of using embedded context
* - CURRENT_CLUSTER: { host_labels, cpu_current } for "current" cluster chart when no audits
* - auditData: object keyed by audit id, each value { name, migrations, hostData: { labels, current, projected } }
* - INITIAL_AUDIT_ID: first audit id to select when not in skeleton mode
*
* Required DOM element ids:
* - auditSelector, previewCpu, previewRam, previewScope, previewStrategy
* - regionBadge, auditsCount, migrationTableBody, migrationCount, cpuDistributionChart
* - currentCpuMean, currentCpuStd, currentCpuStdBlock
* - elements with data-stats="..." for renderStats()
*
* Depends on: utils.js (formatBytes, getCSSVar, calculateStats, escapeHtml, formatAuditDate)
*/
(function() {
var cpuDistributionChart = null;
var escapeHtml = typeof window.escapeHtml === 'function' ? window.escapeHtml : function(text) {
if (text == null) return '';
var s = String(text);
return s.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;').replace(/'/g, '&#39;');
};
// --- Initialization: audit selector change (preview panel) ---
document.getElementById('auditSelector').addEventListener('change', function(e) {
var option = this.options[this.selectedIndex];
if (!option) return;
@@ -15,6 +33,7 @@
document.getElementById('previewStrategy').textContent = option.dataset.strategy || 'Balanced';
});
// --- Stats: setStat, setProgress, renderStats ---
function setStat(key, text) {
document.querySelectorAll('[data-stats="' + key + '"]').forEach(function(el) {
el.textContent = text;
@@ -85,6 +104,7 @@
document.querySelectorAll('[data-stats]').forEach(function(n) { n.classList.remove('animate-pulse'); });
}
// --- Audits: renderAudits, loadSelectedAudit ---
function renderAudits(auditsList) {
if (!auditsList || !auditsList.length) {
var countEl = document.getElementById('auditsCount');
@@ -117,7 +137,7 @@
opt.setAttribute('data-scope', audit.scope || 'Full Cluster');
opt.setAttribute('data-strategy', audit.strategy || 'Balanced');
opt.setAttribute('data-goal', audit.goal || '');
var dateStr = audit.created_at ? new Date(audit.created_at).toLocaleDateString('en-US', { month: 'short', day: 'numeric' }) : '';
var dateStr = formatAuditDate(audit.created_at);
opt.textContent = audit.name + ' (' + dateStr + ')';
sel.appendChild(opt);
});
@@ -136,6 +156,7 @@
updateCPUCharts(auditId);
};
// --- Migration table: updateMigrationTable ---
function updateMigrationTable(auditId) {
var tbody = document.getElementById('migrationTableBody');
var migrationCount = document.getElementById('migrationCount');
@@ -151,12 +172,13 @@
data.migrations.forEach(function(migration) {
var impact = migration.impact || 'Low';
var impactClass = { 'Low': 'badge-success', 'Medium': 'badge-warning', 'High': 'badge-error' }[impact] || 'badge-neutral';
html += '<tr><td class="font-medium"><div>' + migration.instanceName + '</div></td><td><div class="flex items-center gap-2"><span class="badge badge-outline badge-xs">' + migration.source + '</span><svg class="w-3 h-3 text-base-content/30" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 7l5 5m0 0l-5 5m5-5H6"/></svg><span class="badge badge-primary badge-outline badge-xs">' + migration.destination + '</span></div></td><td><code class="text-xs bg-base-200 px-2 py-1 rounded">' + migration.flavor + '</code></td><td><span class="badge ' + impactClass + ' badge-xs">' + impact + '</span></td></tr>';
html += '<tr><td class="font-medium"><div>' + escapeHtml(migration.instanceName) + '</div></td><td><div class="flex items-center gap-2"><span class="badge badge-outline badge-xs">' + escapeHtml(migration.source) + '</span><svg class="w-3 h-3 text-base-content/30" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 7l5 5m0 0l-5 5m5-5H6"/></svg><span class="badge badge-primary badge-outline badge-xs">' + escapeHtml(migration.destination) + '</span></div></td><td><code class="text-xs bg-base-200 px-2 py-1 rounded">' + escapeHtml(migration.flavor) + '</code></td><td><span class="badge ' + impactClass + ' badge-xs">' + escapeHtml(impact) + '</span></td></tr>';
});
tbody.innerHTML = html;
migrationCount.textContent = data.migrations.length + ' action' + (data.migrations.length !== 1 ? 's' : '');
}
// --- CPU charts: updateCPUCharts ---
function updateCPUCharts(auditId) {
var data = window.auditData && window.auditData[auditId];
if (!data || !data.hostData) return;
@@ -289,6 +311,7 @@
: [ data.hostData.current.slice() ];
}
// --- Initialization: DOMContentLoaded (skeleton vs embedded data) ---
document.addEventListener('DOMContentLoaded', function() {
if (typeof SKELETON_MODE !== 'undefined' && SKELETON_MODE) {
Promise.all([
@@ -325,6 +348,7 @@
}
});
// --- Initialization: theme change (recreate chart) ---
document.addEventListener('themechange', function() {
if (cpuDistributionChart) {
var auditId = document.getElementById('auditSelector').value;

View File

@@ -1,3 +1,15 @@
// Escape for safe HTML text content (prevents XSS when inserting into HTML).
// null/undefined yield ''; non-strings are coerced with String().
function escapeHtml(text) {
  if (text == null) return '';
  const entities = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&#39;'
  };
  // Single pass over the string; each special char maps to its entity.
  return String(text).replace(/[&<>"']/g, function (ch) {
    return entities[ch];
  });
}
// Format bytes to GB (matches Django convert_bytes filter default)
function formatBytes(bytes, targetUnit = 'GB') {
if (bytes == null || isNaN(Number(bytes))) return '0';
@@ -24,6 +36,16 @@ function getColorWithOpacity(className) {
return computedColor;
}
// Format audit date for display (ISO string -> short date, e.g. "Feb 1").
// Returns '' for missing or unparseable input.
function formatAuditDate(isoString) {
  if (!isoString) return '';
  const date = new Date(isoString);
  // Date parsing never throws on bad input — it produces an "Invalid Date"
  // whose getTime() is NaN — so the old try/catch was dead code and garbage
  // input rendered as the literal text "Invalid Date". Validate explicitly.
  if (isNaN(date.getTime())) return '';
  return date.toLocaleDateString('en-US', { month: 'short', day: 'numeric' });
}
// Utility function to calculate mean and standard deviation
function calculateStats(data) {
if (!data || data.length === 0) return { mean: 0, std: 0 };