Refactor dashboard data serialization and mock context for improved clarity
Some checks failed
CI / ci (push) Has been cancelled
- Introduced `serialize_audit_for_response` and `serialize_current_cluster_for_template` functions to handle JSON serialization of audit and cluster data, enhancing data consistency for API responses and template rendering.
- Updated `get_mock_context` in `mock_data.py` to utilize the new serialization functions, simplifying the mock data structure and improving readability.
- Refactored `collect_context` and `collect_audits` in `views.py` to leverage the new serialization methods, ensuring a cleaner and more maintainable codebase.
- Added unit tests for the new serialization functions to ensure correctness and reliability of data formatting.
This commit is contained in:
@@ -1,6 +1,9 @@
|
||||
"""Mock context for dashboard when USE_MOCK_DATA is enabled (no OpenStack/Prometheus)."""
|
||||
|
||||
import json
|
||||
from dashboard.serializers import (
|
||||
serialize_audit_for_response,
|
||||
serialize_current_cluster_for_template,
|
||||
)
|
||||
|
||||
|
||||
def get_mock_context():
|
||||
@@ -38,7 +41,7 @@ def get_mock_context():
|
||||
cpu_current = [45.2, 38.1, 52.0, 41.3, 29.8, 32.1, 36.4, 29.2, 42.2, 41.3, 28.3, 33.3]
|
||||
cpu_projected = [42.0, 40.0, 48.0, 44.0, 35.0, 46.0, 43.0, 43.0, 44.0, 48.0, 47.0, 49.0]
|
||||
|
||||
audits = [
|
||||
audits_raw = [
|
||||
{
|
||||
"id": "mock-audit-uuid-1",
|
||||
"name": "Mock audit (balanced)",
|
||||
@@ -49,20 +52,18 @@ def get_mock_context():
|
||||
"scope": "Full Cluster",
|
||||
"cpu_weight": "1.0",
|
||||
"ram_weight": "1.0",
|
||||
"migrations": json.dumps(
|
||||
[
|
||||
{
|
||||
"instanceName": "instance-1",
|
||||
"source": "compute-0",
|
||||
"destination": "compute-3",
|
||||
"flavor": "m1.small",
|
||||
"impact": "Low",
|
||||
}
|
||||
]
|
||||
),
|
||||
"host_labels": json.dumps(host_labels),
|
||||
"cpu_current": json.dumps(cpu_current),
|
||||
"cpu_projected": json.dumps(cpu_projected),
|
||||
"migrations": [
|
||||
{
|
||||
"instanceName": "instance-1",
|
||||
"source": "compute-0",
|
||||
"destination": "compute-3",
|
||||
"flavor": "m1.small",
|
||||
"impact": "Low",
|
||||
}
|
||||
],
|
||||
"host_labels": host_labels,
|
||||
"cpu_current": cpu_current,
|
||||
"cpu_projected": cpu_projected,
|
||||
},
|
||||
{
|
||||
"id": "mock-audit-uuid-2",
|
||||
@@ -74,12 +75,13 @@ def get_mock_context():
|
||||
"scope": "Full Cluster",
|
||||
"cpu_weight": "1.0",
|
||||
"ram_weight": "1.0",
|
||||
"migrations": json.dumps([]),
|
||||
"host_labels": json.dumps(host_labels),
|
||||
"cpu_current": json.dumps(cpu_current),
|
||||
"cpu_projected": json.dumps([40.0, 42.0, 50.0, 43.0, 36.0, 45.0]),
|
||||
"migrations": [],
|
||||
"host_labels": host_labels,
|
||||
"cpu_current": cpu_current,
|
||||
"cpu_projected": [40.0, 42.0, 50.0, 43.0, 36.0, 45.0],
|
||||
},
|
||||
]
|
||||
audits = [serialize_audit_for_response(a) for a in audits_raw]
|
||||
|
||||
return {
|
||||
"region": {
|
||||
@@ -128,8 +130,7 @@ def get_mock_context():
|
||||
"third_common_flavor": {"name": "m1.large", "count": 4},
|
||||
},
|
||||
"audits": audits,
|
||||
"current_cluster": {
|
||||
"host_labels": json.dumps(host_labels),
|
||||
"cpu_current": json.dumps(cpu_current),
|
||||
},
|
||||
"current_cluster": serialize_current_cluster_for_template(
|
||||
{"host_labels": host_labels, "cpu_current": cpu_current}
|
||||
),
|
||||
}
|
||||
|
||||
@@ -49,41 +49,81 @@ def get_current_cluster_cpu(connection: Connection) -> dict:
|
||||
}
|
||||
|
||||
|
||||
def _fetch_audits_and_action_plans(session, watcher_endpoint):
    """GET audits and action_plans from Watcher API. Returns (audits_list, action_plans_list).

    Args:
        session: Authenticated HTTP session used to call the Watcher REST API.
        watcher_endpoint: Base URL of the Watcher service (no trailing slash).

    Returns:
        tuple[list, list]: the "audits" and "action_plans" collections; each
        falls back to an empty list when the API omits the key or returns null.

    Raises:
        requests.HTTPError (via raise_for_status) on any non-2xx response.
    """
    audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
    audits_resp.raise_for_status()
    # `or []` guards against both a missing key and an explicit JSON null.
    audits_list = audits_resp.json().get("audits") or []

    actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
    actionplans_resp.raise_for_status()
    action_plans_list = actionplans_resp.json().get("action_plans") or []

    return audits_list, action_plans_list
|
||||
|
||||
|
||||
def _fetch_migrations_for_audit(
    connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp
):
    """
    Fetch action details for the given action plan and build migrations list and
    instance->destination mapping. Returns (migrations, mapping).

    Args:
        connection: OpenStack connection, used to resolve each action's server
            so its flavor name can be reported.
        session: Authenticated HTTP session for the Watcher REST API.
        watcher_endpoint: Base URL of the Watcher service.
        audit_resp: Audit payload. NOTE(review): currently unused in this body.
        actionplan: Action plan dict. NOTE(review): currently unused in this body.
        actions_resp: List of action summaries; each must carry a "uuid".

    Returns:
        tuple[list[dict], dict]: (migration rows for the frontend,
        mapping of instance resource_name -> destination_node).
    """
    migrations = []
    mapping = {}
    for action in actions_resp:
        # Each list entry is only a summary; fetch the full action to get
        # its input_parameters.
        action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
        action_resp.raise_for_status()
        action_resp = action_resp.json()

        # Resolve the server so the migration row can show its flavor name.
        server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"])
        params = action_resp["input_parameters"]
        mapping[params["resource_name"]] = params["destination_node"]

        migrations.append(
            {
                "instanceName": params["resource_name"],
                "source": params["source_node"],
                "destination": params["destination_node"],
                "flavor": server.flavor.name,
                # Impact is hard-coded; no scoring is performed here.
                "impact": "Low",
            }
        )
    return migrations, mapping
|
||||
|
||||
|
||||
def _build_projected_cpu_metrics(cpu_data, mapping):
    """
    Apply instance->destination mapping to a copy of cpu_data and return
    aggregated CPU metrics DataFrame (host, cpu_usage).

    Args:
        cpu_data: Prometheus result entries, each with a "metric" dict holding
            at least "instanceName" and "host".
        mapping: instance resource_name -> destination host, as produced by
            _fetch_migrations_for_audit.

    NOTE(review): `copy` is a shallow copy, so the nested "metric" dicts are
    shared with the caller's cpu_data and are mutated in place below — confirm
    this side effect on the original cpu_data is intended.
    """
    projected_cpu_data = copy(cpu_data)
    for entry in projected_cpu_data:
        # Re-home every instance the action plan migrates to its new host.
        if (instance := entry["metric"]["instanceName"]) in mapping:
            entry["metric"]["host"] = mapping[instance]
    return convert_cpu_data(projected_cpu_data)
|
||||
|
||||
|
||||
def get_audits(connection: Connection) -> list[dict] | None:
|
||||
session = connection.session
|
||||
|
||||
watcher_endpoint = connection.endpoint_for(
|
||||
service_type=WATCHER_ENDPOINT_NAME, interface=WATCHER_INTERFACE_NAME
|
||||
)
|
||||
|
||||
# Collect instances prometheus metrics
|
||||
cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
|
||||
|
||||
cpu_metrics = convert_cpu_data(data=cpu_data)
|
||||
|
||||
# Fetch audit list
|
||||
audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
|
||||
audits_resp.raise_for_status()
|
||||
audits_resp = audits_resp.json().get("audits") or []
|
||||
|
||||
# Fetch action plan list
|
||||
actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
|
||||
actionplans_resp.raise_for_status()
|
||||
actionplans_resp = actionplans_resp.json().get("action_plans") or []
|
||||
|
||||
# Filtering audits by PENDING state
|
||||
pending_audits = [plan for plan in actionplans_resp if plan["state"] == "RECOMMENDED"]
|
||||
_, action_plans_list = _fetch_audits_and_action_plans(session, watcher_endpoint)
|
||||
pending_audits = [plan for plan in action_plans_list if plan["state"] == "RECOMMENDED"]
|
||||
|
||||
result = []
|
||||
for item in pending_audits:
|
||||
projected_cpu_data = copy(cpu_data)
|
||||
|
||||
audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}")
|
||||
audit_resp.raise_for_status()
|
||||
audit_resp = audit_resp.json()
|
||||
|
||||
actionplan = next(
|
||||
filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], actionplans_resp), None
|
||||
filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], action_plans_list), None
|
||||
)
|
||||
if actionplan is None:
|
||||
continue
|
||||
@@ -94,32 +134,10 @@ def get_audits(connection: Connection) -> list[dict] | None:
|
||||
actions_resp.raise_for_status()
|
||||
actions_resp = actions_resp.json().get("actions") or []
|
||||
|
||||
migrations = []
|
||||
mapping = {}
|
||||
for action in actions_resp:
|
||||
action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
|
||||
action_resp.raise_for_status()
|
||||
action_resp = action_resp.json()
|
||||
|
||||
server = connection.get_server_by_id(action_resp["input_parameters"]["resource_id"])
|
||||
params = action_resp["input_parameters"]
|
||||
mapping[params["resource_name"]] = params["destination_node"]
|
||||
|
||||
migrations.append(
|
||||
{
|
||||
"instanceName": action_resp["input_parameters"]["resource_name"],
|
||||
"source": action_resp["input_parameters"]["source_node"],
|
||||
"destination": action_resp["input_parameters"]["destination_node"],
|
||||
"flavor": server.flavor.name,
|
||||
"impact": "Low",
|
||||
}
|
||||
)
|
||||
|
||||
for entry in projected_cpu_data:
|
||||
if (instance := entry["metric"]["instanceName"]) in mapping:
|
||||
entry["metric"]["host"] = mapping[instance]
|
||||
|
||||
projected_cpu_metrics = convert_cpu_data(projected_cpu_data)
|
||||
migrations, mapping = _fetch_migrations_for_audit(
|
||||
connection, session, watcher_endpoint, audit_resp, actionplan, actions_resp
|
||||
)
|
||||
projected_cpu_metrics = _build_projected_cpu_metrics(cpu_data, mapping)
|
||||
|
||||
result.append(
|
||||
{
|
||||
|
||||
32
dashboard/serializers.py
Normal file
32
dashboard/serializers.py
Normal file
@@ -0,0 +1,32 @@
|
||||
"""Serialization helpers for dashboard context and API responses."""
|
||||
|
||||
import json
|
||||
|
||||
|
||||
def _ensure_json_str(value):
|
||||
"""Return value as JSON string; if already a string, return as-is."""
|
||||
return value if isinstance(value, str) else json.dumps(value)
|
||||
|
||||
|
||||
def serialize_audit_for_response(audit: dict) -> dict:
    """
    Return a shallow copy of *audit* whose chart-facing fields — migrations,
    host_labels, cpu_current, and cpu_projected — are JSON strings, ready for
    template embedding or an API response. All other keys pass through as-is.
    """
    serialized = dict(audit)
    for field in ("migrations", "host_labels", "cpu_current", "cpu_projected"):
        serialized[field] = _ensure_json_str(audit.get(field))
    return serialized
|
||||
|
||||
|
||||
def serialize_current_cluster_for_template(current_cluster: dict) -> dict:
    """
    Build the template-facing current-cluster payload: host_labels and
    cpu_current encoded as JSON strings for inline embedding (e.g. index.html).
    """
    return {
        field: _ensure_json_str(current_cluster.get(field))
        for field in ("host_labels", "cpu_current")
    }
|
||||
57
dashboard/tests/test_serializers.py
Normal file
57
dashboard/tests/test_serializers.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""Tests for dashboard.serializers."""
|
||||
|
||||
import json
|
||||
|
||||
from django.test import TestCase
|
||||
|
||||
from dashboard.serializers import (
|
||||
serialize_audit_for_response,
|
||||
serialize_current_cluster_for_template,
|
||||
)
|
||||
|
||||
|
||||
class SerializeAuditForResponseTest(TestCase):
    """Unit tests for dashboard.serializers.serialize_audit_for_response."""

    def test_serializes_list_fields_to_json_strings(self):
        """List-valued chart fields must round-trip through json.loads."""
        audit = {
            "id": "audit-1",
            "name": "Test",
            "migrations": [{"instanceName": "i1", "source": "h1", "destination": "h2"}],
            "host_labels": ["h1", "h2"],
            "cpu_current": [10.0, 20.0],
            "cpu_projected": [15.0, 25.0],
        }
        result = serialize_audit_for_response(audit)
        # Non-chart fields pass through untouched.
        self.assertEqual(result["id"], "audit-1")
        self.assertEqual(result["name"], "Test")
        # Chart fields are JSON strings that decode back to the originals.
        self.assertEqual(json.loads(result["migrations"]), audit["migrations"])
        self.assertEqual(json.loads(result["host_labels"]), audit["host_labels"])
        self.assertEqual(json.loads(result["cpu_current"]), audit["cpu_current"])
        self.assertEqual(json.loads(result["cpu_projected"]), audit["cpu_projected"])

    def test_leaves_already_serialized_strings_unchanged(self):
        """Fields that are already JSON strings must not be double-encoded."""
        audit = {
            "id": "a",
            "migrations": "[1,2]",
            "host_labels": "[]",
            "cpu_current": "[0]",
            "cpu_projected": "[0]",
        }
        result = serialize_audit_for_response(audit)
        self.assertEqual(result["migrations"], "[1,2]")
        self.assertEqual(result["host_labels"], "[]")
        self.assertEqual(result["cpu_current"], "[0]")
        self.assertEqual(result["cpu_projected"], "[0]")
|
||||
|
||||
|
||||
class SerializeCurrentClusterForTemplateTest(TestCase):
    """Unit tests for dashboard.serializers.serialize_current_cluster_for_template."""

    def test_serializes_lists_to_json_strings(self):
        """Lists must be JSON-encoded so they can be embedded inline in a template."""
        cluster = {"host_labels": ["c0", "c1"], "cpu_current": [30.0, 40.0]}
        result = serialize_current_cluster_for_template(cluster)
        self.assertEqual(json.loads(result["host_labels"]), cluster["host_labels"])
        self.assertEqual(json.loads(result["cpu_current"]), cluster["cpu_current"])

    def test_leaves_already_serialized_strings_unchanged(self):
        """Pre-encoded JSON strings must pass through as-is (no double encoding)."""
        cluster = {"host_labels": "[]", "cpu_current": "[]"}
        result = serialize_current_cluster_for_template(cluster)
        self.assertEqual(result["host_labels"], "[]")
        self.assertEqual(result["cpu_current"], "[]")
|
||||
@@ -1,5 +1,3 @@
|
||||
import json
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.http import JsonResponse
|
||||
@@ -10,6 +8,10 @@ from dashboard.openstack_utils.audits import get_audits, get_current_cluster_cpu
|
||||
from dashboard.openstack_utils.connect import check_openstack, get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
from dashboard.prometheus_utils.query import check_prometheus, fetch_dashboard_metrics
|
||||
from dashboard.serializers import (
|
||||
serialize_audit_for_response,
|
||||
serialize_current_cluster_for_template,
|
||||
)
|
||||
from dashboard.stats import (
|
||||
CACHE_KEY_AUDITS,
|
||||
CACHE_KEY_CURRENT_CLUSTER,
|
||||
@@ -44,17 +46,9 @@ def collect_context():
|
||||
audits = get_audits(connection=connection)
|
||||
metrics = fetch_dashboard_metrics()
|
||||
context = build_stats(metrics, region_name, flavors)
|
||||
context["audits"] = audits
|
||||
context["audits"] = [serialize_audit_for_response(a) for a in audits]
|
||||
current_cluster = get_current_cluster_cpu(connection)
|
||||
context["current_cluster"] = {
|
||||
"host_labels": json.dumps(current_cluster["host_labels"]),
|
||||
"cpu_current": json.dumps(current_cluster["cpu_current"]),
|
||||
}
|
||||
for audit in context["audits"]:
|
||||
audit["migrations"] = json.dumps(audit["migrations"])
|
||||
audit["host_labels"] = json.dumps(audit["host_labels"])
|
||||
audit["cpu_current"] = json.dumps(audit["cpu_current"])
|
||||
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
|
||||
context["current_cluster"] = serialize_current_cluster_for_template(current_cluster)
|
||||
return context
|
||||
|
||||
|
||||
@@ -71,12 +65,7 @@ def collect_audits():
|
||||
"""Build audits list with serialized fields for frontend."""
|
||||
connection = get_connection()
|
||||
audits = get_audits(connection=connection)
|
||||
for audit in audits:
|
||||
audit["migrations"] = json.dumps(audit["migrations"])
|
||||
audit["host_labels"] = json.dumps(audit["host_labels"])
|
||||
audit["cpu_current"] = json.dumps(audit["cpu_current"])
|
||||
audit["cpu_projected"] = json.dumps(audit["cpu_projected"])
|
||||
return audits
|
||||
return [serialize_audit_for_response(a) for a in audits]
|
||||
|
||||
|
||||
def _skeleton_context():
|
||||
|
||||
Reference in New Issue
Block a user