Refactor code for consistency and readability
Some checks failed
CI / ci (push) Failing after 14s

- Standardized string quotes across multiple files to use double quotes for consistency.
- Improved formatting of JSON dumps in mock data for better readability.
- Enhanced the structure of various functions and data definitions for clarity.
- Updated test cases to reflect changes in data structure and ensure accuracy.
This commit is contained in:
2026-02-07 18:01:49 +03:00
parent 02b38a25eb
commit 2a0e0c216a
19 changed files with 322 additions and 209 deletions

View File

@@ -1,13 +1,12 @@
import pandas
from copy import copy
import pandas
from openstack.connection import Connection
from watcher_visio.settings import WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME, PROMETHEUS_METRICS
from watcher_visio.settings import PROMETHEUS_METRICS, WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME
from dashboard.prometheus_utils.query import query_prometheus
def convert_cpu_data(data: list):
    """Flatten Prometheus CPU query results into mean CPU usage per host.

    Parameters
    ----------
    data : list
        Prometheus range-query result entries. Each entry carries a
        ``metric`` mapping with ``host`` and ``instanceName`` labels and a
        ``values`` list of ``(timestamp, value)`` pairs.

    Returns
    -------
    pandas.DataFrame
        Frame with ``host`` and ``cpu_usage`` columns: per-host usage is
        summed across instances at each timestamp, then averaged over time.
    """
    metrics = []
    for entry in data:
        for t, val in entry["values"]:
            metrics.append(
                {
                    "timestamp": int(t),
                    "host": entry["metric"]["host"],
                    "cpu_usage": float(val),
                    "instance": entry["metric"]["instanceName"],
                }
            )
    if not metrics:
        # An empty input would otherwise produce a column-less DataFrame and
        # raise KeyError below; return the expected (empty) shape so callers
        # can safely test ``.empty`` and access the columns.
        return pandas.DataFrame(columns=["host", "cpu_usage"])
    df_cpu = pandas.DataFrame(metrics)
    df_cpu["timestamp"] = pandas.to_datetime(df_cpu["timestamp"], unit="s")
    # Aggregate CPU usage per host: sum across instances per timestamp,
    # then take each host's mean over time.
    return (
        df_cpu.groupby(["host", "timestamp"])["cpu_usage"]
        .sum()
        .groupby("host")
        .mean()
        .reset_index()
    )
def get_current_cluster_cpu(connection: Connection) -> dict:
    """Return current per-host CPU state for the cluster (no Watcher dependency).

    Parameters
    ----------
    connection : Connection
        OpenStack connection (currently unused here; kept for a uniform
        call signature with the other collectors).

    Returns
    -------
    dict
        ``{"host_labels": [...], "cpu_current": [...]}`` — parallel lists of
        host names and their mean CPU usage; both empty when Prometheus
        returned no samples.
    """
    cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
    cpu_metrics = convert_cpu_data(data=cpu_data)
    if cpu_metrics.empty:
        return {"host_labels": [], "cpu_current": []}
    return {
        "host_labels": cpu_metrics["host"].to_list(),
        "cpu_current": cpu_metrics["cpu_usage"].to_list(),
    }
def get_audits(connection: Connection) -> list[dict] | None:
    """Collect Watcher audits whose action plans are in RECOMMENDED state.

    For each such audit, gathers its planned migrations and computes both
    the current and the projected (post-migration) per-host CPU usage from
    Prometheus metrics.

    Parameters
    ----------
    connection : Connection
        OpenStack connection used for the Watcher API session and server
        lookups.

    Returns
    -------
    list[dict]
        One entry per pending audit with identity/strategy fields, the
        planned ``migrations``, and ``host_labels`` / ``cpu_current`` /
        ``cpu_projected`` lists for charting.
    """
    session = connection.session
    watcher_endpoint = connection.endpoint_for(
        service_type=WATCHER_ENDPOINT_NAME, interface=WATCHER_INTERFACE_NAME
    )

    # Collect instances prometheus metrics
    cpu_data = query_prometheus(PROMETHEUS_METRICS["cpu_usage"])
    cpu_metrics = convert_cpu_data(data=cpu_data)

    # Fetch audit list
    audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
    audits_resp.raise_for_status()
    audits_resp = audits_resp.json().get("audits") or []

    # Fetch action plan list
    actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
    actionplans_resp.raise_for_status()
    actionplans_resp = actionplans_resp.json().get("action_plans") or []

    # Keep only plans still awaiting approval (RECOMMENDED state).
    pending_audits = [plan for plan in actionplans_resp if plan["state"] == "RECOMMENDED"]

    result = []
    for item in pending_audits:
        # Copy each entry AND its nested "metric" dict: the projection loop
        # below rewrites entry["metric"]["host"], and a plain shallow
        # copy(cpu_data) would mutate the shared source data across audits.
        projected_cpu_data = [
            {**entry, "metric": dict(entry["metric"])} for entry in cpu_data
        ]

        audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['audit_uuid']}")
        audit_resp.raise_for_status()
        audit_resp = audit_resp.json()

        actionplan = next(
            filter(lambda x: x.get("audit_uuid") == audit_resp["uuid"], actionplans_resp), None
        )
        if actionplan is None:
            continue

        # NOTE(review): the line opening this request was outside the visible
        # diff hunks; reconstructed from the URL continuation — confirm.
        actions_resp = session.get(
            f"{watcher_endpoint}/v1/actions/?action_plan_uuid={actionplan['uuid']}"
        )
        actions_resp.raise_for_status()
        actions_resp = actions_resp.json().get("actions") or []

        migrations = []
        mapping = {}  # instance name -> destination host, for the projection
        for action in actions_resp:
            action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
            action_resp.raise_for_status()
            action_resp = action_resp.json()

            params = action_resp["input_parameters"]
            server = connection.get_server_by_id(params["resource_id"])
            mapping[params["resource_name"]] = params["destination_node"]

            migrations.append(
                {
                    "instanceName": params["resource_name"],
                    "source": params["source_node"],
                    "destination": params["destination_node"],
                    "flavor": server.flavor.name,
                    "impact": "Low",
                }
            )

        # Re-home each migrating instance onto its destination node, then
        # recompute per-host usage to obtain the projected state.
        for entry in projected_cpu_data:
            if (instance := entry["metric"]["instanceName"]) in mapping:
                entry["metric"]["host"] = mapping[instance]
        projected_cpu_metrics = convert_cpu_data(projected_cpu_data)

        result.append(
            {
                "id": audit_resp["uuid"],
                "name": audit_resp["name"],
                "created_at": audit_resp["created_at"],
                "strategy": audit_resp["strategy_name"],
                "goal": audit_resp["goal_name"],
                "type": audit_resp["audit_type"],
                "scope": audit_resp["scope"],
                "cpu_weight": audit_resp["parameters"]
                .get("weights", {})
                .get("instance_cpu_usage_weight", "none"),
                "ram_weight": audit_resp["parameters"]
                .get("weights", {})
                .get("instance_ram_usage_weight", "none"),
                "migrations": migrations,
                "host_labels": cpu_metrics["host"].to_list(),
                "cpu_current": cpu_metrics["cpu_usage"].to_list(),
                "cpu_projected": projected_cpu_metrics["cpu_usage"].to_list(),
            }
        )
    return result

View File

@@ -1,6 +1,5 @@
import openstack
from openstack.connection import Connection
from watcher_visio.settings import OPENSTACK_CLOUD, OPENSTACK_REGION_NAME

View File

@@ -1,10 +1,11 @@
from collections import Counter
from openstack.connection import Connection
from collections import Counter
def get_flavor_list(connection: Connection) -> dict:
servers = list(connection.compute.servers(all_projects=True))
flavor_ids = [s.flavor['id'] for s in servers if 'id' in s.flavor]
flavor_ids = [s.flavor["id"] for s in servers if "id" in s.flavor]
flavor_count = Counter(flavor_ids).most_common()
flavors = list(flavor_count)
@@ -13,10 +14,7 @@ def get_flavor_list(connection: Connection) -> dict:
placeholder = {"name": "", "count": 0}
for idx, prefix in [(0, "first"), (1, "second"), (2, "third")]:
if len(flavors) > idx:
result[f"{prefix}_common_flavor"] = {
"name": flavors[idx][0],
"count": flavors[idx][1]
}
result[f"{prefix}_common_flavor"] = {"name": flavors[idx][0], "count": flavors[idx][1]}
else:
result[f"{prefix}_common_flavor"] = placeholder