Finished all audits; first release candidate.
This commit is contained in:
0
dashboard/__init__.py
Normal file
0
dashboard/__init__.py
Normal file
0
dashboard/_test.py
Normal file
0
dashboard/_test.py
Normal file
124
dashboard/openstack_utils/audits.py
Normal file
124
dashboard/openstack_utils/audits.py
Normal file
@@ -0,0 +1,124 @@
|
||||
import pandas
|
||||
|
||||
from copy import copy
|
||||
|
||||
from openstack.connection import Connection
|
||||
|
||||
from watcher_visio.settings import WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME, PROMETHEUS_METRICS
|
||||
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
|
||||
def convert_cpu_data(data: list):
    """Flatten Prometheus CPU range-query results into per-host mean usage.

    Args:
        data: Prometheus matrix results; each entry is expected to carry a
            ``metric`` dict with ``host`` and ``instanceName`` keys and a
            ``values`` list of ``[timestamp, value]`` pairs.

    Returns:
        A DataFrame with ``host`` and ``cpu_usage`` columns, where
        ``cpu_usage`` is the mean over time of the per-host sum of all
        instances' CPU usage.
    """
    metrics = [
        {
            "timestamp": int(t),
            "host": entry["metric"]["host"],
            "cpu_usage": float(val),
            "instance": entry["metric"]["instanceName"],
        }
        for entry in data
        for t, val in entry["values"]
    ]

    # Guard: an empty result set would otherwise raise KeyError on the
    # column accesses below.
    if not metrics:
        return pandas.DataFrame(columns=["host", "cpu_usage"])

    df_cpu = pandas.DataFrame(metrics)
    df_cpu["timestamp"] = pandas.to_datetime(df_cpu["timestamp"], unit="s")

    # Sum usage of all instances per host per sample, then average over time.
    return (
        df_cpu.groupby(["host", "timestamp"])["cpu_usage"].sum()
        .groupby("host").mean()
        .reset_index()
    )
|
||||
|
||||
def get_audits(connection: "Connection") -> list[dict] | None:
    """Collect PENDING Watcher audits enriched with planned migrations and
    current vs. projected per-host CPU usage.

    Args:
        connection: An authenticated OpenStack connection.

    Returns:
        A list of audit dicts ready for dashboard rendering (possibly empty).

    Raises:
        requests.HTTPError: If any Watcher API call returns an error status.
    """
    # Local import: the file-level import only brings in the shallow `copy`.
    from copy import deepcopy

    session = connection.session

    watcher_endpoint = connection.endpoint_for(
        service_type=WATCHER_ENDPOINT_NAME,
        interface=WATCHER_INTERFACE_NAME
    )

    # Collect instances prometheus metrics
    cpu_data = query_prometheus(PROMETHEUS_METRICS['cpu_usage'])
    cpu_metrics = convert_cpu_data(data=cpu_data)

    # Fetch audit list.
    # BUG FIX: the parsed payload was previously discarded and the raw
    # Response object was iterated instead.
    audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
    audits_resp.raise_for_status()
    audits = audits_resp.json().get('audits', [])

    # Fetch action plan list (same fix: keep the parsed list, not the Response).
    actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
    actionplans_resp.raise_for_status()
    actionplans = actionplans_resp.json().get('action_plans', [])

    # Filtering audits by PENDING state
    pending_audits = [audit for audit in audits if audit['state'] == "PENDING"]

    result = []
    for item in pending_audits:
        # BUG FIX: a shallow copy shares the nested ``metric`` dicts, so the
        # projected-host re-assignment below would corrupt ``cpu_data`` for
        # every later iteration. Deep-copy instead.
        projected_cpu_data = deepcopy(cpu_data)

        audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['uuid']}")
        audit_resp.raise_for_status()
        audit_detail = audit_resp.json()

        actionplan = next(
            (ap for ap in actionplans if ap.get("audit_uuid") == audit_detail['uuid']),
            None,
        )

        # An audit may not have an action plan yet; treat that as "no actions"
        # instead of crashing on ``actionplan['uuid']``.
        if actionplan is not None:
            actions_resp = session.get(
                f"{watcher_endpoint}/v1/actions/?action_plan_uuid={actionplan['uuid']}"
            )
            actions_resp.raise_for_status()
            actions = actions_resp.json().get('actions', [])
        else:
            actions = []

        migrations = []
        mapping = {}  # resource_name -> destination host, for the projection
        for action in actions:
            action_resp = session.get(
                f"{watcher_endpoint}/v1/actions/{action['uuid']}"
            )
            action_resp.raise_for_status()
            action_detail = action_resp.json()
            params = action_detail['input_parameters']

            server = connection.get_server_by_id(params['resource_id'])
            mapping[params['resource_name']] = params['destination_node']

            migrations.append({
                "instanceName": params['resource_name'],
                "source": params['source_node'],
                "destination": params['destination_node'],
                # Lookup can miss (e.g. instance deleted since the audit ran).
                "flavor": server.flavor.name if server else "unknown",
                "impact": 'Low'
            })

        # Re-home migrated instances to their destination hosts so the
        # aggregation reflects post-migration CPU usage.
        for entry in projected_cpu_data:
            if (instance := entry['metric']['instanceName']) in mapping:
                entry['metric']['host'] = mapping[instance]

        projected_cpu_metrics = convert_cpu_data(projected_cpu_data)

        weights = audit_detail['parameters'].get('weights', {})
        result.append({
            "id": audit_detail['uuid'],
            "name": audit_detail['name'],
            "created_at": audit_detail['created_at'],
            "strategy": audit_detail['strategy_name'],
            "goal": audit_detail['goal_name'],
            "type": audit_detail['audit_type'],
            "scope": audit_detail['scope'],
            "cpu_weight": weights.get('instance_cpu_usage_weight', "none"),
            "ram_weight": weights.get('instance_ram_usage_weight', "none"),
            "migrations": migrations,
            "host_labels": cpu_metrics['host'].to_list(),
            "cpu_current": cpu_metrics['cpu_usage'].to_list(),
            "cpu_projected": projected_cpu_metrics['cpu_usage'].to_list(),
        })

    return result
|
||||
@@ -1,7 +1,8 @@
|
||||
import openstack
|
||||
from openstack.connection import Connection
|
||||
|
||||
from watcher_visio.settings import OPENSTACK_CLOUD, OPENSTACK_REGION_NAME
|
||||
|
||||
def get_connection() -> Connection:
    """Open an OpenStack connection for the configured cloud and region."""
    return openstack.connect(
        cloud=OPENSTACK_CLOUD,
        region_name=OPENSTACK_REGION_NAME,
    )
|
||||
|
||||
20
dashboard/openstack_utils/flavor.py
Normal file
20
dashboard/openstack_utils/flavor.py
Normal file
@@ -0,0 +1,20 @@
|
||||
from openstack.connection import Connection
|
||||
|
||||
from collections import Counter
|
||||
|
||||
def get_flavor_list(connection: "Connection") -> dict:
    """Rank the flavors in use across all projects by instance count.

    Args:
        connection: An authenticated OpenStack connection.

    Returns:
        A dict with up to three entries (``first_common_flavor``,
        ``second_common_flavor``, ``third_common_flavor``), each a dict with
        ``name`` and ``count`` keys.
        NOTE(review): the value stored under ``name`` is the flavor *id*
        taken from ``server.flavor['id']`` — confirm callers expect ids
        rather than display names.
    """
    servers = connection.compute.servers(all_projects=True)
    flavor_ids = [s.flavor['id'] for s in servers if 'id' in s.flavor]

    result = {}
    # most_common(3): only the top three flavors are reported; zip stops at
    # the shorter sequence, so fewer than three flavors is handled naturally.
    for prefix, (flavor_id, count) in zip(
        ("first", "second", "third"), Counter(flavor_ids).most_common(3)
    ):
        result[f"{prefix}_common_flavor"] = {
            "name": flavor_id,
            "count": count,
        }

    return result
|
||||
@@ -2,7 +2,7 @@ import requests
|
||||
|
||||
from watcher_visio.settings import PROMETHEUS_URL
|
||||
|
||||
def query_prometheus(query):
|
||||
def query_prometheus(query: str) -> str | list[str]:
|
||||
url = f"{PROMETHEUS_URL}/api/v1/query"
|
||||
params = {
|
||||
"query": query,
|
||||
|
||||
@@ -15,3 +15,42 @@ def mul(a, b):
|
||||
return float(a) * float(b)
|
||||
except:
|
||||
return 0
|
||||
|
||||
@register.filter
def sub(a, b):
    """Template filter: return ``a - b`` as a float, or 0 on bad input."""
    try:
        return float(a) - float(b)
    except (ValueError, TypeError):
        # A bare ``except`` would also swallow SystemExit/KeyboardInterrupt;
        # only conversion failures should fall back to 0.
        return 0
|
||||
|
||||
@register.filter
def convert_bytes(bytes_value, target_unit='GB'):
    """
    Convert bytes to a specific unit, rounded to one decimal place.

    Args:
        bytes_value: Size in bytes
        target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB');
            unrecognised units fall back to 'MB'

    Returns:
        Float value in target unit, or 0.0 if bytes_value is not numeric
    """
    try:
        bytes_value = float(bytes_value)
    except (ValueError, TypeError):
        return 0.0

    conversion_factors = {
        'B': 1,
        'KB': 1024,
        'MB': 1024 ** 2,
        'GB': 1024 ** 3,
        'TB': 1024 ** 4,
    }

    # str() guards against non-string units (e.g. None), which would
    # otherwise raise AttributeError on .upper().
    target_unit = str(target_unit).upper()
    if target_unit not in conversion_factors:
        target_unit = 'MB'

    return round(bytes_value / conversion_factors[target_unit], 1)
|
||||
|
||||
@@ -2,14 +2,15 @@ import json
|
||||
|
||||
from django.shortcuts import render
|
||||
from dashboard.openstack_utils.connect import get_connection
|
||||
from dashboard.openstack_utils.flavor import get_flavor_list
|
||||
from dashboard.prometheus_utils.query import query_prometheus
|
||||
|
||||
_BASE = {
|
||||
"region_name": "ct3k1ldt"
|
||||
}
|
||||
from dashboard.openstack_utils.audits import get_audits
|
||||
|
||||
def collect_context():
|
||||
connection = get_connection()
|
||||
region_name = connection._compute_region
|
||||
flavors = get_flavor_list(connection=connection)
|
||||
audits = get_audits(connection=connection)
|
||||
hosts_total = int(
|
||||
query_prometheus(
|
||||
query="count(node_exporter_build_info{job='node_exporter_compute'})"
|
||||
@@ -22,12 +23,7 @@ def collect_context():
|
||||
)
|
||||
pcpu_usage = float(
|
||||
query_prometheus(
|
||||
query=""
|
||||
)
|
||||
)
|
||||
vcpu_total = int (
|
||||
query_prometheus(
|
||||
query="(sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode)))*(avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'}))"
|
||||
query="sum(node_load5{job='node_exporter_compute'})"
|
||||
)
|
||||
)
|
||||
vcpu_allocated = int(
|
||||
@@ -35,95 +31,176 @@ def collect_context():
|
||||
query="sum(libvirt_domain_info_virtual_cpus)"
|
||||
)
|
||||
)
|
||||
vcpu_overcommit_ratio = float(
|
||||
query_prometheus(
|
||||
query="sum(libvirt_domain_info_virtual_cpus)/(sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode)))"
|
||||
)
|
||||
)
|
||||
vcpu_overcommit_max = float(
|
||||
query_prometheus(
|
||||
query="avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})"
|
||||
)
|
||||
)
|
||||
vm_count = int (
|
||||
pram_total = int(
|
||||
query_prometheus(
|
||||
query="sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})" # memory in bytes
|
||||
)
|
||||
)
|
||||
pram_usage = int (
|
||||
query_prometheus(
|
||||
query="sum(node_memory_Active_bytes{job='node_exporter_compute'})"
|
||||
)
|
||||
)
|
||||
vram_allocated = int(
|
||||
query_prometheus(
|
||||
query="sum(libvirt_domain_info_maximum_memory_bytes)"
|
||||
)
|
||||
)
|
||||
vram_overcommit_max = float(
|
||||
query_prometheus(
|
||||
query="avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))"
|
||||
)
|
||||
)
|
||||
vm_count = int(
|
||||
query_prometheus(
|
||||
query="sum(libvirt_domain_state_code)"
|
||||
)
|
||||
)
|
||||
vm_active = int (
|
||||
vm_active = int(
|
||||
query_prometheus(
|
||||
query="sum(libvirt_domain_state_code{stateDesc='the domain is running'})"
|
||||
)
|
||||
)
|
||||
return {
|
||||
"region_name": "",
|
||||
# <--- Hosts data --->
|
||||
"hosts_total": hosts_total,
|
||||
|
||||
vcpu_total = pcpu_total * vcpu_overcommit_max
|
||||
vram_total = pram_total * vram_overcommit_max
|
||||
|
||||
context = {
|
||||
# <--- Region data --->
|
||||
"region": {
|
||||
"name": region_name,
|
||||
"hosts_total": 6,
|
||||
},
|
||||
# <--- CPU data --->
|
||||
# pCPU data
|
||||
"pcpu_total": pcpu_total,
|
||||
"pcpu_usage": pcpu_usage,
|
||||
"pcpu_free": pcpu_total - pcpu_usage,
|
||||
"pcpu": {
|
||||
"total": pcpu_total,
|
||||
"usage": pcpu_usage,
|
||||
"free": pcpu_total - pcpu_usage,
|
||||
"used_percentage": pcpu_usage / pcpu_total * 100,
|
||||
},
|
||||
# vCPU data
|
||||
"vcpu_total": vcpu_total,
|
||||
"vcpu_allocated": vcpu_allocated,
|
||||
"vcpu_free": vcpu_total - vcpu_allocated,
|
||||
"vcpu_overcommit_ratio": vcpu_overcommit_ratio,
|
||||
"vcpu_overcommit_max": vcpu_overcommit_max,
|
||||
"vcpu": {
|
||||
"total": vcpu_total,
|
||||
"allocated": vcpu_allocated,
|
||||
"free": vcpu_total - vcpu_allocated,
|
||||
"allocated_percentage": vcpu_allocated / vcpu_total * 100,
|
||||
"overcommit_ratio": vcpu_allocated / pcpu_total,
|
||||
"overcommit_max": vcpu_overcommit_max,
|
||||
},
|
||||
# <--- RAM data --->
|
||||
# pRAM data
|
||||
|
||||
"pram" : {
|
||||
"total": pram_total,
|
||||
"usage": pram_usage,
|
||||
"free": pram_total - pram_usage,
|
||||
"used_percentage": pram_usage / pram_total * 100,
|
||||
},
|
||||
# vRAM data
|
||||
|
||||
"vram": {
|
||||
"total": vram_total,
|
||||
"allocated": vram_allocated,
|
||||
"free": vram_total - vram_allocated,
|
||||
"allocated_percentage": vram_allocated / vram_total * 100,
|
||||
"overcommit_ratio": vram_allocated / pram_total,
|
||||
"overcommit_max": vram_overcommit_max,
|
||||
},
|
||||
# <--- VM data --->
|
||||
"vm_count": vm_count,
|
||||
"vm_active": vm_active,
|
||||
"vm_stopped": vm_count - vm_active,
|
||||
"vm_error": "",
|
||||
"avg_cpu_per_vm": vcpu_allocated / vm_count,
|
||||
"avg_ram_per_vm": "",
|
||||
"vm_density": vm_count / hosts_total,
|
||||
"vm": {
|
||||
"count": vm_count,
|
||||
"active": vm_active,
|
||||
"stopped": vm_count - vm_active,
|
||||
"avg_cpu": vcpu_allocated / vm_count,
|
||||
"avg_ram": vram_allocated / vm_count,
|
||||
"density": vm_count / hosts_total,
|
||||
},
|
||||
"flavors": flavors,
|
||||
"audits": audits,
|
||||
}
|
||||
return context
|
||||
|
||||
def index(request):
|
||||
context = {**_BASE,
|
||||
# CPU and RAM utilization data
|
||||
'cpu_total': 160,
|
||||
'cpu_used': 45,
|
||||
'cpu_free': 66,
|
||||
'cpu_used_percentage': 42.0,
|
||||
'ram_used': 128,
|
||||
'ram_free': 256,
|
||||
'ram_used_percentage': 33.3,
|
||||
|
||||
# Resource allocation data
|
||||
'cpu_allocated': 94,
|
||||
'cpu_total': 160,
|
||||
'cpu_overcommit_ratio': 1.5,
|
||||
'ram_allocated': 384,
|
||||
'ram_total': 512,
|
||||
'ram_overcommit_ratio': 1.2,
|
||||
|
||||
# Instance summary data
|
||||
'vm_count': 47,
|
||||
'vm_active': 42,
|
||||
'vm_stopped': 5,
|
||||
'vm_error': 0,
|
||||
'common_flavor': 'm1.medium',
|
||||
'common_flavor_count': 18,
|
||||
'second_common_flavor': {
|
||||
'name': 'm1.small',
|
||||
'count': 12
|
||||
hosts_total = 6
|
||||
pcpu_total = 672
|
||||
pcpu_usage = 39.2
|
||||
vcpu_total = 3360
|
||||
vcpu_allocated = 98
|
||||
vcpu_overcommit_max = 5
|
||||
pram_total = 562500000000
|
||||
pram_usage = 4325000000
|
||||
vram_total = 489375000000
|
||||
vram_allocated = 5625000000
|
||||
vram_overcommit_max = 0.87
|
||||
vm_count = 120
|
||||
vm_active = 90
|
||||
context = {
|
||||
# <--- Region data --->
|
||||
"region": {
|
||||
"name": "ct3k1ldt",
|
||||
"hosts_total": 6,
|
||||
},
|
||||
'third_common_flavor': {
|
||||
'name': 'm1.large',
|
||||
'count': 8
|
||||
# <--- CPU data --->
|
||||
# pCPU data
|
||||
"pcpu": {
|
||||
"total": pcpu_total,
|
||||
"usage": pcpu_usage,
|
||||
"free": pcpu_total - pcpu_usage,
|
||||
"used_percentage": pcpu_usage / pcpu_total * 100,
|
||||
},
|
||||
# vCPU data
|
||||
"vcpu": {
|
||||
"total": vcpu_total,
|
||||
"allocated": vcpu_allocated,
|
||||
"free": vcpu_total - vcpu_allocated,
|
||||
"allocated_percentage": vcpu_allocated / vcpu_total * 100,
|
||||
"overcommit_ratio": vcpu_allocated / pcpu_total,
|
||||
"overcommit_max": vcpu_overcommit_max,
|
||||
},
|
||||
# <--- RAM data --->
|
||||
# pRAM data
|
||||
"pram" : {
|
||||
"total": pram_total,
|
||||
"usage": pram_usage,
|
||||
"free": pram_total - pram_usage,
|
||||
"used_percentage": pram_usage / pram_total * 100,
|
||||
},
|
||||
# vRAM data
|
||||
"vram": {
|
||||
"total": vram_total,
|
||||
"allocated": vram_allocated,
|
||||
"free": vram_total - vram_allocated,
|
||||
"allocated_percentage": vram_allocated / vram_total * 100,
|
||||
"overcommit_ratio": vram_allocated / pram_total,
|
||||
"overcommit_max": vram_overcommit_max,
|
||||
},
|
||||
# <--- VM data --->
|
||||
"vm": {
|
||||
"count": vm_count,
|
||||
"active": vm_active,
|
||||
"stopped": vm_count - vm_active,
|
||||
"avg_cpu": vcpu_allocated / vm_count,
|
||||
"avg_ram": vram_allocated / vm_count,
|
||||
"density": vm_count / hosts_total,
|
||||
},
|
||||
"flavors": {
|
||||
'first_common_flavor': {
|
||||
'name': 'm1.medium',
|
||||
'count': 18
|
||||
},
|
||||
'second_common_flavor': {
|
||||
'name': 'm1.small',
|
||||
'count': 12
|
||||
},
|
||||
'third_common_flavor': {
|
||||
'name': 'm1.large',
|
||||
'count': 8
|
||||
},
|
||||
},
|
||||
|
||||
# Quick stats
|
||||
'avg_cpu_per_vm': 2.0,
|
||||
'avg_ram_per_vm': 8.2,
|
||||
'vm_density': 9.4,
|
||||
|
||||
# Audit data
|
||||
'audits': [
|
||||
@@ -247,9 +324,9 @@ def index(request):
|
||||
|
||||
# Serialize lists for JavaScript
|
||||
for audit in context['audits']:
|
||||
audit['migrations'] = json.dumps(audit['migrations'])
|
||||
audit['host_labels'] = json.dumps(audit['host_labels'])
|
||||
audit['cpu_current'] = json.dumps(audit['cpu_current'])
|
||||
audit['cpu_projected'] = json.dumps(audit['cpu_projected'])
|
||||
audit['migrations'] = json.dumps(audit['migrations'])
|
||||
audit['host_labels'] = json.dumps(audit['host_labels'])
|
||||
audit['cpu_current'] = json.dumps(audit['cpu_current'])
|
||||
audit['cpu_projected'] = json.dumps(audit['cpu_projected'])
|
||||
|
||||
return render(request, 'index.html', context)
|
||||
Reference in New Issue
Block a user