# --- dashboard/openstack_utils/audits.py ---
import pandas

from copy import deepcopy

from openstack.connection import Connection

from watcher_visio.settings import WATCHER_ENDPOINT_NAME, WATCHER_INTERFACE_NAME, PROMETHEUS_METRICS

from dashboard.prometheus_utils.query import query_prometheus


def convert_cpu_data(data: list):
    """Flatten a Prometheus range-vector result and average CPU usage per host.

    Args:
        data: List of Prometheus series dicts; each entry is assumed to carry
            ``metric.host``, ``metric.instanceName`` and a ``values`` list of
            ``(timestamp, value)`` pairs — TODO confirm against the configured
            PROMETHEUS_METRICS['cpu_usage'] query.

    Returns:
        A pandas DataFrame with one row per host and columns
        ``host`` / ``cpu_usage`` (mean over time of the per-host sum).
    """
    metrics = []
    for entry in data:
        for t, val in entry["values"]:
            metrics.append({
                "timestamp": int(t),
                "host": entry["metric"]["host"],
                "cpu_usage": float(val),
                "instance": entry["metric"]["instanceName"],
            })

    df_cpu = pandas.DataFrame(metrics)
    df_cpu["timestamp"] = pandas.to_datetime(df_cpu["timestamp"], unit="s")

    # Sum usage across instances per (host, timestamp), then average over time.
    return (
        df_cpu.groupby(["host", "timestamp"])["cpu_usage"].sum()
        .groupby("host").mean()
        .reset_index()
    )


def get_audits(connection: Connection) -> list[dict] | None:
    """Collect PENDING Watcher audits enriched with current/projected CPU data.

    Args:
        connection: An authenticated OpenStack connection.

    Returns:
        A list of dicts (one per pending audit) with audit metadata, the
        planned migrations, and per-host CPU series before/after migration.

    Raises:
        requests.HTTPError: If any Watcher API call fails (raise_for_status).
    """
    session = connection.session

    watcher_endpoint = connection.endpoint_for(
        service_type=WATCHER_ENDPOINT_NAME,
        interface=WATCHER_INTERFACE_NAME,
    )

    # Collect instance Prometheus metrics (current state baseline).
    cpu_data = query_prometheus(PROMETHEUS_METRICS['cpu_usage'])
    cpu_metrics = convert_cpu_data(data=cpu_data)

    # Fetch audit list.
    audits_resp = session.get(f"{watcher_endpoint}/v1/audits")
    audits_resp.raise_for_status()
    # BUG FIX: the parsed payload was previously discarded and the raw
    # Response object was iterated below.
    audits = audits_resp.json().get('audits') or []

    # Fetch action plan list.
    actionplans_resp = session.get(f"{watcher_endpoint}/v1/action_plans")
    actionplans_resp.raise_for_status()
    # BUG FIX: same discarded-payload problem as above.
    action_plans = actionplans_resp.json().get('action_plans') or []

    # Filter audits by PENDING state.
    pending_audits = [audit for audit in audits if audit['state'] == "PENDING"]

    result = []
    for item in pending_audits:
        # BUG FIX: deepcopy — a shallow copy shared the nested "metric" dicts,
        # so rewriting entry['metric']['host'] leaked into later iterations.
        projected_cpu_data = deepcopy(cpu_data)

        audit_resp = session.get(f"{watcher_endpoint}/v1/audits/{item['uuid']}")
        audit_resp.raise_for_status()
        audit_resp = audit_resp.json()

        actionplan = next(
            (ap for ap in action_plans if ap.get("audit_uuid") == audit_resp['uuid']),
            None,
        )
        # BUG FIX: a PENDING audit may not have an action plan yet; subscripting
        # None crashed the whole dashboard.
        if actionplan is None:
            continue

        actions_resp = session.get(
            f"{watcher_endpoint}/v1/actions/?action_plan_uuid={actionplan['uuid']}"
        )
        actions_resp.raise_for_status()
        actions_resp = actions_resp.json().get('actions')

        migrations = []
        mapping = {}  # instance name -> destination host, for the projection below
        for action in actions_resp:
            action_resp = session.get(f"{watcher_endpoint}/v1/actions/{action['uuid']}")
            action_resp.raise_for_status()
            action_resp = action_resp.json()

            server = connection.get_server_by_id(action['input_parameters']['resource_id'])
            params = action_resp['input_parameters']
            mapping[params['resource_name']] = params['destination_node']

            migrations.append({
                "instanceName": params['resource_name'],
                "source": params['source_node'],
                "destination": params['destination_node'],
                "flavor": server.flavor.name,
                "impact": 'Low',
            })

        # Re-home each migrated instance's series to its destination host.
        for entry in projected_cpu_data:
            if (instance := entry['metric']['instanceName']) in mapping:
                entry['metric']['host'] = mapping[instance]

        projected_cpu_metrics = convert_cpu_data(projected_cpu_data)

        weights = audit_resp['parameters'].get('weights', {})
        result.append({
            "id": audit_resp['uuid'],
            "name": audit_resp['name'],
            "created_at": audit_resp['created_at'],
            "strategy": audit_resp['strategy_name'],
            "goal": audit_resp['goal_name'],
            "type": audit_resp['audit_type'],
            "scope": audit_resp['scope'],
            "cpu_weight": weights.get('instance_cpu_usage_weight', "none"),
            "ram_weight": weights.get('instance_ram_usage_weight', "none"),
            "migrations": migrations,
            "host_labels": cpu_metrics['host'].to_list(),
            "cpu_current": cpu_metrics['cpu_usage'].to_list(),
            "cpu_projected": projected_cpu_metrics['cpu_usage'].to_list(),
        })

    return result


# --- dashboard/openstack_utils/connect.py ---
import openstack
from openstack.connection import Connection as _Connection

from watcher_visio.settings import OPENSTACK_CLOUD, OPENSTACK_REGION_NAME


def get_connection() -> _Connection:
    """Open an OpenStack connection for the configured cloud and region."""
    return openstack.connect(cloud=OPENSTACK_CLOUD, region_name=OPENSTACK_REGION_NAME)


# --- dashboard/openstack_utils/flavor.py ---
from collections import Counter


def get_flavor_list(connection: Connection) -> dict:
    """Return the up-to-three most common flavors across all projects.

    Returns:
        Dict keyed ``first/second/third_common_flavor`` with ``name``/``count``
        entries; keys are omitted when fewer flavors exist.
    """
    servers = list(connection.compute.servers(all_projects=True))
    flavor_ids = [s.flavor['id'] for s in servers if 'id' in s.flavor]
    top = Counter(flavor_ids).most_common(3)

    result = {}
    for idx, prefix in enumerate(("first", "second", "third")):
        if len(top) > idx:
            result[f"{prefix}_common_flavor"] = {
                # NOTE(review): this is the flavor *id*, not a display name —
                # confirm whether templates expect the human-readable name.
                "name": top[idx][0],
                "count": top[idx][1],
            }

    return result
@register.filter
def sub(a, b):
    """Template filter: float subtraction; returns 0 for non-numeric input."""
    try:
        return float(a) - float(b)
    # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only conversion failures should fall back to 0.
    except (ValueError, TypeError):
        return 0


@register.filter
def convert_bytes(bytes_value, target_unit='GB'):
    """
    Convert a byte count to the given unit.

    Args:
        bytes_value: Size in bytes (any value coercible to float).
        target_unit: Target unit ('B', 'KB', 'MB', 'GB', 'TB'),
            case-insensitive; unknown units fall back to 'MB'.

    Returns:
        Float value in the target unit, rounded to 1 decimal place
        (0.0 for non-numeric input).
    """
    try:
        bytes_value = float(bytes_value)
    except (ValueError, TypeError):
        return 0.0

    conversion_factors = {
        'B': 1,
        'KB': 1024,
        'MB': 1024 ** 2,
        'GB': 1024 ** 3,
        'TB': 1024 ** 4,
    }

    factor = conversion_factors.get(target_unit.upper(), conversion_factors['MB'])
    return round(bytes_value / factor, 1)
collect_context(): ) pcpu_usage = float( query_prometheus( - query="" - ) - ) - vcpu_total = int ( - query_prometheus( - query="(sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode)))*(avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'}))" + query="sum(node_load5{job='node_exporter_compute'})" ) ) vcpu_allocated = int( @@ -35,95 +31,176 @@ def collect_context(): query="sum(libvirt_domain_info_virtual_cpus)" ) ) - vcpu_overcommit_ratio = float( - query_prometheus( - query="sum(libvirt_domain_info_virtual_cpus)/(sum(count(node_cpu_seconds_total{job='node_exporter_compute', mode='idle'}) without (cpu,mode)))" - ) - ) vcpu_overcommit_max = float( query_prometheus( query="avg(openstack_placement_resource_allocation_ratio{resourcetype='VCPU'})" ) ) - vm_count = int ( + pram_total = int( + query_prometheus( + query="sum(node_memory_MemTotal_bytes{job='node_exporter_compute'})" # memory in bytes + ) + ) + pram_usage = int ( + query_prometheus( + query="sum(node_memory_Active_bytes{job='node_exporter_compute'})" + ) + ) + vram_allocated = int( + query_prometheus( + query="sum(libvirt_domain_info_maximum_memory_bytes)" + ) + ) + vram_overcommit_max = float( + query_prometheus( + query="avg(avg_over_time(openstack_placement_resource_allocation_ratio{resourcetype='MEMORY_MB'}[5m]))" + ) + ) + vm_count = int( query_prometheus( query="sum(libvirt_domain_state_code)" ) ) - vm_active = int ( + vm_active = int( query_prometheus( query="sum(libvirt_domain_state_code{stateDesc='the domain is running'})" ) ) - return { - "region_name": "", - # <--- Hosts data ---> - "hosts_total": hosts_total, + + vcpu_total = pcpu_total * vcpu_overcommit_max + vram_total = pram_total * vram_overcommit_max + + context = { + # <--- Region data ---> + "region": { + "name": region_name, + "hosts_total": 6, + }, # <--- CPU data ---> # pCPU data - "pcpu_total": pcpu_total, - "pcpu_usage": pcpu_usage, - "pcpu_free": pcpu_total - pcpu_usage, + 
"pcpu": { + "total": pcpu_total, + "usage": pcpu_usage, + "free": pcpu_total - pcpu_usage, + "used_percentage": pcpu_usage / pcpu_total * 100, + }, # vCPU data - "vcpu_total": vcpu_total, - "vcpu_allocated": vcpu_allocated, - "vcpu_free": vcpu_total - vcpu_allocated, - "vcpu_overcommit_ratio": vcpu_overcommit_ratio, - "vcpu_overcommit_max": vcpu_overcommit_max, + "vcpu": { + "total": vcpu_total, + "allocated": vcpu_allocated, + "free": vcpu_total - vcpu_allocated, + "allocated_percentage": vcpu_allocated / vcpu_total * 100, + "overcommit_ratio": vcpu_allocated / pcpu_total, + "overcommit_max": vcpu_overcommit_max, + }, # <--- RAM data ---> # pRAM data - + "pram" : { + "total": pram_total, + "usage": pram_usage, + "free": pram_total - pram_usage, + "used_percentage": pram_usage / pram_total * 100, + }, # vRAM data - + "vram": { + "total": vram_total, + "allocated": vram_allocated, + "free": vram_total - vram_allocated, + "allocated_percentage": vram_allocated / vram_total * 100, + "overcommit_ratio": vram_allocated / pram_total, + "overcommit_max": vram_overcommit_max, + }, # <--- VM data ---> - "vm_count": vm_count, - "vm_active": vm_active, - "vm_stopped": vm_count - vm_active, - "vm_error": "", - "avg_cpu_per_vm": vcpu_allocated / vm_count, - "avg_ram_per_vm": "", - "vm_density": vm_count / hosts_total, + "vm": { + "count": vm_count, + "active": vm_active, + "stopped": vm_count - vm_active, + "avg_cpu": vcpu_allocated / vm_count, + "avg_ram": vram_allocated / vm_count, + "density": vm_count / hosts_total, + }, + "flavors": flavors, + "audits": audits, } + return context def index(request): - context = {**_BASE, - # CPU and RAM utilization data - 'cpu_total': 160, - 'cpu_used': 45, - 'cpu_free': 66, - 'cpu_used_percentage': 42.0, - 'ram_used': 128, - 'ram_free': 256, - 'ram_used_percentage': 33.3, - - # Resource allocation data - 'cpu_allocated': 94, - 'cpu_total': 160, - 'cpu_overcommit_ratio': 1.5, - 'ram_allocated': 384, - 'ram_total': 512, - 
'ram_overcommit_ratio': 1.2, - - # Instance summary data - 'vm_count': 47, - 'vm_active': 42, - 'vm_stopped': 5, - 'vm_error': 0, - 'common_flavor': 'm1.medium', - 'common_flavor_count': 18, - 'second_common_flavor': { - 'name': 'm1.small', - 'count': 12 + hosts_total = 6 + pcpu_total = 672 + pcpu_usage = 39.2 + vcpu_total = 3360 + vcpu_allocated = 98 + vcpu_overcommit_max = 5 + pram_total = 562500000000 + pram_usage = 4325000000 + vram_total = 489375000000 + vram_allocated = 5625000000 + vram_overcommit_max = 0.87 + vm_count = 120 + vm_active = 90 + context = { + # <--- Region data ---> + "region": { + "name": "ct3k1ldt", + "hosts_total": 6, }, - 'third_common_flavor': { - 'name': 'm1.large', - 'count': 8 + # <--- CPU data ---> + # pCPU data + "pcpu": { + "total": pcpu_total, + "usage": pcpu_usage, + "free": pcpu_total - pcpu_usage, + "used_percentage": pcpu_usage / pcpu_total * 100, + }, + # vCPU data + "vcpu": { + "total": vcpu_total, + "allocated": vcpu_allocated, + "free": vcpu_total - vcpu_allocated, + "allocated_percentage": vcpu_allocated / vcpu_total * 100, + "overcommit_ratio": vcpu_allocated / pcpu_total, + "overcommit_max": vcpu_overcommit_max, + }, + # <--- RAM data ---> + # pRAM data + "pram" : { + "total": pram_total, + "usage": pram_usage, + "free": pram_total - pram_usage, + "used_percentage": pram_usage / pram_total * 100, + }, + # vRAM data + "vram": { + "total": vram_total, + "allocated": vram_allocated, + "free": vram_total - vram_allocated, + "allocated_percentage": vram_allocated / vram_total * 100, + "overcommit_ratio": vram_allocated / pram_total, + "overcommit_max": vram_overcommit_max, + }, + # <--- VM data ---> + "vm": { + "count": vm_count, + "active": vm_active, + "stopped": vm_count - vm_active, + "avg_cpu": vcpu_allocated / vm_count, + "avg_ram": vram_allocated / vm_count, + "density": vm_count / hosts_total, + }, + "flavors": { + 'first_common_flavor': { + 'name': 'm1.medium', + 'count': 18 + }, + 'second_common_flavor': { + 'name': 
'm1.small', + 'count': 12 + }, + 'third_common_flavor': { + 'name': 'm1.large', + 'count': 8 + }, }, - - # Quick stats - 'avg_cpu_per_vm': 2.0, - 'avg_ram_per_vm': 8.2, - 'vm_density': 9.4, # Audit data 'audits': [ @@ -247,9 +324,9 @@ def index(request): # Serialize lists for JavaScript for audit in context['audits']: - audit['migrations'] = json.dumps(audit['migrations']) - audit['host_labels'] = json.dumps(audit['host_labels']) - audit['cpu_current'] = json.dumps(audit['cpu_current']) - audit['cpu_projected'] = json.dumps(audit['cpu_projected']) + audit['migrations'] = json.dumps(audit['migrations']) + audit['host_labels'] = json.dumps(audit['host_labels']) + audit['cpu_current'] = json.dumps(audit['cpu_current']) + audit['cpu_projected'] = json.dumps(audit['cpu_projected']) return render(request, 'index.html', context) \ No newline at end of file diff --git a/static/css/main.css b/static/css/main.css index 2cfc25b..e190f8a 100644 --- a/static/css/main.css +++ b/static/css/main.css @@ -1,3 +1,39 @@ @import "tailwindcss"; @plugin "daisyui"; + +@plugin "daisyui/theme" { + name: "light"; + default: true; + prefersdark: false; + color-scheme: "light"; + --color-base-100: oklch(100% 0 0); + --color-base-200: oklch(98% 0 0); + --color-base-300: oklch(95% 0 0); + --color-base-content: oklch(21% 0.006 285.885); + --color-primary: #09418E; + --color-primary-content: oklch(93% 0.034 272.788); + --color-secondary: #428BCA; + --color-secondary-content: oklch(100% 0 0); + --color-accent: #A492FF; + --color-accent-content: oklch(21% 0.006 285.885); + --color-neutral: #333333; + --color-neutral-content: oklch(92% 0.004 286.32); + --color-info: oklch(74% 0.16 232.661); + --color-info-content: oklch(29% 0.066 243.157); + --color-success: oklch(76% 0.177 163.223); + --color-success-content: oklch(37% 0.077 168.94); + --color-warning: oklch(82% 0.189 84.429); + --color-warning-content: oklch(41% 0.112 45.904); + --color-error: oklch(71% 0.194 13.428); + --color-error-content: 
oklch(27% 0.105 12.094); + --radius-selector: 0.5rem; + --radius-field: 0.25rem; + --radius-box: 0.5rem; + --size-selector: 0.25rem; + --size-field: 0.25rem; + --border: 1px; + --depth: 1; + --noise: 0; +} + @source "../../templates"; \ No newline at end of file diff --git a/templates/base.html b/templates/base.html index 8983f11..46a72f0 100644 --- a/templates/base.html +++ b/templates/base.html @@ -4,7 +4,7 @@ - {% block title %}Watcher Visio{% endblock %} + {% block title %}SWatcher{% endblock %} {% block imports %} {% endblock %} @@ -15,13 +15,13 @@ @@ -113,36 +113,32 @@
CPU Allocation - {{ cpu_allocated }} / {{ cpu_total }} vCPU + {{ vcpu.allocated }} / {{ vcpu.total }} vCPU
- {% with pct=cpu_allocated|div:cpu_total|mul:100 %}
- - {{ pct|floatformat:1 }}% + + {{ vcpu.allocated_percentage|floatformat:1 }}%
- OC: x{{ cpu_overcommit_ratio }} - {{ pct|floatformat:1 }}% allocated + overcommit: {{ vcpu.overcommit_ratio|floatformat:1 }} / {{ vcpu.overcommit_max }} + {{ vcpu.allocated_percentage|floatformat:1 }}% allocated
- {% endwith %}
RAM Allocation - {{ ram_allocated }} / {{ ram_total }} GB + {{ vram.allocated|convert_bytes }} / {{ vram.total|convert_bytes }} GB
- {% with pct=ram_allocated|div:ram_total|mul:100 %}
- - {{ pct|floatformat:1 }}% + + {{ vram.allocated_percentage|floatformat:1 }}%
- OC: x{{ ram_overcommit_ratio }} - {{ pct|floatformat:1 }}% allocated + overcommit: {{ vram.overcommit_ratio|floatformat:1 }} / {{ vram.overcommit_max }} + {{ vram.allocated_percentage|floatformat:1 }}% allocated
- {% endwith %}
@@ -162,34 +158,34 @@
- {{ common_flavor }} - {{ common_flavor_count }} instances + {{ flavors.first_common_flavor.name }} + {{ flavors.first_common_flavor.count }} instances
Share - {{ common_flavor_count|div:vm_count|mul:100|floatformat:0 }}% + {{ flavors.first_common_flavor.count|div:vm.count|mul:100|floatformat:0 }}%
- {% if second_common_flavor %} + {% if flavors.second_common_flavor %}
- {{ second_common_flavor.name }} + {{ flavors.second_common_flavor.name }}
- {{ second_common_flavor.count }} + {{ flavors.second_common_flavor.count }}
{% endif %} - {% if third_common_flavor %} + {% if flavors.third_common_flavor %}
- {{ third_common_flavor.name }} + {{ flavors.third_common_flavor.name }}
- {{ third_common_flavor.count }} + {{ flavors.third_common_flavor.count }}
{% endif %}
diff --git a/watcher_visio/settings.py b/watcher_visio/settings.py index 2c77273..d4ac382 100644 --- a/watcher_visio/settings.py +++ b/watcher_visio/settings.py @@ -42,8 +42,10 @@ INSTALLED_APPS = [ # Prometheus settings (environment override recommended) PROMETHEUS_URL = "http://localhost:9090" -PROMETHEUS_DEFAULT_CPU_METRIC = "libvirt_domain_info_cpu_time_seconds_total" -PROMETHEUS_DEFAULT_RAM_METRIC = "libvirt_domain_info_memory_usage_bytes" +PROMETHEUS_METRICS = { + "cpu_usage": "", + "ram_usage": "" +} # Openstack cloud settings OPENSTACK_REGION_NAME = "default"