import requests
import pandas as pd
import gradio as gr
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
from datetime import datetime
import json
import os
import logging
from typing import List, Dict, Any, Optional

# Log to both a file and the console so issues can be diagnosed in production.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("app_debug.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

logger.info("============= APPLICATION STARTING =============")
logger.info(f"Running from directory: {os.getcwd()}")

# Global DataFrame cache, populated by fetch_apr_data_from_db().
global_df = None

API_BASE_URL = "https://afmdb.autonolas.tech"
logger.info(f"Using API endpoint: {API_BASE_URL}")

def get_agent_type_by_name(type_name: str) -> Optional[Dict[str, Any]]:
    """Get an agent type by name, or None if it does not exist."""
    url = f"{API_BASE_URL}/api/agent-types/name/{type_name}"
    logger.debug(f"Calling API: {url}")

    try:
        # A timeout guards against the request hanging indefinitely.
        response = requests.get(url, timeout=30)
        logger.debug(f"Response status: {response.status_code}")

        if response.status_code == 404:
            logger.error(f"Agent type '{type_name}' not found")
            return None

        response.raise_for_status()
        result = response.json()
        logger.debug(f"Agent type response: {result}")
        return result
    except Exception as e:
        logger.error(f"Error in get_agent_type_by_name: {e}")
        return None


def get_attribute_definition_by_name(attr_name: str) -> Optional[Dict[str, Any]]:
    """Get an attribute definition by name, or None if it does not exist."""
    url = f"{API_BASE_URL}/api/attributes/name/{attr_name}"
    logger.debug(f"Calling API: {url}")

    try:
        response = requests.get(url, timeout=30)
        logger.debug(f"Response status: {response.status_code}")

        if response.status_code == 404:
            logger.error(f"Attribute definition '{attr_name}' not found")
            return None

        response.raise_for_status()
        result = response.json()
        logger.debug(f"Attribute definition response: {result}")
        return result
    except Exception as e:
        logger.error(f"Error in get_attribute_definition_by_name: {e}")
        return None
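
# Typical lookup flow (a sketch; fetch_apr_data_from_db() below does the same):
#   modius_type = get_agent_type_by_name("Modius")
#   if modius_type:
#       apr_attr = get_attribute_definition_by_name("APR")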

def get_agents_by_type(type_id: int) -> List[Dict[str, Any]]:
    """Get all agents of a specific type."""
    url = f"{API_BASE_URL}/api/agent-types/{type_id}/agents/"
    logger.debug(f"Calling API: {url}")

    try:
        response = requests.get(url, timeout=30)
        logger.debug(f"Response status: {response.status_code}")

        if response.status_code == 404:
            logger.error(f"No agents found for type ID {type_id}")
            return []

        response.raise_for_status()
        result = response.json()
        logger.debug(f"Agents count: {len(result)}")
        logger.debug(f"First few agents: {result[:2] if result else []}")
        return result
    except Exception as e:
        logger.error(f"Error in get_agents_by_type: {e}")
        return []


def get_attribute_values_by_type_and_attr(agents: List[Dict[str, Any]], attr_def_id: int) -> List[Dict[str, Any]]:
    """Get all attribute values for a specific attribute definition across the given list of agents."""
    all_attributes = []
    logger.debug(f"Getting attributes for {len(agents)} agents with attr_def_id: {attr_def_id}")

    for agent in agents:
        agent_id = agent["agent_id"]

        url = f"{API_BASE_URL}/api/agents/{agent_id}/attributes/"
        logger.debug(f"Calling API for agent {agent_id}: {url}")

        try:
            # The limit assumes each agent has fewer than 1000 attributes;
            # anything beyond that would require pagination.
            response = requests.get(url, params={"limit": 1000}, timeout=30)

            if response.status_code == 404:
                logger.error(f"No attributes found for agent ID {agent_id}")
                continue

            response.raise_for_status()
            agent_attrs = response.json()
            logger.debug(f"Agent {agent_id} has {len(agent_attrs)} attributes")

            # Keep only the attribute values matching the requested definition.
            filtered_attrs = [attr for attr in agent_attrs if attr.get("attr_def_id") == attr_def_id]
            logger.debug(f"Agent {agent_id} has {len(filtered_attrs)} APR attributes")

            if filtered_attrs:
                logger.debug(f"Sample attribute for agent {agent_id}: {filtered_attrs[0]}")

            all_attributes.extend(filtered_attrs)
        except requests.exceptions.RequestException as e:
            logger.error(f"Error fetching attributes for agent ID {agent_id}: {e}")

    logger.info(f"Total APR attributes found across all agents: {len(all_attributes)}")
    return all_attributes

def get_agent_name(agent_id: int, agents: List[Dict[str, Any]]) -> str:
    """Get agent name from agent ID"""
    for agent in agents:
        if agent["agent_id"] == agent_id:
            return agent["agent_name"]
    return "Unknown"

def extract_apr_value(attr: Dict[str, Any]) -> Dict[str, Any]:
    """Extract the APR value and timestamp from an attribute's JSON value."""
    try:
        agent_id = attr.get("agent_id", "unknown")
        logger.debug(f"Extracting APR value for agent {agent_id}")

        if attr["json_value"] is None:
            logger.debug(f"Agent {agent_id}: json_value is None")
            return {"apr": None, "timestamp": None, "agent_id": agent_id, "is_dummy": False}

        # The API may return the JSON payload either as a string or as an
        # already-parsed object; normalize to a dict.
        if isinstance(attr["json_value"], str):
            logger.debug(f"Agent {agent_id}: json_value is string, parsing")
            json_data = json.loads(attr["json_value"])
        else:
            json_data = attr["json_value"]

        apr = json_data.get("apr")
        timestamp = json_data.get("timestamp")

        logger.debug(f"Agent {agent_id}: Raw APR value: {apr}, timestamp: {timestamp}")

        # Convert the Unix timestamp (seconds) to a datetime, if present.
        timestamp_dt = None
        if timestamp:
            timestamp_dt = datetime.fromtimestamp(timestamp)

        result = {"apr": apr, "timestamp": timestamp_dt, "agent_id": agent_id, "is_dummy": False}
        logger.debug(f"Agent {agent_id}: Extracted result: {result}")
        return result
    except (json.JSONDecodeError, KeyError, TypeError) as e:
        logger.error(f"Error parsing JSON value: {e} for agent_id: {attr.get('agent_id')}")
        logger.error(f"Problematic json_value: {attr.get('json_value')}")
        return {"apr": None, "timestamp": None, "agent_id": attr.get('agent_id'), "is_dummy": False}

def fetch_apr_data_from_db():
    """Fetch APR data from the database via the API and cache it in global_df."""
    global global_df

    logger.info("==== Starting APR data fetch ====")

    try:
        # Step 1: find the Modius agent type.
        logger.info("Finding Modius agent type")
        modius_type = get_agent_type_by_name("Modius")
        if not modius_type:
            logger.error("Modius agent type not found, returning empty DataFrame")
            global_df = pd.DataFrame([])
            return global_df

        type_id = modius_type["type_id"]
        logger.info(f"Found Modius agent type with ID: {type_id}")

        # Step 2: find the APR attribute definition.
        logger.info("Finding APR attribute definition")
        apr_attr_def = get_attribute_definition_by_name("APR")
        if not apr_attr_def:
            logger.error("APR attribute definition not found, returning empty DataFrame")
            global_df = pd.DataFrame([])
            return global_df

        attr_def_id = apr_attr_def["attr_def_id"]
        logger.info(f"Found APR attribute definition with ID: {attr_def_id}")

        # Step 3: list all Modius agents.
        logger.info(f"Getting all agents of type Modius (type_id: {type_id})")
        modius_agents = get_agents_by_type(type_id)
        if not modius_agents:
            logger.error("No agents of type 'Modius' found")
            global_df = pd.DataFrame([])
            return global_df

        logger.info(f"Found {len(modius_agents)} Modius agents")
        logger.debug(f"Modius agents: {[{'agent_id': a['agent_id'], 'agent_name': a['agent_name']} for a in modius_agents]}")

        # Step 4: fetch the APR attribute values for those agents.
        logger.info(f"Fetching APR values for all Modius agents (attr_def_id: {attr_def_id})")
        apr_attributes = get_attribute_values_by_type_and_attr(modius_agents, attr_def_id)
        if not apr_attributes:
            logger.error("No APR values found for 'Modius' agents")
            global_df = pd.DataFrame([])
            return global_df

        logger.info(f"Found {len(apr_attributes)} APR attributes total")

        # Step 5: extract and label each data point.
        logger.info("Extracting APR data from attributes")
        apr_data_list = []
        for attr in apr_attributes:
            apr_data = extract_apr_value(attr)
            if apr_data["apr"] is not None and apr_data["timestamp"] is not None:
                agent_name = get_agent_name(attr["agent_id"], modius_agents)
                apr_data["agent_name"] = agent_name
                apr_data["is_dummy"] = False

                # Negative values are treated as performance metrics rather
                # than APR values.
                if apr_data["apr"] < 0:
                    apr_data["metric_type"] = "Performance"
                    logger.debug(f"Agent {agent_name} ({attr['agent_id']}): Performance value: {apr_data['apr']}")
                else:
                    apr_data["metric_type"] = "APR"
                    logger.debug(f"Agent {agent_name} ({attr['agent_id']}): APR value: {apr_data['apr']}")

                apr_data_list.append(apr_data)

        if not apr_data_list:
            logger.error("No valid APR data extracted")
            global_df = pd.DataFrame([])
            return global_df

        global_df = pd.DataFrame(apr_data_list)

        logger.info(f"Created DataFrame with {len(global_df)} rows")
        logger.info(f"DataFrame columns: {global_df.columns.tolist()}")
        logger.info(f"APR statistics: min={global_df['apr'].min()}, max={global_df['apr'].max()}, mean={global_df['apr'].mean()}")
        logger.info(f"Metric types count: {global_df['metric_type'].value_counts().to_dict()}")
        logger.info(f"Agents count: {global_df['agent_name'].value_counts().to_dict()}")

        logger.debug("Final DataFrame contents:")
        for idx, row in global_df.iterrows():
            logger.debug(f"Row {idx}: {row.to_dict()}")

        return global_df

    except requests.exceptions.RequestException as e:
        logger.error(f"API request error: {e}")
        global_df = pd.DataFrame([])
        return global_df
    except Exception as e:
        logger.error(f"Error fetching APR data: {e}")
        logger.exception("Exception details:")
        global_df = pd.DataFrame([])
        return global_df

def generate_apr_visualizations():
    """Generate APR visualizations with real data only (no dummy data)."""
    global global_df

    df = fetch_apr_data_from_db()

    if df.empty:
        logger.info("No APR data available. Using fallback visualization.")
        # Build an empty figure with a message instead of a graph.
        fig = go.Figure()
        fig.add_annotation(
            x=0.5, y=0.5,
            text="No APR data available",
            font=dict(size=20),
            showarrow=False
        )
        fig.update_layout(
            xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)
        )

        fig.write_html("modius_apr_combined_graph.html")
        try:
            # Static image export requires kaleido; skip it if unavailable.
            fig.write_image("modius_apr_combined_graph.png")
        except Exception as e:
            logger.error(f"Error saving fallback image: {e}")

        csv_file = None
        return fig, csv_file

    # Keep the cached DataFrame in sync (fetch_apr_data_from_db sets it too).
    global_df = df

    csv_file = save_to_csv(df)

    combined_fig = create_combined_time_series_graph(df)

    return combined_fig, csv_file
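
# Sketch of typical usage (the dashboard's refresh callback below does the
# same, discarding the CSV path):
#   fig, csv_path = generate_apr_visualizations()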

def create_time_series_graph_per_agent(df):
    """Create a time series graph for each agent using Plotly."""
    unique_agents = df['agent_id'].unique()

    if len(unique_agents) == 0:
        logger.error("No agent data to plot")
        fig = go.Figure()
        fig.add_annotation(
            text="No agent data available",
            x=0.5, y=0.5,
            showarrow=False, font=dict(size=20)
        )
        return fig

    # One subplot row per agent.
    fig = make_subplots(rows=len(unique_agents), cols=1,
                        subplot_titles=[f"Agent: {df[df['agent_id'] == agent_id]['agent_name'].iloc[0]}"
                                        for agent_id in unique_agents],
                        vertical_spacing=0.1)

    for i, agent_id in enumerate(unique_agents):
        agent_data = df[df['agent_id'] == agent_id].copy()
        agent_name = agent_data['agent_name'].iloc[0]
        row = i + 1

        # Horizontal line at y=0 separating APR from performance values.
        fig.add_shape(
            type="line", line=dict(dash="solid", width=1.5, color="black"),
            y0=0, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(),
            row=row, col=1
        )

        # Shaded backgrounds: light blue above zero (APR), light red below
        # zero (performance).
        fig.add_shape(
            type="rect", fillcolor="rgba(230, 243, 255, 0.3)", line=dict(width=0),
            y0=0, y1=1000, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(),
            row=row, col=1, layer="below"
        )
        fig.add_shape(
            type="rect", fillcolor="rgba(255, 230, 230, 0.3)", line=dict(width=0),
            y0=-1000, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(),
            row=row, col=1, layer="below"
        )

        apr_data = agent_data[agent_data['metric_type'] == 'APR']
        perf_data = agent_data[agent_data['metric_type'] == 'Performance']

        combined_agent_data = agent_data.sort_values('timestamp')

        # Connect all of the agent's values with a single line.
        fig.add_trace(
            go.Scatter(
                x=combined_agent_data['timestamp'],
                y=combined_agent_data['apr'],
                mode='lines',
                line=dict(color='purple', width=2),
                name=f'{agent_name}',
                legendgroup=agent_name,
                showlegend=(i == 0),
                hovertemplate='Time: %{x}<br>Value: %{y:.2f}<extra></extra>'
            ),
            row=row, col=1
        )

        # APR data points as blue circles.
        if not apr_data.empty:
            fig.add_trace(
                go.Scatter(
                    x=apr_data['timestamp'],
                    y=apr_data['apr'],
                    mode='markers',
                    marker=dict(color='blue', size=10, symbol='circle'),
                    name='APR',
                    legendgroup='APR',
                    showlegend=(i == 0),
                    hovertemplate='Time: %{x}<br>APR: %{y:.2f}<extra></extra>'
                ),
                row=row, col=1
            )

        # Performance data points as red squares.
        if not perf_data.empty:
            fig.add_trace(
                go.Scatter(
                    x=perf_data['timestamp'],
                    y=perf_data['apr'],
                    mode='markers',
                    marker=dict(color='red', size=10, symbol='square'),
                    name='Performance',
                    legendgroup='Performance',
                    showlegend=(i == 0),
                    hovertemplate='Time: %{x}<br>Performance: %{y:.2f}<extra></extra>'
                ),
                row=row, col=1
            )

        fig.update_xaxes(title_text="Time", row=row, col=1)
        fig.update_yaxes(title_text="Value", row=row, col=1, gridcolor='rgba(0,0,0,0.1)')

    fig.update_layout(
        height=400 * len(unique_agents),
        width=1000,
        title_text="APR and Performance Values per Agent",
        template="plotly_white",
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="right",
            x=1
        ),
        margin=dict(r=20, l=20, t=30, b=20),
        hovermode="closest"
    )

    graph_file = "modius_apr_per_agent_graph.html"
    fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False)

    img_file = "modius_apr_per_agent_graph.png"
    try:
        # Static image export requires kaleido; skip it if unavailable.
        fig.write_image(img_file)
        logger.info(f"Per-agent graph saved to {graph_file} and {img_file}")
    except Exception as e:
        logger.error(f"Error saving image: {e}")
        logger.info(f"Per-agent graph saved to {graph_file} only")

    return fig

def create_combined_time_series_graph(df):
    """Create a combined time series graph for all agents using Plotly."""
    if len(df) == 0:
        logger.error("No data to plot combined graph")
        fig = go.Figure()
        fig.add_annotation(
            text="No data available",
            x=0.5, y=0.5,
            showarrow=False, font=dict(size=20)
        )
        return fig

    # Normalize dtypes before plotting.
    df['apr'] = df['apr'].astype(float)
    df['metric_type'] = df['metric_type'].astype(str)

    logger.info(f"Graph data - shape: {df.shape}, columns: {df.columns}")
    logger.info(f"Graph data - unique agents: {df['agent_name'].unique().tolist()}")
    logger.info(f"Graph data - unique metric types: {df['metric_type'].unique().tolist()}")
    logger.info(f"Graph data - min APR: {df['apr'].min()}, max APR: {df['apr'].max()}")

    # Export the graph data for debugging.
    debug_csv = "debug_graph_data.csv"
    df.to_csv(debug_csv)
    logger.info(f"Exported graph data to {debug_csv} for debugging")

    # Write a detailed per-agent report alongside the CSV.
    with open("debug_graph_data_report.txt", "w") as f:
        f.write("==== GRAPH DATA REPORT ====\n\n")
        f.write(f"Total data points: {len(df)}\n")
        f.write(f"Timestamp range: {df['timestamp'].min()} to {df['timestamp'].max()}\n\n")

        unique_agents = df['agent_id'].unique()
        f.write(f"Number of agents: {len(unique_agents)}\n\n")

        for agent_id in unique_agents:
            agent_data = df[df['agent_id'] == agent_id]
            agent_name = agent_data['agent_name'].iloc[0]

            f.write(f"== Agent: {agent_name} (ID: {agent_id}) ==\n")
            f.write(f"  Total data points: {len(agent_data)}\n")

            apr_data = agent_data[agent_data['metric_type'] == 'APR']
            perf_data = agent_data[agent_data['metric_type'] == 'Performance']

            f.write(f"  APR data points: {len(apr_data)}\n")
            f.write(f"  Performance data points: {len(perf_data)}\n")

            if not apr_data.empty:
                f.write(f"  APR values: {apr_data['apr'].tolist()}\n")
                f.write(f"  APR timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in apr_data['timestamp']]}\n")

            if not perf_data.empty:
                f.write(f"  Performance values: {perf_data['apr'].tolist()}\n")
                f.write(f"  Performance timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in perf_data['timestamp']]}\n")

            f.write("\n")

    logger.info("Generated detailed graph data report")

    fig = go.Figure()

    # Assign each agent a distinct qualitative color.
    unique_agents = df['agent_id'].unique()
    colors = px.colors.qualitative.Plotly[:len(unique_agents)]

    # Ensure the y-range covers at least [-100, 100].
    min_apr = min(df['apr'].min(), -100)
    max_apr = max(df['apr'].max(), 100)

    min_time = df['timestamp'].min()
    max_time = df['timestamp'].max()

    # Light blue background above zero (APR region).
    fig.add_shape(
        type="rect",
        fillcolor="rgba(230, 243, 255, 0.3)",
        line=dict(width=0),
        y0=0, y1=max_apr,
        x0=min_time, x1=max_time,
        layer="below"
    )

    # Light red background below zero (performance region).
    fig.add_shape(
        type="rect",
        fillcolor="rgba(255, 230, 230, 0.3)",
        line=dict(width=0),
        y0=min_apr, y1=0,
        x0=min_time, x1=max_time,
        layer="below"
    )

    # Horizontal zero line.
    fig.add_shape(
        type="line",
        line=dict(dash="solid", width=1.5, color="black"),
        y0=0, y1=0,
        x0=min_time, x1=max_time
    )

    for i, agent_id in enumerate(unique_agents):
        agent_data = df[df['agent_id'] == agent_id].copy()
        agent_name = agent_data['agent_name'].iloc[0]
        color = colors[i % len(colors)]

        agent_data = agent_data.sort_values('timestamp')

        logger.info(f"Plotting agent: {agent_name} (ID: {agent_id}) with {len(agent_data)} points")
        for idx, row in agent_data.iterrows():
            logger.info(f"  Point {idx}: timestamp={row['timestamp']}, apr={row['apr']}, type={row['metric_type']}")

        # One connecting line per agent.
        fig.add_trace(
            go.Scatter(
                x=agent_data['timestamp'],
                y=agent_data['apr'],
                mode='lines',
                line=dict(color=color, width=2),
                name=f'{agent_name}',
                legendgroup=agent_name,
                hovertemplate='Time: %{x}<br>Value: %{y:.2f}<br>Agent: ' + agent_name + '<extra></extra>'
            )
        )

        # APR markers (circles), hidden from the legend.
        apr_data = agent_data[agent_data['metric_type'] == 'APR']
        if not apr_data.empty:
            logger.info(f"  Adding {len(apr_data)} APR markers for {agent_name}")
            for idx, row in apr_data.iterrows():
                logger.info(f"    APR marker: timestamp={row['timestamp']}, apr={row['apr']}")

            fig.add_trace(
                go.Scatter(
                    x=apr_data['timestamp'],
                    y=apr_data['apr'],
                    mode='markers',
                    marker=dict(color=color, symbol='circle', size=10),
                    name=f'{agent_name} APR',
                    legendgroup=agent_name,
                    showlegend=False,
                    hovertemplate='Time: %{x}<br>APR: %{y:.2f}<br>Agent: ' + agent_name + '<extra></extra>',
                    visible=True
                )
            )

        # Performance markers (squares), hidden from the legend.
        perf_data = agent_data[agent_data['metric_type'] == 'Performance']
        if not perf_data.empty:
            logger.info(f"  Adding {len(perf_data)} Performance markers for {agent_name}")
            for idx, row in perf_data.iterrows():
                logger.info(f"    Performance marker: timestamp={row['timestamp']}, apr={row['apr']}")

            fig.add_trace(
                go.Scatter(
                    x=perf_data['timestamp'],
                    y=perf_data['apr'],
                    mode='markers',
                    marker=dict(color=color, symbol='square', size=10),
                    name=f'{agent_name} Perf',
                    legendgroup=agent_name,
                    showlegend=False,
                    hovertemplate='Time: %{x}<br>Performance: %{y:.2f}<br>Agent: ' + agent_name + '<extra></extra>',
                    visible=True
                )
            )

    fig.update_layout(
        title="APR and Performance Values for All Agents",
        xaxis_title="Time",
        yaxis_title="Value",
        template="plotly_white",
        height=600,
        width=1000,
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="right",
            x=1,
            groupclick="toggleitem"
        ),
        margin=dict(r=20, l=20, t=30, b=20),
        hovermode="closest"
    )

    # Pad the y-axis range by 10% on each side.
    y_padding = (max_apr - min_apr) * 0.1
    fig.update_yaxes(
        showgrid=True,
        gridwidth=1,
        gridcolor='rgba(0,0,0,0.1)',
        range=[min_apr - y_padding, max_apr + y_padding]
    )

    fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='rgba(0,0,0,0.1)')

    graph_file = "modius_apr_combined_graph.html"
    fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False)

    img_file = "modius_apr_combined_graph.png"
    try:
        fig.write_image(img_file)
        logger.info(f"Combined graph saved to {graph_file} and {img_file}")
    except Exception as e:
        logger.error(f"Error saving image: {e}")
        logger.info(f"Combined graph saved to {graph_file} only")

    return fig

def save_to_csv(df):
    """Save the APR data DataFrame to a CSV file and return the file path."""
    if df.empty:
        logger.error("No APR data to save to CSV")
        return None

    csv_file = "modius_apr_values.csv"

    df.to_csv(csv_file, index=False)
    logger.info(f"APR data saved to {csv_file}")

    # Also save per-agent and overall statistics alongside the raw values.
    stats_df = generate_statistics_from_data(df)
    stats_csv = "modius_apr_statistics.csv"
    stats_df.to_csv(stats_csv, index=False)
    logger.info(f"Statistics saved to {stats_csv}")

    return csv_file

def generate_statistics_from_data(df):
    """Generate per-agent and overall statistics from the APR data."""
    if df.empty:
        return pd.DataFrame()

    unique_agents = df['agent_id'].unique()
    stats_list = []

    # Per-agent statistics.
    for agent_id in unique_agents:
        agent_data = df[df['agent_id'] == agent_id]
        agent_name = agent_data['agent_name'].iloc[0]

        apr_data = agent_data[agent_data['metric_type'] == 'APR']
        real_apr = apr_data[apr_data['is_dummy'] == False]

        perf_data = agent_data[agent_data['metric_type'] == 'Performance']
        real_perf = perf_data[perf_data['is_dummy'] == False]

        stats = {
            'agent_id': agent_id,
            'agent_name': agent_name,
            'total_points': len(agent_data),
            'apr_points': len(apr_data),
            'performance_points': len(perf_data),
            'real_apr_points': len(real_apr),
            'real_performance_points': len(real_perf),
            'avg_apr': apr_data['apr'].mean() if not apr_data.empty else None,
            'avg_performance': perf_data['apr'].mean() if not perf_data.empty else None,
            'max_apr': apr_data['apr'].max() if not apr_data.empty else None,
            'min_apr': apr_data['apr'].min() if not apr_data.empty else None,
            'latest_timestamp': agent_data['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not agent_data.empty else None
        }
        stats_list.append(stats)

    # Overall statistics across all agents.
    apr_only = df[df['metric_type'] == 'APR']
    perf_only = df[df['metric_type'] == 'Performance']

    overall_stats = {
        'agent_id': 'ALL',
        'agent_name': 'All Agents',
        'total_points': len(df),
        'apr_points': len(apr_only),
        'performance_points': len(perf_only),
        'real_apr_points': len(apr_only[apr_only['is_dummy'] == False]),
        'real_performance_points': len(perf_only[perf_only['is_dummy'] == False]),
        'avg_apr': apr_only['apr'].mean() if not apr_only.empty else None,
        'avg_performance': perf_only['apr'].mean() if not perf_only.empty else None,
        'max_apr': apr_only['apr'].max() if not apr_only.empty else None,
        'min_apr': apr_only['apr'].min() if not apr_only.empty else None,
        'latest_timestamp': df['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not df.empty else None
    }
    stats_list.append(overall_stats)

    return pd.DataFrame(stats_list)
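
# Sketch (assuming data has already been fetched into a DataFrame df):
#   stats = generate_statistics_from_data(df)
#   print(stats[['agent_name', 'avg_apr', 'min_apr', 'max_apr']])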

def create_transaction_visualizations():
    """Dummy implementation that returns a placeholder graph."""
    fig = go.Figure()
    fig.add_annotation(
        text="Blockchain data loading disabled - placeholder visualization",
        x=0.5, y=0.5, xref="paper", yref="paper",
        showarrow=False, font=dict(size=20)
    )
    return fig


def create_active_agents_visualizations():
    """Dummy implementation that returns a placeholder graph."""
    fig = go.Figure()
    fig.add_annotation(
        text="Blockchain data loading disabled - placeholder visualization",
        x=0.5, y=0.5, xref="paper", yref="paper",
        showarrow=False, font=dict(size=20)
    )
    return fig

# The original Web3 setup is kept below as a disabled block for reference.
"""
# Load environment variables from .env file
# RPC URLs
OPTIMISM_RPC_URL = os.getenv('OPTIMISM_RPC_URL')
MODE_RPC_URL = os.getenv('MODE_RPC_URL')

# Initialize Web3 instances
web3_instances = {
    'optimism': Web3(Web3.HTTPProvider(OPTIMISM_RPC_URL)),
    'mode': Web3(Web3.HTTPProvider(MODE_RPC_URL))
}

# Contract addresses for service registries
contract_addresses = {
    'optimism': '0x3d77596beb0f130a4415df3D2D8232B3d3D31e44',
    'mode': '0x3C1fF68f5aa342D296d4DEe4Bb1cACCA912D95fE'
}

# Load the ABI from the provided JSON file
with open('./contracts/service_registry_abi.json', 'r') as abi_file:
    contract_abi = json.load(abi_file)

# Create the contract instances
service_registries = {
    chain_name: web3.eth.contract(address=contract_addresses[chain_name], abi=contract_abi)
    for chain_name, web3 in web3_instances.items()
}

# Check if connections are successful
for chain_name, web3_instance in web3_instances.items():
    if not web3_instance.is_connected():
        raise Exception(f"Failed to connect to the {chain_name.capitalize()} network.")
    else:
        print(f"Successfully connected to the {chain_name.capitalize()} network.")
"""

def get_transfers(integrator: str, wallet: str) -> Dict[str, Any]:
    """Dummy function that returns an empty result."""
    return {"transfers": []}


def fetch_and_aggregate_transactions():
    """Dummy function that returns empty data."""
    return [], {}


def process_transactions_and_agents(data):
    """Dummy function that returns empty dataframes."""
    df_transactions = pd.DataFrame()
    df_agents = pd.DataFrame(columns=['date', 'agent_count'])
    df_agents_weekly = pd.DataFrame()
    return df_transactions, df_agents, df_agents_weekly

def create_visualizations():
    """
    # Commenting out the original visualization code temporarily for debugging
    transactions_data = fetch_and_aggregate_transactions()
    df_transactions, df_agents, df_agents_weekly = process_transactions_and_agents(transactions_data)

    # Fetch daily value locked data
    df_tvl = pd.read_csv('daily_value_locked.csv')

    # Calculate total value locked per chain per day
    df_tvl["total_value_locked_usd"] = df_tvl["amount0_usd"] + df_tvl["amount1_usd"]
    df_tvl_daily = df_tvl.groupby(["date", "chain_name"])["total_value_locked_usd"].sum().reset_index()
    df_tvl_daily['date'] = pd.to_datetime(df_tvl_daily['date'])

    # Filter out dates with zero total value locked
    df_tvl_daily = df_tvl_daily[df_tvl_daily["total_value_locked_usd"] > 0]

    chain_name_map = {
        "mode": "Mode",
        "base": "Base",
        "ethereum": "Ethereum",
        "optimism": "Optimism"
    }
    df_tvl_daily["chain_name"] = df_tvl_daily["chain_name"].map(chain_name_map)

    # Plot total value locked
    fig_tvl = px.bar(
        df_tvl_daily,
        x="date",
        y="total_value_locked_usd",
        color="chain_name",
        opacity=0.7,
        title="Total Volume Invested in Pools in Different Chains Daily",
        labels={"date": "Date", "chain_name": "Transaction Chain", "total_value_locked_usd": "Total Volume Invested (USD)"},
        barmode='stack',
        color_discrete_map={
            "Mode": "orange",
            "Base": "purple",
            "Ethereum": "darkgreen",
            "Optimism": "blue"
        }
    )
    fig_tvl.update_layout(
        xaxis_title="Date",
        yaxis=dict(tickmode='linear', tick0=0, dtick=4),
        xaxis=dict(
            tickmode='array',
            tickvals=df_tvl_daily['date'],
            ticktext=df_tvl_daily['date'].dt.strftime('%b %d'),
            tickangle=-45,
        ),
        bargap=0.6,  # Increase gap between bar groups (0-1)
        bargroupgap=0.1,  # Decrease gap between bars in a group (0-1)
        height=600,
        width=1200,  # Specify width to prevent bars from being too wide
        showlegend=True,
        template='plotly_white'
    )
    fig_tvl.update_xaxes(tickformat="%b %d")

    chain_name_map = {
        10: "Optimism",
        8453: "Base",
        1: "Ethereum",
        34443: "Mode"
    }

    df_transactions["sending_chain"] = df_transactions["sending_chain"].map(chain_name_map)
    df_transactions["receiving_chain"] = df_transactions["receiving_chain"].map(chain_name_map)

    df_transactions["sending_chain"] = df_transactions["sending_chain"].astype(str)
    df_transactions["receiving_chain"] = df_transactions["receiving_chain"].astype(str)
    df_transactions['date'] = pd.to_datetime(df_transactions['date'])
    df_transactions["is_swap"] = df_transactions.apply(lambda x: x["sending_chain"] == x["receiving_chain"], axis=1)

    swaps_per_chain = df_transactions[df_transactions["is_swap"]].groupby(["date", "sending_chain"]).size().reset_index(name="swap_count")
    fig_swaps_chain = px.bar(
        swaps_per_chain,
        x="date",
        y="swap_count",
        color="sending_chain",
        title="Chain Daily Activity: Swaps",
        labels={"sending_chain": "Transaction Chain", "swap_count": "Daily Swap Nr"},
        barmode="stack",
        opacity=0.7,
        color_discrete_map={
            "Optimism": "blue",
            "Ethereum": "darkgreen",
            "Base": "purple",
            "Mode": "orange"
        }
    )
    fig_swaps_chain.update_layout(
        xaxis_title="Date",
        yaxis_title="Daily Swap Count",
        yaxis=dict(tickmode='linear', tick0=0, dtick=1),
        xaxis=dict(
            tickmode='array',
            tickvals=[d for d in swaps_per_chain['date']],
            ticktext=[d.strftime('%m-%d') for d in swaps_per_chain['date']],
            tickangle=-45,
        ),
        bargap=0.6,
        bargroupgap=0.1,
        height=600,
        width=1200,
        margin=dict(l=50, r=50, t=50, b=50),
        showlegend=True,
        legend=dict(
            yanchor="top",
            y=0.99,
            xanchor="right",
            x=0.99
        ),
        template='plotly_white'
    )
    fig_swaps_chain.update_xaxes(tickformat="%m-%d")

    df_transactions["is_bridge"] = df_transactions.apply(lambda x: x["sending_chain"] != x["receiving_chain"], axis=1)

    bridges_per_chain = df_transactions[df_transactions["is_bridge"]].groupby(["date", "sending_chain"]).size().reset_index(name="bridge_count")
    fig_bridges_chain = px.bar(
        bridges_per_chain,
        x="date",
        y="bridge_count",
        color="sending_chain",
        title="Chain Daily Activity: Bridges",
        labels={"sending_chain": "Transaction Chain", "bridge_count": "Daily Bridge Nr"},
        barmode="stack",
        opacity=0.7,
        color_discrete_map={
            "Optimism": "blue",
            "Ethereum": "darkgreen",
            "Base": "purple",
            "Mode": "orange"
        }
    )
    fig_bridges_chain.update_layout(
        xaxis_title="Date",
        yaxis_title="Daily Bridge Count",
        yaxis=dict(tickmode='linear', tick0=0, dtick=1),
        xaxis=dict(
            tickmode='array',
            tickvals=[d for d in bridges_per_chain['date']],
            ticktext=[d.strftime('%m-%d') for d in bridges_per_chain['date']],
            tickangle=-45,
        ),
        bargap=0.6,
        bargroupgap=0.1,
        height=600,
        width=1200,
        margin=dict(l=50, r=50, t=50, b=50),
        showlegend=True,
        legend=dict(
            yanchor="top",
            y=0.99,
            xanchor="right",
            x=0.99
        ),
        template='plotly_white'
    )
    fig_bridges_chain.update_xaxes(tickformat="%m-%d")

    df_agents['date'] = pd.to_datetime(df_agents['date'])

    daily_agents_df = df_agents.groupby('date').agg({'agent_count': 'sum'}).reset_index()
    daily_agents_df.rename(columns={'agent_count': 'daily_agent_count'}, inplace=True)
    # Sort by date to ensure proper running total calculation
    daily_agents_df = daily_agents_df.sort_values('date')

    # Create week column
    daily_agents_df['week'] = daily_agents_df['date'].dt.to_period('W').apply(lambda r: r.start_time)

    # Calculate running total within each week
    daily_agents_df['running_weekly_total'] = daily_agents_df.groupby('week')['daily_agent_count'].cumsum()

    # Create final merged dataframe
    weekly_merged_df = daily_agents_df.copy()
    adjustment_date = pd.to_datetime('2024-11-15')
    weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'daily_agent_count'] -= 1
    weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'running_weekly_total'] -= 1

    fig_agents_registered = go.Figure(data=[
        go.Bar(
            name='Daily nr of Registered Agents',
            x=weekly_merged_df['date'].dt.strftime("%b %d"),
            y=weekly_merged_df['daily_agent_count'],
            opacity=0.7,
            marker_color='blue'
        ),
        go.Bar(
            name='Weekly Nr of Registered Agents',
            x=weekly_merged_df['date'].dt.strftime("%b %d"),
            y=weekly_merged_df['running_weekly_total'],
            opacity=0.7,
            marker_color='purple'
        )
    ])

    fig_agents_registered.update_layout(
        xaxis_title='Date',
        yaxis_title='Number of Agents',
        title="Nr of Agents Registered",
        barmode='group',
        yaxis=dict(tickmode='linear', tick0=0, dtick=1),
        xaxis=dict(
            categoryorder='array',
            categoryarray=weekly_merged_df['date'].dt.strftime("%b %d"),
            tickangle=-45
        ),
        bargap=0.3,
        height=600,
        width=1200,
        showlegend=True,
        legend=dict(
            yanchor="top",
            xanchor="right",
        ),
        template='plotly_white',
    )

    return fig_swaps_chain, fig_bridges_chain, fig_agents_registered, fig_tvl
    """
    # Placeholder figures while blockchain data loading is disabled.
    fig_swaps_chain = go.Figure()
    fig_swaps_chain.add_annotation(
        text="Blockchain data loading disabled - placeholder visualization",
        x=0.5, y=0.5, xref="paper", yref="paper",
        showarrow=False, font=dict(size=20)
    )

    fig_bridges_chain = go.Figure()
    fig_bridges_chain.add_annotation(
        text="Blockchain data loading disabled - placeholder visualization",
        x=0.5, y=0.5, xref="paper", yref="paper",
        showarrow=False, font=dict(size=20)
    )

    fig_agents_registered = go.Figure()
    fig_agents_registered.add_annotation(
        text="Blockchain data loading disabled - placeholder visualization",
        x=0.5, y=0.5, xref="paper", yref="paper",
        showarrow=False, font=dict(size=20)
    )

    fig_tvl = go.Figure()
    fig_tvl.add_annotation(
        text="Blockchain data loading disabled - placeholder visualization",
        x=0.5, y=0.5, xref="paper", yref="paper",
        showarrow=False, font=dict(size=20)
    )

    return fig_swaps_chain, fig_bridges_chain, fig_agents_registered, fig_tvl

def add_diagnostic_controls(demo):
    """Add diagnostic UI controls to help debug differences between local and production."""
    with gr.Column():
        gr.Markdown("## Diagnostics")

        diagnostic_button = gr.Button("Run Data Diagnostics")
        diagnostic_output = gr.Textbox(label="Diagnostic Results", lines=10)

        def run_diagnostics():
            """Summarize the currently cached data for debugging."""
            global global_df

            if global_df is None or global_df.empty:
                return "No data available. Please click 'Refresh APR Data' first."

            result = []
            result.append("=== DIAGNOSTIC REPORT ===")
            result.append(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
            result.append(f"API Endpoint: {API_BASE_URL}")
            result.append(f"Total data points: {len(global_df)}")

            unique_agents = global_df['agent_id'].unique()
            result.append(f"Number of unique agents: {len(unique_agents)}")

            for agent_id in unique_agents:
                agent_data = global_df[global_df['agent_id'] == agent_id]
                agent_name = agent_data['agent_name'].iloc[0]

                result.append(f"\nAgent: {agent_name} (ID: {agent_id})")
                result.append(f"  Data points: {len(agent_data)}")

                apr_data = agent_data[agent_data['metric_type'] == 'APR']
                perf_data = agent_data[agent_data['metric_type'] == 'Performance']

                result.append(f"  APR points: {len(apr_data)}")
                if not apr_data.empty:
                    result.append(f"  APR values: {apr_data['apr'].tolist()}")

                result.append(f"  Performance points: {len(perf_data)}")
                if not perf_data.empty:
                    result.append(f"  Performance values: {perf_data['apr'].tolist()}")

            # Persist the report so it can be inspected after the session.
            with open("latest_diagnostics.txt", "w") as f:
                f.write("\n".join(result))

            return "\n".join(result)

        # Some Gradio versions expect None for no inputs, others an empty
        # list; try both for compatibility.
        try:
            diagnostic_button.click(
                fn=run_diagnostics,
                inputs=None,
                outputs=diagnostic_output
            )
        except TypeError:
            diagnostic_button.click(
                fn=run_diagnostics,
                inputs=[],
                outputs=[diagnostic_output]
            )

    return demo

def dashboard():
    with gr.Blocks() as demo:
        gr.Markdown("# Valory APR Metrics")

        with gr.Tab("APR Metrics"):
            with gr.Column():
                refresh_btn = gr.Button("Refresh APR Data")

                combined_graph = gr.Plot(label="APR for All Agents")

                def update_apr_graph():
                    try:
                        combined_fig, _ = generate_apr_visualizations()
                        return combined_fig
                    except Exception as e:
                        logger.exception("Error generating APR visualization")

                        # Show the error in the plot area rather than failing silently.
                        error_fig = go.Figure()
                        error_fig.add_annotation(
                            text=f"Error: {str(e)}",
                            x=0.5, y=0.5,
                            showarrow=False,
                            font=dict(size=15, color="red")
                        )
                        return error_fig

                # Some Gradio versions expect None for no inputs, others an
                # empty list; try both for compatibility.
                try:
                    refresh_btn.click(
                        fn=update_apr_graph,
                        inputs=None,
                        outputs=combined_graph
                    )
                except TypeError:
                    refresh_btn.click(
                        fn=update_apr_graph,
                        inputs=[],
                        outputs=[combined_graph]
                    )

                # Show a placeholder until the user clicks refresh.
                placeholder_fig = go.Figure()
                placeholder_fig.add_annotation(
                    text="Click 'Refresh APR Data' to load APR graph",
                    x=0.5, y=0.5,
                    showarrow=False,
                    font=dict(size=15)
                )
                combined_graph.value = placeholder_fig

        demo = add_diagnostic_controls(demo)

    return demo


if __name__ == "__main__":
    dashboard().launch()
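
# A sketch of alternative launch settings (hypothetical deployment values):
#   dashboard().launch(server_name="0.0.0.0", server_port=7860)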