|
import requests |
|
import pandas as pd |
|
import gradio as gr |
|
import plotly.graph_objects as go |
|
import plotly.express as px |
|
from plotly.subplots import make_subplots |
|
from datetime import datetime
|
import json |
|
|
|
|
|
import os
import logging
from typing import List, Dict, Any, Optional
|
|
|
|
|
|
|
|
|
|
|
logging.basicConfig( |
|
level=logging.INFO, |
|
format="%(asctime)s - %(levelname)s - %(message)s", |
|
handlers=[ |
|
logging.FileHandler("app_debug.log"), |
|
logging.StreamHandler() |
|
] |
|
) |
|
logger = logging.getLogger(__name__) |
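
# Quieten chatty third-party loggers so the application log stays readable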
|
|
|
|
|
logging.getLogger("urllib3").setLevel(logging.WARNING) |
|
logging.getLogger("httpx").setLevel(logging.WARNING) |
|
logging.getLogger("matplotlib").setLevel(logging.WARNING) |
|
|
|
|
|
logger.info("============= APPLICATION STARTING =============") |
|
logger.info(f"Running from directory: {os.getcwd()}") |
|
|
|
|
|
global_df = None |
|
|
|
|
|
API_BASE_URL = "https://afmdb.autonolas.tech" |
|
logger.info(f"Using API endpoint: {API_BASE_URL}") |
|
|
|
def get_agent_type_by_name(type_name: str) -> Optional[Dict[str, Any]]:
    """Look up an agent type by its name; returns None if not found."""
|
url = f"{API_BASE_URL}/api/agent-types/name/{type_name}" |
|
logger.debug(f"Calling API: {url}") |
|
|
|
try: |
|
response = requests.get(url) |
|
logger.debug(f"Response status: {response.status_code}") |
|
|
|
if response.status_code == 404: |
|
logger.error(f"Agent type '{type_name}' not found") |
|
return None |
|
|
|
response.raise_for_status() |
|
result = response.json() |
|
logger.debug(f"Agent type response: {result}") |
|
return result |
|
except Exception as e: |
|
logger.error(f"Error in get_agent_type_by_name: {e}") |
|
return None |
|
|
|
def get_attribute_definition_by_name(attr_name: str) -> Optional[Dict[str, Any]]:
    """Look up an attribute definition by its name; returns None if not found."""
|
url = f"{API_BASE_URL}/api/attributes/name/{attr_name}" |
|
logger.debug(f"Calling API: {url}") |
|
|
|
try: |
|
response = requests.get(url) |
|
logger.debug(f"Response status: {response.status_code}") |
|
|
|
if response.status_code == 404: |
|
logger.error(f"Attribute definition '{attr_name}' not found") |
|
return None |
|
|
|
response.raise_for_status() |
|
result = response.json() |
|
logger.debug(f"Attribute definition response: {result}") |
|
return result |
|
except Exception as e: |
|
logger.error(f"Error in get_attribute_definition_by_name: {e}") |
|
return None |
|
|
|
def get_agents_by_type(type_id: int) -> List[Dict[str, Any]]: |
|
"""Get all agents of a specific type""" |
|
url = f"{API_BASE_URL}/api/agent-types/{type_id}/agents/" |
|
logger.debug(f"Calling API: {url}") |
|
|
|
try: |
|
response = requests.get(url) |
|
logger.debug(f"Response status: {response.status_code}") |
|
|
|
if response.status_code == 404: |
|
logger.error(f"No agents found for type ID {type_id}") |
|
return [] |
|
|
|
response.raise_for_status() |
|
result = response.json() |
|
logger.debug(f"Agents count: {len(result)}") |
|
logger.debug(f"First few agents: {result[:2] if result else []}") |
|
return result |
|
except Exception as e: |
|
logger.error(f"Error in get_agents_by_type: {e}") |
|
return [] |
|
|
|
def get_attribute_values_by_type_and_attr(agents: List[Dict[str, Any]], attr_def_id: int) -> List[Dict[str, Any]]:
    """Collect all values of the given attribute definition across the supplied agents."""
|
all_attributes = [] |
|
logger.debug(f"Getting attributes for {len(agents)} agents with attr_def_id: {attr_def_id}") |
|
|
|
|
|
for agent in agents: |
|
agent_id = agent["agent_id"] |
|
|
|
|
|
url = f"{API_BASE_URL}/api/agents/{agent_id}/attributes/" |
|
logger.debug(f"Calling API for agent {agent_id}: {url}") |
|
|
|
try: |
|
response = requests.get(url, params={"limit": 1000}) |
|
|
|
if response.status_code == 404: |
|
logger.error(f"No attributes found for agent ID {agent_id}") |
|
continue |
|
|
|
response.raise_for_status() |
|
agent_attrs = response.json() |
|
logger.debug(f"Agent {agent_id} has {len(agent_attrs)} attributes") |
|
|
|
|
|
filtered_attrs = [attr for attr in agent_attrs if attr.get("attr_def_id") == attr_def_id] |
|
logger.debug(f"Agent {agent_id} has {len(filtered_attrs)} APR attributes") |
|
|
|
if filtered_attrs: |
|
logger.debug(f"Sample attribute for agent {agent_id}: {filtered_attrs[0]}") |
|
|
|
all_attributes.extend(filtered_attrs) |
|
except requests.exceptions.RequestException as e: |
|
logger.error(f"Error fetching attributes for agent ID {agent_id}: {e}") |
|
|
|
logger.info(f"Total APR attributes found across all agents: {len(all_attributes)}") |
|
return all_attributes |
|
|
|
def get_agent_name(agent_id: int, agents: List[Dict[str, Any]]) -> str: |
|
"""Get agent name from agent ID""" |
|
for agent in agents: |
|
if agent["agent_id"] == agent_id: |
|
return agent["agent_name"] |
|
return "Unknown" |
|
|
|
def extract_apr_value(attr: Dict[str, Any]) -> Dict[str, Any]: |
|
"""Extract APR value, adjusted APR value, and timestamp from JSON value""" |
|
try: |
|
agent_id = attr.get("agent_id", "unknown") |
|
logger.debug(f"Extracting APR value for agent {agent_id}") |
|
|
|
|
|
if attr["json_value"] is None: |
|
logger.debug(f"Agent {agent_id}: json_value is None") |
|
return {"apr": None, "adjusted_apr": None, "timestamp": None, "agent_id": agent_id, "is_dummy": False} |
|
|
|
|
|
if isinstance(attr["json_value"], str): |
|
logger.debug(f"Agent {agent_id}: json_value is string, parsing") |
|
json_data = json.loads(attr["json_value"]) |
|
else: |
|
json_data = attr["json_value"] |
|
|
|
apr = json_data.get("apr") |
|
adjusted_apr = json_data.get("adjusted_apr") |
|
timestamp = json_data.get("timestamp") |
|
|
|
logger.debug(f"Agent {agent_id}: Raw APR value: {apr}, adjusted APR value: {adjusted_apr}, timestamp: {timestamp}") |
|
|
|
|
|
timestamp_dt = None |
|
if timestamp: |
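            # fromtimestamp() interprets the epoch in the server's local timezone;
            # switch to timezone-aware datetimes here if UTC consistency matters.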
|
timestamp_dt = datetime.fromtimestamp(timestamp) |
|
|
|
result = {"apr": apr, "adjusted_apr": adjusted_apr, "timestamp": timestamp_dt, "agent_id": agent_id, "is_dummy": False} |
|
logger.debug(f"Agent {agent_id}: Extracted result: {result}") |
|
return result |
|
except (json.JSONDecodeError, KeyError, TypeError) as e: |
|
logger.error(f"Error parsing JSON value: {e} for agent_id: {attr.get('agent_id')}") |
|
logger.error(f"Problematic json_value: {attr.get('json_value')}") |
|
return {"apr": None, "adjusted_apr": None, "timestamp": None, "agent_id": attr.get('agent_id'), "is_dummy": False} |
|
|
|
def fetch_apr_data_from_db(): |
|
""" |
|
Fetch APR data from database using the API. |
|
""" |
|
global global_df |
|
|
|
logger.info("==== Starting APR data fetch ====") |
|
|
|
try: |
|
|
|
logger.info("Finding Modius agent type") |
|
modius_type = get_agent_type_by_name("Modius") |
|
if not modius_type: |
|
logger.error("Modius agent type not found, using placeholder data") |
|
global_df = pd.DataFrame([]) |
|
return global_df |
|
|
|
type_id = modius_type["type_id"] |
|
logger.info(f"Found Modius agent type with ID: {type_id}") |
|
|
|
|
|
logger.info("Finding APR attribute definition") |
|
apr_attr_def = get_attribute_definition_by_name("APR") |
|
if not apr_attr_def: |
|
logger.error("APR attribute definition not found, using placeholder data") |
|
global_df = pd.DataFrame([]) |
|
return global_df |
|
|
|
attr_def_id = apr_attr_def["attr_def_id"] |
|
logger.info(f"Found APR attribute definition with ID: {attr_def_id}") |
|
|
|
|
|
logger.info(f"Getting all agents of type Modius (type_id: {type_id})") |
|
modius_agents = get_agents_by_type(type_id) |
|
if not modius_agents: |
|
logger.error("No agents of type 'Modius' found") |
|
global_df = pd.DataFrame([]) |
|
return global_df |
|
|
|
logger.info(f"Found {len(modius_agents)} Modius agents") |
|
logger.debug(f"Modius agents: {[{'agent_id': a['agent_id'], 'agent_name': a['agent_name']} for a in modius_agents]}") |
|
|
|
|
|
logger.info(f"Fetching APR values for all Modius agents (attr_def_id: {attr_def_id})") |
|
apr_attributes = get_attribute_values_by_type_and_attr(modius_agents, attr_def_id) |
|
if not apr_attributes: |
|
logger.error("No APR values found for 'Modius' agents") |
|
global_df = pd.DataFrame([]) |
|
return global_df |
|
|
|
logger.info(f"Found {len(apr_attributes)} APR attributes total") |
|
|
|
|
|
logger.info("Extracting APR data from attributes") |
|
apr_data_list = [] |
|
for attr in apr_attributes: |
|
apr_data = extract_apr_value(attr) |
|
if apr_data["apr"] is not None and apr_data["timestamp"] is not None: |
|
|
|
agent_name = get_agent_name(attr["agent_id"], modius_agents) |
|
|
|
apr_data["agent_name"] = agent_name |
|
|
|
apr_data["is_dummy"] = False |
|
|
|
|
|
if apr_data["apr"] != 0 and apr_data["apr"] != -100: |
|
apr_data["metric_type"] = "APR" |
|
logger.debug(f"Agent {agent_name} ({attr['agent_id']}): APR value: {apr_data['apr']}") |
|
|
|
apr_data_list.append(apr_data) |
|
else: |
|
|
|
logger.debug(f"Skipping value for agent {agent_name} ({attr['agent_id']}): {apr_data['apr']} (zero or -100)") |
|
|
|
|
|
if not apr_data_list: |
|
logger.error("No valid APR data extracted") |
|
global_df = pd.DataFrame([]) |
|
return global_df |
|
|
|
global_df = pd.DataFrame(apr_data_list) |
|
|
|
|
|
logger.info(f"Created DataFrame with {len(global_df)} rows") |
|
logger.info(f"DataFrame columns: {global_df.columns.tolist()}") |
|
logger.info(f"APR statistics: min={global_df['apr'].min()}, max={global_df['apr'].max()}, mean={global_df['apr'].mean()}") |
|
|
|
|
|
if 'adjusted_apr' in global_df.columns and global_df['adjusted_apr'].notna().any(): |
|
logger.info(f"Adjusted APR statistics: min={global_df['adjusted_apr'].min()}, max={global_df['adjusted_apr'].max()}, mean={global_df['adjusted_apr'].mean()}") |
|
logger.info(f"Number of records with adjusted_apr: {global_df['adjusted_apr'].notna().sum()} out of {len(global_df)}") |
|
|
|
|
|
valid_rows = global_df[global_df['adjusted_apr'].notna()] |
|
if not valid_rows.empty: |
|
avg_diff = (valid_rows['apr'] - valid_rows['adjusted_apr']).mean() |
|
max_diff = (valid_rows['apr'] - valid_rows['adjusted_apr']).max() |
|
min_diff = (valid_rows['apr'] - valid_rows['adjusted_apr']).min() |
|
logger.info(f"APR vs Adjusted APR difference: avg={avg_diff:.2f}, max={max_diff:.2f}, min={min_diff:.2f}") |
|
else: |
|
logger.info("No adjusted APR values found in the data") |
|
|
|
|
|
logger.info("All values are APR type (excluding zero and -100 values)") |
|
logger.info(f"Agents count: {global_df['agent_name'].value_counts().to_dict()}") |
|
|
|
|
|
logger.debug("Final DataFrame contents:") |
|
for idx, row in global_df.iterrows(): |
|
logger.debug(f"Row {idx}: {row.to_dict()}") |
|
|
|
return global_df |
|
|
|
except requests.exceptions.RequestException as e: |
|
logger.error(f"API request error: {e}") |
|
global_df = pd.DataFrame([]) |
|
return global_df |
|
except Exception as e: |
|
logger.error(f"Error fetching APR data: {e}") |
|
logger.exception("Exception details:") |
|
global_df = pd.DataFrame([]) |
|
return global_df |
|
|
|
def generate_apr_visualizations(): |
|
"""Generate APR visualizations with real data only (no dummy data)""" |
|
global global_df |
|
|
|
|
|
df = fetch_apr_data_from_db() |
|
|
|
|
|
if df.empty: |
|
logger.info("No APR data available. Using fallback visualization.") |
|
|
|
fig = go.Figure() |
|
fig.add_annotation( |
|
x=0.5, y=0.5, |
|
text="No APR data available", |
|
font=dict(size=20), |
|
showarrow=False |
|
) |
|
fig.update_layout( |
|
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), |
|
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False) |
|
) |
|
|
|
|
|
fig.write_html("modius_apr_combined_graph.html") |
|
fig.write_image("modius_apr_combined_graph.png") |
|
|
|
csv_file = None |
|
return fig, csv_file |
|
|
|
|
|
|
|
global_df = df |
|
|
|
|
|
csv_file = save_to_csv(df) |
|
|
|
|
|
combined_fig = create_combined_time_series_graph(df) |
|
|
|
return combined_fig, csv_file |
|
|
|
def create_time_series_graph_per_agent(df): |
|
"""Create a time series graph for each agent using Plotly""" |
|
|
|
unique_agents = df['agent_id'].unique() |
|
|
|
if len(unique_agents) == 0: |
|
logger.error("No agent data to plot") |
|
fig = go.Figure() |
|
fig.add_annotation( |
|
text="No agent data available", |
|
x=0.5, y=0.5, |
|
showarrow=False, font=dict(size=20) |
|
) |
|
return fig |
|
|
|
|
|
fig = make_subplots(rows=len(unique_agents), cols=1, |
|
subplot_titles=[f"Agent: {df[df['agent_id'] == agent_id]['agent_name'].iloc[0]}" |
|
for agent_id in unique_agents], |
|
vertical_spacing=0.1) |
|
|
|
|
|
for i, agent_id in enumerate(unique_agents): |
|
agent_data = df[df['agent_id'] == agent_id].copy() |
|
agent_name = agent_data['agent_name'].iloc[0] |
|
row = i + 1 |
|
|
|
|
|
fig.add_shape( |
|
type="line", line=dict(dash="solid", width=1.5, color="black"), |
|
y0=0, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), |
|
row=row, col=1 |
|
) |
|
|
|
|
|
fig.add_shape( |
|
type="rect", fillcolor="rgba(230, 243, 255, 0.3)", line=dict(width=0), |
|
y0=0, y1=1000, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), |
|
row=row, col=1, layer="below" |
|
) |
|
fig.add_shape( |
|
type="rect", fillcolor="rgba(255, 230, 230, 0.3)", line=dict(width=0), |
|
y0=-1000, y1=0, x0=agent_data['timestamp'].min(), x1=agent_data['timestamp'].max(), |
|
row=row, col=1, layer="below" |
|
) |
|
|
|
|
|
apr_data = agent_data[agent_data['metric_type'] == 'APR'] |
|
perf_data = agent_data[agent_data['metric_type'] == 'Performance'] |
|
|
|
|
|
combined_agent_data = agent_data.sort_values('timestamp') |
|
|
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=combined_agent_data['timestamp'], |
|
y=combined_agent_data['apr'], |
|
mode='lines', |
|
line=dict(color='purple', width=2), |
|
name=f'{agent_name}', |
|
legendgroup=agent_name, |
|
showlegend=(i == 0), |
|
hovertemplate='Time: %{x}<br>Value: %{y:.2f}<extra></extra>' |
|
), |
|
row=row, col=1 |
|
) |
|
|
|
|
|
if not apr_data.empty: |
|
fig.add_trace( |
|
go.Scatter( |
|
x=apr_data['timestamp'], |
|
y=apr_data['apr'], |
|
mode='markers', |
|
marker=dict(color='blue', size=10, symbol='circle'), |
|
name='APR', |
|
legendgroup='APR', |
|
showlegend=(i == 0), |
|
hovertemplate='Time: %{x}<br>APR: %{y:.2f}<extra></extra>' |
|
), |
|
row=row, col=1 |
|
) |
|
|
|
|
|
if not perf_data.empty: |
|
fig.add_trace( |
|
go.Scatter( |
|
x=perf_data['timestamp'], |
|
y=perf_data['apr'], |
|
mode='markers', |
|
marker=dict(color='red', size=10, symbol='square'), |
|
name='Performance', |
|
legendgroup='Performance', |
|
showlegend=(i == 0), |
|
hovertemplate='Time: %{x}<br>Performance: %{y:.2f}<extra></extra>' |
|
), |
|
row=row, col=1 |
|
) |
|
|
|
|
|
fig.update_xaxes(title_text="Time", row=row, col=1) |
|
fig.update_yaxes(title_text="Value", row=row, col=1, gridcolor='rgba(0,0,0,0.1)') |
|
|
|
|
|
fig.update_layout( |
|
height=400 * len(unique_agents), |
|
width=1000, |
|
title_text="APR and Performance Values per Agent", |
|
template="plotly_white", |
|
legend=dict( |
|
orientation="h", |
|
yanchor="bottom", |
|
y=1.02, |
|
xanchor="right", |
|
x=1 |
|
), |
|
margin=dict(r=20, l=20, t=30, b=20), |
|
hovermode="closest" |
|
) |
|
|
|
|
|
graph_file = "modius_apr_per_agent_graph.html" |
|
fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False) |
|
|
|
|
|
img_file = "modius_apr_per_agent_graph.png" |
|
fig.write_image(img_file) |
|
|
|
logger.info(f"Per-agent graph saved to {graph_file} and {img_file}") |
|
|
|
|
|
return fig |
|
|
|
def write_debug_info(df, fig): |
|
"""Minimal debug info function""" |
|
try: |
|
|
|
logger.debug(f"Graph created with {len(df)} data points and {len(fig.data)} traces") |
|
return True |
|
except Exception as e: |
|
logger.error(f"Error writing debug info: {e}") |
|
return False |
|
|
|
def create_combined_time_series_graph(df): |
|
"""Create a time series graph showing average APR values across all agents""" |
|
if len(df) == 0: |
|
logger.error("No data to plot combined graph") |
|
fig = go.Figure() |
|
fig.add_annotation( |
|
text="No data available", |
|
x=0.5, y=0.5, |
|
showarrow=False, font=dict(size=20) |
|
) |
|
return fig |
|
|
|
|
|
df['apr'] = df['apr'].astype(float) |
|
df['metric_type'] = df['metric_type'].astype(str) |
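
    # NOTE: these casts mutate the caller's DataFrame in place; pass df.copy() if
    # the original dtypes need to be preserved.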
|
|
|
|
|
logger.info(f"Graph data - shape: {df.shape}, columns: {df.columns}") |
|
logger.info(f"Graph data - unique agents: {df['agent_name'].unique().tolist()}") |
|
logger.info("Graph data - all positive APR values only") |
|
logger.info(f"Graph data - min APR: {df['apr'].min()}, max APR: {df['apr'].max()}") |
|
|
|
|
|
debug_csv = "debug_graph_data.csv" |
|
df.to_csv(debug_csv) |
|
logger.info(f"Exported graph data to {debug_csv} for debugging") |
|
|
|
|
|
with open("debug_graph_data_report.txt", "w") as f: |
|
f.write("==== GRAPH DATA REPORT ====\n\n") |
|
f.write(f"Total data points: {len(df)}\n") |
|
f.write(f"Timestamp range: {df['timestamp'].min()} to {df['timestamp'].max()}\n\n") |
|
|
|
|
|
unique_agents = df['agent_id'].unique() |
|
f.write(f"Number of agents: {len(unique_agents)}\n\n") |
|
|
|
for agent_id in unique_agents: |
|
agent_data = df[df['agent_id'] == agent_id] |
|
agent_name = agent_data['agent_name'].iloc[0] |
|
|
|
f.write(f"== Agent: {agent_name} (ID: {agent_id}) ==\n") |
|
f.write(f" Total data points: {len(agent_data)}\n") |
|
|
|
apr_data = agent_data[agent_data['metric_type'] == 'APR'] |
|
|
|
f.write(f" APR data points: {len(apr_data)}\n") |
|
|
|
if not apr_data.empty: |
|
f.write(f" APR values: {apr_data['apr'].tolist()}\n") |
|
f.write(f" APR timestamps: {[ts.strftime('%Y-%m-%d %H:%M:%S') if ts is not None else 'None' for ts in apr_data['timestamp']]}\n") |
|
|
|
f.write("\n") |
|
|
|
logger.info("Generated detailed graph data report") |
|
|
|
|
|
|
|
fig = go.Figure() |
|
|
|
|
|
logger.info("Using autoscaling for axes ranges") |
|
|
|
|
|
min_time = df['timestamp'].min() |
|
max_time = df['timestamp'].max() |
|
|
|
|
|
fig.add_shape( |
|
type="rect", |
|
fillcolor="rgba(230, 243, 255, 0.3)", |
|
line=dict(width=0), |
|
y0=0, y1=100, |
|
x0=min_time, x1=max_time, |
|
layer="below" |
|
) |
|
|
|
|
|
fig.add_shape( |
|
type="rect", |
|
fillcolor="rgba(255, 230, 230, 0.3)", |
|
line=dict(width=0), |
|
y0=-100, y1=0, |
|
x0=min_time, x1=max_time, |
|
layer="below" |
|
) |
|
|
|
|
|
fig.add_shape( |
|
type="line", |
|
line=dict(dash="solid", width=1.5, color="black"), |
|
y0=0, y1=0, |
|
x0=min_time, x1=max_time |
|
) |
|
|
|
|
|
|
|
apr_data = df[df['metric_type'] == 'APR'].copy() |
|
|
|
|
|
outlier_data = apr_data[(apr_data['apr'] > 200) | (apr_data['apr'] < -200)].copy() |
|
apr_data_filtered = apr_data[(apr_data['apr'] <= 200) & (apr_data['apr'] >= -200)].copy() |
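
    # |APR| > 200 is treated as an outlier and excluded from the plot; the dropped
    # points are still logged below for inspection.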
|
|
|
|
|
if len(outlier_data) > 0: |
|
excluded_count = len(outlier_data) |
|
logger.info(f"Excluded {excluded_count} data points with outlier APR values (>200 or <-200)") |
|
|
|
|
|
outlier_agents = outlier_data.groupby('agent_name') |
|
for agent_name, agent_outliers in outlier_agents: |
|
logger.info(f"Agent '{agent_name}' has {len(agent_outliers)} outlier values:") |
|
for idx, row in agent_outliers.iterrows(): |
|
logger.info(f" - APR: {row['apr']}, timestamp: {row['timestamp']}") |
|
|
|
|
|
apr_data = apr_data_filtered |
|
|
|
|
|
avg_apr_data = apr_data.groupby('timestamp')['apr'].mean().reset_index() |
|
|
|
|
|
avg_apr_data = avg_apr_data.sort_values('timestamp') |
|
|
|
|
|
logger.info(f"Calculated average APR data with {len(avg_apr_data)} points") |
|
for idx, row in avg_apr_data.iterrows(): |
|
logger.info(f" Average point {idx}: timestamp={row['timestamp']}, avg_apr={row['apr']}") |
|
|
|
|
|
|
|
apr_data_sorted = apr_data.sort_values('timestamp') |
|
|
|
|
|
    avg_apr_data_with_ma = avg_apr_data.copy()

    time_window = pd.Timedelta(days=3)
    logger.info(f"Calculating moving average with time window of {time_window}")

    avg_apr_data_with_ma['moving_avg'] = None
    avg_apr_data_with_ma['adjusted_moving_avg'] = None
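
    # A hand-rolled trailing-window mean follows. A roughly equivalent vectorised
    # form (sketch, assuming a sorted DatetimeIndex) would be pandas' time-based
    # rolling window:
    #   apr_data_sorted.set_index('timestamp')['apr'].rolling('3D').mean()
    # The explicit loop is kept because the window mean is evaluated at the
    # averaged timestamps rather than at every raw data point.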
|
|
|
|
|
for i, row in avg_apr_data_with_ma.iterrows(): |
|
current_time = row['timestamp'] |
|
window_start = current_time - time_window |
|
|
|
|
|
window_data = apr_data_sorted[ |
|
(apr_data_sorted['timestamp'] >= window_start) & |
|
(apr_data_sorted['timestamp'] <= current_time) |
|
] |
|
|
|
|
|
if not window_data.empty: |
|
avg_apr_data_with_ma.at[i, 'moving_avg'] = window_data['apr'].mean() |
|
logger.debug(f"APR time window {window_start} to {current_time}: {len(window_data)} points, avg={window_data['apr'].mean()}") |
|
|
|
|
|
if 'adjusted_apr' in window_data.columns and window_data['adjusted_apr'].notna().any(): |
|
avg_apr_data_with_ma.at[i, 'adjusted_moving_avg'] = window_data['adjusted_apr'].mean() |
|
logger.debug(f"Adjusted APR time window {window_start} to {current_time}: {len(window_data)} points, avg={window_data['adjusted_apr'].mean()}") |
|
else: |
|
|
|
avg_apr_data_with_ma.at[i, 'moving_avg'] = row['apr'] |
|
logger.debug(f"No data points in time window for {current_time}, using current value {row['apr']}") |
|
|
|
logger.info(f"Calculated time-based moving averages with {len(avg_apr_data_with_ma)} points") |
|
|
|
|
|
if not apr_data.empty: |
|
|
|
unique_agents = apr_data['agent_name'].unique() |
|
colors = px.colors.qualitative.Plotly[:len(unique_agents)] |
|
|
|
|
|
color_map = {agent: colors[i % len(colors)] for i, agent in enumerate(unique_agents)} |
|
|
|
|
|
agent_counts = apr_data['agent_name'].value_counts() |
|
|
|
|
|
MAX_VISIBLE_AGENTS = 5 |
|
top_agents = agent_counts.nlargest(min(MAX_VISIBLE_AGENTS, len(agent_counts))).index.tolist() |
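
        # Only the agents with the most data points are visible initially; the rest
        # remain listed in the legend and can be toggled on by the user.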
|
|
|
logger.info(f"Showing {len(top_agents)} agents by default out of {len(unique_agents)} total agents") |
|
|
|
|
|
for agent_name in unique_agents: |
|
agent_data = apr_data[apr_data['agent_name'] == agent_name] |
|
|
|
|
|
x_values = agent_data['timestamp'].tolist() |
|
y_values = agent_data['apr'].tolist() |
|
|
|
|
|
is_visible = agent_name in top_agents |
|
|
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=x_values, |
|
y=y_values, |
|
mode='markers', |
|
marker=dict( |
|
color=color_map[agent_name], |
|
symbol='circle', |
|
size=10, |
|
line=dict(width=1, color='black') |
|
), |
|
name=f'Agent: {agent_name} (APR)', |
|
hovertemplate='Time: %{x}<br>APR: %{y:.2f}<br>Agent: ' + agent_name + '<extra></extra>', |
|
visible=is_visible |
|
) |
|
) |
|
logger.info(f"Added APR data points for agent {agent_name} with {len(x_values)} points (visible: {is_visible})") |
|
|
|
|
|
if 'adjusted_apr' in agent_data.columns and agent_data['adjusted_apr'].notna().any(): |
|
x_values_adj = agent_data['timestamp'].tolist() |
|
y_values_adj = agent_data['adjusted_apr'].tolist() |
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=x_values_adj, |
|
y=y_values_adj, |
|
mode='markers', |
|
marker=dict( |
|
color=color_map[agent_name], |
|
symbol='diamond', |
|
size=10, |
|
line=dict(width=1, color='black') |
|
), |
|
name=f'Agent: {agent_name} (Adjusted APR)', |
|
hovertemplate='Time: %{x}<br>Adjusted APR: %{y:.2f}<br>Agent: ' + agent_name + '<extra></extra>', |
|
visible=is_visible |
|
) |
|
) |
|
logger.info(f"Added Adjusted APR data points for agent {agent_name} with {len(x_values_adj)} points (visible: {is_visible})") |
|
|
|
|
|
x_values_ma = avg_apr_data_with_ma['timestamp'].tolist() |
|
y_values_ma = avg_apr_data_with_ma['moving_avg'].tolist() |
|
|
|
|
|
hover_data_apr = [] |
|
for idx, row in avg_apr_data_with_ma.iterrows(): |
|
timestamp = row['timestamp'] |
|
hover_data_apr.append( |
|
f"Time: {timestamp}<br>Moving Avg APR (3d window): {row['moving_avg']:.2f}" |
|
) |
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=x_values_ma, |
|
y=y_values_ma, |
|
mode='lines', |
|
line=dict(color='red', width=2), |
|
name='Moving Average APR (3d window)', |
|
hovertext=hover_data_apr, |
|
hoverinfo='text', |
|
visible=True |
|
) |
|
) |
|
logger.info(f"Added 3-day moving average APR trace with {len(x_values_ma)} points") |
|
|
|
|
|
if 'adjusted_moving_avg' in avg_apr_data_with_ma.columns and avg_apr_data_with_ma['adjusted_moving_avg'].notna().any(): |
|
y_values_adj_ma = avg_apr_data_with_ma['adjusted_moving_avg'].tolist() |
|
|
|
|
|
hover_data_adj = [] |
|
for idx, row in avg_apr_data_with_ma.iterrows(): |
|
timestamp = row['timestamp'] |
|
if pd.notna(row['adjusted_moving_avg']): |
|
hover_data_adj.append( |
|
f"Time: {timestamp}<br>Moving Avg Adjusted APR (3d window): {row['adjusted_moving_avg']:.2f}" |
|
) |
|
else: |
|
hover_data_adj.append( |
|
f"Time: {timestamp}<br>Moving Avg Adjusted APR (3d window): N/A" |
|
) |
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=x_values_ma, |
|
y=y_values_adj_ma, |
|
mode='lines', |
|
line=dict(color='green', width=4), |
|
name='Moving Average Adjusted APR (3d window)', |
|
hovertext=hover_data_adj, |
|
hoverinfo='text', |
|
visible=True |
|
) |
|
) |
|
logger.info(f"Added 3-day moving average Adjusted APR trace with {len(x_values_ma)} points") |
|
|
|
|
|
logger.info("Cumulative APR graph line has been removed as requested") |
|
|
|
|
|
|
|
fig.update_layout( |
|
title="Babydegen agents", |
|
xaxis_title="Time", |
|
yaxis_title="Value", |
|
template="plotly_white", |
|
height=700, |
|
width=1400, |
|
legend=dict( |
|
orientation="h", |
|
yanchor="bottom", |
|
y=1.02, |
|
xanchor="right", |
|
x=1, |
|
groupclick="toggleitem" |
|
), |
|
margin=dict(r=20, l=20, t=30, b=20), |
|
hovermode="closest" |
|
) |
|
|
|
|
|
if len(unique_agents) > MAX_VISIBLE_AGENTS: |
|
fig.add_annotation( |
|
text=f"Note: Only showing top {MAX_VISIBLE_AGENTS} agents by default. Toggle others in legend.", |
|
xref="paper", yref="paper", |
|
x=0.5, y=1.05, |
|
showarrow=False, |
|
font=dict(size=12, color="gray"), |
|
align="center" |
|
) |
|
|
|
|
|
|
|
|
|
|
fig.update_yaxes( |
|
showgrid=True, |
|
gridwidth=1, |
|
gridcolor='rgba(0,0,0,0.1)', |
|
|
|
autorange=True, |
|
tickformat=".2f", |
|
tickfont=dict(size=12) |
|
) |
|
|
|
|
|
fig.update_xaxes( |
|
showgrid=True, |
|
gridwidth=1, |
|
gridcolor='rgba(0,0,0,0.1)', |
|
|
|
autorange=True, |
|
tickformat="%b %d, %H:%M", |
|
tickangle=-30, |
|
tickfont=dict(size=12) |
|
) |
|
|
|
|
|
|
|
try: |
|
|
|
write_debug_info(df, fig) |
|
|
|
|
|
graph_file = "modius_apr_combined_graph.html" |
|
fig.write_html(graph_file, include_plotlyjs='cdn', full_html=False) |
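        # include_plotlyjs='cdn' with full_html=False emits an embeddable HTML
        # fragment that loads plotly.js from the CDN instead of inlining the bundle.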
|
|
|
|
|
img_file = "modius_apr_combined_graph.png" |
|
try: |
|
fig.write_image(img_file) |
|
logger.info(f"Combined graph saved to {graph_file} and {img_file}") |
|
except Exception as e: |
|
logger.error(f"Error saving image: {e}") |
|
logger.info(f"Combined graph saved to {graph_file} only") |
|
|
|
|
|
return fig |
|
except Exception as e: |
|
|
|
logger.error(f"Error creating advanced graph: {e}") |
|
logger.info("Falling back to simpler graph") |
|
|
|
|
|
simple_fig = go.Figure() |
|
|
|
|
|
simple_fig.add_shape( |
|
type="line", |
|
line=dict(dash="solid", width=1.5, color="black"), |
|
y0=0, y1=0, |
|
x0=min_time, x1=max_time |
|
) |
|
|
|
|
|
|
|
|
|
|
if not avg_apr_data.empty: |
|
|
|
avg_apr_data = avg_apr_data.sort_values('timestamp') |
|
|
|
|
|
avg_apr_data_with_ma = avg_apr_data.copy() |
|
avg_apr_data_with_ma['moving_avg'] = None |
|
avg_apr_data_with_ma['infinite_avg'] = None |
|
|
|
|
|
time_window = pd.Timedelta(hours=6) |
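            # The fallback path uses a shorter 6-hour window plus a cumulative
            # ("infinite") average, unlike the 3-day window in the main path above.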
|
|
|
|
|
for i, row in avg_apr_data_with_ma.iterrows(): |
|
current_time = row['timestamp'] |
|
window_start = current_time - time_window |
|
|
|
|
|
window_data = apr_data[ |
|
(apr_data['timestamp'] >= window_start) & |
|
(apr_data['timestamp'] <= current_time) |
|
] |
|
|
|
|
|
infinite_window_data = apr_data[ |
|
apr_data['timestamp'] <= current_time |
|
] |
|
|
|
|
|
if not window_data.empty: |
|
avg_apr_data_with_ma.at[i, 'moving_avg'] = window_data['apr'].mean() |
|
else: |
|
|
|
avg_apr_data_with_ma.at[i, 'moving_avg'] = row['apr'] |
|
|
|
|
|
if not infinite_window_data.empty: |
|
avg_apr_data_with_ma.at[i, 'infinite_avg'] = infinite_window_data['apr'].mean() |
|
else: |
|
avg_apr_data_with_ma.at[i, 'infinite_avg'] = row['apr'] |
|
|
|
|
|
unique_agents = apr_data['agent_name'].unique() |
|
colors = px.colors.qualitative.Plotly[:len(unique_agents)] |
|
color_map = {agent: colors[i % len(colors)] for i, agent in enumerate(unique_agents)} |
|
|
|
|
|
agent_counts = apr_data['agent_name'].value_counts() |
|
|
|
|
|
MAX_VISIBLE_AGENTS = 5 |
|
top_agents = agent_counts.nlargest(min(MAX_VISIBLE_AGENTS, len(agent_counts))).index.tolist() |
|
|
|
for agent_name in unique_agents: |
|
agent_data = apr_data[apr_data['agent_name'] == agent_name] |
|
|
|
|
|
is_visible = agent_name in top_agents |
|
|
|
|
|
simple_fig.add_trace( |
|
go.Scatter( |
|
x=agent_data['timestamp'], |
|
y=agent_data['apr'], |
|
mode='markers', |
|
name=f'Agent: {agent_name}', |
|
marker=dict( |
|
size=10, |
|
color=color_map[agent_name] |
|
), |
|
hovertemplate='Time: %{x}<br>APR: %{y:.2f}<br>Agent: ' + agent_name + '<extra></extra>', |
|
visible=is_visible |
|
) |
|
) |
|
|
|
|
|
simple_fig.add_trace( |
|
go.Scatter( |
|
x=avg_apr_data_with_ma['timestamp'], |
|
y=avg_apr_data_with_ma['moving_avg'], |
|
mode='lines', |
|
name='Moving Average APR (6h window)', |
|
line=dict(width=2, color='red') |
|
) |
|
) |
|
|
|
|
|
simple_fig.add_trace( |
|
go.Scatter( |
|
x=avg_apr_data_with_ma['timestamp'], |
|
y=avg_apr_data_with_ma['infinite_avg'], |
|
mode='lines', |
|
name='Cumulative Average APR (all data)', |
|
line=dict(width=4, color='green') |
|
) |
|
) |
|
|
|
|
|
simple_fig.update_layout( |
|
title="Babydegen agents", |
|
xaxis_title="Time", |
|
yaxis_title="Value", |
|
yaxis=dict( |
|
|
|
autorange=True, |
|
tickformat=".2f", |
|
tickfont=dict(size=12) |
|
), |
|
height=700, |
|
width=1400, |
|
template="plotly_white" |
|
) |
|
|
|
|
|
simple_fig.update_xaxes( |
|
autorange=True, |
|
tickformat="%b %d, %H:%M", |
|
tickangle=-30, |
|
tickfont=dict(size=12) |
|
) |
|
|
|
|
|
if len(unique_agents) > MAX_VISIBLE_AGENTS: |
|
simple_fig.add_annotation( |
|
text=f"Note: Only showing top {MAX_VISIBLE_AGENTS} agents by default. Toggle others in legend.", |
|
xref="paper", yref="paper", |
|
x=0.5, y=1.05, |
|
showarrow=False, |
|
font=dict(size=12, color="gray"), |
|
align="center" |
|
) |
|
|
|
|
|
return simple_fig |
|
|
|
def save_to_csv(df): |
|
"""Save the APR data DataFrame to a CSV file and return the file path""" |
|
if df.empty: |
|
logger.error("No APR data to save to CSV") |
|
return None |
|
|
|
|
|
csv_file = "modius_apr_values.csv" |
|
|
|
|
|
df.to_csv(csv_file, index=False) |
|
logger.info(f"APR data saved to {csv_file}") |
|
|
|
|
|
stats_df = generate_statistics_from_data(df) |
|
stats_csv = "modius_apr_statistics.csv" |
|
stats_df.to_csv(stats_csv, index=False) |
|
logger.info(f"Statistics saved to {stats_csv}") |
|
|
|
|
|
if 'adjusted_apr' in df.columns and df['adjusted_apr'].notna().any(): |
|
adjusted_stats = stats_df[stats_df['avg_adjusted_apr'].notna()] |
|
logger.info(f"Agents with adjusted APR data: {len(adjusted_stats)} out of {len(stats_df)}") |
|
|
|
for _, row in adjusted_stats.iterrows(): |
|
if row['agent_id'] != 'ALL': |
|
logger.info(f"Agent {row['agent_name']} adjusted APR stats: avg={row['avg_adjusted_apr']:.2f}, min={row['min_adjusted_apr']:.2f}, max={row['max_adjusted_apr']:.2f}") |
|
|
|
|
|
overall_row = stats_df[stats_df['agent_id'] == 'ALL'] |
|
if not overall_row.empty and pd.notna(overall_row['avg_adjusted_apr'].iloc[0]): |
|
logger.info(f"Overall adjusted APR stats: avg={overall_row['avg_adjusted_apr'].iloc[0]:.2f}, min={overall_row['min_adjusted_apr'].iloc[0]:.2f}, max={overall_row['max_adjusted_apr'].iloc[0]:.2f}") |
|
|
|
return csv_file |
|
|
|
def generate_statistics_from_data(df): |
|
"""Generate statistics from the APR data""" |
|
if df.empty: |
|
return pd.DataFrame() |
|
|
|
|
|
unique_agents = df['agent_id'].unique() |
|
stats_list = [] |
|
|
|
|
|
for agent_id in unique_agents: |
|
agent_data = df[df['agent_id'] == agent_id] |
|
agent_name = agent_data['agent_name'].iloc[0] |
|
|
|
|
|
apr_data = agent_data[agent_data['metric_type'] == 'APR'] |
|
real_apr = apr_data[apr_data['is_dummy'] == False] |
|
|
|
|
|
perf_data = agent_data[agent_data['metric_type'] == 'Performance'] |
|
real_perf = perf_data[perf_data['is_dummy'] == False] |
|
|
|
|
|
has_adjusted_apr = 'adjusted_apr' in apr_data.columns and apr_data['adjusted_apr'].notna().any() |
|
|
|
stats = { |
|
'agent_id': agent_id, |
|
'agent_name': agent_name, |
|
'total_points': len(agent_data), |
|
'apr_points': len(apr_data), |
|
'performance_points': len(perf_data), |
|
'real_apr_points': len(real_apr), |
|
'real_performance_points': len(real_perf), |
|
'avg_apr': apr_data['apr'].mean() if not apr_data.empty else None, |
|
'avg_performance': perf_data['apr'].mean() if not perf_data.empty else None, |
|
'max_apr': apr_data['apr'].max() if not apr_data.empty else None, |
|
'min_apr': apr_data['apr'].min() if not apr_data.empty else None, |
|
'avg_adjusted_apr': apr_data['adjusted_apr'].mean() if has_adjusted_apr else None, |
|
'max_adjusted_apr': apr_data['adjusted_apr'].max() if has_adjusted_apr else None, |
|
'min_adjusted_apr': apr_data['adjusted_apr'].min() if has_adjusted_apr else None, |
|
'latest_timestamp': agent_data['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not agent_data.empty else None |
|
} |
|
stats_list.append(stats) |
|
|
|
|
|
apr_only = df[df['metric_type'] == 'APR'] |
|
perf_only = df[df['metric_type'] == 'Performance'] |
|
|
|
|
|
has_adjusted_apr_overall = 'adjusted_apr' in apr_only.columns and apr_only['adjusted_apr'].notna().any() |
|
|
|
overall_stats = { |
|
'agent_id': 'ALL', |
|
'agent_name': 'All Agents', |
|
'total_points': len(df), |
|
'apr_points': len(apr_only), |
|
'performance_points': len(perf_only), |
|
'real_apr_points': len(apr_only[apr_only['is_dummy'] == False]), |
|
'real_performance_points': len(perf_only[perf_only['is_dummy'] == False]), |
|
'avg_apr': apr_only['apr'].mean() if not apr_only.empty else None, |
|
'avg_performance': perf_only['apr'].mean() if not perf_only.empty else None, |
|
'max_apr': apr_only['apr'].max() if not apr_only.empty else None, |
|
'min_apr': apr_only['apr'].min() if not apr_only.empty else None, |
|
'avg_adjusted_apr': apr_only['adjusted_apr'].mean() if has_adjusted_apr_overall else None, |
|
'max_adjusted_apr': apr_only['adjusted_apr'].max() if has_adjusted_apr_overall else None, |
|
'min_adjusted_apr': apr_only['adjusted_apr'].min() if has_adjusted_apr_overall else None, |
|
'latest_timestamp': df['timestamp'].max().strftime('%Y-%m-%d %H:%M:%S') if not df.empty else None |
|
} |
|
stats_list.append(overall_stats) |
|
|
|
return pd.DataFrame(stats_list) |
|
|
|
|
|
def create_transaction_visualizations():
    """Dummy implementation that returns a placeholder graph"""
|
fig = go.Figure() |
|
fig.add_annotation( |
|
text="Blockchain data loading disabled - placeholder visualization", |
|
x=0.5, y=0.5, xref="paper", yref="paper", |
|
showarrow=False, font=dict(size=20) |
|
) |
|
return fig |
|
|
|
def create_active_agents_visualizations(): |
|
"""Dummy implementation that returns a placeholder graph""" |
|
fig = go.Figure() |
|
fig.add_annotation( |
|
text="Blockchain data loading disabled - placeholder visualization", |
|
x=0.5, y=0.5, xref="paper", yref="paper", |
|
showarrow=False, font=dict(size=20) |
|
) |
|
return fig |
|
|
|
|
|
""" |
|
# Load environment variables from .env file |
|
# RPC URLs |
|
OPTIMISM_RPC_URL = os.getenv('OPTIMISM_RPC_URL') |
|
MODE_RPC_URL = os.getenv('MODE_RPC_URL') |
|
|
|
# Initialize Web3 instances |
|
web3_instances = { |
|
'optimism': Web3(Web3.HTTPProvider(OPTIMISM_RPC_URL)), |
|
'mode': Web3(Web3.HTTPProvider(MODE_RPC_URL)) |
|
} |
|
|
|
# Contract addresses for service registries |
|
contract_addresses = { |
|
'optimism': '0x3d77596beb0f130a4415df3D2D8232B3d3D31e44', |
|
'mode': '0x3C1fF68f5aa342D296d4DEe4Bb1cACCA912D95fE' |
|
} |
|
|
|
# Load the ABI from the provided JSON file |
|
with open('./contracts/service_registry_abi.json', 'r') as abi_file: |
|
contract_abi = json.load(abi_file) |
|
|
|
# Create the contract instances |
|
service_registries = { |
|
chain_name: web3.eth.contract(address=contract_addresses[chain_name], abi=contract_abi) |
|
for chain_name, web3 in web3_instances.items() |
|
} |
|
|
|
# Check if connections are successful |
|
for chain_name, web3_instance in web3_instances.items(): |
|
if not web3_instance.is_connected(): |
|
raise Exception(f"Failed to connect to the {chain_name.capitalize()} network.") |
|
else: |
|
print(f"Successfully connected to the {chain_name.capitalize()} network.") |
|
""" |
|
|
|
|
|
def get_transfers(integrator: str, wallet: str) -> Dict[str, Any]:
    """Dummy function that returns an empty transfers result"""
|
return {"transfers": []} |
|
|
|
def fetch_and_aggregate_transactions(): |
|
"""Dummy function that returns empty data""" |
|
return [], {} |
|
|
|
|
|
def process_transactions_and_agents(data): |
|
"""Dummy function that returns empty dataframes""" |
|
df_transactions = pd.DataFrame() |
|
df_agents = pd.DataFrame(columns=['date', 'agent_count']) |
|
df_agents_weekly = pd.DataFrame() |
|
return df_transactions, df_agents, df_agents_weekly |
|
|
|
|
|
def create_visualizations(): |
|
""" |
|
# Commenting out the original visualization code temporarily for debugging |
|
transactions_data = fetch_and_aggregate_transactions() |
|
df_transactions, df_agents, df_agents_weekly = process_transactions_and_agents(transactions_data) |
|
|
|
# Fetch daily value locked data |
|
df_tvl = pd.read_csv('daily_value_locked.csv') |
|
|
|
# Calculate total value locked per chain per day |
|
df_tvl["total_value_locked_usd"] = df_tvl["amount0_usd"] + df_tvl["amount1_usd"] |
|
df_tvl_daily = df_tvl.groupby(["date", "chain_name"])["total_value_locked_usd"].sum().reset_index() |
|
df_tvl_daily['date'] = pd.to_datetime(df_tvl_daily['date']) |
|
|
|
# Filter out dates with zero total value locked |
|
df_tvl_daily = df_tvl_daily[df_tvl_daily["total_value_locked_usd"] > 0] |
|
|
|
chain_name_map = { |
|
"mode": "Mode", |
|
"base": "Base", |
|
"ethereum": "Ethereum", |
|
"optimism": "Optimism" |
|
} |
|
df_tvl_daily["chain_name"] = df_tvl_daily["chain_name"].map(chain_name_map) |
|
|
|
# Plot total value locked |
|
fig_tvl = px.bar( |
|
df_tvl_daily, |
|
x="date", |
|
y="total_value_locked_usd", |
|
color="chain_name", |
|
opacity=0.7, |
|
title="Total Volume Invested in Pools in Different Chains Daily", |
|
labels={"date": "Date","chain_name": "Transaction Chain", "total_value_locked_usd": "Total Volume Invested (USD)"}, |
|
barmode='stack', |
|
color_discrete_map={ |
|
"Mode": "orange", |
|
"Base": "purple", |
|
"Ethereum": "darkgreen", |
|
"Optimism": "blue" |
|
} |
|
) |
|
fig_tvl.update_layout( |
|
xaxis_title="Date", |
|
|
|
yaxis=dict(tickmode='linear', tick0=0, dtick=4), |
|
xaxis=dict( |
|
tickmode='array', |
|
tickvals=df_tvl_daily['date'], |
|
ticktext=df_tvl_daily['date'].dt.strftime('%b %d'), |
|
tickangle=-45, |
|
), |
|
bargap=0.6, # Increase gap between bar groups (0-1) |
|
bargroupgap=0.1, # Decrease gap between bars in a group (0-1) |
|
height=600, |
|
width=1200, # Specify width to prevent bars from being too wide |
|
showlegend=True, |
|
template='plotly_white' |
|
) |
|
fig_tvl.update_xaxes(tickformat="%b %d") |
|
|
|
chain_name_map = { |
|
10: "Optimism", |
|
8453: "Base", |
|
1: "Ethereum", |
|
34443: "Mode" |
|
} |
|
|
|
df_transactions["sending_chain"] = df_transactions["sending_chain"].map(chain_name_map) |
|
df_transactions["receiving_chain"] = df_transactions["receiving_chain"].map(chain_name_map) |
|
|
|
df_transactions["sending_chain"] = df_transactions["sending_chain"].astype(str) |
|
df_transactions["receiving_chain"] = df_transactions["receiving_chain"].astype(str) |
|
df_transactions['date'] = pd.to_datetime(df_transactions['date']) |
|
df_transactions["is_swap"] = df_transactions.apply(lambda x: x["sending_chain"] == x["receiving_chain"], axis=1) |
|
|
|
swaps_per_chain = df_transactions[df_transactions["is_swap"]].groupby(["date", "sending_chain"]).size().reset_index(name="swap_count") |
|
fig_swaps_chain = px.bar( |
|
swaps_per_chain, |
|
x="date", |
|
y="swap_count", |
|
color="sending_chain", |
|
title="Chain Daily Activity: Swaps", |
|
labels={"sending_chain": "Transaction Chain", "swap_count": "Daily Swap Nr"}, |
|
barmode="stack", |
|
opacity=0.7, |
|
color_discrete_map={ |
|
"Optimism": "blue", |
|
"Ethereum": "darkgreen", |
|
"Base": "purple", |
|
"Mode": "orange" |
|
} |
|
) |
|
fig_swaps_chain.update_layout( |
|
xaxis_title="Date", |
|
yaxis_title="Daily Swap Count", |
|
yaxis=dict(tickmode='linear', tick0=0, dtick=1), |
|
xaxis=dict( |
|
tickmode='array', |
|
tickvals=[d for d in swaps_per_chain['date']], |
|
ticktext=[d.strftime('%m-%d') for d in swaps_per_chain['date']], |
|
tickangle=-45, |
|
), |
|
bargap=0.6, |
|
bargroupgap=0.1, |
|
height=600, |
|
width=1200, |
|
margin=dict(l=50, r=50, t=50, b=50), |
|
showlegend=True, |
|
legend=dict( |
|
yanchor="top", |
|
y=0.99, |
|
xanchor="right", |
|
x=0.99 |
|
), |
|
template='plotly_white' |
|
) |
|
fig_swaps_chain.update_xaxes(tickformat="%m-%d") |
|
|
|
df_transactions["is_bridge"] = df_transactions.apply(lambda x: x["sending_chain"] != x["receiving_chain"], axis=1) |
|
|
|
bridges_per_chain = df_transactions[df_transactions["is_bridge"]].groupby(["date", "sending_chain"]).size().reset_index(name="bridge_count") |
|
fig_bridges_chain = px.bar( |
|
bridges_per_chain, |
|
x="date", |
|
y="bridge_count", |
|
color="sending_chain", |
|
title="Chain Daily Activity: Bridges", |
|
labels={"sending_chain": "Transaction Chain", "bridge_count": "Daily Bridge Nr"}, |
|
barmode="stack", |
|
opacity=0.7, |
|
color_discrete_map={ |
|
"Optimism": "blue", |
|
"Ethereum": "darkgreen", |
|
"Base": "purple", |
|
"Mode": "orange" |
|
} |
|
) |
|
fig_bridges_chain.update_layout( |
|
xaxis_title="Date", |
|
yaxis_title="Daily Bridge Count", |
|
yaxis=dict(tickmode='linear', tick0=0, dtick=1), |
|
xaxis=dict( |
|
tickmode='array', |
|
tickvals=[d for d in bridges_per_chain['date']], |
|
ticktext=[d.strftime('%m-%d') for d in bridges_per_chain['date']], |
|
tickangle=-45, |
|
), |
|
bargap=0.6, |
|
bargroupgap=0.1, |
|
height=600, |
|
width=1200, |
|
margin=dict(l=50, r=50, t=50, b=50), |
|
showlegend=True, |
|
legend=dict( |
|
yanchor="top", |
|
y=0.99, |
|
xanchor="right", |
|
x=0.99 |
|
), |
|
template='plotly_white' |
|
) |
|
fig_bridges_chain.update_xaxes(tickformat="%m-%d") |
|
df_agents['date'] = pd.to_datetime(df_agents['date']) |
|
|
|
daily_agents_df = df_agents.groupby('date').agg({'agent_count': 'sum'}).reset_index() |
|
daily_agents_df.rename(columns={'agent_count': 'daily_agent_count'}, inplace=True) |
|
# Sort by date to ensure proper running total calculation |
|
daily_agents_df = daily_agents_df.sort_values('date') |
|
|
|
# Create week column |
|
daily_agents_df['week'] = daily_agents_df['date'].dt.to_period('W').apply(lambda r: r.start_time) |
|
|
|
# Calculate running total within each week |
|
daily_agents_df['running_weekly_total'] = daily_agents_df.groupby('week')['daily_agent_count'].cumsum() |
|
|
|
# Create final merged dataframe |
|
weekly_merged_df = daily_agents_df.copy() |
|
adjustment_date = pd.to_datetime('2024-11-15') |
|
weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'daily_agent_count'] -= 1 |
|
weekly_merged_df.loc[weekly_merged_df['date'] == adjustment_date, 'running_weekly_total'] -= 1 |
|
fig_agents_registered = go.Figure(data=[ |
|
go.Bar( |
|
name='Daily nr of Registered Agents', |
|
x=weekly_merged_df['date'].dt.strftime("%b %d"), |
|
y=weekly_merged_df['daily_agent_count'], |
|
opacity=0.7, |
|
marker_color='blue' |
|
), |
|
go.Bar( |
|
name='Weekly Nr of Registered Agents', |
|
x=weekly_merged_df['date'].dt.strftime("%b %d"), |
|
y=weekly_merged_df['running_weekly_total'], |
|
opacity=0.7, |
|
marker_color='purple' |
|
) |
|
]) |
|
|
|
fig_agents_registered.update_layout( |
|
xaxis_title='Date', |
|
yaxis_title='Number of Agents', |
|
title="Nr of Agents Registered", |
|
barmode='group', |
|
yaxis=dict(tickmode='linear', tick0=0, dtick=1), |
|
xaxis=dict( |
|
categoryorder='array', |
|
categoryarray=weekly_merged_df['date'].dt.strftime("%b %d"), |
|
tickangle=-45 |
|
), |
|
bargap=0.3, |
|
height=600, |
|
width=1200, |
|
showlegend=True, |
|
legend=dict( |
|
yanchor="top", |
|
xanchor="right", |
|
), |
|
template='plotly_white', |
|
) |
|
|
|
return fig_swaps_chain, fig_bridges_chain, fig_agents_registered,fig_tvl |
|
""" |
|
|
|
fig_swaps_chain = go.Figure() |
|
fig_swaps_chain.add_annotation( |
|
text="Blockchain data loading disabled - placeholder visualization", |
|
x=0.5, y=0.5, xref="paper", yref="paper", |
|
showarrow=False, font=dict(size=20) |
|
) |
|
|
|
fig_bridges_chain = go.Figure() |
|
fig_bridges_chain.add_annotation( |
|
text="Blockchain data loading disabled - placeholder visualization", |
|
x=0.5, y=0.5, xref="paper", yref="paper", |
|
showarrow=False, font=dict(size=20) |
|
) |
|
|
|
fig_agents_registered = go.Figure() |
|
fig_agents_registered.add_annotation( |
|
text="Blockchain data loading disabled - placeholder visualization", |
|
x=0.5, y=0.5, xref="paper", yref="paper", |
|
showarrow=False, font=dict(size=20) |
|
) |
|
|
|
fig_tvl = go.Figure() |
|
fig_tvl.add_annotation( |
|
text="Blockchain data loading disabled - placeholder visualization", |
|
x=0.5, y=0.5, xref="paper", yref="paper", |
|
showarrow=False, font=dict(size=20) |
|
) |
|
|
|
return fig_swaps_chain, fig_bridges_chain, fig_agents_registered, fig_tvl |
|
|
|
|
|
def dashboard(): |
|
with gr.Blocks() as demo: |
|
gr.Markdown("# Valory APR Metrics") |
|
|
|
|
|
with gr.Tab("APR Metrics"): |
|
with gr.Column(): |
|
refresh_btn = gr.Button("Refresh APR Data") |
|
|
|
|
|
combined_graph = gr.Plot(label="APR for All Agents") |
|
|
|
|
|
with gr.Row(visible=True): |
|
gr.Markdown("##### Toggle Graph Lines", elem_id="toggle_title") |
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
with gr.Row(elem_id="toggle_container"): |
|
with gr.Column(scale=1, min_width=150): |
|
apr_toggle = gr.Checkbox(label="APR Moving Average", value=True, elem_id="apr_toggle") |
|
|
|
with gr.Column(scale=1, min_width=150): |
|
adjusted_apr_toggle = gr.Checkbox(label="Adjusted APR Moving Average", value=True, elem_id="adjusted_apr_toggle") |
|
|
|
|
|
gr.HTML(""" |
|
<style> |
|
/* Style for toggle checkboxes */ |
|
#apr_toggle .gr-checkbox { |
|
accent-color: #e74c3c !important; |
|
} |
|
|
|
#adjusted_apr_toggle .gr-checkbox { |
|
accent-color: #2ecc71 !important; |
|
} |
|
|
|
/* Make the toggle section more compact */ |
|
#toggle_title { |
|
margin-bottom: 0; |
|
margin-top: 10px; |
|
} |
|
|
|
#toggle_container { |
|
margin-top: 5px; |
|
} |
|
|
|
/* Style the checkbox labels */ |
|
.gr-form.gr-box { |
|
border: none !important; |
|
background: transparent !important; |
|
} |
|
|
|
/* Make checkboxes and labels appear on the same line */ |
|
.gr-checkbox-container { |
|
display: flex !important; |
|
align-items: center !important; |
|
} |
|
|
|
/* Add colored indicators */ |
|
#apr_toggle .gr-checkbox-label::before { |
|
content: "●"; |
|
color: #e74c3c; |
|
margin-right: 5px; |
|
} |
|
|
|
#adjusted_apr_toggle .gr-checkbox-label::before { |
|
content: "●"; |
|
color: #2ecc71; |
|
margin-right: 5px; |
|
} |
|
</style> |
|
""") |
|
|
|
|
|
def update_apr_graph(show_apr_ma=True, show_adjusted_apr_ma=True): |
|
|
|
try: |
|
combined_fig, _ = generate_apr_visualizations() |
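
                    # NOTE: every refresh/toggle triggers a full re-fetch from the API;
                    # reusing the cached global_df here would avoid the extra round-trips.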
|
|
|
|
|
for i, trace in enumerate(combined_fig.data): |
|
|
|
if trace.name == 'Moving Average APR (3d window)': |
|
trace.visible = show_apr_ma |
|
elif trace.name == 'Moving Average Adjusted APR (3d window)': |
|
trace.visible = show_adjusted_apr_ma |
|
|
|
return combined_fig |
|
except Exception as e: |
|
logger.exception("Error generating APR visualization") |
|
|
|
error_fig = go.Figure() |
|
error_fig.add_annotation( |
|
text=f"Error: {str(e)}", |
|
x=0.5, y=0.5, |
|
showarrow=False, |
|
font=dict(size=15, color="red") |
|
) |
|
return error_fig |
|
|
|
|
|
placeholder_fig = go.Figure() |
|
placeholder_fig.add_annotation( |
|
text="Click 'Refresh APR Data' to load APR graph", |
|
x=0.5, y=0.5, |
|
showarrow=False, |
|
font=dict(size=15) |
|
) |
|
combined_graph.value = placeholder_fig |
|
|
|
|
|
def update_graph_with_toggles(apr_visible, adjusted_apr_visible): |
|
return update_apr_graph(apr_visible, adjusted_apr_visible) |
|
|
|
|
|
def refresh_graph(): |
|
return update_apr_graph(apr_toggle.value, adjusted_apr_toggle.value) |
|
|
|
|
|
refresh_btn.click(fn=refresh_graph, inputs=None, outputs=[combined_graph]) |
|
|
|
|
|
apr_toggle.change( |
|
fn=update_graph_with_toggles, |
|
inputs=[apr_toggle, adjusted_apr_toggle], |
|
outputs=[combined_graph] |
|
) |
|
|
|
adjusted_apr_toggle.change( |
|
fn=update_graph_with_toggles, |
|
inputs=[apr_toggle, adjusted_apr_toggle], |
|
outputs=[combined_graph] |
|
) |
|
|
|
return demo |
|
|
|
|
|
if __name__ == "__main__": |
|
dashboard().launch() |
|
|