# model_trace/src/populate.py
import sys
import traceback

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn
from src.leaderboard.read_evals import get_raw_eval_results

def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Create a DataFrame from all the individual experiment results."""

    def _empty_df() -> pd.DataFrame:
        """Empty DataFrame with the expected columns and float dtypes."""
        empty_df = pd.DataFrame(columns=cols)
        empty_df[AutoEvalColumn.average.name] = pd.Series(dtype=float)
        for col in benchmark_cols:
            empty_df[col] = pd.Series(dtype=float)
        return empty_df

    try:
        sys.stderr.write("\n=== Starting leaderboard creation ===\n")
        sys.stderr.write(f"Looking for results in: {results_path}\n")
        sys.stderr.write(f"Expected columns: {cols}\n")
        sys.stderr.write(f"Benchmark columns: {benchmark_cols}\n")
        sys.stderr.flush()

        raw_data = get_raw_eval_results(results_path)
        sys.stderr.write(f"\nFound {len(raw_data)} raw results\n")
        sys.stderr.flush()

        # Convert each raw result to a dict, skipping entries that fail to serialize.
        all_data_json = []
        for i, v in enumerate(raw_data):
            try:
                data_dict = v.to_dict()
                all_data_json.append(data_dict)
                sys.stderr.write(f"Successfully processed result {i + 1}/{len(raw_data)}: {v.full_model}\n")
                sys.stderr.flush()
            except Exception as e:
                sys.stderr.write(f"Error processing result {i + 1}/{len(raw_data)} ({v.full_model}): {e}\n")
                sys.stderr.flush()
                continue

        sys.stderr.write(f"\nConverted to {len(all_data_json)} JSON records\n")
        sys.stderr.flush()
        if all_data_json:
            sys.stderr.write("Sample record keys: " + str(list(all_data_json[0].keys())) + "\n")
            sys.stderr.flush()

        if not all_data_json:
            sys.stderr.write("\nNo data found, creating empty DataFrame\n")
            sys.stderr.flush()
            return _empty_df()

        df = pd.DataFrame.from_records(all_data_json)
        sys.stderr.write("\nCreated DataFrame with columns: " + str(df.columns.tolist()) + "\n")
        sys.stderr.write("DataFrame shape: " + str(df.shape) + "\n")
        sys.stderr.flush()

        # Sort by average score; keep the unsorted DataFrame if the column is missing.
        try:
            df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
            sys.stderr.write("\nSorted DataFrame by average\n")
            sys.stderr.flush()
        except KeyError as e:
            sys.stderr.write(f"\nError sorting DataFrame: {e}\n")
            sys.stderr.write("Available columns: " + str(df.columns.tolist()) + "\n")
            sys.stderr.flush()

        # Keep only the requested columns, rounded to two decimals.
        try:
            df = df[cols].round(decimals=2)
            sys.stderr.write("\nSelected and rounded columns\n")
            sys.stderr.flush()
        except KeyError as e:
            sys.stderr.write(f"\nError selecting columns: {e}\n")
            sys.stderr.write("Requested columns: " + str(cols) + "\n")
            sys.stderr.write("Available columns: " + str(df.columns.tolist()) + "\n")
            sys.stderr.flush()
            # Fall back to an empty DataFrame with the correct structure.
            return _empty_df()

        # Filter out rows where perplexity has not been evaluated.
        df = df[has_no_nan_values(df, benchmark_cols)]
        sys.stderr.write("\nFinal DataFrame shape after filtering: " + str(df.shape) + "\n")
        sys.stderr.write("Final columns: " + str(df.columns.tolist()) + "\n")
        sys.stderr.flush()
        return df

    except Exception as e:
        sys.stderr.write(f"\nCritical error in get_leaderboard_df: {e}\n")
        sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
        sys.stderr.flush()
        # Return an empty DataFrame as a fallback so callers can still render a table.
        return _empty_df()
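

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal way this function
# might be exercised from the command line for debugging. The results path and
# the benchmark column name below are placeholders/assumptions, not values
# defined in this repo; real callers pass column lists built from
# AutoEvalColumn and the benchmark definitions.
if __name__ == "__main__":
    example_results_path = "eval-results"  # hypothetical local results directory
    example_benchmark_cols = ["perplexity"]  # hypothetical benchmark column name
    example_cols = [AutoEvalColumn.average.name] + example_benchmark_cols

    leaderboard_df = get_leaderboard_df(example_results_path, example_cols, example_benchmark_cols)
    print(leaderboard_df.head())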