import sys
import traceback

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn
from src.leaderboard.read_evals import get_raw_eval_results

def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results"""
    try:
        sys.stderr.write("\n=== Starting leaderboard creation ===\n")
        sys.stderr.write(f"Looking for results in: {results_path}\n")
        sys.stderr.write(f"Expected columns: {cols}\n")
        sys.stderr.write(f"Benchmark columns: {benchmark_cols}\n")
        sys.stderr.flush()

        raw_data = get_raw_eval_results(results_path)
        sys.stderr.write(f"\nFound {len(raw_data)} raw results\n")
        sys.stderr.flush()

        all_data_json = []
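        # Flatten each raw eval result into a plain dict; entries that fail to convert are skipped.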
        for i, v in enumerate(raw_data):
            try:
                data_dict = v.to_dict()
                all_data_json.append(data_dict)
                sys.stderr.write(f"Successfully processed result {i+1}/{len(raw_data)}: {v.full_model}\n")
                sys.stderr.flush()
            except Exception as e:
                sys.stderr.write(f"Error processing result {i+1}/{len(raw_data)} ({v.full_model}): {e}\n")
                sys.stderr.flush()
                continue
        
        sys.stderr.write(f"\nConverted to {len(all_data_json)} JSON records\n")
        sys.stderr.flush()
        
        if all_data_json:
            sys.stderr.write("Sample record keys: " + str(list(all_data_json[0].keys())) + "\n")
            sys.stderr.flush()

        if not all_data_json:
            sys.stderr.write("\nNo data found, creating empty DataFrame\n")
            sys.stderr.flush()
            empty_df = pd.DataFrame(columns=cols)
            # Ensure correct column types
            empty_df[AutoEvalColumn.average.name] = pd.Series(dtype=float)
            for col in benchmark_cols:
                empty_df[col] = pd.Series(dtype=float)
            return empty_df

        df = pd.DataFrame.from_records(all_data_json)
        sys.stderr.write("\nCreated DataFrame with columns: " + str(df.columns.tolist()) + "\n")
        sys.stderr.write("DataFrame shape: " + str(df.shape) + "\n")
        sys.stderr.flush()

        try:
            df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
            sys.stderr.write("\nSorted DataFrame by average\n")
            sys.stderr.flush()
        except KeyError as e:
            sys.stderr.write(f"\nError sorting DataFrame: {e}\n")
            sys.stderr.write("Available columns: " + str(df.columns.tolist()) + "\n")
            sys.stderr.flush()

        try:
            df = df[cols].round(decimals=2)
            sys.stderr.write("\nSelected and rounded columns\n")
            sys.stderr.flush()
        except KeyError as e:
            sys.stderr.write(f"\nError selecting columns: {e}\n")
            sys.stderr.write("Requested columns: " + str(cols) + "\n")
            sys.stderr.write("Available columns: " + str(df.columns.tolist()) + "\n")
            sys.stderr.flush()
            # Create empty DataFrame with correct structure
            empty_df = pd.DataFrame(columns=cols)
            empty_df[AutoEvalColumn.average.name] = pd.Series(dtype=float)
            for col in benchmark_cols:
                empty_df[col] = pd.Series(dtype=float)
            return empty_df

        # Filter out rows where any benchmark column (e.g. perplexity) has not been evaluated
        df = df[has_no_nan_values(df, benchmark_cols)]
        sys.stderr.write("\nFinal DataFrame shape after filtering: " + str(df.shape) + "\n")
        sys.stderr.write("Final columns: " + str(df.columns.tolist()) + "\n")
        sys.stderr.flush()
        
        return df
        
    except Exception as e:
        sys.stderr.write(f"\nCritical error in get_leaderboard_df: {e}\n")
        sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
        sys.stderr.flush()
        # Return empty DataFrame as fallback
        empty_df = pd.DataFrame(columns=cols)
        empty_df[AutoEvalColumn.average.name] = pd.Series(dtype=float)
        for col in benchmark_cols:
            empty_df[col] = pd.Series(dtype=float)
        return empty_df
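

# Minimal usage sketch (not part of the application code above): this assumes a
# hypothetical local "./eval-results" directory of downloaded result files and a
# placeholder benchmark column name. In the real app, the column lists come from
# the display configuration (e.g. the constants defined in src.display.utils).
if __name__ == "__main__":
    example_benchmark_cols = ["Perplexity"]  # placeholder benchmark column name
    example_cols = [AutoEvalColumn.average.name, *example_benchmark_cols]
    leaderboard_df = get_leaderboard_df(
        results_path="./eval-results",  # assumed path to the raw result files
        cols=example_cols,
        benchmark_cols=example_benchmark_cols,
    )
    sys.stderr.write(f"Leaderboard rows: {len(leaderboard_df)}\n")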