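"""Gradio app for the perplexity leaderboard Space.

Displays stored evaluation results and lets users trigger on-demand perplexity
evaluations of Hugging Face models. Results are synced from the RESULTS_REPO
dataset on the Hub at startup.
"""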
import gradio as gr
import pandas as pd
from huggingface_hub import snapshot_download, create_repo
from huggingface_hub.utils import RepositoryNotFoundError
import os
import sys
import traceback

from src.about import (
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    AutoEvalColumn,
    fields,
)
from src.envs import API, EVAL_RESULTS_PATH, RESULTS_REPO, TOKEN, OWNER
from src.populate import get_leaderboard_df
from src.evaluation.dynamic_eval import run_dynamic_perplexity_eval

def create_results_dataframe():
    """Create and return the results DataFrame for display"""
    
    sys.stderr.write("\nπŸ“Š CREATE_RESULTS_DATAFRAME CALLED\n")
    sys.stderr.flush()
    
    df = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
    
    sys.stderr.write(f"πŸ“‹ Retrieved leaderboard df: {df.shape if df is not None else 'None'}\n")
    sys.stderr.flush()
    
    if df is None or df.empty:
        sys.stderr.write("⚠️ DataFrame is None or empty, returning empty DataFrame\n")
        sys.stderr.flush()
        # Return empty DataFrame with proper columns
        return pd.DataFrame(columns=["Model", "Perplexity", "Match P-Value", "Average Score", "Type", "Precision"])
    
    sys.stderr.write(f"πŸ“Š Original DataFrame columns: {list(df.columns)}\n")
    sys.stderr.flush()
    
    # Check if required columns exist
    required_cols = [
        AutoEvalColumn.model.name,
        "Perplexity",
        AutoEvalColumn.model_trace_p_value.name,
        AutoEvalColumn.average.name,
        AutoEvalColumn.model_type.name,
        AutoEvalColumn.precision.name,
    ]
    
    missing_cols = [col for col in required_cols if col not in df.columns]
    if missing_cols:
        sys.stderr.write(f"⚠️ Missing columns in DataFrame: {missing_cols}\n")
        sys.stderr.flush()
        # Add any missing columns with a None default so the column selection below cannot fail
        for col in missing_cols:
            df[col] = None
            sys.stderr.write(f"βž• Added {col} column with None values\n")
    
    # Select and rename columns for display
    try:
        display_df = df[required_cols].copy()
        sys.stderr.write(f"βœ… Selected columns successfully: {list(display_df.columns)}\n")
    except Exception as e:
        sys.stderr.write(f"πŸ’₯ Error selecting columns: {e}\n")
        sys.stderr.flush()
        return pd.DataFrame(columns=["Model", "Perplexity", "Match P-Value", "Average Score", "Type", "Precision"])
    
    # Rename columns for better display
    display_df.columns = ["Model", "Perplexity", "Match P-Value", "Average Score", "Type", "Precision"]
    
    sys.stderr.write(f"🎯 Final display DataFrame shape: {display_df.shape}\n")
    sys.stderr.write(f"🎯 Final columns: {list(display_df.columns)}\n")
    
    # Check p-value column
    if "Match P-Value" in display_df.columns:
        p_value_stats = display_df["Match P-Value"].describe()
        sys.stderr.write(f"πŸ“ˆ P-Value column stats:\n{p_value_stats}\n")
    
    sys.stderr.flush()
    return display_df

def run_perplexity_test(model_name, revision, precision):
    """Run perplexity evaluation on demand."""
    
    if not model_name:
        return "Please enter a model name.", gr.update(), gr.update()
    
    try:
        # Use stderr for more reliable logging in HF Spaces
        sys.stderr.write(f"\n=== RUNNING PERPLEXITY TEST ===\n")
        sys.stderr.write(f"Model: {model_name}\n")
        sys.stderr.write(f"Revision: {revision}\n")
        sys.stderr.write(f"Precision: {precision}\n")
        sys.stderr.flush()
        
        success, result = run_dynamic_perplexity_eval(model_name, revision, precision)
        sys.stderr.write(f"Evaluation result - Success: {success}, Result: {result}\n")
        sys.stderr.flush()
        
        if success:
            sys.stderr.write("Evaluation succeeded - updating both results tables\n")
            sys.stderr.flush()
            
            # Get updated results (this will trigger model trace p-value computation for the new model)
            sys.stderr.write("πŸ”„ Creating updated results DataFrame (may compute model trace p-values)...\n")
            sys.stderr.flush()
            
            updated_df = create_results_dataframe()
            
            sys.stderr.write("βœ… Updated DataFrame created successfully\n")
            sys.stderr.flush()
            
            success_msg = f"""βœ… **Perplexity evaluation completed successfully!**

**Model**: {model_name}
**Perplexity Score**: {result:.4f}

πŸŽ‰ **Results have been saved and both tables have been updated!**

Note: Model trace p-value computation may take additional time and will appear in the logs."""
            
            return success_msg, gr.update(value=updated_df), gr.update(value=updated_df)
        else:
            return f"❌ **Evaluation failed**: {result}", gr.update(), gr.update()
            
    except Exception as e:
        error_msg = str(e)
        traceback_str = traceback.format_exc()
        sys.stderr.write(f"Critical error in run_perplexity_test: {error_msg}\n")
        sys.stderr.write(f"Traceback: {traceback_str}\n")
        sys.stderr.flush()
        return f"❌ **Critical error**: {error_msg}", gr.update(), gr.update()

# Initialize results repository and directory
try:
    # Try to download existing repository
    try:
        snapshot_download(
            repo_id=RESULTS_REPO,
            local_dir=EVAL_RESULTS_PATH,
            repo_type="dataset",
            tqdm_class=None,
            etag_timeout=30,
            token=TOKEN
        )
    except RepositoryNotFoundError:
        # Create the repository if it doesn't exist
        print(f"Creating new results repository: {RESULTS_REPO}")
        create_repo(
            repo_id=RESULTS_REPO,
            repo_type="dataset",
            private=False,
            token=TOKEN
        )
        # Create local directory
        os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)
except Exception as e:
    print(f"Error initializing results: {e}")
    # Ensure local directory exists even if repo operations fail
    os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)

# Get initial results data
sys.stderr.write("\nπŸš€ STARTING GRADIO APP INITIALIZATION\n")
sys.stderr.write("πŸ“Š Creating initial results DataFrame...\n")
sys.stderr.flush()

RESULTS_DF = create_results_dataframe()

sys.stderr.write(f"βœ… Initial DataFrame created with shape: {RESULTS_DF.shape}\n")
sys.stderr.write(f"πŸ“‹ Columns: {list(RESULTS_DF.columns)}\n")
sys.stderr.flush()

# Create the Gradio interface
sys.stderr.write("🎨 Creating Gradio interface...\n")
sys.stderr.flush()

demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("πŸ… Results", elem_id="results-tab", id=0):
            gr.Markdown("## Model Evaluation Results")
            results_table = gr.DataFrame(
                value=RESULTS_DF,
                headers=["Model", "Perplexity", "Match P-Value", "Average Score", "Type", "Precision"],
                interactive=False,
                wrap=False
            )

        with gr.TabItem("πŸ“ About", elem_id="about-tab", id=1):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

        with gr.TabItem("πŸ§ͺ Test Model", elem_id="test-model-tab", id=2):
            gr.Markdown("## Run Perplexity Test\n\nTest any Hugging Face model for perplexity evaluation.")
            
            with gr.Row():
                with gr.Column():
                    model_name = gr.Textbox(label="Model name", placeholder="openai-community/gpt2")
                    revision = gr.Textbox(label="Revision", placeholder="main", value="main")
                    precision = gr.Dropdown(
                        choices=["float16", "bfloat16"],
                        label="Precision",
                        value="float16"
                    )
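                    # NOTE: this checkbox is displayed but not currently passed to the click handler below.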
                    debug_mode = gr.Checkbox(label="Enable debug mode (more verbose logging)", value=True)
                
                with gr.Column():
                    test_button = gr.Button("πŸš€ Run Perplexity Test", variant="primary")
                    result = gr.Markdown()
            
            gr.Markdown("## Live Results")
            live_results_table = gr.DataFrame(
                value=RESULTS_DF,
                headers=["Model", "Perplexity", "Match P-Value", "Average Score", "Type", "Precision"],
                interactive=False,
                wrap=False
            )
            
            gr.Markdown("""
            ### Tips:
            - **Check stderr logs** in HF Spaces for detailed debugging information
            - **Results will update automatically** in the table above after evaluation completes
            - **Example models to test**: `openai-community/gpt2`, `EleutherAI/gpt-neo-1.3B`, `openai-community/gpt2-large`
            - **Lower perplexity scores = better performance** (better at predicting text)
            
            ### How it works:
            1. Enter a model name from Hugging Face Hub
            2. Click "Run Perplexity Test" 
            3. Wait for evaluation to complete (may take a few minutes for large models)
            4. Results will appear automatically in the table above!
            """)
            
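            # Outputs map, in order, to: the status message, the live results table, and the main results table.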
            test_button.click(
                run_perplexity_test,
                [model_name, revision, precision],
                [result, live_results_table, results_table]
            )

sys.stderr.write("🎯 GRADIO INTERFACE SETUP COMPLETE\n")
sys.stderr.write("πŸš€ LAUNCHING GRADIO APP WITH MODEL TRACING INTEGRATION\n")
sys.stderr.write("πŸ“Š Features enabled:\n")
sys.stderr.write("   - Perplexity evaluation\n")
sys.stderr.write("   - Model trace p-value computation (vs GPT-2 base)\n")
sys.stderr.write("   - Match statistic with alignment\n")
sys.stderr.write("πŸŽ‰ Ready to accept requests!\n")
sys.stderr.flush()

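# Queue incoming requests (default concurrency limit of 5 per event) and launch the app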
demo.queue(default_concurrency_limit=5).launch()