# model_trace / app.py
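"""Gradio app for the model_trace perplexity leaderboard.

Downloads (or creates) the results dataset repository, builds the leaderboard
DataFrame, and exposes an on-demand perplexity test that refreshes the results
tables after each evaluation run.
"""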
import gradio as gr
import pandas as pd
from huggingface_hub import snapshot_download, create_repo
from huggingface_hub.utils import RepositoryNotFoundError
import os
import sys
import traceback
from src.about import (
INTRODUCTION_TEXT,
LLM_BENCHMARKS_TEXT,
TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
BENCHMARK_COLS,
COLS,
AutoEvalColumn,
fields,
)
from src.envs import API, EVAL_RESULTS_PATH, RESULTS_REPO, TOKEN, OWNER
from src.populate import get_leaderboard_df
from src.evaluation.dynamic_eval import run_dynamic_perplexity_eval


def create_results_dataframe():
"""Create and return the results DataFrame for display"""
df = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
if df is None or df.empty:
# Return empty DataFrame with proper columns
return pd.DataFrame(columns=["Model", "Perplexity", "Average Score", "Type", "Precision"])
# Select and rename columns for display
display_df = df[[
AutoEvalColumn.model.name,
"Perplexity", # This matches the task column name from Tasks.task0.value.col_name
AutoEvalColumn.average.name,
AutoEvalColumn.model_type.name,
AutoEvalColumn.precision.name,
]].copy()
# Rename columns for better display
display_df.columns = ["Model", "Perplexity", "Average Score", "Type", "Precision"]
return display_df


def run_perplexity_test(model_name, revision, precision):
"""Run perplexity evaluation on demand."""
if not model_name:
return "Please enter a model name.", gr.update(), gr.update()
try:
# Use stderr for more reliable logging in HF Spaces
        sys.stderr.write("\n=== RUNNING PERPLEXITY TEST ===\n")
sys.stderr.write(f"Model: {model_name}\n")
sys.stderr.write(f"Revision: {revision}\n")
sys.stderr.write(f"Precision: {precision}\n")
sys.stderr.flush()
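        # run_dynamic_perplexity_eval returns a (success, result) pair: the
        # perplexity score on success, or an error message on failure.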
success, result = run_dynamic_perplexity_eval(model_name, revision, precision)
sys.stderr.write(f"Evaluation result - Success: {success}, Result: {result}\n")
sys.stderr.flush()
if success:
sys.stderr.write("Evaluation succeeded - updating both results tables\n")
sys.stderr.flush()
# Get updated results
updated_df = create_results_dataframe()
            success_msg = f"""✅ **Perplexity evaluation completed successfully!**
**Model**: {model_name}
**Perplexity Score**: {result:.4f}
🎉 **Results have been saved and both tables have been updated!**"""
return success_msg, gr.update(value=updated_df), gr.update(value=updated_df)
else:
return f"❌ **Evaluation failed**: {result}", gr.update(), gr.update()
except Exception as e:
error_msg = str(e)
traceback_str = traceback.format_exc()
sys.stderr.write(f"Critical error in run_perplexity_test: {error_msg}\n")
sys.stderr.write(f"Traceback: {traceback_str}\n")
sys.stderr.flush()
return f"❌ **Critical error**: {error_msg}", gr.update(), gr.update()


# Initialize results repository and directory
try:
# Try to download existing repository
try:
snapshot_download(
repo_id=RESULTS_REPO,
local_dir=EVAL_RESULTS_PATH,
repo_type="dataset",
tqdm_class=None,
etag_timeout=30,
token=TOKEN
)
except RepositoryNotFoundError:
# Create the repository if it doesn't exist
print(f"Creating new results repository: {RESULTS_REPO}")
create_repo(
repo_id=RESULTS_REPO,
repo_type="dataset",
private=False,
token=TOKEN
)
# Create local directory
os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)
except Exception as e:
print(f"Error initializing results: {e}")
# Ensure local directory exists even if repo operations fail
os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)
# Get initial results data
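# RESULTS_DF seeds both DataFrame components in the UI below; the click handler
# refreshes them with a new snapshot after each successful evaluation.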
RESULTS_DF = create_results_dataframe()


# Create the Gradio interface
demo = gr.Blocks(css=custom_css)
with demo:
gr.HTML(TITLE)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 Results", elem_id="results-tab", id=0):
gr.Markdown("## Model Evaluation Results")
results_table = gr.DataFrame(
value=RESULTS_DF,
headers=["Model", "Perplexity", "Average Score", "Type", "Precision"],
interactive=False,
wrap=False
)
        with gr.TabItem("📝 About", elem_id="about-tab", id=1):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
        with gr.TabItem("🧪 Test Model", elem_id="test-model-tab", id=2):
            gr.Markdown("## Run Perplexity Test\n\nRun an on-demand perplexity evaluation on a causal language model from the Hugging Face Hub.")
with gr.Row():
with gr.Column():
model_name = gr.Textbox(label="Model name", placeholder="openai-community/gpt2")
revision = gr.Textbox(label="Revision", placeholder="main", value="main")
precision = gr.Dropdown(
choices=["float16", "bfloat16"],
label="Precision",
value="float16"
)
debug_mode = gr.Checkbox(label="Enable debug mode (more verbose logging)", value=True)
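                    # Note: debug_mode is not passed to run_perplexity_test below,
                    # so toggling it currently has no effect on the evaluation.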
with gr.Column():
                    test_button = gr.Button("🚀 Run Perplexity Test", variant="primary")
result = gr.Markdown()
gr.Markdown("## Live Results")
live_results_table = gr.DataFrame(
value=RESULTS_DF,
headers=["Model", "Perplexity", "Average Score", "Type", "Precision"],
interactive=False,
wrap=False
)
gr.Markdown("""
### Tips:
- **Check stderr logs** in HF Spaces for detailed debugging information
- **Results will update automatically** in the table above after evaluation completes
- **Example models to test**: `openai-community/gpt2`, `EleutherAI/gpt-neo-1.3B`, `openai-community/gpt2-large`
- **Lower perplexity scores = better performance** (better at predicting text)
### How it works:
1. Enter a model name from Hugging Face Hub
2. Click "Run Perplexity Test"
3. Wait for evaluation to complete (may take a few minutes for large models)
4. Results will appear automatically in the table above!
""")
test_button.click(
run_perplexity_test,
[model_name, revision, precision],
[result, live_results_table, results_table]
)
demo.queue(default_concurrency_limit=5).launch()
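
# For reference, a minimal sketch of how a perplexity score like the one shown in
# the tables is typically computed with transformers. This is not this app's
# evaluation code (that lives in src/evaluation/dynamic_eval.py); the model name
# and sample text below are illustrative assumptions only:
#
#     import torch
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
#     model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
#     enc = tok("The quick brown fox jumps over the lazy dog.", return_tensors="pt")
#     with torch.no_grad():
#         # With labels supplied, the model returns the mean token cross-entropy.
#         loss = model(**enc, labels=enc["input_ids"]).loss
#     perplexity = torch.exp(loss).item()  # lower = better at predicting the text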