from time import sleep

import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

# from gradio_space_ci import enable_space_ci
from src.display.about import (
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    NUMERIC_INTERVALS,
    TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision,
    Format,
)
from src.envs import API, EVAL_RESULTS_PATH, RESULTS_REPO, REPO_ID, HF_TOKEN
from src.populate import get_leaderboard_df

# from src.tools.collections import update_collections
from src.tools.plots import (
    create_metric_plot_obj,
    create_plot_df,
    create_scores_df,
)

# Start ephemeral Spaces on PRs (see config in README.md)
# enable_space_ci()
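

# restart_space is invoked by the background scheduler at the bottom of this
# file; restarting the Space forces a fresh pull of the evaluation results.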
def restart_space():
    API.restart_space(repo_id=REPO_ID, token=HF_TOKEN)
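

# Download the evaluation results from the Hub and build the dataframes that
# back the leaderboard table and the plots; retried every 3 minutes on failure.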
def init_space():
    try:
        print(EVAL_RESULTS_PATH)
        snapshot_download(
            repo_id=RESULTS_REPO,
            local_dir=EVAL_RESULTS_PATH,
            repo_type="dataset",
            tqdm_class=None,
            etag_timeout=30,
            resume_download=True,
        )
    except Exception as e:
        print(e)
        sleep(180)  # wait 3 min, then retry the whole download
        return init_space()

    raw_data, original_df = get_leaderboard_df(
        results_path=EVAL_RESULTS_PATH, cols=COLS, benchmark_cols=BENCHMARK_COLS
    )
    # update_collections(original_df.copy())
    leaderboard_df = original_df.copy()
    plot_df = create_plot_df(create_scores_df(raw_data))
    return leaderboard_df, original_df, plot_df


leaderboard_df, original_df, plot_df = init_space()


# Searching and filtering
def update_table(
    hidden_df: pd.DataFrame,
    columns: list,
    # type_query: list,
    weight_precision_query: list,
    activation_precision_query: list,
    size_query: list,
    hide_models: list,
    format_query: list,
    query: str,
):
    filtered_df = filter_models(
        df=hidden_df,
        # type_query=type_query,
        size_query=size_query,
        weight_precision_query=weight_precision_query,
        activation_precision_query=activation_precision_query,
        hide_models=hide_models,
        format_query=format_query,
    )
    filtered_df = filter_queries(query, filtered_df)
    df = select_columns(filtered_df, columns)
    return df


def load_query(request: gr.Request):  # triggered only once at startup => read query parameter if it exists
    query = request.query_params.get("query") or ""
    # Return the value twice: once for the visible search bar, once for a hidden
    # component that triggers a reload only if the value has changed.
    return query, query
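

# Free-text search matches against the hidden `dummy` column rather than the
# rendered model column.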
def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
    return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]


def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    always_here_cols = [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
    dummy_col = [AutoEvalColumn.dummy.name]
    # AutoEvalColumn.model_type_symbol.name,
    # AutoEvalColumn.model.name,
    # We use COLS to maintain sorting
    filtered_df = df[always_here_cols + [c for c in COLS if c in df.columns and c in columns] + dummy_col]
    return filtered_df


def filter_queries(query: str, filtered_df: pd.DataFrame):
    """Added by Abishek"""
    final_df = []
    if query != "":
        # Semicolon-separated queries are OR-ed together
        queries = [q.strip() for q in query.split(";")]
        for _q in queries:
            if _q != "":
                temp_filtered_df = search_table(filtered_df, _q)
                if len(temp_filtered_df) > 0:
                    final_df.append(temp_filtered_df)
        if len(final_df) > 0:
            filtered_df = pd.concat(final_df)
            filtered_df = filtered_df.drop_duplicates(
                subset=[
                    AutoEvalColumn.model.name,
                    AutoEvalColumn.weight_precision.name,
                    AutoEvalColumn.activation_precision.name,
                    AutoEvalColumn.revision.name,
                ]
            )
    return filtered_df
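

# filter_models applies the sidebar widgets: the "hide" flags, weight/activation
# precision, serialization format, and parameter-count buckets.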
def filter_models(
    df: pd.DataFrame,
    # type_query: list,
    size_query: list,
    weight_precision_query: list,
    activation_precision_query: list,
    hide_models: list,
    format_query: list,
) -> pd.DataFrame:
    # Optionally hide private or deleted models
    if "Private or deleted" in hide_models:
        filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
    else:
        filtered_df = df

    if "Contains a merge/moerge" in hide_models:
        filtered_df = filtered_df[filtered_df[AutoEvalColumn.merged.name] == False]

    if "MoE" in hide_models:
        filtered_df = filtered_df[filtered_df[AutoEvalColumn.moe.name] == False]

    if "Flagged" in hide_models:
        filtered_df = filtered_df[filtered_df[AutoEvalColumn.flagged.name] == False]

    # type_emoji = [t[0] for t in type_query]
    # filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
    # Build the boolean masks from filtered_df so they stay aligned with the rows
    # that survived the hide_models filters above.
    filtered_df = filtered_df.loc[filtered_df[AutoEvalColumn.weight_precision.name].isin(weight_precision_query + ["None"])]
    filtered_df = filtered_df.loc[
        filtered_df[AutoEvalColumn.activation_precision.name].isin(activation_precision_query + ["None"])
    ]
    filtered_df = filtered_df.loc[filtered_df[AutoEvalColumn.format.name].isin(format_query)]

    numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
    params_column = pd.to_numeric(filtered_df[AutoEvalColumn.params.name], errors="coerce")
    mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
    filtered_df = filtered_df.loc[mask]

    return filtered_df
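

# Pre-filter the table shown at load time, using the same defaults as the widgets below.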
leaderboard_df = filter_models(
    df=leaderboard_df,
    # type_query=[t.to_str(" : ") for t in ModelType],
    size_query=list(NUMERIC_INTERVALS.keys()),
    weight_precision_query=[i.value.name for i in Precision],
    activation_precision_query=[i.value.name for i in Precision],
    hide_models=["Private or deleted", "Contains a merge/moerge", "Flagged"],  # deleted models, merges, flagged (MoEs stay visible)
    format_query=[i.value.name for i in Format],
)
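

# Build the Gradio interface.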
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        search_bar = gr.Textbox(
                            placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
                            show_label=False,
                            elem_id="search-bar",
                        )
                    with gr.Row():
                        shown_columns = gr.CheckboxGroup(
                            choices=[
                                c.name
                                for c in fields(AutoEvalColumn)
                                if not c.hidden and not c.never_hidden and not c.dummy
                            ],
                            value=[
                                c.name
                                for c in fields(AutoEvalColumn)
                                if c.displayed_by_default and not c.hidden and not c.never_hidden
                            ],
                            label="Select columns to show",
                            elem_id="column-select",
                            interactive=True,
                        )
                    with gr.Row():
                        hide_models = gr.CheckboxGroup(
                            label="Hide models",
                            choices=["Private or deleted", "Contains a merge/moerge", "Flagged"],  # , "MoE"
                            value=["Private or deleted", "Contains a merge/moerge", "Flagged"],
                            interactive=True,
                        )
                with gr.Column(min_width=320):
                    # with gr.Box(elem_id="box-filter"):
                    #     filter_columns_type = gr.CheckboxGroup(
                    #         label="Model types",
                    #         choices=[t.to_str() for t in ModelType],
                    #         value=[t.to_str() for t in ModelType],
                    #         interactive=True,
                    #         elem_id="filter-columns-type",
                    #     )
                    filter_columns_weight_precision = gr.CheckboxGroup(
                        label="Weight Precision",
                        choices=[i.value.name for i in Precision],
                        value=[i.value.name for i in Precision],
                        interactive=True,
                        elem_id="filter-columns-weight-precision",
                    )
                    filter_columns_activation_precision = gr.CheckboxGroup(
                        label="Activation Precision",
                        choices=[i.value.name for i in Precision],
                        value=[i.value.name for i in Precision],
                        interactive=True,
                        elem_id="filter-columns-activation-precision",
                    )
                    filter_columns_size = gr.CheckboxGroup(
                        label="Model sizes (in billions of parameters)",
                        choices=list(NUMERIC_INTERVALS.keys()),
                        value=list(NUMERIC_INTERVALS.keys()),
                        interactive=True,
                        elem_id="filter-columns-size",
                    )
                    filter_format = gr.CheckboxGroup(
                        label="Format",
                        choices=[i.value.name for i in Format],
                        value=[i.value.name for i in Format],
                        interactive=True,
                        elem_id="filter-format",
                    )

            leaderboard_table = gr.components.Dataframe(
                value=leaderboard_df[
                    [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
                    + shown_columns.value
                    + [AutoEvalColumn.dummy.name]
                ],
                headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
                datatype=TYPES,
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
                # column_widths=["2%", "33%"]
            )

            # Dummy leaderboard for handling the case when the user uses backspace key
            hidden_leaderboard_table_for_search = gr.components.Dataframe(
                value=original_df[COLS],
                headers=COLS,
                datatype=TYPES,
                visible=False,
            )
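
            # Re-run the full filter pipeline against the hidden, unfiltered table
            # whenever a search query is submitted.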
            search_bar.submit(
                update_table,
                [
                    hidden_leaderboard_table_for_search,
                    shown_columns,
                    # filter_columns_type,
                    filter_columns_weight_precision,
                    filter_columns_activation_precision,
                    filter_columns_size,
                    hide_models,
                    filter_format,
                    search_bar,
                ],
                leaderboard_table,
            )

            # Define a hidden component that will trigger a reload only if a query parameter has been set
            hidden_search_bar = gr.Textbox(value="", visible=False)
            hidden_search_bar.change(
                update_table,
                [
                    hidden_leaderboard_table_for_search,
                    shown_columns,
                    # filter_columns_type,
                    filter_columns_weight_precision,
                    filter_columns_activation_precision,
                    filter_columns_size,
                    hide_models,
                    filter_format,
                    search_bar,
                ],
                leaderboard_table,
            )
            # Check query parameter once at startup and update search bar + hidden component
            demo.load(load_query, inputs=[], outputs=[search_bar, hidden_search_bar])
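
            # Any change to a filter widget re-filters the hidden table and
            # refreshes the visible leaderboard.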
            for selector in [
                shown_columns,
                # filter_columns_type,
                filter_columns_weight_precision,
                filter_columns_activation_precision,
                filter_columns_size,
                hide_models,
                filter_format,
            ]:
                selector.change(
                    update_table,
                    [
                        hidden_leaderboard_table_for_search,
                        shown_columns,
                        # filter_columns_type,
                        filter_columns_weight_precision,
                        filter_columns_activation_precision,
                        filter_columns_size,
                        hide_models,
                        filter_format,
                        search_bar,
                    ],
                    leaderboard_table,
                    queue=True,
                )
with gr.TabItem("π Metrics through time", elem_id="llm-benchmark-tab-table", id=4): | |
with gr.Row(): | |
with gr.Column(): | |
chart = create_metric_plot_obj( | |
plot_df, | |
[AutoEvalColumn.average.name], | |
title="Average of Top Scores and Human Baseline Over Time (from last update)", | |
) | |
gr.Plot(value=chart, min_width=500) | |
with gr.Column(): | |
chart = create_metric_plot_obj( | |
plot_df, | |
BENCHMARK_COLS, | |
title="Top Scores and Human Baseline Over Time (from last update)", | |
) | |
gr.Plot(value=chart, min_width=500) | |

        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )
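

# Restart the Space on a schedule so init_space() re-downloads the latest
# results when the app comes back up.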
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)  # restarted every 30 min
scheduler.start()

demo.queue(default_concurrency_limit=40).launch()