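"""Gradio app for the GridNet-HD leaderboard Space.

It downloads evaluation results from the Hub, renders them with
gradio_leaderboard, and exposes a form for submitting new results.
"""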
import gradio as gr
from gradio_leaderboard import Leaderboard, SelectColumns
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from src.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
INTRODUCTION_TEXT,
BENCHMARKS_TEXT,
TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
COLS,
AutoEvalColumn,
fields
)
from src.envs import API, EVAL_RESULTS_PATH, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_leaderboard_df
from src.submission.submit import add_new_eval
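# Restart this Space via the Hub API; on startup it re-downloads the results dataset.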
def restart_space():
API.restart_space(repo_id=REPO_ID)
### Space initialisation
try:
snapshot_download(
repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
)
except Exception:
    # The snapshot download can fail transiently; restarting the Space retries it on boot.
    restart_space()
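# Initial dataframe for the leaderboard tab (also rebuilt on every page load below).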
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, COLS)
def init_leaderboard(dataframe):
    """Build the interactive Leaderboard component from the results dataframe."""
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
return Leaderboard(
value=dataframe,
datatype=[c.type for c in fields(AutoEvalColumn)],
select_columns=SelectColumns(
default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
label="Select Columns to Display:",
),
        search_columns=[AutoEvalColumn.result_name.name, AutoEvalColumn.eval_name.name],
hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
filter_columns=[],
bool_checkboxgroup_label="Hide models",
interactive=False,
)
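# OAuth greeting shown in the submission tab. gr.OAuthProfile is only populated
# when the visitor has signed in and the Space has OAuth enabled
# (`hf_oauth: true` in the README metadata).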
def greet_user(profile: gr.OAuthProfile | None):
if profile is None:
return "⚠️ You are not logged in."
return f"πŸ‘‹ Hello, **{profile.username}**!"
demo = gr.Blocks(css=custom_css)
with demo:
gr.HTML(TITLE)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("πŸ… GridNet-HD Benchmark", elem_id="benchmark-tab-table", id=0):
leaderboard = init_leaderboard(LEADERBOARD_DF)
            def reload_leaderboard():
                # Rebuild the dataframe from the local results so refreshes show the latest scores.
                return get_leaderboard_df(EVAL_RESULTS_PATH, COLS)
# Load on app start or page refresh
demo.load(
fn=reload_leaderboard,
inputs=[],
outputs=[leaderboard]
)
with gr.TabItem("πŸ“ About", elem_id="benchmark-tab-table", id=2):
gr.Markdown(BENCHMARKS_TEXT, elem_classes="markdown-text")
with gr.TabItem("πŸš€ Submit here! ", elem_id="benchmark-tab-table", id=3):
with gr.Column():
with gr.Row():
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
with gr.Row():
gr.Markdown("# βœ‰οΈβœ¨ Submit your result here!", elem_classes="markdown-text")
with gr.Row():
gr.LoginButton()
with gr.Row():
with gr.Column():
greeting = gr.Markdown()
demo.load(fn=greet_user, inputs=None, outputs=greeting)
# user_name_textbox = gr.Textbox(label="User name")
result_name_textbox = gr.Textbox(label="Result name")
npz_files_input = gr.File(label="Upload NPZ files", file_types=[".npz"], file_count="multiple")
                        remap = gr.Checkbox(label="Remap classes: check this box if you upload predictions with the original class IDs (evaluation is only performed on the mapped classes).", value=False)
submit_button = gr.Button("Submit Eval")
submission_result = gr.Markdown()
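                        # Wire the form to the evaluation entry point (src/submission/submit.py).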
submit_button.click(
add_new_eval,
[
# user_name_textbox,
result_name_textbox,
npz_files_input,
remap
],
submission_result,
)
with gr.Row():
with gr.Accordion("πŸ“™ Citation", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
lines=20,
elem_id="citation-button",
show_copy_button=True,
)
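### Scheduled restart
# Restart every 30 minutes (1800 s) so the Space re-downloads newly pushed results.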
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
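# Queue with a generous default concurrency limit so many visitors can interact at once.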
demo.queue(default_concurrency_limit=40).launch()