#λͺ¨λΈλͺ…κ³Ό url λ³€κ²½: "src/display/formatting.py"
#평가 ν•­λͺ©λͺ… λ³€κ²½: "src/about.py"
import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from src.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
INTRODUCTION_TEXT,
LLM_BENCHMARKS_TEXT,
TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
BENCHMARK_COLS,
COLS,
EVAL_COLS,
EVAL_TYPES,
AutoEvalColumn,
ModelType,
fields,
WeightType,
Precision
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
def debug_model_names(df, label="debug"):
    """
    Debugging helper: print any model-name-related columns of a dataframe.
    """
    print(f"===== {label} debug =====")
    if df is None or df.empty:
        print("The dataframe is empty.")
return
model_cols = [col for col in df.columns if 'model' in col.lower()]
if not model_cols:
print("λͺ¨λΈ κ΄€λ ¨ 열이 μ—†μŠ΅λ‹ˆλ‹€.")
return
for col in model_cols:
print(f"컬럼: {col}")
print(df[col].head())
print("\n")
print("==================\n")
def restart_space():
API.restart_space(repo_id=REPO_ID)
### Space initialisation
try:
print(EVAL_REQUESTS_PATH)
snapshot_download(
repo_id=QUEUE_REPO,
local_dir=EVAL_REQUESTS_PATH,
repo_type="dataset",
tqdm_class=None,
etag_timeout=30,
token=TOKEN
)
except Exception:
restart_space()
try:
print(EVAL_RESULTS_PATH)
snapshot_download(
repo_id=RESULTS_REPO,
local_dir=EVAL_RESULTS_PATH,
repo_type="dataset",
tqdm_class=None,
etag_timeout=30,
token=TOKEN
)
except Exception:
restart_space()
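# Build the leaderboard dataframe from the downloaded evaluation results and request files.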
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
# Debugging code (uncomment if needed)
# debug_model_names(LEADERBOARD_DF, "Leaderboard data")
# Mapping from the original benchmark names to the labels shown on the leaderboard
benchmark_mapping = {
"ANLI": "Korean Bar Exam (Lawyer)",
"LogiQA": "Senior Civil Service Examination(ꡭ가직 5κΈ‰)"
}
# Convert the model type column shown on the leaderboard (the column name is assumed to be AutoEvalColumn.model_type.name)
model_type_column = AutoEvalColumn.model_type.name
if model_type_column in LEADERBOARD_DF.columns:
LEADERBOARD_DF[model_type_column] = LEADERBOARD_DF[model_type_column].apply(lambda s: benchmark_mapping.get(s, s))
(
finished_eval_queue_df,
running_eval_queue_df,
pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
# Debugging code (uncomment if needed)
# debug_model_names(finished_eval_queue_df, "Finished evaluation queue")
# debug_model_names(running_eval_queue_df, "Running evaluation queue")
# debug_model_names(pending_eval_queue_df, "Pending evaluation queue")
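# Wrap the leaderboard dataframe in the gradio_leaderboard component, configuring
# column selection, search, hidden columns, and the model type / precision / size filters.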
def init_leaderboard(dataframe):
if dataframe is None or dataframe.empty:
raise ValueError("Leaderboard DataFrame is empty or None.")
return Leaderboard(
value=dataframe,
datatype=[c.type for c in fields(AutoEvalColumn)],
select_columns=SelectColumns(
default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
label="Select Columns to Display:",
),
search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
filter_columns=[
ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
ColumnFilter(
AutoEvalColumn.params.name,
type="slider",
min=0.01,
max=150,
label="Select the number of parameters (B)",
),
ColumnFilter(
AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
),
],
bool_checkboxgroup_label="Hide models",
interactive=False,
)
def get_model_type_display(enum_obj):
"""
    Takes a ModelType enum object and returns the mapped label if its name is in
    the conversion mapping; otherwise returns the default to_str(" : ") result.
"""
try:
key = enum_obj.name
except AttributeError:
key = enum_obj.to_str(" : ")
return benchmark_mapping.get(key, enum_obj.to_str(" : "))
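# Assemble the Gradio UI: leaderboard tab, About tab, submission tab, and citation block.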
demo = gr.Blocks(css=custom_css)
with demo:
gr.HTML(TITLE)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("πŸ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
leaderboard = init_leaderboard(LEADERBOARD_DF)
with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=2):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
with gr.TabItem("πŸš€ Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
with gr.Column():
with gr.Row():
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
with gr.Column():
with gr.Accordion(
f"βœ… Finished Evaluations ({len(finished_eval_queue_df)})",
open=False,
):
with gr.Row():
finished_eval_table = gr.components.Dataframe(
value=finished_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"πŸ”„ Running Evaluation Queue ({len(running_eval_queue_df)})",
open=False,
):
with gr.Row():
running_eval_table = gr.components.Dataframe(
value=running_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
open=False,
):
with gr.Row():
pending_eval_table = gr.components.Dataframe(
value=pending_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Row():
gr.Markdown("# βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
with gr.Row():
with gr.Column():
model_name_textbox = gr.Textbox(label="Model name")
revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
model_type = gr.Dropdown(
choices=[get_model_type_display(t) for t in ModelType if t != ModelType.Unknown],
label="Model type",
multiselect=False,
value=None,
interactive=True,
)
with gr.Column():
precision = gr.Dropdown(
choices=[i.value.name for i in Precision if i != Precision.Unknown],
label="Precision",
multiselect=False,
value="float16",
interactive=True,
)
weight_type = gr.Dropdown(
choices=[i.value.name for i in WeightType],
label="Weights type",
multiselect=False,
value="Original",
interactive=True,
)
base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
submit_button = gr.Button("Submit Eval")
submission_result = gr.Markdown()
submit_button.click(
add_new_eval,
[
model_name_textbox,
base_model_name_textbox,
revision_name_textbox,
precision,
weight_type,
model_type,
],
submission_result,
)
with gr.Row():
with gr.Accordion("πŸ“™ Citation", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
lines=20,
elem_id="citation-button",
show_copy_button=True,
)
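# Restart the Space every 30 minutes (1800 s), presumably so newly added evaluation results are picked up.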
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()