# Hugging Face Space status header (page-scrape residue): Spaces: Running
import gradio as gr | |
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns | |
import pandas as pd | |
from apscheduler.schedulers.background import BackgroundScheduler | |
from huggingface_hub import snapshot_download | |
import numpy as np | |
import os | |
from src.about import ( | |
CITATION_BUTTON_LABEL, | |
CITATION_BUTTON_TEXT, | |
EVALUATION_QUEUE_TEXT, | |
INTRODUCTION_TEXT, | |
TITLE, | |
Tasks | |
) | |
from src.display.css_html_js import custom_css | |
from src.display.utils import ( | |
EVAL_COLS, | |
EVAL_TYPES, | |
AutoEvalColumn, | |
ModelType, | |
fields, | |
WeightType, | |
Precision, | |
AREA_DEFINITIONS, | |
AREA_AVG_COLUMN_MAP | |
) | |
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN | |
from src.populate import get_evaluation_queue_df, get_leaderboard_df | |
from src.submission.submit import add_new_eval | |
# --- TEST MODE: load local data instead of pulling from the Hub ---
TEST_DATA_PATH = "output/leaderboard_data_20250413_002339.csv"  # Adjust the path if needed
LOAD_TEST_DATA = True  # Set to False to use Hub data
# -------
def restart_space():
    """Restart this Hugging Face Space (invoked periodically by the scheduler)."""
    API.restart_space(repo_id=REPO_ID)
### Space initialisation: download evaluation queue/results from the Hub,
### unless running in local test mode.
if not LOAD_TEST_DATA:
    try:
        print(EVAL_REQUESTS_PATH)
        snapshot_download(
            repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
        )
    except Exception as e:  # Log download failures instead of crashing the Space
        print(f"Erro ao baixar EVAL_REQUESTS: {e}")
        # Consider calling restart_space() here too, depending on severity
    try:
        print(EVAL_RESULTS_PATH)
        snapshot_download(
            repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
        )
    except Exception as e:  # Log download failures instead of crashing the Space
        print(f"Erro ao baixar EVAL_RESULTS: {e}")
        # Consider calling restart_space() here too
else:
    print(f"Modo de teste: Carregando dados locais de {TEST_DATA_PATH}")
    EVAL_RESULTS_PATH = None  # The Hub results path is not needed in test mode
    EVAL_REQUESTS_PATH = "data/eval_requests"  # Keep/adjust if the queue is still read from the Hub
    # Make sure the requests-queue directory exists if it is used
    os.makedirs(EVAL_REQUESTS_PATH, exist_ok=True)
# All leaderboard column names defined in AutoEvalColumn.
ALL_COLS = [c.name for c in fields(AutoEvalColumn)]

# Build the full leaderboard DataFrame (with the per-area averages computed).
try:
    initial_df_for_test = None
    if LOAD_TEST_DATA:
        try:
            initial_df_for_test = pd.read_csv(TEST_DATA_PATH)

            # Rename the CSV's display columns to the internal keys used downstream.
            # Task columns: CSV display name -> Task enum member name
            # (e.g. {"Revalida": "REVALIDA"}); mapped unconditionally —
            # pandas ignores rename keys that are absent from the frame.
            rename_map = {task.value.col_name: task.name for task in Tasks}

            # Metadata and per-area average columns: CSV display name ->
            # AutoEvalColumn field name. The averages should already carry the
            # right names if they were computed, but map them defensively.
            csv_to_internal = {
                "T": AutoEvalColumn.model_type_symbol.name,
                "Modelo": AutoEvalColumn.model.name,
                "Tipo": AutoEvalColumn.model_type.name,
                "Arquitetura": AutoEvalColumn.architecture.name,
                "Tipo de Peso": AutoEvalColumn.weight_type.name,
                "Precisão": AutoEvalColumn.precision.name,
                "Licença": AutoEvalColumn.license.name,
                "#Params (B)": AutoEvalColumn.params.name,
                "Hub Likes": AutoEvalColumn.likes.name,
                "Disponível no hub": AutoEvalColumn.still_on_hub.name,
                "SHA do modelo": AutoEvalColumn.revision.name,
                "Média Geral": AutoEvalColumn.average.name,
                "Área Médica": AutoEvalColumn.area_medica_avg.name,
                "Área do Direito": AutoEvalColumn.area_direito_avg.name,
                "Provas Militares": AutoEvalColumn.provas_militares_avg.name,
                "Computação": AutoEvalColumn.computacao_avg.name,
                "Discurso de Ódio": AutoEvalColumn.discurso_odio_avg.name,
                "Economia e Contabilidade": AutoEvalColumn.economia_contabilidade_avg.name,
                "Semântica e Inferência": AutoEvalColumn.semantica_inferencia_avg.name,
                "Multidisciplinar": AutoEvalColumn.multidisciplinar_avg.name,
            }
            # Only include entries whose source column actually exists in the CSV.
            csv_columns = set(initial_df_for_test.columns)
            rename_map.update(
                {csv_name: internal for csv_name, internal in csv_to_internal.items() if csv_name in csv_columns}
            )

            initial_df_for_test.rename(columns=rename_map, inplace=True)
            print(f"Colunas após renomeação: {initial_df_for_test.columns.tolist()}")  # Log to verify the mapping
            print("DataFrame de teste carregado e colunas renomeadas.")
        except FileNotFoundError:
            print(f"Erro: Arquivo de teste não encontrado em {TEST_DATA_PATH}")
            initial_df_for_test = pd.DataFrame()
        except Exception as e:
            print(f"Erro ao carregar ou processar o arquivo de teste: {e}")
            initial_df_for_test = pd.DataFrame()

    LEADERBOARD_DF = get_leaderboard_df(
        results_path=EVAL_RESULTS_PATH if not LOAD_TEST_DATA else None,
        requests_path=EVAL_REQUESTS_PATH if not LOAD_TEST_DATA else None,
        cols=ALL_COLS,
        initial_df=initial_df_for_test,
    )
except Exception as e:
    print(f"Erro ao gerar o DataFrame do Leaderboard: {e}")
    LEADERBOARD_DF = pd.DataFrame()  # Fall back to an empty DataFrame on error

# Evaluation-queue DataFrames (adjust here if the queue should also be
# mocked/read locally when LOAD_TEST_DATA is True).
(
    finished_eval_queue_df,
    running_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
def create_leaderboard_component(dataframe, displayed_cols, hidden_cols=None, cant_deselect_cols=None, title=None):
    """Build the Leaderboard component for one tab.

    Args:
        dataframe: Full leaderboard DataFrame (may be None or empty).
        displayed_cols: Column names selected (visible) by default.
        hidden_cols: Column names hidden by default but still selectable.
            Defaults to an empty list.
        cant_deselect_cols: Column names the user cannot deselect. Defaults to
            the model-type symbol and model-name columns.
        title: Tab title, used only in the empty-data fallback message.

    Returns:
        A gradio_leaderboard.Leaderboard, or a gr.Markdown placeholder when
        there is no data to show.
    """
    if dataframe is None or dataframe.empty:
        return gr.Markdown(f"## {title or ''}\nNão há dados para exibir.")

    if hidden_cols is None:
        hidden_cols = []
    if cant_deselect_cols is None:
        cant_deselect_cols = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name]

    # Restrict the frame to the columns this tab needs: displayed, hidden,
    # pinned, plus the columns the filter widgets below operate on.
    all_required_cols = (
        set(displayed_cols)
        | set(hidden_cols)
        | set(cant_deselect_cols)
        | {
            AutoEvalColumn.model_type.name,
            AutoEvalColumn.precision.name,
            AutoEvalColumn.params.name,
            AutoEvalColumn.still_on_hub.name,
        }
    )
    available_cols = [col for col in all_required_cols if col in dataframe.columns]
    filtered_df = dataframe[available_cols].copy()  # copy avoids SettingWithCopyWarning

    # Make sure the always-visible columns exist even if missing from the data.
    for col in cant_deselect_cols:
        if col not in filtered_df.columns:
            filtered_df[col] = np.nan  # or another appropriate default

    # Build filter widgets only for columns actually present in the frame.
    final_filter_columns = []
    if AutoEvalColumn.model_type.name in filtered_df.columns:
        final_filter_columns.append(
            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Tipos de Modelo")
        )
    if AutoEvalColumn.precision.name in filtered_df.columns:
        final_filter_columns.append(
            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precisão")
        )
    if AutoEvalColumn.params.name in filtered_df.columns:
        # Slider upper bound follows the data (ignoring NaNs), never below 150.
        params_series = filtered_df[AutoEvalColumn.params.name].dropna()
        max_params = max(150, params_series.max()) if not params_series.empty else 150
        final_filter_columns.append(
            ColumnFilter(
                AutoEvalColumn.params.name,
                type="slider",
                min=0.01,
                max=max_params,
                label="Selecionar número de parâmetros (B)",
            )
        )
    if AutoEvalColumn.still_on_hub.name in filtered_df.columns:
        final_filter_columns.append(
            ColumnFilter(AutoEvalColumn.still_on_hub.name, type="boolean", label="Deletado/incompleto", default=True)
        )

    # --- Reorder columns: pinned identity columns first, then the columns
    # displayed by default, then the remaining (hidden) columns sorted by name.
    current_cols = filtered_df.columns.tolist()
    first_cols_desired = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name]
    first_cols_actual = [c for c in first_cols_desired if c in current_cols]
    other_cols = [c for c in current_cols if c not in first_cols_actual]
    other_displayed_cols = [c for c in displayed_cols if c in other_cols]
    remaining_cols = sorted(c for c in other_cols if c not in other_displayed_cols)
    filtered_df = filtered_df[first_cols_actual + other_displayed_cols + remaining_cols]

    return Leaderboard(
        value=filtered_df,  # reordered DataFrame
        # Restrict datatypes to the columns actually present.
        datatype=[c.type for c in fields(AutoEvalColumn) if c.name in filtered_df.columns],
        select_columns=SelectColumns(
            default_selection=displayed_cols,
            cant_deselect=cant_deselect_cols,
            label="Selecionar Colunas para Exibir:",
        ),
        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name]
        if AutoEvalColumn.license.name in filtered_df.columns
        else [AutoEvalColumn.model.name],
        hide_columns=[c for c in hidden_cols if c in filtered_df.columns],  # hide only existing columns
        filter_columns=final_filter_columns,
        bool_checkboxgroup_label="Ocultar modelos",
        interactive=False,
    )
# Green theme definition
# green_theme = gr.themes.Monochrome(primary_hue="green", secondary_hue="green", neutral_hue="slate")  # Alternative
green_theme = gr.themes.Base(primary_hue=gr.themes.colors.green, secondary_hue=gr.themes.colors.blue, neutral_hue=gr.themes.colors.slate)

demo = gr.Blocks(css=custom_css, theme=green_theme)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # Tab 0: overall benchmark — model identity, global average, per-area averages.
        with gr.TabItem("📊 Benchmark Geral", id=0):
            general_cols_to_display = [
                AutoEvalColumn.model_type_symbol.name,
                AutoEvalColumn.model.name,
                AutoEvalColumn.average.name,
            ] + list(AREA_AVG_COLUMN_MAP.values())
            # Individual task scores and model metadata stay hidden by default.
            general_hidden_cols = [task.name for task in Tasks] + [
                AutoEvalColumn.model_type.name,
                AutoEvalColumn.architecture.name,
                AutoEvalColumn.weight_type.name,
                AutoEvalColumn.precision.name,
                AutoEvalColumn.license.name,
                AutoEvalColumn.params.name,
                AutoEvalColumn.likes.name,
                AutoEvalColumn.still_on_hub.name,
                AutoEvalColumn.revision.name
            ]
            create_leaderboard_component(
                LEADERBOARD_DF,
                displayed_cols=general_cols_to_display,
                hidden_cols=general_hidden_cols,
                title="Benchmark Geral"
            )

        # One tab per knowledge area, showing only that area's task columns.
        tab_index = 1
        for area_name, tasks_in_area in AREA_DEFINITIONS.items():
            with gr.TabItem(f"🎓 {area_name}", id=tab_index):
                area_cols_to_display = [
                    AutoEvalColumn.model_type_symbol.name,
                    AutoEvalColumn.model.name,
                ] + [task.name for task in tasks_in_area]
                # Hide area averages, tasks from other areas, and model metadata.
                area_hidden_cols = list(AREA_AVG_COLUMN_MAP.values()) + [
                    task.name for task in Tasks if task not in tasks_in_area
                ] + [
                    AutoEvalColumn.model_type.name,
                    AutoEvalColumn.architecture.name,
                    AutoEvalColumn.weight_type.name,
                    AutoEvalColumn.precision.name,
                    AutoEvalColumn.license.name,
                    AutoEvalColumn.params.name,
                    AutoEvalColumn.likes.name,
                    AutoEvalColumn.still_on_hub.name,
                    AutoEvalColumn.revision.name
                ]
                create_leaderboard_component(
                    LEADERBOARD_DF,
                    # Keep the global average selectable on area tabs.
                    displayed_cols=area_cols_to_display,
                    hidden_cols=[col for col in area_hidden_cols if col != AutoEvalColumn.average.name],
                    title=area_name
                )
            tab_index += 1

        # Last tab: model submission form plus the evaluation queues.
        with gr.TabItem("🚀 Submit aqui!", id=tab_index):
            with gr.Column():
                with gr.Row():
                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
                with gr.Column():
                    with gr.Accordion(
                        f"✅ Avaliações Concluídas ({len(finished_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            finished_eval_table = gr.components.Dataframe(
                                value=finished_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )
                    with gr.Accordion(
                        f"🔄 Fila de Avaliação em Execução ({len(running_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            running_eval_table = gr.components.Dataframe(
                                value=running_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )
                    with gr.Accordion(
                        f"⏳ Fila de Avaliação Pendente ({len(pending_eval_queue_df)})",
                        open=False,
                    ):
                        with gr.Row():
                            pending_eval_table = gr.components.Dataframe(
                                value=pending_eval_queue_df,
                                headers=EVAL_COLS,
                                datatype=EVAL_TYPES,
                                row_count=5,
                            )
                with gr.Row():
                    gr.Markdown("# ✉️✨ Submeta seu modelo aqui!", elem_classes="markdown-text")
                with gr.Row():
                    with gr.Column():
                        model_name_textbox = gr.Textbox(label="Nome do Modelo")
                        revision_name_textbox = gr.Textbox(label="Commit da Revisão", placeholder="main")
                        model_type = gr.Dropdown(
                            choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
                            label="Tipo do Modelo",
                            multiselect=False,
                            value=None,
                            interactive=True,
                        )
                    with gr.Column():
                        precision = gr.Dropdown(
                            choices=[i.value.name for i in Precision if i != Precision.Unknown],
                            label="Precisão",
                            multiselect=False,
                            value="float16",
                            interactive=True,
                        )
                        weight_type = gr.Dropdown(
                            choices=[i.value.name for i in WeightType],
                            label="Tipo dos Pesos",
                            multiselect=False,
                            value="Original",
                            interactive=True,
                        )
                        base_model_name_textbox = gr.Textbox(label="Modelo Base (para pesos delta ou adapter)")
                submit_button = gr.Button("Submeter Avaliação")
                submission_result = gr.Markdown()
                # Wire the form to the submission handler; the result Markdown
                # shows success/failure feedback.
                submit_button.click(
                    add_new_eval,
                    [
                        model_name_textbox,
                        base_model_name_textbox,
                        revision_name_textbox,
                        precision,
                        weight_type,
                        model_type,
                    ],
                    submission_result,
                )
    with gr.Row():
        with gr.Accordion("📙 Citação", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )
# Restart the Space every 30 minutes so fresh data is re-downloaded.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()

demo.queue(default_concurrency_limit=40).launch()