import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
import numpy as np
import os
from src.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
INTRODUCTION_TEXT,
TITLE,
Tasks
)
from src.display.css_html_js import custom_css
from src.display.utils import (
EVAL_COLS,
EVAL_TYPES,
AutoEvalColumn,
ModelType,
fields,
WeightType,
Precision,
AREA_DEFINITIONS,
AREA_AVG_COLUMN_MAP,
PLUE_GROUP_AREAS
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
# --- TEST: load local data ---
TEST_DATA_PATH = "output/leaderboard_results_1.csv"
# TEST_DATA_PATH = "output/leaderboard_data_20250413_002339.csv"  # Adjust the path if necessary
LOAD_TEST_DATA = True  # Set to False to use data from the Hub
# -------
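# restart_space() is called periodically by the BackgroundScheduler configured at the
# bottom of this file, so the Space restarts itself at a fixed interval.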
def restart_space():
API.restart_space(repo_id=REPO_ID)
### Space initialisation
if not LOAD_TEST_DATA:
try:
print(EVAL_REQUESTS_PATH)
snapshot_download(
repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
)
    except Exception as e:  # Added exception handling
print(f"Erro ao baixar EVAL_REQUESTS: {e}")
        # Consider calling restart_space() here too, depending on severity
try:
print(EVAL_RESULTS_PATH)
snapshot_download(
repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
)
    except Exception as e:  # Added exception handling
print(f"Erro ao baixar EVAL_RESULTS: {e}")
        # Consider calling restart_space() here too
else:
print(f"Modo de teste: Carregando dados locais de {TEST_DATA_PATH}")
    EVAL_RESULTS_PATH = None  # The Hub results path is not needed in test mode
    EVAL_REQUESTS_PATH = "data/eval_requests"  # Keep or adjust if the queue is still read from the Hub
    # Make sure the requests-queue directory exists if it will be used
os.makedirs(EVAL_REQUESTS_PATH, exist_ok=True)
# Collect all defined columns
ALL_COLS = [c.name for c in fields(AutoEvalColumn)]
# Build the full leaderboard with the computed averages
try:
initial_df_for_test = None
if LOAD_TEST_DATA:
try:
initial_df_for_test = pd.read_csv(TEST_DATA_PATH)
            # Rename CSV columns to match the internal keys
rename_map = {}
            # Map tasks (CSV name -> internal Task enum name)
for task in Tasks:
                rename_map[task.value.col_name] = task.name  # e.g. {"Revalida": "REVALIDA"}
            # Map other columns (CSV name -> internal AutoEvalColumn name)
            # Check that the column exists in the CSV before adding it to the map
csv_columns = initial_df_for_test.columns
if "T" in csv_columns: rename_map["T"] = AutoEvalColumn.model_type_symbol.name
if "Modelo" in csv_columns: rename_map["Modelo"] = AutoEvalColumn.model.name
if "Tipo" in csv_columns: rename_map["Tipo"] = AutoEvalColumn.model_type.name
if "Arquitetura" in csv_columns: rename_map["Arquitetura"] = AutoEvalColumn.architecture.name
if "Tipo de Peso" in csv_columns: rename_map["Tipo de Peso"] = AutoEvalColumn.weight_type.name
if "Precisão" in csv_columns: rename_map["Precisão"] = AutoEvalColumn.precision.name
if "Licença" in csv_columns: rename_map["Licença"] = AutoEvalColumn.license.name
if "#Params (B)" in csv_columns: rename_map["#Params (B)"] = AutoEvalColumn.params.name
if "Hub Likes" in csv_columns: rename_map["Hub Likes"] = AutoEvalColumn.likes.name
if "Disponível no hub" in csv_columns: rename_map["Disponível no hub"] = AutoEvalColumn.still_on_hub.name
if "SHA do modelo" in csv_columns: rename_map["SHA do modelo"] = AutoEvalColumn.revision.name
            # Map the average columns (they should already have the correct names if computed, but map them just in case)
if "Média Geral" in csv_columns: rename_map["Média Geral"] = AutoEvalColumn.average.name
if "Área Médica" in csv_columns: rename_map["Área Médica"] = AutoEvalColumn.area_medica_avg.name
if "Área do Direito" in csv_columns: rename_map["Área do Direito"] = AutoEvalColumn.area_direito_avg.name
if "Provas Militares" in csv_columns: rename_map["Provas Militares"] = AutoEvalColumn.provas_militares_avg.name
if "Computação" in csv_columns: rename_map["Computação"] = AutoEvalColumn.computacao_avg.name
if "Discurso de Ódio" in csv_columns: rename_map["Discurso de Ódio"] = AutoEvalColumn.discurso_odio_avg.name
if "Economia e Contabilidade" in csv_columns: rename_map["Economia e Contabilidade"] = AutoEvalColumn.economia_contabilidade_avg.name
if "Semântica e Inferência" in csv_columns: rename_map["Semântica e Inferência"] = AutoEvalColumn.semantica_inferencia_avg.name
if "Multidisciplinar" in csv_columns: rename_map["Multidisciplinar"] = AutoEvalColumn.multidisciplinar_avg.name
            # Apply the rename
initial_df_for_test.rename(columns=rename_map, inplace=True)
            print(f"Colunas após renomeação: {initial_df_for_test.columns.tolist()}")  # Log to verify the result
print("DataFrame de teste carregado e colunas renomeadas.")
except FileNotFoundError:
print(f"Erro: Arquivo de teste não encontrado em {TEST_DATA_PATH}")
initial_df_for_test = pd.DataFrame()
except Exception as e:
print(f"Erro ao carregar ou processar o arquivo de teste: {e}")
initial_df_for_test = pd.DataFrame()
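    # In test mode, pass the pre-loaded CSV to get_leaderboard_df via initial_df;
    # otherwise let it read the result/request snapshots downloaded from the Hub.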
LEADERBOARD_DF = get_leaderboard_df(
results_path=EVAL_RESULTS_PATH if not LOAD_TEST_DATA else None,
requests_path=EVAL_REQUESTS_PATH if not LOAD_TEST_DATA else None,
cols=ALL_COLS,
initial_df=initial_df_for_test
)
except Exception as e:
print(f"Erro ao gerar o DataFrame do Leaderboard: {e}")
    LEADERBOARD_DF = pd.DataFrame()  # Create an empty DataFrame on error
# Get the evaluation-queue DataFrames (may need adjusting if LOAD_TEST_DATA=True)
# If the queue should also be mocked/read locally, adjust it here
(
finished_eval_queue_df,
running_eval_queue_df,
pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
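# The finished/running/pending queue DataFrames feed the accordions in the "Submit" tab below.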
def create_leaderboard_component(dataframe, displayed_cols, hidden_cols=None, cant_deselect_cols=None, title=None):
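    # Build a gradio_leaderboard.Leaderboard for `dataframe`:
    # - displayed_cols: columns selected by default
    # - hidden_cols: columns hidden by default
    # - cant_deselect_cols: columns the user cannot deselect
    # - title: used in the fallback Markdown shown when there is no data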
if dataframe is None or dataframe.empty:
return gr.Markdown(f"## {title or ''}\nNão há dados para exibir.")
if hidden_cols is None:
hidden_cols = []
if cant_deselect_cols is None:
cant_deselect_cols = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name]
    # Filter the dataframe to only the columns that will be displayed (or hidden / non-deselectable)
all_required_cols = set(displayed_cols) | set(hidden_cols) | set(cant_deselect_cols) | {AutoEvalColumn.model_type.name, AutoEvalColumn.precision.name, AutoEvalColumn.params.name, AutoEvalColumn.still_on_hub.name}
available_cols = [col for col in all_required_cols if col in dataframe.columns]
    filtered_df = dataframe[available_cols].copy()  # Use a copy to avoid SettingWithCopyWarning
    # Make sure the 'always visible' columns are present
for col in cant_deselect_cols:
if col not in filtered_df.columns:
            filtered_df[col] = np.nan  # Or some other appropriate default value
    # Build the filter list, inserting None for missing columns
raw_filter_columns=[
ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Tipos de Modelo") if AutoEvalColumn.model_type.name in filtered_df.columns else None,
ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precisão") if AutoEvalColumn.precision.name in filtered_df.columns else None,
ColumnFilter(
AutoEvalColumn.params.name,
type="slider",
min=0.01,
            max=max(150, filtered_df[AutoEvalColumn.params.name].max(skipna=True) if AutoEvalColumn.params.name in filtered_df.columns and not filtered_df[AutoEvalColumn.params.name].dropna().empty else 150),  # Adjust max dynamically and ignore NaN
label="Selecionar número de parâmetros (B)",
) if AutoEvalColumn.params.name in filtered_df.columns else None,
ColumnFilter(
AutoEvalColumn.still_on_hub.name, type="boolean", label="Deletado/incompleto", default=True
) if AutoEvalColumn.still_on_hub.name in filtered_df.columns else None,
]
    # Drop the Nones from the filter list
final_filter_columns = [f for f in raw_filter_columns if f is not None]
    # --- Reorder columns ---
    current_cols = filtered_df.columns.tolist()
    # Columns that should come first
    first_cols_desired = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name]
    # Make sure they exist in the current dataframe
    first_cols_actual = [c for c in first_cols_desired if c in current_cols]
    # Get the other columns
    other_cols = [c for c in current_cols if c not in first_cols_actual]
    # Prioritize the columns that should be displayed by default (excluding the first ones)
    other_displayed_cols = [c for c in displayed_cols if c in other_cols]
    # Get the remaining columns (hidden by default or not listed in displayed_cols) and sort them
    remaining_cols = sorted([c for c in other_cols if c not in other_displayed_cols])
    # Assemble the final order
    final_order = first_cols_actual + other_displayed_cols + remaining_cols
    # Apply the new order
    filtered_df = filtered_df[final_order]
    # --- End reorder columns ---
    # --- BEGIN MODIFICATION ---
    # print(f"--- Info for DataFrame passed to Leaderboard ({title}) ---")
    # filtered_df.info()
    # print("----------------------------------------------------------")
    # --- END MODIFICATION ---
return Leaderboard(
        value=filtered_df,  # Pass the reordered DataFrame
datatype=[c.type for c in fields(AutoEvalColumn) if c.name in filtered_df.columns],
select_columns=SelectColumns(
default_selection=displayed_cols,
cant_deselect=cant_deselect_cols,
label="Selecionar Benchmarks a Serem Exibidos:",
),
search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name] if AutoEvalColumn.license.name in filtered_df.columns else [AutoEvalColumn.model.name],
        hide_columns=[c for c in hidden_cols if c in filtered_df.columns],  # Hide only columns that actually exist
        filter_columns=final_filter_columns,  # Use the None-filtered list
bool_checkboxgroup_label="Ocultar modelos",
interactive=False,
)
# --- PLUE group definition ---
PLUE_GENERAL_VIEW_NAME = "Conhecimentos Gerais para Língua Portuguesa"
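# The general PLUE view shows one average column per area, while selecting a specific
# area shows that area's individual tasks (see get_plue_leaderboard_config further down).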
# -------
# Green theme definition
green_theme = gr.themes.Base(primary_hue=gr.themes.colors.green, secondary_hue=gr.themes.colors.blue, neutral_hue=gr.themes.colors.slate)
demo = gr.Blocks(css=custom_css, theme=green_theme)
with demo:
gr.HTML(TITLE)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # --- Tab order ---
tab_index = 0
        # 1. General benchmark
with gr.TabItem("📊 Resume", id=tab_index):
            # Columns to display: T, Model, overall average, PLUE, Energy, Reasoning
general_cols_to_display = [
                AutoEvalColumn.model_type_symbol.name,  # T
                AutoEvalColumn.model.name,  # Model
                AutoEvalColumn.average.name,  # Overall average
                AutoEvalColumn.plue_avg.name,  # PLUE average
                AutoEvalColumn.energy_avg.name,  # Energy average (displayed by default)
                AutoEvalColumn.reasoning_avg.name,  # Reasoning average (displayed by default)
]
general_cols_to_display = [col for col in general_cols_to_display if col in LEADERBOARD_DF.columns]
            # Columns to hide: tasks + individual averages of the PLUE group ONLY + detail columns
general_hidden_cols = [task.name for task in Tasks] + \
[AREA_AVG_COLUMN_MAP[area] for area in PLUE_GROUP_AREAS if area in AREA_AVG_COLUMN_MAP] + \
[
AutoEvalColumn.model_type.name,
AutoEvalColumn.architecture.name,
AutoEvalColumn.weight_type.name,
AutoEvalColumn.precision.name,
AutoEvalColumn.license.name,
AutoEvalColumn.params.name,
AutoEvalColumn.likes.name,
AutoEvalColumn.still_on_hub.name,
AutoEvalColumn.revision.name
]
create_leaderboard_component(
LEADERBOARD_DF,
displayed_cols=general_cols_to_display,
hidden_cols=[col for col in general_hidden_cols if col in LEADERBOARD_DF.columns],
title="Benchmark Geral"
)
tab_index += 1
        # 2. PLUE (now only the original areas + the 3 added ones)
with gr.TabItem("📚 PLUE", id=tab_index) as plue_tab:
            # --- PLUE tab internal logic (adjusted) ---
gr.Markdown("## Selecione a visualização PLUE:")
            # RECOMPUTE the choices/options from the updated PLUE_GROUP_AREAS (without Energy/Reasoning)
all_plue_options = [PLUE_GENERAL_VIEW_NAME] + sorted(PLUE_GROUP_AREAS)
plue_dropdown = gr.Dropdown(
choices=all_plue_options,
label="Visualização PLUE",
value=PLUE_GENERAL_VIEW_NAME
)
            # Helper function (internal logic unchanged, but it operates on the updated PLUE_GROUP_AREAS)
def get_plue_leaderboard_config(selected_option):
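                # Return the (displayed_cols, hidden_cols, title) configuration for the selected PLUE view.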
if selected_option == PLUE_GENERAL_VIEW_NAME:
displayed_cols = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name,] + [AREA_AVG_COLUMN_MAP[area] for area in PLUE_GROUP_AREAS if area in AREA_AVG_COLUMN_MAP]
hidden_cols = [task.name for task in Tasks] + [avg_col for area, avg_col in AREA_AVG_COLUMN_MAP.items() if area not in PLUE_GROUP_AREAS] + [AutoEvalColumn.average.name] + [AutoEvalColumn.plue_avg.name, AutoEvalColumn.model_type.name, AutoEvalColumn.architecture.name, AutoEvalColumn.weight_type.name, AutoEvalColumn.precision.name, AutoEvalColumn.license.name, AutoEvalColumn.params.name, AutoEvalColumn.likes.name, AutoEvalColumn.still_on_hub.name, AutoEvalColumn.revision.name]
title = PLUE_GENERAL_VIEW_NAME
else:
selected_area = selected_option
tasks_in_area = AREA_DEFINITIONS[selected_area]
displayed_cols = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name,] + [task.name for task in tasks_in_area]
hidden_cols = list(AREA_AVG_COLUMN_MAP.values()) + [task.name for task in Tasks if task not in tasks_in_area] + [AutoEvalColumn.plue_avg.name, AutoEvalColumn.average.name, AutoEvalColumn.model_type.name, AutoEvalColumn.architecture.name, AutoEvalColumn.weight_type.name, AutoEvalColumn.precision.name, AutoEvalColumn.license.name, AutoEvalColumn.params.name, AutoEvalColumn.likes.name, AutoEvalColumn.still_on_hub.name, AutoEvalColumn.revision.name]
title = selected_area
final_hidden_cols = [col for col in hidden_cols if col in LEADERBOARD_DF.columns]
return displayed_cols, final_hidden_cols, title
            # Pre-rendering (loop and containers UPDATED with the new all_plue_options)
plue_containers = {}
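            # One pre-rendered gr.Group per PLUE option; only the general view starts visible,
            # and the dropdown callback below switches visibility between them.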
for option in all_plue_options:
displayed_cols, hidden_cols, title = get_plue_leaderboard_config(option)
is_visible = (option == PLUE_GENERAL_VIEW_NAME)
with gr.Group(visible=is_visible) as plue_containers[option]:
create_leaderboard_component(LEADERBOARD_DF, displayed_cols=displayed_cols, hidden_cols=hidden_cols, title=title)
            # Callback function (loop UPDATED with the new all_plue_options)
def switch_plue_view(selected_option):
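                # Return one visibility update per container, in the same order as all_plue_options.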
update_list = []
for option in all_plue_options:
update_list.append(gr.update(visible=(option == selected_option)))
return update_list
            # Change event (outputs UPDATED with the new all_plue_options)
plue_dropdown.change(fn=switch_plue_view, inputs=[plue_dropdown], outputs=[plue_containers[option] for option in all_plue_options])
            # --- End PLUE logic ---
tab_index += 1
# 3. Energy
with gr.TabItem("⚡️ Energy", id=tab_index):
            # Show the leaderboard with the Energy data
energy_tasks = AREA_DEFINITIONS.get("Energy", [])
energy_cols = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + [t.name for t in energy_tasks]
energy_hidden = [t.name for t in Tasks if t not in energy_tasks] + \
list(AREA_AVG_COLUMN_MAP.values()) + \
[AutoEvalColumn.plue_avg.name, AutoEvalColumn.average.name] + \
                [c.name for c in fields(AutoEvalColumn) if c.name not in energy_cols and c.name != AutoEvalColumn.model_type_symbol.name and c.name != AutoEvalColumn.model.name]  # Detail columns
create_leaderboard_component(LEADERBOARD_DF, displayed_cols=energy_cols, hidden_cols=[c for c in energy_hidden if c in LEADERBOARD_DF.columns], title="Energy")
tab_index += 1
# 4. Reasoning
with gr.TabItem("🤔 Reasoning", id=tab_index):
            # Show the leaderboard with the Reasoning data
reasoning_tasks = AREA_DEFINITIONS.get("Reasoning", [])
reasoning_cols = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + [t.name for t in reasoning_tasks]
reasoning_hidden = [t.name for t in Tasks if t not in reasoning_tasks] + \
list(AREA_AVG_COLUMN_MAP.values()) + \
[AutoEvalColumn.plue_avg.name, AutoEvalColumn.average.name] + \
                [c.name for c in fields(AutoEvalColumn) if c.name not in reasoning_cols and c.name != AutoEvalColumn.model_type_symbol.name and c.name != AutoEvalColumn.model.name]  # Detail columns
create_leaderboard_component(LEADERBOARD_DF, displayed_cols=reasoning_cols, hidden_cols=[c for c in reasoning_hidden if c in LEADERBOARD_DF.columns], title="Reasoning")
tab_index += 1
# 5. Submit
with gr.TabItem("📤 Submit!", id=tab_index):
with gr.Column():
with gr.Row():
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
with gr.Column():
with gr.Accordion(
f"✅ Avaliações Concluídas ({len(finished_eval_queue_df)})",
open=False,
):
with gr.Row():
finished_eval_table = gr.components.Dataframe(
value=finished_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"🔄 Fila de Avaliação em Execução ({len(running_eval_queue_df)})",
open=False,
):
with gr.Row():
running_eval_table = gr.components.Dataframe(
value=running_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"⏳ Fila de Avaliação Pendente ({len(pending_eval_queue_df)})",
open=False,
):
with gr.Row():
pending_eval_table = gr.components.Dataframe(
value=pending_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Row():
gr.Markdown("# ✉️✨ Submeta seu modelo aqui!", elem_classes="markdown-text")
with gr.Row():
with gr.Column():
model_name_textbox = gr.Textbox(label="Nome do Modelo")
revision_name_textbox = gr.Textbox(label="Commit da Revisão", placeholder="main")
model_type = gr.Dropdown(
choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
label="Tipo do Modelo",
multiselect=False,
value=None,
interactive=True,
)
with gr.Column():
precision = gr.Dropdown(
choices=[i.value.name for i in Precision if i != Precision.Unknown],
label="Precisão",
multiselect=False,
value="float16",
interactive=True,
)
weight_type = gr.Dropdown(
choices=[i.value.name for i in WeightType],
label="Tipo dos Pesos",
multiselect=False,
value="Original",
interactive=True,
)
base_model_name_textbox = gr.Textbox(label="Modelo Base (para pesos delta ou adapter)")
submit_button = gr.Button("Submeter Avaliação")
submission_result = gr.Markdown()
submit_button.click(
add_new_eval,
[
model_name_textbox,
base_model_name_textbox,
revision_name_textbox,
precision,
weight_type,
model_type,
],
submission_result,
)
with gr.Row():
with gr.Accordion("📙 Citação", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
lines=20,
elem_id="citation-button",
show_copy_button=True,
)
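# Restart the Space every 1800 seconds (30 minutes).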
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch() |