leaderboard v1

Files changed:
- src/envs.py      +26 -25
- src/populate.py  +90 -58
src/envs.py
CHANGED
@@ -1,25 +1,26 @@
 import os
 
 from huggingface_hub import HfApi
 
 # Info to change for your repository
 # ----------------------------------
 TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
 
-OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
+# OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
+OWNER = "kluster-ai"
+# ----------------------------------
+
+REPO_ID = f"{OWNER}/LLM-Hallucination-Detection-Leaderboard"
+QUEUE_REPO = f"{OWNER}/requests"
+RESULTS_REPO = f"{OWNER}/results"
+
+# If you setup a cache later, just change HF_HOME
+CACHE_PATH=os.getenv("HF_HOME", ".")
+
+# Local caches
+EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
+EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
+EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
+EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
+
+API = HfApi(token=TOKEN)
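The constants above are only declarations; the code that reads them is not part of this commit. As a hedged sketch of how such a config is commonly consumed in leaderboard Spaces built from the demo-leaderboard template, the app usually mirrors the requests and results datasets into the local cache paths at startup. The calls below are an illustration under that assumption, not code from this repository:

# Sketch only, not part of this commit: mirror the two datasets into the
# local caches so populate.py can read them from disk. Assumes the repos
# named in envs.py exist and the token has read access.
from huggingface_hub import snapshot_download

from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, RESULTS_REPO, TOKEN

snapshot_download(
    repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", token=TOKEN
)
snapshot_download(
    repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", token=TOKEN
)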
src/populate.py
CHANGED
@@ -1,58 +1,90 @@
 import json
 import os
 
 import pandas as pd
 
 from src.display.formatting import has_no_nan_values, make_clickable_model
 from src.display.utils import AutoEvalColumn, EvalQueueColumn
 from src.leaderboard.read_evals import get_raw_eval_results
 
 
-def get_leaderboard_df(results_path
+def get_leaderboard_df(results_path):
+    df = pd.read_csv(results_path)
+    # numeric formatting
+    df["ha_rag_rate"] = df["ha_rag_rate"].round(2)
+    df["ha_non_rag_rate"] = df["ha_non_rag_rate"].round(2)
+
+    # --- map to pretty headers just before returning ---
+    pretty = {
+        "Models": "Models",
+        "ha_rag_rate": "RAG Hallucination Rate (%)",
+        "ha_non_rag_rate": "Non-RAG Hallucination Rate (%)",
+    }
+    df = df.rename(columns=pretty)  # this is what the UI will use
+    # ----------- Average column & ranking ---------------------------------------------
+    df["Average Hallucination Rate (%)"] = df[
+        ["RAG Hallucination Rate (%)", "Non-RAG Hallucination Rate (%)"]
+    ].mean(axis=1).round(2)
+
+    # sort so *lower* average = better (true leaderboard style)
+    df = df.sort_values("Average Hallucination Rate (%)", ascending=True).reset_index(drop=True)
+
+    # Rank & medal
+    medal_map = {1: "🥇", 2: "🥈", 3: "🥉"}
+
+    def medal_html(rank):
+        m = medal_map.get(rank)
+        return f'<span style="font-size:2.0rem;">{m}</span>' if m else rank
+
+    df["Rank"] = df.index + 1
+    df["Rank"] = df["Rank"].apply(medal_html)
+
+
+    # ----------- column ordering ------------------------------------------------------
+    df = df[[
+        "Rank",  # pretty column the user sees
+        "Models",
+        "Average Hallucination Rate (%)",
+        "RAG Hallucination Rate (%)",
+        "Non-RAG Hallucination Rate (%)",
+    ]]
+
+    return df
+
+
+
+
+def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
+    """Creates the different dataframes for the evaluation queue requests"""
+    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
+    all_evals = []
+
+    for entry in entries:
+        if ".json" in entry:
+            file_path = os.path.join(save_path, entry)
+            with open(file_path) as fp:
+                data = json.load(fp)
+
+            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+            data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+
+            all_evals.append(data)
+        elif ".md" not in entry:
+            # this is a folder
+            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
+            for sub_entry in sub_entries:
+                file_path = os.path.join(save_path, entry, sub_entry)
+                with open(file_path) as fp:
+                    data = json.load(fp)
+
+                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+                all_evals.append(data)
+
+    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
+    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
+    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
+    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
+    df_running = pd.DataFrame.from_records(running_list, columns=cols)
+    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
+    return df_finished[cols], df_running[cols], df_pending[cols]
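Taken together, the new populate.py expects a flat results CSV carrying a "Models" column plus the two raw rate columns, and a queue directory of per-entry JSON request files whose "status" field routes them into the pending, running, or finished queues. Below is a hedged usage sketch: the file names, toy values, and the cols list are illustrative assumptions, and it presumes src.populate is importable from the Space root and that EvalQueueColumn's model/revision fields are named "model" and "revision"; none of it is part of this commit.

# Sketch only, not part of this commit: exercise both helpers with toy inputs.
import json
import os

import pandas as pd

from src.populate import get_evaluation_queue_df, get_leaderboard_df

# Toy results file with the raw columns get_leaderboard_df expects.
pd.DataFrame(
    {
        "Models": ["model-a", "model-b"],
        "ha_rag_rate": [12.3, 7.9],
        "ha_non_rag_rate": [20.0, 15.5],
    }
).to_csv("toy_results.csv", index=False)

leaderboard = get_leaderboard_df("toy_results.csv")
print(leaderboard)  # model-b sorts first (lower average hallucination rate) and gets 🥇 in Rank

# Toy request file; "status" decides which of the three queues it lands in.
os.makedirs("toy_queue", exist_ok=True)
with open(os.path.join("toy_queue", "model-a.json"), "w") as fp:
    json.dump({"model": "org/model-a", "revision": "main", "status": "PENDING"}, fp)

cols = ["model", "revision", "status"]  # assumed queue column names
finished_df, running_df, pending_df = get_evaluation_queue_df("toy_queue", cols)
print(len(pending_df))  # 1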