# Source: Hugging Face Space "pkalkman/drlc-leaderboard" — commit 2d941f4
# ("re-adding clickable model and user", raw/history/blame view, 7.96 kB).
# Viewer chrome converted to this comment so the file parses as Python.
import os
import json
import requests
import datetime
import gradio as gr
import pandas as pd
from huggingface_hub import HfApi, hf_hub_download, snapshot_download
from huggingface_hub.repocard import metadata_load
from apscheduler.schedulers.background import BackgroundScheduler
from tqdm.contrib.concurrent import thread_map
from utils import make_clickable_model
from utils import make_clickable_user
# Dataset repo that stores one leaderboard CSV per RL environment.
DATASET_REPO_URL = "https://huggingface.co/datasets/pkalkman/drlc-leaderboard-data"
DATASET_REPO_ID = "pkalkman/drlc-leaderboard-data"
# Write token for pushing refreshed CSVs back to the dataset repo
# (None when unset; reads from the Hub still work anonymously).
HF_TOKEN = os.environ.get("HF_TOKEN")

block = gr.Blocks()
api = HfApi(token=HF_TOKEN)

# Read the environments from the JSON file
# (list of dicts; code below uses the "rl_env" and "rl_env_beautiful" keys).
with open('envs.json', 'r') as f:
    rl_envs = json.load(f)
def get_metadata(model_id):
    """Load the YAML metadata from a model repo's README.md.

    :param model_id: full Hub repo id, e.g. "user/model-name"
    :return: parsed metadata dict, or None when the README is missing (404)
    """
    try:
        readme_path = hf_hub_download(model_id, filename="README.md", etag_timeout=180)
        return metadata_load(readme_path)
    except requests.exceptions.HTTPError:
        # Model has no README.md on the Hub — nothing to parse.
        return None
def parse_metrics_accuracy(meta):
    """Pull the first reported metric value out of model-index metadata.

    :param meta: metadata dict loaded from a model card
    :return: the first metric's "value" entry, or None when there is no
             "model-index" section at all
    """
    if "model-index" not in meta:
        return None
    first_result = meta["model-index"][0]["results"][0]
    return first_result["metrics"][0]["value"]
def parse_rewards(accuracy):
    """Parse a model-card metric value into (mean_reward, std_reward).

    The value is typically a string like "200.5 +/- 12.3"; it may also be a
    bare number (mean only, std assumed 0) or None (no metric reported).
    We keep the worst-case episode downstream: callers rank on mean - std.

    :param accuracy: raw metric value from the model card, or None
    :return: (mean_reward, std_reward) floats; (-1000.0, -1000.0) when missing

    Fixes over the original: `accuracy != None` -> `is not None`; the two
    default constants were swapped at their use sites (harmless only because
    both were -1000); the `len(parsed) == 0` branch was unreachable since
    str.split always returns at least one element.
    """
    default_reward = -1000.0  # worst-case sentinel for models with no metric
    if accuracy is None:
        return default_reward, default_reward
    parsed = str(accuracy).split('+/-')
    mean_reward = float(parsed[0].strip())
    # Only a mean was reported: treat the std deviation as zero.
    std_reward = float(parsed[1].strip()) if len(parsed) > 1 else 0.0
    return mean_reward, std_reward
def get_model_ids(rl_env):
    """Return the repo ids of all Hub models tagged with *rl_env*.

    :param rl_env: environment tag used as the Hub model filter
    :return: list of model id strings
    """
    hub = HfApi()
    return [model.modelId for model in hub.list_models(filter=rl_env)]
# Parallelized version
def update_leaderboard_dataset_parallel(rl_env, path):
    """Rebuild the leaderboard CSV for one environment using a thread pool.

    :param rl_env: environment tag used to filter Hub models
    :param path: local directory where "<rl_env>.csv" is written
    :return: the ranked pandas DataFrame that was written to disk
    """
    # Get model ids associated with rl_env
    model_ids = get_model_ids(rl_env)

    def build_row(model_id):
        # Returns None for models whose README/metadata cannot be loaded.
        meta = get_metadata(model_id)
        if meta is None:
            return None
        mean_reward, std_reward = parse_rewards(parse_metrics_accuracy(meta))
        if pd.isna(mean_reward):
            mean_reward = 0
        if pd.isna(std_reward):
            std_reward = 0
        return {
            "User": model_id.split('/')[0],
            "Model": model_id,
            # Worst-case score used for ranking.
            "Results": mean_reward - std_reward,
            "Mean Reward": mean_reward,
            "Std Reward": std_reward,
        }

    rows = thread_map(build_row, model_ids, desc="Processing models")
    # Filter out None results (models with no metadata)
    rows = [row for row in rows if row is not None]
    ranked_dataframe = rank_dataframe(pd.DataFrame.from_records(rows))
    file_path = path + "/" + rl_env + ".csv"
    ranked_dataframe.to_csv(file_path, index=False)
    return ranked_dataframe
def update_leaderboard_dataset(rl_env, path):
    """Rebuild the leaderboard CSV for one environment (serial version).

    Sequential counterpart of update_leaderboard_dataset_parallel; produces
    the same CSV, one model at a time.

    :param rl_env: environment tag used to filter Hub models
    :param path: local directory where "<rl_env>.csv" is written
    :return: the ranked pandas DataFrame that was written to disk

    Fix: removed a dead triple-quoted code snippet that was evaluated as a
    no-op string expression on every loop iteration.
    """
    # Get model ids associated with rl_env
    model_ids = get_model_ids(rl_env)
    data = []
    for model_id in model_ids:
        meta = get_metadata(model_id)
        # Skip models whose README/metadata could not be loaded.
        if meta is None:
            continue
        accuracy = parse_metrics_accuracy(meta)
        mean_reward, std_reward = parse_rewards(accuracy)
        mean_reward = mean_reward if not pd.isna(mean_reward) else 0
        std_reward = std_reward if not pd.isna(std_reward) else 0
        data.append({
            "User": model_id.split('/')[0],  # owner namespace of the repo id
            "Model": model_id,
            "Results": mean_reward - std_reward,  # worst-case score for ranking
            "Mean Reward": mean_reward,
            "Std Reward": std_reward,
        })
    ranked_dataframe = rank_dataframe(pd.DataFrame.from_records(data))
    file_path = path + "/" + rl_env + ".csv"
    ranked_dataframe.to_csv(file_path, index=False)
    return ranked_dataframe
def get_data_no_html(rl_env, path) -> pd.DataFrame:
    """Load the leaderboard CSV for *rl_env* without clickable-link markup.

    :param rl_env: environment tag; "<rl_env>.csv" is read from *path*
    :param path: directory containing the leaderboard CSVs
    :return: data as a pandas DataFrame
    """
    return pd.read_csv(path + "/" + rl_env + ".csv")
def rank_dataframe(dataframe):
    """Sort descending by (Results, User, Model) and (re)number a Ranking column.

    :param dataframe: leaderboard rows with at least Results/User/Model columns
    :return: sorted copy with Ranking running 1..len(dataframe)
    """
    ordered = dataframe.sort_values(by=['Results', 'User', 'Model'], ascending=False)
    ranks = list(range(1, len(ordered) + 1))
    if 'Ranking' in ordered.columns:
        ordered['Ranking'] = ranks
    else:
        ordered.insert(0, 'Ranking', ranks)
    return ordered
def run_update_dataset():
    """Refresh every environment's leaderboard CSV and push them to the Hub.

    Intended to run periodically (e.g. from a background scheduler):
    snapshots the dataset repo, regenerates one CSV per entry in rl_envs,
    then uploads the whole folder in a single commit.

    Fix: use the DATASET_REPO_ID constant instead of repeating the repo id
    as a string literal.
    """
    path_ = download_leaderboard_dataset()
    for rl_env in rl_envs:
        update_leaderboard_dataset_parallel(rl_env["rl_env"], path_)
    api.upload_folder(
        folder_path=path_,
        repo_id=DATASET_REPO_ID,
        repo_type="dataset",
        commit_message="Update dataset")
def download_leaderboard_dataset():
    """Snapshot the leaderboard dataset repo locally.

    :return: local filesystem path of the downloaded snapshot
    """
    # Download the dataset from the Hugging Face Hub
    return snapshot_download(repo_id=DATASET_REPO_ID, repo_type="dataset")
def get_data(rl_env, path) -> pd.DataFrame:
    """
    Get data from rl_env CSV file, format model and user as clickable links,
    and return as DataFrame.
    """
    csv_path = os.path.join(path, rl_env + ".csv")
    frame = pd.read_csv(csv_path)
    # Replace plain ids with clickable HTML links, column-wise.
    frame["User"] = frame["User"].apply(make_clickable_user)
    frame["Model"] = frame["Model"].apply(make_clickable_model)
    return frame
def get_last_refresh_time(path) -> str:
    """
    Return the newest modification time among the CSV files in *path*,
    formatted as 'YYYY-MM-DD HH:MM:SS' (local time).
    """
    latest_time = max(
        os.path.getmtime(os.path.join(path, name))
        for name in os.listdir(path)
        if name.endswith('.csv')
    )
    return datetime.datetime.fromtimestamp(latest_time).strftime('%Y-%m-%d %H:%M:%S')
# Build the Gradio UI: a header with the refresh time, then one tab per RL
# environment showing its leaderboard table. Runs at import time.
with block:
    path_ = download_leaderboard_dataset()
    # Get the last refresh time
    last_refresh_time = get_last_refresh_time(path_)
    gr.Markdown(f"""
    # 🏆 Deep Reinforcement Learning Course Leaderboard 🏆
    Presenting the latest leaderboard from the Hugging Face Deep RL Course - refresh ({last_refresh_time}).
    """)
    for i in range(0, len(rl_envs)):
        rl_env = rl_envs[i]
        # NOTE(review): gr.TabItem is used without an enclosing gr.Tabs();
        # some Gradio versions auto-wrap, others require it — confirm against
        # the version this Space pins.
        with gr.TabItem(rl_env["rl_env_beautiful"]):
            with gr.Row():
                markdown = f"""
            # {rl_env['rl_env_beautiful']}
            ### Leaderboard for {rl_env['rl_env_beautiful']}
            """
                gr.Markdown(markdown)
            with gr.Row():
                # Display the data for this RL environment
                # (User/Model columns hold HTML links, hence markdown datatype).
                data = get_data(rl_env["rl_env"], path_)
                gr.Dataframe(
                    value=data,
                    headers=["Ranking 🏆", "User 🤗", "Model id 🤖", "Results", "Mean Reward", "Std Reward"],
                    datatype=["number", "markdown", "markdown", "number", "number", "number"],
                    row_count=(100, 'fixed')
                )

block.launch()