import os
import json

from apscheduler.schedulers.background import BackgroundScheduler
import gradio as gr
import pandas as pd
from huggingface_hub import HfApi, snapshot_download

from utils import make_clickable_model, make_clickable_user
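
# utils.py is not included in this file. A minimal sketch of what the two
# helpers might look like (an assumption based on how they are used below,
# not the actual implementation):
#
#   def make_clickable_user(user_id: str) -> str:
#       return f'<a target="_blank" href="https://huggingface.co/{user_id}">{user_id}</a>'
#
#   def make_clickable_model(model_id: str) -> str:
#       return f'<a target="_blank" href="https://huggingface.co/{model_id}">{model_id}</a>'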
DATASET_REPO_URL = "https://huggingface.co/datasets/pkalkman/drlc-leaderboard-data"
DATASET_REPO_ID = "pkalkman/drlc-leaderboard-data"
HF_TOKEN = os.environ.get("HF_TOKEN")

block = gr.Blocks()
api = HfApi(token=HF_TOKEN)

# Read the environments from the JSON file
with open('envs.json', 'r') as f:
    rl_envs = json.load(f)
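
# envs.json is expected to hold a list of environment descriptors. Judging by
# the keys accessed below ("rl_env" and "rl_env_beautiful"), an entry might
# look like this (illustrative values, not the actual file contents):
#
#   [
#       {"rl_env": "LunarLander-v2", "rl_env_beautiful": "Lunar Lander v2"},
#       ...
#   ]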

def download_leaderboard_dataset():
    # Download a snapshot of the dataset from the Hugging Face Hub and
    # return the path of the local copy
    path = snapshot_download(repo_id=DATASET_REPO_ID, repo_type="dataset")
    return path

def get_data(rl_env, path) -> pd.DataFrame:
    """
    Read the CSV file for rl_env, format the model and user columns as
    clickable links, and return the result as a DataFrame.
    """
    csv_path = os.path.join(path, rl_env + ".csv")
    data = pd.read_csv(csv_path)

    # Add clickable links for model and user
    for index, row in data.iterrows():
        data.at[index, "User"] = make_clickable_user(row["User"])
        data.at[index, "Model"] = make_clickable_model(row["Model"])

    return data
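
# Each <rl_env>.csv is assumed to contain at least "User" and "Model" columns
# (rewritten above as clickable links) plus the score columns listed in the
# Dataframe headers further down; the exact layout is not visible in this file.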

def get_last_refresh_time(path) -> str:
    """
    Get the last update time from the last_update.txt file in the dataset path.
    """
    update_file_path = os.path.join(path, 'last_update.txt')

    if os.path.exists(update_file_path):
        # Read the content of the file (the timestamp)
        with open(update_file_path, 'r') as f:
            last_refresh_time = f.read().strip()
        return last_refresh_time
    else:
        # Fallback: if the file is missing, return a default message
        return "Last update time not available"

def refresh_dataset():
    """Redownload the dataset snapshot and update the refresh timestamp."""
    global path_, last_refresh_time
    path_ = download_leaderboard_dataset()
    last_refresh_time = get_last_refresh_time(path_)


# Set up a background scheduler that refreshes the dataset every 15 minutes
scheduler = BackgroundScheduler()
scheduler.add_job(refresh_dataset, 'interval', minutes=15)
scheduler.start()
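
# Optional hardening (not in the original file): APScheduler recommends
# shutting a BackgroundScheduler down when the process exits, e.g.:
#
#   import atexit
#   atexit.register(lambda: scheduler.shutdown(wait=False))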

with block:
    path_ = download_leaderboard_dataset()

    # Get the last refresh time
    last_refresh_time = get_last_refresh_time(path_)

    gr.Markdown(f"""
    # 🏆 Deep Reinforcement Learning Course Leaderboard (Mirror) 🏆

    Presenting the latest leaderboard from the Hugging Face Deep RL Course - refreshed at {last_refresh_time}.
    """)

    # One tab per RL environment
    for rl_env in rl_envs:
        with gr.TabItem(rl_env["rl_env_beautiful"]):
            with gr.Row():
                markdown = f"""
                # {rl_env['rl_env_beautiful']}

                ### Leaderboard for {rl_env['rl_env_beautiful']}
                """
                gr.Markdown(markdown)

            with gr.Row():
                # Display the leaderboard data for this RL environment
                data = get_data(rl_env["rl_env"], path_)
                gr.Dataframe(
                    value=data,
                    headers=["Ranking 🏆", "User 🤗", "Model id 🤖", "Results", "Mean Reward", "Std Reward"],
                    datatype=["number", "markdown", "markdown", "number", "number", "number"],
                    row_count=(100, 'fixed'),
                )

block.launch()