Abhishek Thakur
committed on
Commit · 815b0dc
1 Parent(s): bed3965

refactor

Browse files
- Dockerfile +1 -1
- competitions/__init__.py +11 -1
- competitions/app.py +0 -160
- competitions/cli/__init__.py +13 -0
- competitions/cli/competitions.py +42 -0
- competitions/cli/run.py +27 -0
- competitions/competitions.py +61 -0
- competitions/errors.py +14 -0
- competitions/{config.py → info.py} +10 -21
- competitions/leaderboard.py +99 -0
- competitions/submissions.py +310 -0
- competitions/text.py +11 -0
- competitions/utils.py +7 -262
- requirements.txt +1 -1
- setup.py +1 -0
Dockerfile
CHANGED
@@ -34,4 +34,4 @@ SHELL ["conda", "run","--no-capture-output", "-p","/app/env", "/bin/bash", "-c"]
 COPY --chown=1000:1000 . /app/
 RUN python setup.py install
 
-CMD
+CMD competitions run
competitions/__init__.py
CHANGED
@@ -1,4 +1,14 @@
-
+import os
+
+from .info import CompetitionInfo
 
 
 __version__ = "0.0.1"
+
+MOONLANDING_URL = os.getenv("MOONLANDING_URL")
+COMPETITION_ID = os.getenv("COMPETITION_ID")
+AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")
+AUTOTRAIN_TOKEN = os.getenv("AUTOTRAIN_TOKEN")
+AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
+
+competition_info = CompetitionInfo(competition_id=COMPETITION_ID, autotrain_token=AUTOTRAIN_TOKEN)
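Note: the package now reads its configuration from environment variables at import time, so they must be set before `import competitions`. A minimal sketch, with placeholder values that are not part of this commit:

import os

os.environ["COMPETITION_ID"] = "my-org/my-competition"   # placeholder dataset repo id
os.environ["AUTOTRAIN_TOKEN"] = "hf_XXX"                  # placeholder token
os.environ["MOONLANDING_URL"] = "https://huggingface.co"  # placeholder endpoint

import competitions  # constructs competition_info from the variables above
print(competitions.__version__)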
competitions/app.py
DELETED
@@ -1,160 +0,0 @@
-import uuid
-from datetime import datetime
-from functools import partial
-
-import config
-import gradio as gr
-import pandas as pd
-import utils
-from huggingface_hub import HfApi
-from huggingface_hub.utils._errors import EntryNotFoundError
-
-
-SUBMISSION_TEXT = f"""You can make upto {config.competition_info.submission_limit} submissions per day.
-The test data has been divided into public and private splits.
-Your score on the public split will be shown on the leaderboard.
-Your final score will be based on your private split performance.
-The final rankings will be based on the private split performance.
-"""
-
-SUBMISSION_ERROR = """Submission is not in a proper format.
-Please check evaluation instructions for more details."""
-
-SUBMISSION_LIMIT_TEXT = f"""You can select upto {config.competition_info.selection_limit}
-submissions for private leaderboard."""
-
-
-def get_subs(user_info, private=False):
-    # get user submissions
-    user_id = user_info["id"]
-    try:
-        user_submissions = utils.fetch_submissions(user_id)
-    except EntryNotFoundError:
-        return_value = "No submissions found"
-        return [gr.Markdown.update(visible=True, value=return_value), gr.DataFrame.update(visible=False)]
-    submissions_df = pd.DataFrame(user_submissions)
-    if not private:
-        submissions_df = submissions_df.drop(columns=["private_score"])
-        submissions_df = submissions_df[
-            ["date", "submission_id", "public_score", "submission_comment", "selected", "status"]
-        ]
-    else:
-        submissions_df = submissions_df[
-            ["date", "submission_id", "public_score", "private_score", "submission_comment", "selected", "status"]
-        ]
-    return [gr.Markdown.update(visible=False), gr.DataFrame.update(visible=True, value=submissions_df)]
-
-
-def my_submissions(user_token):
-    if user_token != "":
-        user_info = utils.user_authentication(token=user_token)
-        if "error" in user_info:
-            return_value = "Invalid token"
-            return [gr.Markdown.update(visible=True, value=return_value), gr.DataFrame.update(visible=False)]
-
-        if user_info["emailVerified"] is False:
-            return_value = "Please verify your email on Hugging Face Hub"
-            return [gr.Markdown.update(visible=True, value=return_value), gr.DataFrame.update(visible=False)]
-
-        current_date_time = datetime.now()
-        private = False
-        if current_date_time >= config.competition_info.end_date:
-            private = True
-        subs = get_subs(user_info, private=private)
-        return subs
-    return [gr.Markdown.update(visible=True, value="Invalid token"), gr.DataFrame.update(visible=False)]
-
-
-def new_submission(user_token, uploaded_file):
-    if uploaded_file is not None and user_token != "":
-        # verify token
-        user_info = utils.user_authentication(token=user_token)
-        if "error" in user_info:
-            return "Invalid token"
-
-        if user_info["emailVerified"] is False:
-            return "Please verify your email on Hugging Face Hub"
-
-        # check if user can submit to the competition
-        if utils.check_user_submission_limit(user_info) is False:
-            return "You have reached your submission limit for today"
-        with open(uploaded_file.name, "rb") as f:
-            bytes_data = f.read()
-        # verify file is valid
-        if not utils.verify_submission(bytes_data):
-            return "Invalid submission"
-            # gr.Markdown(SUBMISSION_ERROR)
-            # write a horizontal html line
-            # gr.Markdown("<hr/>", unsafe_allow_html=True)
-        else:
-            user_id = user_info["id"]
-            submission_id = str(uuid.uuid4())
-            file_extension = uploaded_file.orig_name.split(".")[-1]
-            # upload file to hf hub
-            api = HfApi()
-            api.upload_file(
-                path_or_fileobj=bytes_data,
-                path_in_repo=f"submissions/{user_id}-{submission_id}.{file_extension}",
-                repo_id=config.COMPETITION_ID,
-                repo_type="dataset",
-                token=config.AUTOTRAIN_TOKEN,
-            )
-            # update submission limit
-            submissions_made = utils.increment_submissions(
-                user_id=user_id,
-                submission_id=submission_id,
-                submission_comment="",
-            )
-            # schedule submission for evaluation
-            utils.create_project(
-                project_id=f"{submission_id}",
-                dataset=f"{config.COMPETITION_ID}",
-                submission_dataset=user_id,
-                model="generic_competition",
-            )
-            return_text = f"Submission scheduled for evaluation. You have {config.competition_info.submission_limit - submissions_made} submissions left for today."
-            return return_text
-    return "Error"
-
-
-with gr.Blocks() as demo:
-    with gr.Tab("Overview"):
-        gr.Markdown(f"# Welcome to {config.competition_info.competition_name}! 👋")
-
-        gr.Markdown(f"{config.competition_info.competition_description}")
-
-        gr.Markdown("## Dataset")
-        gr.Markdown(f"{config.competition_info.dataset_description}")
-
-    with gr.Tab("Public Leaderboard"):
-        output_markdown = gr.Markdown("")
-        fetch_lb = gr.Button("Fetch Leaderboard")
-        fetch_lb_partial = partial(utils.fetch_leaderboard, private=False)
-        fetch_lb.click(fn=fetch_lb_partial, outputs=[output_markdown])
-        # lb = utils.fetch_leaderboard(private=False)
-        # gr.Markdown(lb.to_markdown())
-    with gr.Tab("Private Leaderboard"):
-        current_date_time = datetime.now()
-        if current_date_time >= config.competition_info.end_date:
-            lb = utils.fetch_leaderboard(private=True)
-            gr.Markdown(lb)
-        else:
-            gr.Markdown("Private Leaderboard will be available after the competition ends")
-    with gr.Tab("New Submission"):
-        gr.Markdown(SUBMISSION_TEXT)
-        user_token = gr.Textbox(max_lines=1, value="hf_XXX", label="Please enter your Hugging Face token")
-        uploaded_file = gr.File()
-        output_text = gr.Markdown(visible=True, show_label=False)
-        new_sub_button = gr.Button("Upload Submission")
-        new_sub_button.click(fn=new_submission, inputs=[user_token, uploaded_file], outputs=[output_text])
-
-    with gr.Tab("My Submissions"):
-        gr.Markdown(SUBMISSION_LIMIT_TEXT)
-        user_token = gr.Textbox(max_lines=1, value="hf_XXX", label="Please enter your Hugging Face token")
-        output_text = gr.Markdown(visible=True, show_label=False)
-        output_df = gr.Dataframe(visible=False)
-        my_subs_button = gr.Button("Fetch Submissions")
-        my_subs_button.click(fn=my_submissions, inputs=[user_token], outputs=[output_text, output_df])
-
-if __name__ == "__main__":
-    demo.launch()
competitions/cli/__init__.py
ADDED
@@ -0,0 +1,13 @@
+from abc import ABC, abstractmethod
+from argparse import ArgumentParser
+
+
+class BaseCompetitionsCommand(ABC):
+    @staticmethod
+    @abstractmethod
+    def register_subcommand(parser: ArgumentParser):
+        raise NotImplementedError()
+
+    @abstractmethod
+    def run(self):
+        raise NotImplementedError()
competitions/cli/competitions.py
ADDED
@@ -0,0 +1,42 @@
+import argparse
+import sys
+
+from loguru import logger
+
+from .. import __version__
+from .run import RunCompetitionsAppCommand
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        "Competitions CLI",
+        usage="competitions <command> [<args>]",
+        epilog="For more information about a command, run: `competitions <command> --help`",
+    )
+    parser.add_argument("--version", "-v", help="Display competitions version", action="store_true")
+    commands_parser = parser.add_subparsers(help="commands")
+
+    # Register commands
+    RunCompetitionsAppCommand.register_subcommand(commands_parser)
+
+    args = parser.parse_args()
+
+    if args.version:
+        print(__version__)
+        exit(0)
+
+    if not hasattr(args, "func"):
+        parser.print_help()
+        exit(1)
+
+    command = args.func(args)
+
+    try:
+        command.run()
+    except Exception as e:
+        logger.error(e)
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
competitions/cli/run.py
ADDED
@@ -0,0 +1,27 @@
+from argparse import ArgumentParser
+
+from . import BaseCompetitionsCommand
+
+
+def run_app_command_factory(args):
+    return RunCompetitionsAppCommand()
+
+
+class RunCompetitionsAppCommand(BaseCompetitionsCommand):
+    @staticmethod
+    def register_subcommand(parser: ArgumentParser):
+        create_project_parser = parser.add_parser("run", description="✨ Run competitions app")
+        # create_project_parser.add_argument("--name", type=str, default=None, required=True, help="The project's name")
+        create_project_parser.set_defaults(func=run_app_command_factory)
+
+    # def __init__(self):
+    #     self._name = name
+    #     self._task = task
+    #     self._lang = language
+    #     self._max_models = max_models
+    #     self._hub_model = hub_model
+
+    def run(self):
+        from ..competitions import demo
+
+        demo.launch()
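Note: `competitions run` resolves to this command class, and the Gradio `demo` is imported lazily inside `run()` so the CLI can start without loading the app. A minimal sketch of the equivalent programmatic call (assumes the environment variables from competitions/__init__.py are set):

from competitions.cli.run import RunCompetitionsAppCommand

# Same effect as running `competitions run` from the shell.
RunCompetitionsAppCommand().run()  # imports competitions.competitions.demo and launches it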
competitions/competitions.py
ADDED
@@ -0,0 +1,61 @@
+import gradio as gr
+
+from . import competition_info
+from .leaderboard import Leaderboard
+from .submissions import Submissions
+
+
+# with gr.Blocks() as demo:
+#     with gr.Tab("Overview"):
+#         gr.Markdown(f"# Welcome to {config.competition_info.competition_name}! 👋")
+
+#         gr.Markdown(f"{config.competition_info.competition_description}")
+
+#         gr.Markdown("## Dataset")
+#         gr.Markdown(f"{config.competition_info.dataset_description}")
+
+#     with gr.Tab("Public Leaderboard"):
+#         output_markdown = gr.Markdown("")
+#         fetch_lb = gr.Button("Fetch Leaderboard")
+#         fetch_lb_partial = partial(utils.fetch_leaderboard, private=False)
+#         fetch_lb.click(fn=fetch_lb_partial, outputs=[output_markdown])
+#         # lb = utils.fetch_leaderboard(private=False)
+#         # gr.Markdown(lb.to_markdown())
+#     with gr.Tab("Private Leaderboard"):
+#         current_date_time = datetime.now()
+#         if current_date_time >= config.competition_info.end_date:
+#             lb = utils.fetch_leaderboard(private=True)
+#             gr.Markdown(lb)
+#         else:
+#             gr.Markdown("Private Leaderboard will be available after the competition ends")
+#     with gr.Tab("New Submission"):
+#         gr.Markdown(SUBMISSION_TEXT)
+#         user_token = gr.Textbox(max_lines=1, value="hf_XXX", label="Please enter your Hugging Face token")
+#         uploaded_file = gr.File()
+#         output_text = gr.Markdown(visible=True, show_label=False)
+#         new_sub_button = gr.Button("Upload Submission")
+#         new_sub_button.click(fn=new_submission, inputs=[user_token, uploaded_file], outputs=[output_text])
+
+#     with gr.Tab("My Submissions"):
+#         gr.Markdown(SUBMISSION_LIMIT_TEXT)
+#         user_token = gr.Textbox(max_lines=1, value="hf_XXX", label="Please enter your Hugging Face token")
+#         output_text = gr.Markdown(visible=True, show_label=False)
+#         output_df = gr.Dataframe(visible=False)
+#         my_subs_button = gr.Button("Fetch Submissions")
+#         my_subs_button.click(fn=my_submissions, inputs=[user_token], outputs=[output_text, output_df])
+
+with gr.Blocks() as demo:
+    with gr.Tabs() as tab_container:
+        with gr.TabItem("Overview", id="overview"):
+            gr.Markdown(f"# Welcome to {competition_info.competition_name}! 👋")
+            gr.Markdown(f"{competition_info.competition_description}")
+            gr.Markdown("## Dataset")
+            gr.Markdown(f"{competition_info.dataset_description}")
+        with gr.TabItem("Public Leaderboard", id="public_leaderboard"):
+            pass
+        with gr.TabItem("Private Leaderboard", id="private_leaderboard"):
+            pass
+        with gr.TabItem("New Submission", id="new_submission"):
+            pass
+        with gr.TabItem("My Submissions", id="my_submissions"):
+            pass
competitions/errors.py
ADDED
@@ -0,0 +1,14 @@
+class AuthenticationError(Exception):
+    pass
+
+
+class NoSubmissionError(Exception):
+    pass
+
+
+class SubmissionError(Exception):
+    pass
+
+
+class SubmissionLimitError(Exception):
+    pass
competitions/{config.py → info.py}
RENAMED
@@ -1,37 +1,29 @@
 import json
-import
+from dataclasses import dataclass
 from datetime import datetime
-from pathlib import Path
 
-from dotenv import load_dotenv
 from huggingface_hub import hf_hub_download
 from huggingface_hub.utils._errors import EntryNotFoundError
+from loguru import logger
 
 
-
-load_dotenv("../.env")
-
-
-MOONLANDING_URL = os.getenv("MOONLANDING_URL")
-COMPETITION_ID = os.getenv("COMPETITION_ID")
-AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")
-AUTOTRAIN_TOKEN = os.getenv("AUTOTRAIN_TOKEN")
-AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
-
-
+@dataclass
 class CompetitionInfo:
-
+    competition_id: str
+    autotrain_token: str
+
+    def __post_init__(self):
         try:
             config_fname = hf_hub_download(
-                repo_id=
+                repo_id=self.competition_id,
                 filename="conf",
-                use_auth_token=
+                use_auth_token=self.autotrain_token,
                 repo_type="dataset",
             )
         except EntryNotFoundError:
             raise Exception("Competition config not found. Please check the competition id.")
         except Exception as e:
-
+            logger.error(e)
             raise Exception("Hugging Face Hub is unreachable, please try again later.")
 
         self.config = self.load_config(config_fname)
@@ -78,6 +70,3 @@ class CompetitionInfo:
     @property
     def dataset_description(self):
         return self.config["DATASET_DESCRIPTION"]
-
-
-competition_info = CompetitionInfo()
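Note: CompetitionInfo is now a dataclass that takes the competition repo id and token explicitly instead of reading module-level globals loaded from a .env file. A minimal usage sketch with placeholder values:

from competitions.info import CompetitionInfo

info = CompetitionInfo(competition_id="my-org/my-competition", autotrain_token="hf_XXX")
# __post_init__ downloads the "conf" file from the competition dataset repo and parses it.
print(info.competition_name)
print(info.dataset_description)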
competitions/leaderboard.py
ADDED
@@ -0,0 +1,99 @@
+import glob
+import json
+import os
+from dataclasses import dataclass
+
+import pandas as pd
+from huggingface_hub import snapshot_download
+
+
+@dataclass
+class Leaderboard:
+    end_date: str
+    eval_higher_is_better: bool
+    competition_id: str
+    autotrain_token: str
+
+    def __post_init__(self):
+        self.private_columns = [
+            "rank",
+            "name",
+            "private_score",
+            "submission_datetime",
+        ]
+        self.public_columns = [
+            "rank",
+            "name",
+            "public_score",
+            "submission_datetime",
+        ]
+
+    def _download_submissions(self, private):
+        submissions_folder = snapshot_download(
+            repo_id=self.competition_id,
+            allow_patterns="*.json",
+            use_auth_token=self.autotrain_token,
+            repo_type="dataset",
+        )
+        submissions = []
+        for submission in glob.glob(os.path.join(submissions_folder, "*.json")):
+            with open(submission, "r") as f:
+                submission_info = json.load(f)
+            if self.eval_higher_is_better:
+                submission_info["submissions"].sort(
+                    key=lambda x: x["private_score"] if private else x["public_score"],
+                    reverse=True,
+                )
+            else:
+                submission_info["submissions"].sort(key=lambda x: x["private_score"] if private else x["public_score"])
+
+            # select only the best submission
+            submission_info["submissions"] = submission_info["submissions"][0]
+            temp_info = {
+                "id": submission_info["id"],
+                "name": submission_info["name"],
+                "submission_id": submission_info["submissions"]["submission_id"],
+                "submission_comment": submission_info["submissions"]["submission_comment"],
+                "status": submission_info["submissions"]["status"],
+                "selected": submission_info["submissions"]["selected"],
+                "public_score": submission_info["submissions"]["public_score"],
+                "private_score": submission_info["submissions"]["private_score"],
+                "submission_date": submission_info["submissions"]["date"],
+                "submission_time": submission_info["submissions"]["time"],
+            }
+            submissions.append(temp_info)
+        return submissions
+
+    def fetch(self, private=False):
+        submissions = self._download_submissions(private)
+
+        if len(submissions) == 0:
+            return pd.DataFrame()
+
+        df = pd.DataFrame(submissions)
+        # convert submission date and time to datetime
+        df["submission_datetime"] = pd.to_datetime(
+            df["submission_date"] + " " + df["submission_time"], format="%Y-%m-%d %H:%M:%S"
+        )
+        # convert datetime column to string
+        df["submission_datetime"] = df["submission_datetime"].dt.strftime("%Y-%m-%d %H:%M:%S")
+
+        # sort by submission datetime
+        # sort by public score and submission datetime
+        if self.eval_higher_is_better:
+            df = df.sort_values(
+                by=["public_score", "submission_datetime"],
+                ascending=[False, True],
+            )
+        else:
+            df = df.sort_values(
+                by=["public_score", "submission_datetime"],
+                ascending=[True, True],
+            )
+
+        # reset index
+        df = df.reset_index(drop=True)
+        df["rank"] = df.index + 1
+
+        columns = self.public_columns if not private else self.private_columns
+        return df[columns]
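Note: the leaderboard logic that previously lived in utils.fetch_leaderboard is now a Leaderboard dataclass that returns a DataFrame instead of a markdown string. A minimal sketch, assuming the module-level config from competitions/__init__.py is available:

from competitions import AUTOTRAIN_TOKEN, COMPETITION_ID, competition_info
from competitions.leaderboard import Leaderboard

lb = Leaderboard(
    end_date=competition_info.end_date,
    eval_higher_is_better=competition_info.eval_higher_is_better,
    competition_id=COMPETITION_ID,
    autotrain_token=AUTOTRAIN_TOKEN,
)
public_df = lb.fetch(private=False)   # columns: rank, name, public_score, submission_datetime
private_df = lb.fetch(private=True)   # columns: rank, name, private_score, submission_datetime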
competitions/submissions.py
ADDED
@@ -0,0 +1,310 @@
+import io
+import json
+import time
+import uuid
+from dataclasses import dataclass
+from datetime import datetime
+
+import pandas as pd
+from huggingface_hub import HfApi, hf_hub_download
+from huggingface_hub.utils._errors import EntryNotFoundError
+from loguru import logger
+
+from .errors import AuthenticationError, NoSubmissionError, SubmissionError, SubmissionLimitError
+from .utils import http_get, http_post, user_authentication
+
+
+@dataclass
+class Submissions:
+    competition_id: str
+    submission_limit: str
+    end_date: datetime
+    autotrain_username: str
+    autotrain_token: str
+    autotrain_backend_api: str
+
+    def __post_init__(self):
+        self.public_sub_columns = [
+            "date",
+            "submission_id",
+            "public_score",
+            "submission_comment",
+            "selected",
+        ]
+        self.private_sub_columns = [
+            "date",
+            "submission_id",
+            "public_score",
+            "private_score",
+            "submission_comment",
+            "selected",
+            "status",
+        ]
+
+    def _verify_submission(self, bytes_data):
+        return True
+
+    def _add_new_user(self, user_info):
+        api = HfApi()
+        user_submission_info = {}
+        user_submission_info["name"] = user_info["name"]
+        user_submission_info["id"] = user_info["id"]
+        user_submission_info["submissions"] = []
+        # convert user_submission_info to BufferedIOBase file object
+        user_submission_info_json = json.dumps(user_submission_info)
+        user_submission_info_json_bytes = user_submission_info_json.encode("utf-8")
+        user_submission_info_json_buffer = io.BytesIO(user_submission_info_json_bytes)
+
+        api.upload_file(
+            path_or_fileobj=user_submission_info_json_buffer,
+            path_in_repo=f"{user_info['id']}.json",
+            repo_id=self.competition_id,
+            repo_type="dataset",
+            token=self.autotrain_token,
+        )
+
+    def _check_user_submission_limit(self, user_info):
+        user_id = user_info["id"]
+        try:
+            user_fname = hf_hub_download(
+                repo_id=self.competition_id,
+                filename=f"{user_id}.json",
+                use_auth_token=self.autotrain_token,
+                repo_type="dataset",
+            )
+        except EntryNotFoundError:
+            self._add_new_user(user_info)
+            user_fname = hf_hub_download(
+                repo_id=self.competition_id,
+                filename=f"{user_id}.json",
+                use_auth_token=self.autotrain_token,
+                repo_type="dataset",
+            )
+        except Exception as e:
+            logger.error(e)
+            raise Exception("Hugging Face Hub is unreachable, please try again later.")
+
+        with open(user_fname, "r") as f:
+            user_submission_info = json.load(f)
+
+        todays_date = datetime.datetime.now().strftime("%Y-%m-%d")
+        if len(user_submission_info["submissions"]) == 0:
+            user_submission_info["submissions"] = []
+
+        # count the number of times user has submitted today
+        todays_submissions = 0
+        for sub in user_submission_info["submissions"]:
+            if sub["date"] == todays_date:
+                todays_submissions += 1
+        if todays_submissions >= self.submission_limit:
+            return False
+        return True
+
+    def _increment_submissions(self, user_id, submission_id, submission_comment):
+        user_fname = hf_hub_download(
+            repo_id=self.competition_id,
+            filename=f"{user_id}.json",
+            use_auth_token=self.autotrain_token,
+            repo_type="dataset",
+        )
+        with open(user_fname, "r") as f:
+            user_submission_info = json.load(f)
+        todays_date = datetime.now().strftime("%Y-%m-%d")
+        current_time = datetime.now().strftime("%H:%M:%S")
+
+        # here goes all the default stuff for submission
+        user_submission_info["submissions"].append(
+            {
+                "date": todays_date,
+                "time": current_time,
+                "submission_id": submission_id,
+                "submission_comment": submission_comment,
+                "status": "pending",
+                "selected": False,
+                "public_score": -1,
+                "private_score": -1,
+            }
+        )
+        # count the number of times user has submitted today
+        todays_submissions = 0
+        for sub in user_submission_info["submissions"]:
+            if sub["date"] == todays_date:
+                todays_submissions += 1
+
+        # convert user_submission_info to BufferedIOBase file object
+        user_submission_info_json = json.dumps(user_submission_info)
+        user_submission_info_json_bytes = user_submission_info_json.encode("utf-8")
+        user_submission_info_json_buffer = io.BytesIO(user_submission_info_json_bytes)
+        api = HfApi()
+        api.upload_file(
+            path_or_fileobj=user_submission_info_json_buffer,
+            path_in_repo=f"{user_id}.json",
+            repo_id=self.competition_id,
+            repo_type="dataset",
+            token=self.autotrain_token,
+        )
+        return todays_submissions
+
+    def _download_user_subs(self, user_id):
+        user_fname = hf_hub_download(
+            repo_id=self.competition_id,
+            filename=f"{user_id}.json",
+            use_auth_token=self.autotrain_token,
+            repo_type="dataset",
+        )
+        with open(user_fname, "r") as f:
+            user_submission_info = json.load(f)
+        return user_submission_info["submissions"]
+
+    def _get_user_subs(self, user_info, private=False):
+        # get user submissions
+        user_id = user_info["id"]
+        try:
+            user_submissions = self._download_user_subs(user_id)
+        except EntryNotFoundError:
+            return NoSubmissionError("No submissions found ")
+
+        submissions_df = pd.DataFrame(user_submissions)
+        if not private:
+            submissions_df = submissions_df.drop(columns=["private_score"])
+            submissions_df = submissions_df[self.public_sub_columns]
+        else:
+            submissions_df = submissions_df[self.private_sub_columns]
+        return submissions_df
+
+    def _get_user_info(self, user_token):
+        user_info = user_authentication(token=user_token)
+        if "error" in user_info:
+            return AuthenticationError("Invalid token")
+
+        if user_info["emailVerified"] is False:
+            return AuthenticationError("Please verify your email on Hugging Face Hub")
+        return user_info
+
+    def _create_autotrain_project(self, project_id, submission_dataset, model, dataset):
+        project_config = {}
+        project_config["dataset_name"] = "lewtun/imdb-dummy"
+        project_config["dataset_config"] = "lewtun--imdb-dummy"
+        project_config["dataset_split"] = "train"
+        project_config["col_mapping"] = {"text": "text", "label": "target"}
+
+        payload = {
+            "username": self.autotrain_username,
+            "proj_name": project_id,
+            "task": 1,
+            "config": {
+                "language": "en",
+                "max_models": 5,
+                "benchmark": {
+                    "dataset": dataset,
+                    "model": model,
+                    "submission_dataset": submission_dataset,
+                    "create_prediction_repo": False,
+                },
+            },
+        }
+
+        project_json_resp = http_post(
+            path="/projects/create",
+            payload=payload,
+            token=self.autotrain_token,
+            domain=self.autotrain_backend_api,
+        ).json()
+
+        time.sleep(5)
+        # Upload data
+        payload = {
+            "split": 4,
+            "col_mapping": project_config["col_mapping"],
+            "load_config": {"max_size_bytes": 0, "shuffle": False},
+            "dataset_id": project_config["dataset_name"],
+            "dataset_config": project_config["dataset_config"],
+            "dataset_split": project_config["dataset_split"],
+        }
+
+        _ = http_post(
+            path=f"/projects/{project_json_resp['id']}/data/dataset",
+            payload=payload,
+            token=self.autotrain_token,
+            domain=self.autotrain_backend_api,
+        ).json()
+        logger.info("💾💾💾 Dataset creation done 💾💾💾")
+
+        # Process data
+        _ = http_post(
+            path=f"/projects/{project_json_resp['id']}/data/start_processing",
+            token=self.autotrain_token,
+            domain=self.autotrain_backend_api,
+        ).json()
+
+        logger.info("⏳ Waiting for data processing to complete ...")
+        is_data_processing_success = False
+        while is_data_processing_success is not True:
+            project_status = http_get(
+                path=f"/projects/{project_json_resp['id']}",
+                token=self.autotrain_token,
+                domain=self.autotrain_backend_api,
+            ).json()
+            # See database.database.enums.ProjectStatus for definitions of `status`
+            if project_status["status"] == 3:
+                is_data_processing_success = True
+                logger.info("✅ Data processing complete!")
+            time.sleep(5)
+
+        # Approve training job
+        _ = http_post(
+            path=f"/projects/{project_json_resp['id']}/start_training",
+            token=self.autotrain_token,
+            domain=self.autotrain_backend_api,
+        ).json()
+
+    def my_submissions(self, user_token):
+        user_info = self._get_user_info(user_token)
+        current_date_time = datetime.now()
+        private = False
+        if current_date_time >= self.end_date:
+            private = True
+        subs = self._get_user_subs(user_info, private=private)
+        return subs
+
+    def new_submission(self, user_token, uploaded_file):
+        # verify token
+        user_info = self._get_user_info(user_token)
+
+        # check if user can submit to the competition
+        if self._check_user_submission_limit(user_info) is False:
+            return SubmissionLimitError("Submission limit reached")
+
+        with open(uploaded_file.name, "rb") as f:
+            bytes_data = f.read()
+        # verify file is valid
+        if not self._verify_submission(bytes_data):
+            return SubmissionError("Invalid submission file")
+        else:
+            user_id = user_info["id"]
+            submission_id = str(uuid.uuid4())
+            file_extension = uploaded_file.orig_name.split(".")[-1]
+            # upload file to hf hub
+            api = HfApi()
+            api.upload_file(
+                path_or_fileobj=bytes_data,
+                path_in_repo=f"submissions/{user_id}-{submission_id}.{file_extension}",
+                repo_id=self.competition_id,
+                repo_type="dataset",
+                token=self.autotrain_token,
+            )
+            # update submission limit
+            submissions_made = self._increment_submissions(
+                user_id=user_id,
+                submission_id=submission_id,
+                submission_comment="",
+            )
+            # schedule submission for evaluation
+            self._create_autotrain_project(
+                project_id=f"{submission_id}",
+                dataset=f"{self.competition_id}",
+                submission_dataset=user_id,
+                model="generic_competition",
+            )
+            remaining_submissions = self.submission_limit - submissions_made
+            return remaining_submissions
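Note: this class consolidates the submission helpers that were previously spread across app.py and utils.py, and failures come back as the exception types from competitions/errors.py instead of plain strings. A minimal sketch with placeholder values (`upload` stands in for a Gradio file object):

from competitions import (
    AUTOTRAIN_BACKEND_API,
    AUTOTRAIN_TOKEN,
    AUTOTRAIN_USERNAME,
    COMPETITION_ID,
    competition_info,
)
from competitions.submissions import Submissions

subs = Submissions(
    competition_id=COMPETITION_ID,
    submission_limit=competition_info.submission_limit,
    end_date=competition_info.end_date,
    autotrain_username=AUTOTRAIN_USERNAME,
    autotrain_token=AUTOTRAIN_TOKEN,
    autotrain_backend_api=AUTOTRAIN_BACKEND_API,
)

df = subs.my_submissions(user_token="hf_XXX")        # DataFrame of the user's submissions
# remaining = subs.new_submission("hf_XXX", upload)  # `upload` would be a Gradio file object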
competitions/text.py
ADDED
@@ -0,0 +1,11 @@
+SUBMISSION_TEXT = """You can make upto {} submissions per day.
+The test data has been divided into public and private splits.
+Your score on the public split will be shown on the leaderboard.
+Your final score will be based on your private split performance.
+The final rankings will be based on the private split performance.
+"""
+
+SUBMISSION_ERROR = """Submission is not in a proper format.
+Please check evaluation instructions for more details."""
+
+SUBMISSION_LIMIT_TEXT = """You can select upto {} submissions for private leaderboard."""
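Note: these replace the f-strings that were hard-coded in app.py; the limits are now filled in with str.format. A minimal usage sketch (the example limits are placeholders):

from competitions.text import SUBMISSION_LIMIT_TEXT, SUBMISSION_TEXT

print(SUBMISSION_TEXT.format(5))        # daily submission limit, e.g. 5
print(SUBMISSION_LIMIT_TEXT.format(2))  # selectable submissions, e.g. 2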
competitions/utils.py
CHANGED
@@ -1,15 +1,7 @@
-import datetime
-import glob
-import io
-import json
-import os
-import time
-
-import config
-import pandas as pd
 import requests
-from
-
+from loguru import logger
+
+from . import MOONLANDING_URL
 
 
 def get_auth_headers(token: str, prefix: str = "Bearer"):
@@ -23,7 +15,7 @@ def http_post(path: str, token: str, payload=None, domain: str = None, params=No
             url=domain + path, json=payload, headers=get_auth_headers(token=token), allow_redirects=True, params=params
         )
     except requests.exceptions.ConnectionError:
-
+        logger.error("❌ Failed to reach AutoNLP API, check your internet connection")
     response.raise_for_status()
     return response
 
@@ -33,85 +25,11 @@ def http_get(path: str, token: str, domain: str = None) -> requests.Response:
     try:
         response = requests.get(url=domain + path, headers=get_auth_headers(token=token), allow_redirects=True)
     except requests.exceptions.ConnectionError:
-
+        logger.error("❌ Failed to reach AutoNLP API, check your internet connection")
     response.raise_for_status()
     return response
 
 
-def create_project(project_id, submission_dataset, model, dataset):
-    project_config = {}
-    project_config["dataset_name"] = "lewtun/imdb-dummy"
-    project_config["dataset_config"] = "lewtun--imdb-dummy"
-    project_config["dataset_split"] = "train"
-    project_config["col_mapping"] = {"text": "text", "label": "target"}
-
-    payload = {
-        "username": config.AUTOTRAIN_USERNAME,
-        "proj_name": project_id,
-        "task": 1,
-        "config": {
-            "language": "en",
-            "max_models": 5,
-            "benchmark": {
-                "dataset": dataset,
-                "model": model,
-                "submission_dataset": submission_dataset,
-                "create_prediction_repo": False,
-            },
-        },
-    }
-
-    project_json_resp = http_post(
-        path="/projects/create", payload=payload, token=config.AUTOTRAIN_TOKEN, domain=config.AUTOTRAIN_BACKEND_API
-    ).json()
-    time.sleep(5)
-    # Upload data
-    payload = {
-        "split": 4,
-        "col_mapping": project_config["col_mapping"],
-        "load_config": {"max_size_bytes": 0, "shuffle": False},
-        "dataset_id": project_config["dataset_name"],
-        "dataset_config": project_config["dataset_config"],
-        "dataset_split": project_config["dataset_split"],
-    }
-
-    _ = http_post(
-        path=f"/projects/{project_json_resp['id']}/data/dataset",
-        payload=payload,
-        token=config.AUTOTRAIN_TOKEN,
-        domain=config.AUTOTRAIN_BACKEND_API,
-    ).json()
-    print("💾💾💾 Dataset creation done 💾💾💾")
-
-    # Process data
-    _ = http_post(
-        path=f"/projects/{project_json_resp['id']}/data/start_processing",
-        token=config.AUTOTRAIN_TOKEN,
-        domain=config.AUTOTRAIN_BACKEND_API,
-    ).json()
-
-    print("⏳ Waiting for data processing to complete ...")
-    is_data_processing_success = False
-    while is_data_processing_success is not True:
-        project_status = http_get(
-            path=f"/projects/{project_json_resp['id']}",
-            token=config.AUTOTRAIN_TOKEN,
-            domain=config.AUTOTRAIN_BACKEND_API,
-        ).json()
-        # See database.database.enums.ProjectStatus for definitions of `status`
-        if project_status["status"] == 3:
-            is_data_processing_success = True
-            print("✅ Data processing complete!")
-        time.sleep(10)
-
-    # Approve training job
-    _ = http_post(
-        path=f"/projects/{project_json_resp['id']}/start_training",
-        token=config.AUTOTRAIN_TOKEN,
-        domain=config.AUTOTRAIN_BACKEND_API,
-    ).json()
-
-
 def user_authentication(token):
     headers = {}
     cookies = {}
@@ -121,185 +39,12 @@ def user_authentication(token):
     cookies = {"token": token}
     try:
         response = requests.get(
-
+            MOONLANDING_URL + "/api/whoami-v2",
             headers=headers,
             cookies=cookies,
             timeout=3,
         )
     except (requests.Timeout, ConnectionError) as err:
-
+        logger.error(f"Failed to request whoami-v2 - {repr(err)}")
         raise Exception("Hugging Face Hub is unreachable, please try again later.")
     return response.json()
-
-
-def add_new_user(user_info):
-    api = HfApi()
-    user_submission_info = {}
-    user_submission_info["name"] = user_info["name"]
-    user_submission_info["id"] = user_info["id"]
-    user_submission_info["submissions"] = []
-    # convert user_submission_info to BufferedIOBase file object
-    user_submission_info_json = json.dumps(user_submission_info)
-    user_submission_info_json_bytes = user_submission_info_json.encode("utf-8")
-    user_submission_info_json_buffer = io.BytesIO(user_submission_info_json_bytes)
-
-    api.upload_file(
-        path_or_fileobj=user_submission_info_json_buffer,
-        path_in_repo=f"{user_info['id']}.json",
-        repo_id=config.COMPETITION_ID,
-        repo_type="dataset",
-        token=config.AUTOTRAIN_TOKEN,
-    )
-
-
-def check_user_submission_limit(user_info):
-    user_id = user_info["id"]
-    try:
-        user_fname = hf_hub_download(
-            repo_id=config.COMPETITION_ID,
-            filename=f"{user_id}.json",
-            use_auth_token=config.AUTOTRAIN_TOKEN,
-            repo_type="dataset",
-        )
-    except EntryNotFoundError:
-        add_new_user(user_info)
-        user_fname = hf_hub_download(
-            repo_id=config.COMPETITION_ID,
-            filename=f"{user_id}.json",
-            use_auth_token=config.AUTOTRAIN_TOKEN,
-            repo_type="dataset",
-        )
-    except Exception as e:
-        print(e)
-        raise Exception("Hugging Face Hub is unreachable, please try again later.")
-
-    with open(user_fname, "r") as f:
-        user_submission_info = json.load(f)
-
-    todays_date = datetime.datetime.now().strftime("%Y-%m-%d")
-    if len(user_submission_info["submissions"]) == 0:
-        user_submission_info["submissions"] = []
-
-    # count the number of times user has submitted today
-    todays_submissions = 0
-    for sub in user_submission_info["submissions"]:
-        if sub["date"] == todays_date:
-            todays_submissions += 1
-    if todays_submissions >= config.competition_info.submission_limit:
-        return False
-    return True
-
-
-def increment_submissions(user_id, submission_id, submission_comment):
-    user_fname = hf_hub_download(
-        repo_id=config.COMPETITION_ID,
-        filename=f"{user_id}.json",
-        use_auth_token=config.AUTOTRAIN_TOKEN,
-        repo_type="dataset",
-    )
-    with open(user_fname, "r") as f:
-        user_submission_info = json.load(f)
-    todays_date = datetime.datetime.now().strftime("%Y-%m-%d")
-    current_time = datetime.datetime.now().strftime("%H:%M:%S")
-    # here goes all the default stuff for submission
-    user_submission_info["submissions"].append(
-        {
-            "date": todays_date,
-            "time": current_time,
-            "submission_id": submission_id,
-            "submission_comment": submission_comment,
-            "status": "pending",
-            "selected": False,
-            "public_score": -1,
-            "private_score": -1,
-        }
-    )
-    # count the number of times user has submitted today
-    todays_submissions = 0
-    for sub in user_submission_info["submissions"]:
-        if sub["date"] == todays_date:
-            todays_submissions += 1
-    # convert user_submission_info to BufferedIOBase file object
-    user_submission_info_json = json.dumps(user_submission_info)
-    user_submission_info_json_bytes = user_submission_info_json.encode("utf-8")
-    user_submission_info_json_buffer = io.BytesIO(user_submission_info_json_bytes)
-    api = HfApi()
-    api.upload_file(
-        path_or_fileobj=user_submission_info_json_buffer,
-        path_in_repo=f"{user_id}.json",
-        repo_id=config.COMPETITION_ID,
-        repo_type="dataset",
-        token=config.AUTOTRAIN_TOKEN,
-    )
-    return todays_submissions
-
-
-def verify_submission(bytes_data):
-    return True
-
-
-def fetch_submissions(user_id):
-    user_fname = hf_hub_download(
-        repo_id=config.COMPETITION_ID,
-        filename=f"{user_id}.json",
-        use_auth_token=config.AUTOTRAIN_TOKEN,
-        repo_type="dataset",
-    )
-    with open(user_fname, "r") as f:
-        user_submission_info = json.load(f)
-    return user_submission_info["submissions"]
-
-
-def fetch_leaderboard(private=False):
-    submissions_folder = snapshot_download(
-        repo_id=config.COMPETITION_ID,
-        allow_patterns="*.json",
-        use_auth_token=config.AUTOTRAIN_TOKEN,
-        repo_type="dataset",
-    )
-    submissions = []
-    for submission in glob.glob(os.path.join(submissions_folder, "*.json")):
-        with open(submission, "r") as f:
-            submission_info = json.load(f)
-        if config.competition_info.eval_higher_is_better:
-            submission_info["submissions"].sort(
-                key=lambda x: x["private_score"] if private else x["public_score"], reverse=True
-            )
-        else:
-            submission_info["submissions"].sort(key=lambda x: x["private_score"] if private else x["public_score"])
-        # select only the best submission
-        submission_info["submissions"] = submission_info["submissions"][0]
-        temp_info = {
-            "id": submission_info["id"],
-            "name": submission_info["name"],
-            "submission_id": submission_info["submissions"]["submission_id"],
-            "submission_comment": submission_info["submissions"]["submission_comment"],
-            "status": submission_info["submissions"]["status"],
-            "selected": submission_info["submissions"]["selected"],
-            "public_score": submission_info["submissions"]["public_score"],
-            "private_score": submission_info["submissions"]["private_score"],
-            "submission_date": submission_info["submissions"]["date"],
-            "submission_time": submission_info["submissions"]["time"],
-        }
-        submissions.append(temp_info)
-
-    df = pd.DataFrame(submissions)
-    # convert submission date and time to datetime
-    df["submission_datetime"] = pd.to_datetime(
-        df["submission_date"] + " " + df["submission_time"], format="%Y-%m-%d %H:%M:%S"
-    )
-    # sort by submission datetime
-    # sort by public score and submission datetime
-    if config.competition_info.eval_higher_is_better:
-        df = df.sort_values(by=["public_score", "submission_datetime"], ascending=[False, True])
-    else:
-        df = df.sort_values(by=["public_score", "submission_datetime"], ascending=[True, True])
-    # reset index
-    df = df.reset_index(drop=True)
-    df["rank"] = df.index + 1
-
-    if private:
-        columns = ["rank", "name", "private_score", "submission_datetime"]
-    else:
-        columns = ["rank", "name", "public_score", "submission_datetime"]
-    return df[columns].to_markdown()
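Note: utils.py is now reduced to the HTTP helpers and user authentication; everything submission- and leaderboard-related moved into the new modules above. A minimal sketch of the remaining entry point (placeholder token; assumes MOONLANDING_URL is set as in competitions/__init__.py):

from competitions.utils import user_authentication

user_info = user_authentication(token="hf_XXX")  # JSON payload from /api/whoami-v2
print(user_info.get("name"), user_info.get("emailVerified"))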
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
 gradio==3.9.1
+loguru==0.6.0
 pandas==1.5.0
-python-dotenv==0.20.0
 huggingface_hub==0.10.1
 tabulate==0.9.0
setup.py
CHANGED
@@ -44,6 +44,7 @@ setup(
     url="https://github.com/huggingface/competitions",
     download_url="https://github.com/huggingface/competitions/tags",
     packages=find_packages("."),
+    entry_points={"console_scripts": ["competitions=competitions.cli.competitions:main"]},
     install_requires=INSTALL_REQUIRES,
     extras_require=EXTRAS_REQUIRE,
     python_requires=">=3.8",
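Note: the new console_scripts entry point is what makes the `competitions` command used by the Dockerfile's CMD available after `python setup.py install`. A minimal sketch of invoking it without the shell (main() parses sys.argv, so this behaves like the installed script):

from competitions.cli.competitions import main

main()  # equivalent to running `competitions` on the command line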