Update app.py
app.py CHANGED
@@ -7,6 +7,7 @@ import requests
 from huggingface_hub import HfApi
 import tempfile
 from dotenv import load_dotenv
+import webbrowser
 
 # Load environment variables
 load_dotenv()
@@ -175,8 +176,9 @@ When your API is deployed :
 2. Enter the Space URL of your API
 3. (Optional) Specify the API route (default is ``/text``, ``/image``, or ``/audio``)
 4. Step 1 - Evaluate model: Click on the button to evaluate your model. This will run your model on your API, compute the accuracy on the test set (20% of the train set), and track the energy consumption and emissions.
-5. Step 2 - Submit to leaderboard: Click on the button to submit your results to the leaderboard. This will upload the results to the leaderboard dataset and update the leaderboard.
-6.
+5. Step 2 - Submit to leaderboard (optional): Click on the button to submit your results to the leaderboard. This will upload the results to the leaderboard dataset and update the leaderboard.
+6. Step 3 - Submit to final evaluation (as a form): [Click on the button to submit your results to the challenge](https://framaforms.org/2025-frugal-ai-challenge-submission-form-1736883260-0). This will open a form to submit your results to the challenge.
+7. You can see the public leaderboards at the following links - they are mostly informational because we will rank the models on the private dataset after the challenge has ended, but you can see the current state of the leaderboard.
    - Text - https://huggingface.co/datasets/frugal-ai-challenge/public-leaderboard-text
    - Image - https://huggingface.co/datasets/frugal-ai-challenge/public-leaderboard-image
    - Audio - https://huggingface.co/datasets/frugal-ai-challenge/public-leaderboard-audio
@@ -209,7 +211,9 @@ The goal of the Frugal AI Challenge is to encourage both academic and industry a
             with gr.Column(scale=1):
                 text_evaluate_btn = gr.Button("1. Evaluate model", variant="secondary")
             with gr.Column(scale=1):
-                text_submit_btn = gr.Button("2. Submit to leaderboard", variant="
+                text_submit_btn = gr.Button("2. Submit to public leaderboard (optional)", variant="secondary")
+            with gr.Column(scale=1):
+                text_submit_btn2 = gr.Button("3. Submit to final evaluation (form)", variant="primary")
 
         with gr.Row():
             text_accuracy = gr.Number(label="Accuracy", precision=4)
@@ -236,7 +240,9 @@ The goal of the Frugal AI Challenge is to encourage both academic and industry a
             with gr.Column(scale=1):
                 image_evaluate_btn = gr.Button("1. Evaluate model", variant="secondary")
             with gr.Column(scale=1):
-                image_submit_btn = gr.Button("2. Submit to leaderboard", variant="
+                image_submit_btn = gr.Button("2. Submit to public leaderboard (optional)", variant="secondary")
+            with gr.Column(scale=1):
+                image_submit_btn2 = gr.Button("3. Submit to final evaluation (form)", variant="primary")
 
         with gr.Row():
             image_accuracy = gr.Number(label="Accuracy", precision=4)
@@ -263,7 +269,9 @@ The goal of the Frugal AI Challenge is to encourage both academic and industry a
             with gr.Column(scale=1):
                 audio_evaluate_btn = gr.Button("1. Evaluate model", variant="secondary")
             with gr.Column(scale=1):
-                audio_submit_btn = gr.Button("2. Submit to leaderboard", variant="
+                audio_submit_btn = gr.Button("2. Submit to public leaderboard (optional)", variant="secondary")
+            with gr.Column(scale=1):
+                audio_submit_btn2 = gr.Button("3. Submit to final evaluation (form)", variant="primary")
 
         with gr.Row():
             audio_accuracy = gr.Number(label="Accuracy", precision=4)
@@ -272,6 +280,13 @@ The goal of the Frugal AI Challenge is to encourage both academic and industry a
         with gr.Row():
             audio_results_json = gr.JSON(label="Detailed Results", visible=True)
 
+
+    FORM_URL = "https://framaforms.org/2025-frugal-ai-challenge-submission-form-1736883260-0"
+
+    def open_form():
+        webbrowser.open_new_tab(FORM_URL)
+        return gr.Info("Opening submission form in new tab...")
+
     # Set up event handlers
     text_evaluate_btn.click(
         lambda url, route: evaluate_model(route.strip("/"), url),
@@ -279,35 +294,47 @@ The goal of the Frugal AI Challenge is to encourage both academic and industry a
         outputs=[text_accuracy, text_emissions, text_energy, text_results_json]
     )
 
+    text_submit_btn2.click(
+        fn=open_form,
+        inputs=[],
+        outputs=[]
+    )
+
     image_evaluate_btn.click(
         lambda url, route: evaluate_model(route.strip("/"), url),
         inputs=[image_space_url, image_route],
         outputs=[image_accuracy, image_emissions, image_energy, image_results_json]
     )
 
-    audio_evaluate_btn.click(
-        lambda url, route: evaluate_model(route.strip("/"), url),
-        inputs=[audio_space_url, audio_route],
-        outputs=[audio_accuracy, audio_emissions, audio_energy, audio_results_json]
-    )
-
-    text_submit_btn.click(
-        lambda results: submit_results("text", results),
-        inputs=[text_results_json],
-        outputs=None
-    )
-
     image_submit_btn.click(
         lambda results: submit_results("image", results),
         inputs=[image_results_json],
         outputs=None
     )
 
+    image_submit_btn2.click(
+        fn=open_form,
+        inputs=[],
+        outputs=[]
+    )
+
+    audio_evaluate_btn.click(
+        lambda url, route: evaluate_model(route.strip("/"), url),
+        inputs=[audio_space_url, audio_route],
+        outputs=[audio_accuracy, audio_emissions, audio_energy, audio_results_json]
+    )
+
     audio_submit_btn.click(
         lambda results: submit_results("audio", results),
         inputs=[audio_results_json],
         outputs=None
     )
 
+    audio_submit_btn2.click(
+        fn=open_form,
+        inputs=[],
+        outputs=[]
+    )
+
 if __name__ == "__main__":
     demo.launch()
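
For readers who want to see the new behaviour in isolation, the sketch below reproduces the wiring this commit adds (a `FORM_URL` constant, an `open_form` handler built on `webbrowser`, and a button whose `click` event calls it with no inputs or outputs) as a minimal standalone Gradio demo. It is a sketch, not the app itself, and it assumes a Gradio version that provides the `gr.Info` toast (3.44 or later).

```python
# Minimal sketch of the pattern added in this commit, shown standalone.
# Note: webbrowser.open_new_tab opens the tab in the default browser of the
# machine running this script, i.e. the host serving the Gradio app.
import webbrowser

import gradio as gr

FORM_URL = "https://framaforms.org/2025-frugal-ai-challenge-submission-form-1736883260-0"


def open_form():
    webbrowser.open_new_tab(FORM_URL)
    # gr.Info displays a toast notification in the Gradio UI.
    gr.Info("Opening submission form in new tab...")


with gr.Blocks() as demo:
    submit_btn2 = gr.Button("3. Submit to final evaluation (form)", variant="primary")
    # Same wiring as text_submit_btn2 / image_submit_btn2 / audio_submit_btn2 in app.py.
    submit_btn2.click(fn=open_form, inputs=[], outputs=[])

if __name__ == "__main__":
    demo.launch()
```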
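
The updated usage steps (points 2-4 in the README hunk) assume the participant's Space exposes one HTTP route per task (``/text``, ``/image``, or ``/audio``) that `evaluate_model(route, url)` calls. The request and response schema is not part of this diff, so the snippet below is only a hypothetical illustration of reaching such a route with `requests`; the URL, HTTP method, and payload are placeholders, not the challenge's actual contract.

```python
# Hypothetical illustration only: the real payload/response schema used by
# evaluate_model is not shown in this diff.
import requests

space_url = "https://your-username-your-space.hf.space"  # placeholder Space URL
route = "text"  # or "image" / "audio", matching the optional route field in the UI

# Assumed here to be a POST endpoint; adjust to whatever the evaluation API expects.
response = requests.post(f"{space_url.rstrip('/')}/{route}", json={}, timeout=60)
response.raise_for_status()
print(response.json())
```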