Spaces:
Runtime error
Runtime error
Ahmed Ahmed
committed on
Commit
·
86c1853
1
Parent(s):
f02d36b
add histogram now
Browse files
app.py
CHANGED
@@ -48,7 +48,7 @@ def run_perplexity_test(model_name, revision, precision):
|
|
48 |
import gradio as gr
|
49 |
|
50 |
if not model_name:
|
51 |
-
return "Please enter a model name.", gr.update()
|
52 |
|
53 |
try:
|
54 |
# Use stderr for more reliable logging in HF Spaces
|
@@ -63,7 +63,7 @@ def run_perplexity_test(model_name, revision, precision):
|
|
63 |
sys.stderr.flush()
|
64 |
|
65 |
if success:
|
66 |
-
sys.stderr.write("Evaluation succeeded - updating results
|
67 |
sys.stderr.flush()
|
68 |
|
69 |
# Get updated results
|
@@ -74,11 +74,11 @@ def run_perplexity_test(model_name, revision, precision):
|
|
74 |
**Model**: {model_name}
|
75 |
**Perplexity Score**: {result:.4f}
|
76 |
|
77 |
-
🎉 **Results have been saved and
|
78 |
|
79 |
-
return success_msg, gr.update(value=updated_df)
|
80 |
else:
|
81 |
-
return f"❌ **Evaluation failed**: {result}", gr.update()
|
82 |
|
83 |
except Exception as e:
|
84 |
error_msg = str(e)
|
@@ -86,7 +86,7 @@ def run_perplexity_test(model_name, revision, precision):
|
|
86 |
sys.stderr.write(f"Critical error in run_perplexity_test: {error_msg}\n")
|
87 |
sys.stderr.write(f"Traceback: {traceback_str}\n")
|
88 |
sys.stderr.flush()
|
89 |
-
return f"❌ **Critical error**: {error_msg}", gr.update()
|
90 |
|
91 |
# Initialize results repository and directory
|
92 |
try:
|
@@ -181,7 +181,7 @@ with demo:
|
|
181 |
test_button.click(
|
182 |
run_perplexity_test,
|
183 |
[model_name, revision, precision],
|
184 |
-
[result, live_results_table]
|
185 |
)
|
186 |
|
187 |
demo.queue(default_concurrency_limit=5).launch()
|
|
|
48 |
import gradio as gr
|
49 |
|
50 |
if not model_name:
|
51 |
+
return "Please enter a model name.", gr.update(), gr.update()
|
52 |
|
53 |
try:
|
54 |
# Use stderr for more reliable logging in HF Spaces
|
|
|
63 |
sys.stderr.flush()
|
64 |
|
65 |
if success:
|
66 |
+
sys.stderr.write("Evaluation succeeded - updating both results tables\n")
|
67 |
sys.stderr.flush()
|
68 |
|
69 |
# Get updated results
|
|
|
74 |
**Model**: {model_name}
|
75 |
**Perplexity Score**: {result:.4f}
|
76 |
|
77 |
+
🎉 **Results have been saved and both tables have been updated!**"""
|
78 |
|
79 |
+
return success_msg, gr.update(value=updated_df), gr.update(value=updated_df)
|
80 |
else:
|
81 |
+
return f"❌ **Evaluation failed**: {result}", gr.update(), gr.update()
|
82 |
|
83 |
except Exception as e:
|
84 |
error_msg = str(e)
|
|
|
86 |
sys.stderr.write(f"Critical error in run_perplexity_test: {error_msg}\n")
|
87 |
sys.stderr.write(f"Traceback: {traceback_str}\n")
|
88 |
sys.stderr.flush()
|
89 |
+
return f"❌ **Critical error**: {error_msg}", gr.update(), gr.update()
|
90 |
|
91 |
# Initialize results repository and directory
|
92 |
try:
|
|
|
181 |
test_button.click(
|
182 |
run_perplexity_test,
|
183 |
[model_name, revision, precision],
|
184 |
+
[result, live_results_table, results_table]
|
185 |
)
|
186 |
|
187 |
demo.queue(default_concurrency_limit=5).launch()
|