rymc committed
Commit c9518d2 · verified · 1 Parent(s): e35f81a

Update app.py

Files changed (1)
  1. app.py +316 -316
app.py CHANGED
@@ -1,317 +1,317 @@
import gradio as gr
import pandas as pd
from pathlib import Path
import plotly.express as px
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
import base64


def restart_space():
    API.restart_space(repo_id=REPO_ID)



def make_rate_chart(df: pd.DataFrame):
    """Return a Plotly bar chart of hallucination rates."""
    # long-form dataframe for grouped bars
    df_long = df.melt(
        id_vars="Models",
        value_vars=["RAG Hallucination Rate (%)", "Non-RAG Hallucination Rate (%)"],
        var_name="Benchmark",
        value_name="Rate",
    )
    fig = px.bar(
        df_long,
        x="Models",
        y="Rate",
        color="Benchmark",
        barmode="group",
        title="Hallucination Rates by Model",
        height=400,
    )
    fig.update_layout(xaxis_title="", yaxis_title="%")
    return fig

def make_leaderboard_plot(df: pd.DataFrame, col: str, title: str, bar_color: str):
    """
    Return a horizontal bar chart sorted descending by `col`, so the
    lowest value (best) appears at the top.
    """
    df_sorted = df.sort_values(col, ascending=False)  # worst at bottom, best (lowest) at top
    fig = px.bar(
        df_sorted,
        x=col,
        y="Models",
        orientation="h",
        title=title,
        text_auto=".2f",
        height=400,
        color_discrete_sequence=[bar_color],
    )
    fig.update_traces(textposition="outside", cliponaxis=False)

    fig.update_layout(
        xaxis_title="Hallucination Rate (%)",
        yaxis_title="",
        yaxis=dict(dtick=1),  # ensure every model shown
        margin=dict(l=140, r=60, t=60, b=40)
    )
    fig.update_traces(textposition="outside")
    return fig


def color_scale(s, cmap):
    """
    Return background-colour styles for a numeric Series (lower = greener,
    higher = redder). Works with any palette length.
    """
    colours = px.colors.sequential.__dict__[cmap]
    n = len(colours) - 1  # max valid index

    rng = s.max() - s.min()
    norm = (s - s.min()) / (rng if rng else 1)

    return [f"background-color:{colours[int(v * n)]}" for v in 1 - norm]


### Space initialisation
try:
    print(EVAL_REQUESTS_PATH)
    snapshot_download(
        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception as e:
    # restart_space()
    print(f"[WARN] Skipping REQUESTS sync: {e}")
try:
    print(EVAL_RESULTS_PATH)
    snapshot_download(
        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception as e:
    # restart_space()
    print(f"[WARN] Skipping RESULTS sync: {e}")


# LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
LEADERBOARD_DF = get_leaderboard_df("leaderboard/data/leaderboard.csv")

# (
#     finished_eval_queue_df,
#     running_eval_queue_df,
#     pending_eval_queue_df,
# ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

def init_leaderboard(df: pd.DataFrame):
    if df is None or df.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")

    return Leaderboard(
        value=df,
        datatype=["markdown", "markdown", "number", "number", "number"],
        select_columns=SelectColumns(
            default_selection=[
                "Rank", "Models",
                "Average Hallucination Rate (%)",
                "RAG Hallucination Rate (%)",
                "Non-RAG Hallucination Rate (%)"
            ],
            cant_deselect=["Models", "Rank"],
            label="Select Columns to Display:",
        ),
        search_columns=["Models"],
        # column_widths=["3%"],
        bool_checkboxgroup_label=None,
        interactive=False,
    )

image_path = "static/kluster-color.png"
with open(image_path, "rb") as img_file:
    b64_string = base64.b64encode(img_file.read()).decode("utf-8")


# print("CUSTOM CSS\n", custom_css[-1000:], "\n---------")
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(f"""
    <div style="text-align: center; margin-top: 2em; margin-bottom: 1em;">
        <img src="data:image/png;base64,{b64_string}" alt="kluster.ai logo"
             style="height: 80px; display: block; margin-left: auto; margin-right: auto;" />

        <div style="font-size: 2.5em; font-weight: bold; margin-top: 0.4em; color: var(--text-color);">
            LLM Hallucination Detection Leaderboard
        </div>

        <div style="font-size: 1.5em; margin-top: 0.5em;">
-            Evaluating factual accuracy and faithfulness of LLMs in both RAG and real-world knowledge settings with
+            Evaluating factual accuracy and faithfulness of LLMs in both RAG and non-RAG settings with
            <a href="https://platform.kluster.ai/verify" target="_blank">
                Verify
            </a> by
            <a href="https://platform.kluster.ai/" target="_blank">
                kluster.ai
            </a>
        </div>
    </div>
    """)


    # gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 Hallucination Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
            # ---------- Chart ----------
            with gr.Row():
                gr.Plot(
                    make_leaderboard_plot(
                        LEADERBOARD_DF,
                        "RAG Hallucination Rate (%)",
                        "RAG Hallucination Rate (lower is better)",
                        bar_color="#4CAF50",
                    ),
                    show_label=False,
                )
                gr.Plot(
                    make_leaderboard_plot(
                        LEADERBOARD_DF,
                        "Non-RAG Hallucination Rate (%)",
                        "Non-RAG Hallucination Rate (lower is better)",
                        bar_color="#FF7043",
                    ),
                    show_label=False,
                )

            # ---------- Leaderboard ----------
            leaderboard = init_leaderboard(LEADERBOARD_DF)

        with gr.TabItem("📝 Details", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown((Path(__file__).parent / "docs.md").read_text())

        with gr.TabItem("🚀 Submit Here! ", elem_id="llm-benchmark-tab-table", id=3):
            gr.Markdown((Path(__file__).parent / "submit.md").read_text())

            # with gr.Column():
            #     with gr.Row():
            #         gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")

            #     with gr.Column():
            #         with gr.Accordion(
            #             f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
            #             open=False,
            #         ):
            #             with gr.Row():
            #                 finished_eval_table = gr.components.Dataframe(
            #                     value=finished_eval_queue_df,
            #                     headers=EVAL_COLS,
            #                     datatype=EVAL_TYPES,
            #                     row_count=5,
            #                 )
            #         with gr.Accordion(
            #             f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
            #             open=False,
            #         ):
            #             with gr.Row():
            #                 running_eval_table = gr.components.Dataframe(
            #                     value=running_eval_queue_df,
            #                     headers=EVAL_COLS,
            #                     datatype=EVAL_TYPES,
            #                     row_count=5,
            #                 )

            #         with gr.Accordion(
            #             f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
            #             open=False,
            #         ):
            #             with gr.Row():
            #                 pending_eval_table = gr.components.Dataframe(
            #                     value=pending_eval_queue_df,
            #                     headers=EVAL_COLS,
            #                     datatype=EVAL_TYPES,
            #                     row_count=5,
            #                 )
            # with gr.Row():
            #     gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

            # with gr.Row():
            #     with gr.Column():
            #         model_name_textbox = gr.Textbox(label="Model name")
            #         revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
            #         model_type = gr.Dropdown(
            #             choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
            #             label="Model type",
            #             multiselect=False,
            #             value=None,
            #             interactive=True,
            #         )

            #     with gr.Column():
            #         precision = gr.Dropdown(
            #             choices=[i.value.name for i in Precision if i != Precision.Unknown],
            #             label="Precision",
            #             multiselect=False,
            #             value="float16",
            #             interactive=True,
            #         )
            #         weight_type = gr.Dropdown(
            #             choices=[i.value.name for i in WeightType],
            #             label="Weights type",
            #             multiselect=False,
            #             value="Original",
            #             interactive=True,
            #         )
            #         base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

            #     submit_button = gr.Button("Submit Eval")
            #     submission_result = gr.Markdown()
            #     submit_button.click(
            #         add_new_eval,
            #         [
            #             model_name_textbox,
            #             base_model_name_textbox,
            #             revision_name_textbox,
            #             precision,
            #             weight_type,
            #             model_type,
            #         ],
            #         submission_result,
            #     )

    # with gr.Row():
    #     with gr.Accordion("📙 Citation", open=False):
    #         citation_button = gr.Textbox(
    #             value=CITATION_BUTTON_TEXT,
    #             label=CITATION_BUTTON_LABEL,
    #             lines=20,
    #             elem_id="citation-button",
    #             show_copy_button=True,
    #         )

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()
 