kamil1300 committed
Commit 25e7bd7 · verified · 1 Parent(s): 33e9d10

Update app.py

Files changed (1)
  1. app.py +182 -334
app.py CHANGED
@@ -1,348 +1,216 @@
- """ Advanced Agent Evaluation Runner"""
  import os
  import inspect
  import gradio as gr
  import requests
  import pandas as pd
- import json
  from agent.agent import chat_with_agent

  # (Keep Constants as is)
  # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

- # --- Advanced Agent Definition ---
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
- class AdvancedAgent:
-     def __init__(self):
-         print("AdvancedAgent initialized with tools and capabilities.")
-
      def __call__(self, question: str) -> dict:
-         """
-         Process a question and return a structured response with answer and reasoning.
-         Returns: {"model_answer": "answer", "reasoning_trace": "reasoning"}
-         """
          print(f"Agent received question (first 50 chars): {question[:50]}...")

-         try:
-             # Get response from the agent
-             response = chat_with_agent(question)
-
-             # Extract the final answer from the response
-             # The agent is configured to end with "FINAL ANSWER: [answer]"
-             if "FINAL ANSWER:" in response:
-                 # Extract everything after "FINAL ANSWER:"
-                 final_answer = response.split("FINAL ANSWER:")[-1].strip()
-                 # Remove any extra formatting or newlines
-                 final_answer = final_answer.replace('\n', ' ').strip()
-             else:
-                 # If no FINAL ANSWER format, use the whole response
-                 final_answer = response.strip()
-
-             # Create the reasoning trace (the full response without the final answer)
-             reasoning_trace = response.strip()
-
-             return {
-                 "model_answer": final_answer,
-                 "reasoning_trace": reasoning_trace
-             }
-
-         except Exception as e:
-             print(f"Error in agent processing: {e}")
-             return {
-                 "model_answer": f"Error: {str(e)}",
-                 "reasoning_trace": f"Agent encountered an error while processing the question: {str(e)}"
-             }

  def run_and_submit_all( profile: gr.OAuthProfile | None):
      """
-     Fetches all questions, runs the AdvancedAgent on them, submits all answers,
      and displays the results.
      """
-     try:
-         # --- Determine HF Space Runtime URL and Repo URL ---
-         space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

-         if profile:
-             username= f"{profile.username}"
-             print(f"User logged in: {username}")
-         else:
-             print("User not logged in.")
-             return "Please Login to Hugging Face with the button.", None

-         api_url = DEFAULT_API_URL
-         questions_url = f"{api_url}/questions"
-         submit_url = f"{api_url}/submit"

-         # 1. Instantiate Agent ( modify this part to create your agent)
          try:
-             agent = AdvancedAgent()
          except Exception as e:
-             print(f"Error instantiating agent: {e}")
-             return f"Error initializing agent: {e}", None
-         # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
-         agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "https://huggingface.co/spaces/your-space-id/tree/main"
-         print(agent_code)
-
-         # 2. Fetch Questions
-         print(f"Fetching questions from: {questions_url}")
          try:
-             response = requests.get(questions_url, timeout=15)
-             response.raise_for_status()
-             questions_data = response.json()
-             if not questions_data:
-                 print("Fetched questions list is empty.")
-                 return "Fetched questions list is empty or invalid format.", None
-             print(f"Fetched {len(questions_data)} questions.")
-         except requests.exceptions.RequestException as e:
-             print(f"Error fetching questions: {e}")
-             return f"Error fetching questions: {e}", None
-         except requests.exceptions.JSONDecodeError as e:
-             print(f"Error decoding JSON response from questions endpoint: {e}")
-             print(f"Response text: {response.text[:500]}")
-             return f"Error decoding server response for questions: {e}", None
-         except Exception as e:
-             print(f"An unexpected error occurred fetching questions: {e}")
-             return f"An unexpected error occurred fetching questions: {e}", None

-         # 3. Run your Agent
-         results_log = []
-         answers_payload = []
-         print(f"Running agent on {len(questions_data)} questions...")
-         for item in questions_data:
-             task_id = item.get("task_id")
-             question_text = item.get("question")
-             if not task_id or question_text is None:
-                 print(f"Skipping item with missing task_id or question: {item}")
-                 continue
-             try:
-                 # Get structured response from agent
-                 agent_response = agent(question_text)
-
-                 # Extract model_answer and reasoning_trace
-                 model_answer = agent_response.get("model_answer", "")
-                 reasoning_trace = agent_response.get("reasoning_trace", "")
-
-                 # Ensure the answer is a string and not too long
-                 if not isinstance(model_answer, str):
-                     model_answer = str(model_answer)
-                 if len(model_answer) > 10000: # Limit answer length
-                     model_answer = model_answer[:10000] + "..."
-
-                 # Ensure reasoning_trace is a string and not too long
-                 if not isinstance(reasoning_trace, str):
-                     reasoning_trace = str(reasoning_trace)
-                 if len(reasoning_trace) > 50000: # Limit reasoning length
-                     reasoning_trace = reasoning_trace[:50000] + "..."
-
-                 # Create JSON-line format entry
-                 json_line_entry = {
-                     "task_id": task_id,
-                     "model_answer": model_answer,
-                     "reasoning_trace": reasoning_trace
-                 }
-
-                 answers_payload.append(json_line_entry)
-
-                 # For display in the table, show truncated versions
-                 display_question = question_text[:200] + "..." if len(question_text) > 200 else question_text
-                 display_answer = model_answer[:200] + "..." if len(model_answer) > 200 else model_answer
-                 display_reasoning = reasoning_trace[:200] + "..." if len(reasoning_trace) > 200 else reasoning_trace
-
-                 results_log.append({
-                     "Task ID": task_id,
-                     "Question": display_question,
-                     "Model Answer": display_answer,
-                     "Reasoning Trace": display_reasoning
-                 })
-
-             except Exception as e:
-                 print(f"Error running agent on task {task_id}: {e}")
-                 error_response = {
-                     "task_id": task_id,
-                     "model_answer": f"AGENT ERROR: {e}",
-                     "reasoning_trace": f"Agent encountered an error while processing the question: {str(e)}"
-                 }
-                 answers_payload.append(error_response)
-                 results_log.append({
-                     "Task ID": task_id,
-                     "Question": question_text[:200] + "..." if question_text and len(question_text) > 200 else question_text,
-                     "Model Answer": f"AGENT ERROR: {e}",
-                     "Reasoning Trace": f"Error occurred during processing"
-                 })

-         if not answers_payload:
-             print("Agent did not produce any answers to submit.")
-             return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

-         # 4. Prepare Submission
-         submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-         status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-         print(status_update)

-         # 5. Submit
-         print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
-         try:
-             response = requests.post(submit_url, json=submission_data, timeout=60)
-             response.raise_for_status()
-             result_data = response.json()
-             final_status = (
-                 f"Submission Successful!\n"
-                 f"User: {result_data.get('username')}\n"
-                 f"Overall Score: {result_data.get('score', 'N/A')}% "
-                 f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-                 f"Message: {result_data.get('message', 'No message received.')}"
-             )
-             print("Submission successful.")
-             results_df = pd.DataFrame(results_log)
-             return final_status, results_df
-         except requests.exceptions.HTTPError as e:
-             error_detail = f"Server responded with status {e.response.status_code}."
-             try:
-                 error_json = e.response.json()
-                 error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-             except requests.exceptions.JSONDecodeError:
-                 error_detail += f" Response: {e.response.text[:500]}"
-             status_message = f"Submission Failed: {error_detail}"
-             print(status_message)
-             results_df = pd.DataFrame(results_log)
-             return status_message, results_df
-         except requests.exceptions.Timeout:
-             status_message = "Submission Failed: The request timed out."
-             print(status_message)
-             results_df = pd.DataFrame(results_log)
-             return status_message, results_df
-         except requests.exceptions.RequestException as e:
-             status_message = f"Submission Failed: Network error - {e}"
-             print(status_message)
-             results_df = pd.DataFrame(results_log)
-             return status_message, results_df
-         except Exception as e:
-             status_message = f"An unexpected error occurred during submission: {e}"
-             print(status_message)
-             results_df = pd.DataFrame(results_log)
-             return status_message, results_df
-     except Exception as e:
-         error_msg = f"Critical error in run_and_submit_all: {str(e)}"
-         print(error_msg)
-         return error_msg, None

- # --- Simple Command Line Interface (bypasses HTTP issues) ---
- def run_cli_interface():
-     """Run a simple command-line interface to test the agent."""
-     print("=" * 60)
-     print("ADVANCED AGENT EVALUATION - COMMAND LINE INTERFACE")
-     print("=" * 60)
-     print()
-
-     # Get username
-     username = input("Enter your Hugging Face username: ").strip()
-     if not username:
-         print("Username is required. Exiting.")
-         return
-
-     print(f"\nUsing username: {username}")
-     print("Starting evaluation...")
-     print("-" * 40)
-
-     # Create mock profile
-     class MockProfile:
-         def __init__(self, username):
-             self.username = username
-
-     profile = MockProfile(username)
-
-     # Run evaluation
-     try:
-         status, results_df = run_and_submit_all(profile)
-         print("\n" + "=" * 60)
-         print("EVALUATION RESULTS")
-         print("=" * 60)
-         print(status)
-
-         if results_df is not None and not results_df.empty:
-             print(f"\nProcessed {len(results_df)} questions:")
-             print(results_df.to_string(index=False))
-         else:
-             print("\nNo results to display.")
-
-     except Exception as e:
-         print(f"\nError during evaluation: {e}")
-         import traceback
-         traceback.print_exc()

- # --- Ultra-Minimal Web Interface (fallback only) ---
- def create_minimal_interface():
-     """Create an ultra-minimal interface that might work around HTTP issues."""
-
-     def simple_evaluation(username):
-         """Simplified evaluation function."""
-         if not username or username.strip() == "":
-             return "Please enter your Hugging Face username.", None
-
-         # Create a mock profile object
-         class MockProfile:
-             def __init__(self, username):
-                 self.username = username
-
-         profile = MockProfile(username.strip())
-         return run_and_submit_all(profile)
-
-     # Ultra-minimal interface with minimal components
-     interface = gr.Interface(
-         fn=simple_evaluation,
-         inputs=gr.Textbox(label="Username", placeholder="your_username"),
-         outputs=[
-             gr.Textbox(label="Status"),
-             gr.DataFrame()
-         ],
-         title="Agent Evaluation",
-         description="Enter username and click Submit. Use CLI mode if you get HTTP errors.",
-         flagging_mode="never"
-     )
-
-     return interface
-
- # --- Build Gradio Interface using Interface (minimal working version) ---
- def create_interface():
-     """Create a minimal working Gradio interface."""
-
-     def run_evaluation(username):
-         """Wrapper function for the evaluation."""
-         if not username or username.strip() == "":
-             return "Please enter your Hugging Face username.", None
-
-         # Create a mock profile object
-         class MockProfile:
-             def __init__(self, username):
-                 self.username = username
-
-         profile = MockProfile(username.strip())
-         return run_and_submit_all(profile)
-
-     # Create a simple interface without LoginButton
-     interface = gr.Interface(
-         fn=run_evaluation,
-         inputs=gr.Textbox(label="Enter your Hugging Face username", placeholder="your_username"),
-         outputs=[
-             gr.Textbox(label="Status", lines=5),
-             gr.DataFrame(label="Results")
-         ],
-         title="Advanced Agent Evaluation Runner",
-         description="""
-         **Instructions:**
-         1. Enter your Hugging Face username in the text box below.
-         2. Click 'Submit' to fetch questions, run your agent, and submit answers.
-
-         **Note:** This will take some time as the agent processes all questions.
-         """,
-         flagging_mode="never"
      )
-
-     return interface
-
- # Create the interface - try minimal first
- demo = create_minimal_interface()

  if __name__ == "__main__":
      print("\n" + "-"*30 + " App Starting " + "-"*30)
@@ -365,25 +233,5 @@ if __name__ == "__main__":

      print("-"*(60 + len(" App Starting ")) + "\n")

-     # Check if we should run CLI mode (recommended)
-     import sys
-     if len(sys.argv) > 1 and sys.argv[1] == "--cli":
-         print("Running in CLI mode...")
-         run_cli_interface()
-     else:
-         print("⚠️ IMPORTANT: Hugging Face Spaces may have HTTP protocol errors.")
-         print("💡 RECOMMENDED: Use CLI mode for reliable operation:")
-         print(" python app.py --cli")
-         print()
-         print("Launching minimal web interface...")
-         print("If you encounter HTTP errors, please use the CLI mode instead.")
-
-         # Ultra-minimal launch configuration
-         try:
-             demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
-         except Exception as e:
-             print(f"❌ Web interface failed to launch: {e}")
-             print("💡 Please use CLI mode instead: python app.py --cli")
-             print("Starting CLI mode automatically...")
-             run_cli_interface()
-
 
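The listing above is the previous version of app.py, with removed lines marked "-"; the updated file follows, with added lines marked "+". One behavioral change worth noting: the removed AdvancedAgent parsed a trailing "FINAL ANSWER:" marker out of the model response, while the new BasicAgent returns the chat_with_agent output unchanged in both fields. If that parsing step is still wanted, it could be kept as a small helper. The sketch below is illustrative only; the name extract_final_answer is hypothetical and not part of this commit.

# Illustrative sketch, not part of this commit: the "FINAL ANSWER:" parsing
# that the removed AdvancedAgent performed, as a standalone helper.
def extract_final_answer(response: str) -> str:
    if "FINAL ANSWER:" in response:
        # Keep only the text after the last marker
        answer = response.split("FINAL ANSWER:")[-1]
    else:
        # No marker found: fall back to the whole response
        answer = response
    # Collapse newlines, as the removed code did
    return answer.replace("\n", " ").strip()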
+ """ Basic Agent Evaluation Runner"""
  import os
  import inspect
  import gradio as gr
  import requests
  import pandas as pd
  from agent.agent import chat_with_agent

+
+
  # (Keep Constants as is)
  # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

+ # --- Basic Agent Definition ---
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
+
+
+ class BasicAgent:
      def __call__(self, question: str) -> dict:
          print(f"Agent received question (first 50 chars): {question[:50]}...")
+         # Get response from the agent
+         answer = chat_with_agent(question)

+         # Return in the format expected by the API
+         return {
+             "model_answer": answer,
+             "reasoning_trace": answer # Using the full response as reasoning trace
+         }
+
 
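For a quick local check, the new BasicAgent can be exercised on a single question before running the full evaluation. A minimal sketch, assuming the file is importable as app from the Space's working directory; the sample question is illustrative and not part of this commit:

# Minimal smoke test for BasicAgent (sketch, not part of this commit).
from app import BasicAgent  # assumes app.py is on the import path

agent = BasicAgent()
result = agent("What is the capital of France?")  # illustrative question
print(result["model_answer"])     # raw answer from chat_with_agent
print(result["reasoning_trace"])  # currently identical to the answer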

  def run_and_submit_all( profile: gr.OAuthProfile | None):
      """
+     Fetches all questions, runs the BasicAgent on them, submits all answers,
      and displays the results.
      """
+     # --- Determine HF Space Runtime URL and Repo URL ---
+     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

+     if profile:
+         username= f"{profile.username}"
+         print(f"User logged in: {username}")
+     else:
+         print("User not logged in.")
+         return "Please Login to Hugging Face with the button.", None

+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"

+     # 1. Instantiate Agent ( modify this part to create your agent)
+     try:
+         agent = BasicAgent()
+     except Exception as e:
+         print(f"Error instantiating agent: {e}")
+         return f"Error initializing agent: {e}", None
+     # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
+     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+     print(agent_code)
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except requests.exceptions.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
+
+     # 3. Run your Agent
+     results_log = []
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
          try:
+             # Get structured response from agent
+             agent_response = agent(question_text)
+
+             # Extract model_answer and reasoning_trace
+             model_answer = agent_response.get("model_answer", "")
+             reasoning_trace = agent_response.get("reasoning_trace", "")
+
+             # Create JSON-line format entry
+             json_line_entry = {
+                 "task_id": task_id,
+                 "model_answer": model_answer,
+                 "reasoning_trace": reasoning_trace
+             }
+
+             answers_payload.append(json_line_entry)
+
+             # For display in the table, show truncated versions
+             display_question = question_text[:200] + "..." if len(question_text) > 200 else question_text
+             display_answer = model_answer[:200] + "..." if len(model_answer) > 200 else model_answer
+
+             results_log.append({
+                 "Task ID": task_id,
+                 "Question": display_question,
+                 "Model Answer": display_answer
+             })
+
          except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             error_response = {
+                 "task_id": task_id,
+                 "model_answer": f"AGENT ERROR: {e}",
+                 "reasoning_trace": f"Agent encountered an error while processing the question: {str(e)}"
+             }
+             answers_payload.append(error_response)
+             results_log.append({
+                 "Task ID": task_id,
+                 "Question": question_text[:200] + "..." if question_text and len(question_text) > 200 else question_text,
+                 "Model Answer": f"AGENT ERROR: {e}"
+             })
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.HTTPError as e:
+         error_detail = f"Server responded with status {e.response.status_code}."
          try:
+             error_json = e.response.json()
+             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except requests.exceptions.JSONDecodeError:
+             error_detail += f" Response: {e.response.text[:500]}"
+         status_message = f"Submission Failed: {error_detail}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.Timeout:
+         status_message = "Submission Failed: The request timed out."
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.RequestException as e:
+         status_message = f"Submission Failed: Network error - {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except Exception as e:
+         status_message = f"An unexpected error occurred during submission: {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df


+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Basic Agent Evaluation Runner")
+     gr.Markdown(
+         """
+         **Instructions:**
+         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
+         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+         ---
+         **Disclaimers:**
+         Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
+         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
+         """
+     )

+     gr.LoginButton()

+     run_button = gr.Button("Run Evaluation & Submit All Answers")

+     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+     # Removed max_rows=10 from DataFrame constructor
+     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
      )

  if __name__ == "__main__":
      print("\n" + "-"*30 + " App Starting " + "-"*30)
@@ -365,25 +233,5 @@ if __name__ == "__main__":

      print("-"*(60 + len(" App Starting ")) + "\n")

+     print("Launching Gradio Interface for Basic Agent Evaluation...")
+     demo.launch(debug=True, share=True)
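
The Disclaimers above suggest caching answers and submitting them in a separate action (or answering the questions asynchronously) to avoid the long-running submit click. A rough sketch of that idea; the helper names cache_answers and submit_cached and the local cache file are hypothetical and not part of this commit:

# Hypothetical caching flow suggested by the Disclaimers (sketch only).
import json
import requests

CACHE_FILE = "answers_cache.json"  # assumed local cache path

def cache_answers(questions, agent):
    # Run the agent once over all questions and persist the payload locally.
    payload = [{"task_id": q["task_id"], **agent(q["question"])} for q in questions]
    with open(CACHE_FILE, "w") as f:
        json.dump(payload, f)
    return payload

def submit_cached(username, agent_code, submit_url):
    # Submit previously cached answers in a separate, fast action.
    with open(CACHE_FILE) as f:
        answers = json.load(f)
    resp = requests.post(
        submit_url,
        json={"username": username, "agent_code": agent_code, "answers": answers},
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json()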