kamil1300 committed on
Commit e50c712 · verified · 1 Parent(s): 9c4e5f0

Update app.py

Files changed (1)
  1. app.py +290 -115
app.py CHANGED
@@ -1,147 +1,322 @@
  import os
  import gradio as gr
  import requests
  import pandas as pd

  # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

- # --- Basic Agent Definition ---
- class BasicAgent:
      def __init__(self):
-         print(" BasicAgent initialized.")
-     def __call__(self, question: str) -> str:
-         print(f"🧠 Agent received question: {question[:50]}...")
-         return "This is a default answer."
-
- def run_and_submit_all(profile: gr.OAuthProfile | None):
-     space_id = os.getenv("SPACE_ID")
-
-     if profile:
-         username = profile.username
-         print(f"👤 Logged in as: {username}")
-     else:
-         print("❌ User not logged in.")
-         return "Please Login to Hugging Face using the button above.", None
-
-     api_url = DEFAULT_API_URL
-     questions_url = f"{api_url}/questions"
-     submit_url = f"{api_url}/submit"
-
-     try:
-         agent = BasicAgent()
-     except Exception as e:
-         return f"❌ Error initializing agent: {e}", None
-
-     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

      try:
-         print(f"📥 Fetching questions from: {questions_url}")
-         response = requests.get(questions_url, timeout=15)
-         response.raise_for_status()
-         questions_data = response.json()
-
-         if not questions_data:
-             return "❌ No questions received from API.", None

-         # ✅ Limit to first 20 for safer UI
-         questions_data = questions_data[:20]

-         print(f"✅ Fetched {len(questions_data)} questions.")
-     except Exception as e:
-         return f"❌ Failed to fetch questions: {e}", None

-     results_log = []
-     answers_payload = []

-     for item in questions_data:
-         task_id = item.get("task_id")
-         question_text = item.get("question")
-         if not task_id or question_text is None:
-             continue
          try:
-             submitted_answer = agent(question_text)
-             answers_payload.append({
-                 "task_id": task_id,
-                 "submitted_answer": submitted_answer
-             })
-             results_log.append({
-                 "Task ID": task_id,
-                 "Question": question_text,
-                 "Submitted Answer": submitted_answer
-             })
          except Exception as e:
-             results_log.append({
-                 "Task ID": task_id,
-                 "Question": question_text,
-                 "Submitted Answer": f"ERROR: {e}"
-             })
-
-     if not answers_payload:
-         return "❌ Agent did not answer any questions.", pd.DataFrame(results_log)
-
-     submission_data = {
-         "username": username.strip(),
-         "agent_code": agent_code,
-         "answers": answers_payload
-     }

-     try:
-         print("📤 Submitting answers...")
-         response = requests.post(submit_url, json=submission_data, timeout=60)
-         response.raise_for_status()
-         result_data = response.json()
-
-         # Minified message to prevent Content-Length errors
-         final_status = (
-             f" Submission Successful\n"
-             f"User: {result_data.get('username')}\n"
-             f"Score: {result_data.get('score', 'N/A')}%\n"
-             f"Correct: {result_data.get('correct_count', '?')}/"
-             f"{result_data.get('total_attempted', '?')}"
-         )

-         # Limit rows for Hugging Face display
-         results_df = pd.DataFrame(results_log).head(20)
-         return final_status, results_df

-     except Exception as e:
-         return f" Submission failed: {e}", pd.DataFrame(results_log)

- # --- Gradio Interface ---
- with gr.Blocks() as demo:
-     gr.Markdown("## 🧪 Basic Agent Evaluation Runner")
-     gr.Markdown("""
-     **Instructions:**

-     1. Log in with your Hugging Face account below.
-     2. Click the button to run your agent on 20 questions and auto-submit the results.
-     3. Score and output will be displayed.

-     _Note: This is optimized for Hugging Face Spaces and avoids large UI payloads._
-     """)

-     gr.LoginButton()
-     run_button = gr.Button("Run Evaluation & Submit All Answers")

-     status_output = gr.TextArea(
-         label="Run Status / Submission Result",
-         lines=5,
-         max_lines=10,
-         interactive=False
-     )

-     results_table = gr.DataFrame(
-         label="Agent Answers (First 50)",
-         wrap=True
-     )

      run_button.click(
-         fn=run_and_submit_all,
          outputs=[status_output, results_table]
      )

- # --- Run the Gradio App ---
  if __name__ == "__main__":
-     print("🚀 Launching Gradio App for Hugging Face Agent Evaluation")
-     demo.launch(debug=True, share=False)
  import os
  import gradio as gr
  import requests
+ import inspect
  import pandas as pd
+ import json
+ from agent.agent import chat_with_agent

+ # (Keep Constants as is)
  # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

+ # --- Agent Definition ---
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
+ class AdvancedAgent:
      def __init__(self):
+         print("AdvancedAgent initialized with tools and capabilities.")
+
+     def __call__(self, question: str) -> dict:
+         """
+         Process a question and return a structured response with answer and reasoning.
+         Returns: {"model_answer": "answer", "reasoning_trace": "reasoning"}
+         """
+         print(f"Agent received question (first 50 chars): {question[:50]}...")
+
+         try:
+             # Get response from the agent
+             response = chat_with_agent(question)
+
+             # Extract the final answer from the response
+             # The agent is configured to end with "FINAL ANSWER: [answer]"
+             if "FINAL ANSWER:" in response:
+                 # Extract everything after "FINAL ANSWER:"
+                 final_answer = response.split("FINAL ANSWER:")[-1].strip()
+                 # Remove any extra formatting or newlines
+                 final_answer = final_answer.replace('\n', ' ').strip()
+             else:
+                 # If there is no FINAL ANSWER marker, use the whole response
+                 final_answer = response.strip()
+
+             # Keep the full response as the reasoning trace
+             reasoning_trace = response.strip()
+
+             return {
+                 "model_answer": final_answer,
+                 "reasoning_trace": reasoning_trace
+             }
+
+         except Exception as e:
+             print(f"Error in agent processing: {e}")
+             return {
+                 "model_answer": f"Error: {str(e)}",
+                 "reasoning_trace": f"Agent encountered an error while processing the question: {str(e)}"
+             }
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     """
+     Fetches all questions, runs the AdvancedAgent on them, submits all answers,
+     and displays the results.
+     """
      try:
+         # --- Determine HF Space Runtime URL and Repo URL ---
+         space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending a link to the code

+         if profile:
+             username = f"{profile.username}"
+             print(f"User logged in: {username}")
+         else:
+             print("User not logged in.")
+             return "Please Login to Hugging Face with the button.", None

+         api_url = DEFAULT_API_URL
+         questions_url = f"{api_url}/questions"
+         submit_url = f"{api_url}/submit"

+         # 1. Instantiate Agent (modify this part to create your agent)
+         try:
+             agent = AdvancedAgent()
+         except Exception as e:
+             print(f"Error instantiating agent: {e}")
+             return f"Error initializing agent: {e}", None
+         # In the case of an app running as a Hugging Face Space, this link points toward your codebase (useful for others, so please keep it public)
+         agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "https://huggingface.co/spaces/your-space-id/tree/main"
+         print(agent_code)

+         # 2. Fetch Questions
+         print(f"Fetching questions from: {questions_url}")
          try:
+             response = requests.get(questions_url, timeout=15)
+             response.raise_for_status()
+             questions_data = response.json()
+             if not questions_data:
+                 print("Fetched questions list is empty.")
+                 return "Fetched questions list is empty or invalid format.", None
+             print(f"Fetched {len(questions_data)} questions.")
+         except requests.exceptions.RequestException as e:
+             print(f"Error fetching questions: {e}")
+             return f"Error fetching questions: {e}", None
+         except requests.exceptions.JSONDecodeError as e:
+             print(f"Error decoding JSON response from questions endpoint: {e}")
+             print(f"Response text: {response.text[:500]}")
+             return f"Error decoding server response for questions: {e}", None
          except Exception as e:
+             print(f"An unexpected error occurred fetching questions: {e}")
+             return f"An unexpected error occurred fetching questions: {e}", None

+         # 3. Run your Agent
+         results_log = []
+         answers_payload = []
+         print(f"Running agent on {len(questions_data)} questions...")
+         for item in questions_data:
+             task_id = item.get("task_id")
+             question_text = item.get("question")
+             if not task_id or question_text is None:
+                 print(f"Skipping item with missing task_id or question: {item}")
+                 continue
+             try:
+                 # Get structured response from agent
+                 agent_response = agent(question_text)
+
+                 # Extract model_answer and reasoning_trace
+                 model_answer = agent_response.get("model_answer", "")
+                 reasoning_trace = agent_response.get("reasoning_trace", "")
+
+                 # Ensure the answer is a string and not too long
+                 if not isinstance(model_answer, str):
+                     model_answer = str(model_answer)
+                 if len(model_answer) > 10000:  # Limit answer length
+                     model_answer = model_answer[:10000] + "..."
+
+                 # Ensure reasoning_trace is a string and not too long
+                 if not isinstance(reasoning_trace, str):
+                     reasoning_trace = str(reasoning_trace)
+                 if len(reasoning_trace) > 50000:  # Limit reasoning length
+                     reasoning_trace = reasoning_trace[:50000] + "..."
+
+                 # Create JSON-line format entry
+                 json_line_entry = {
+                     "task_id": task_id,
+                     "model_answer": model_answer,
+                     "reasoning_trace": reasoning_trace
+                 }
+
+                 answers_payload.append(json_line_entry)
+
+                 # For display in the table, show truncated versions
+                 display_question = question_text[:200] + "..." if len(question_text) > 200 else question_text
+                 display_answer = model_answer[:200] + "..." if len(model_answer) > 200 else model_answer
+                 display_reasoning = reasoning_trace[:200] + "..." if len(reasoning_trace) > 200 else reasoning_trace
+
+                 results_log.append({
+                     "Task ID": task_id,
+                     "Question": display_question,
+                     "Model Answer": display_answer,
+                     "Reasoning Trace": display_reasoning
+                 })
+
+             except Exception as e:
+                 print(f"Error running agent on task {task_id}: {e}")
+                 error_response = {
+                     "task_id": task_id,
+                     "model_answer": f"AGENT ERROR: {e}",
+                     "reasoning_trace": f"Agent encountered an error while processing the question: {str(e)}"
+                 }
+                 answers_payload.append(error_response)
+                 results_log.append({
+                     "Task ID": task_id,
+                     "Question": question_text[:200] + "..." if question_text and len(question_text) > 200 else question_text,
+                     "Model Answer": f"AGENT ERROR: {e}",
+                     "Reasoning Trace": "Error occurred during processing"
+                 })

+         if not answers_payload:
+             print("Agent did not produce any answers to submit.")
+             return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

+         # 4. Prepare Submission
+         submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+         status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+         print(status_update)

+         # 5. Submit
+         print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+         try:
+             response = requests.post(submit_url, json=submission_data, timeout=60)
+             response.raise_for_status()
+             result_data = response.json()
+             final_status = (
+                 f"Submission Successful!\n"
+                 f"User: {result_data.get('username')}\n"
+                 f"Overall Score: {result_data.get('score', 'N/A')}% "
+                 f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+                 f"Message: {result_data.get('message', 'No message received.')}"
+             )
+             print("Submission successful.")
+             results_df = pd.DataFrame(results_log)
+             return final_status, results_df
+         except requests.exceptions.HTTPError as e:
+             error_detail = f"Server responded with status {e.response.status_code}."
+             try:
+                 error_json = e.response.json()
+                 error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+             except requests.exceptions.JSONDecodeError:
+                 error_detail += f" Response: {e.response.text[:500]}"
+             status_message = f"Submission Failed: {error_detail}"
+             print(status_message)
+             results_df = pd.DataFrame(results_log)
+             return status_message, results_df
+         except requests.exceptions.Timeout:
+             status_message = "Submission Failed: The request timed out."
+             print(status_message)
+             results_df = pd.DataFrame(results_log)
+             return status_message, results_df
+         except requests.exceptions.RequestException as e:
+             status_message = f"Submission Failed: Network error - {e}"
+             print(status_message)
+             results_df = pd.DataFrame(results_log)
+             return status_message, results_df
+         except Exception as e:
+             status_message = f"An unexpected error occurred during submission: {e}"
+             print(status_message)
+             results_df = pd.DataFrame(results_log)
+             return status_message, results_df
+     except Exception as e:
+         error_msg = f"Critical error in run_and_submit_all: {str(e)}"
+         print(error_msg)
+         return error_msg, None
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks(
+     title="Advanced Agent Evaluation Runner",
+     theme=gr.themes.Soft(),
+     css="""
+     .gradio-container {
+         max-width: 1200px !important;
+     }
+     """
+ ) as demo:
+     gr.Markdown("# Advanced Agent Evaluation Runner")
+     gr.Markdown(
+         """
+         **Instructions:**
+         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
+         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+         ---
+         **Disclaimers:**
+         Once you click the submit button, it can take quite some time (this is the time the agent needs to go through all the questions).
+         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the long wait after clicking the submit button, one solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
+         """
+     )

+     with gr.Row():
+         login_button = gr.LoginButton(label="Login to Hugging Face")
+         run_button = gr.Button("Run Evaluation & Submit All Answers", variant="primary")

+     with gr.Row():
+         status_output = gr.Textbox(
+             label="Run Status / Submission Result",
+             lines=5,
+             interactive=False,
+             max_lines=10
+         )
+
+     with gr.Row():
+         results_table = gr.DataFrame(
+             label="Questions and Agent Answers",
+             wrap=True,
+             max_rows=50,
+             height=400
+         )

+     # Add error handling to the button click
+     def safe_run_and_submit(profile):
+         try:
+             return run_and_submit_all(profile)
+         except Exception as e:
+             error_msg = f"An error occurred: {str(e)}"
+             print(f"Error in safe_run_and_submit: {e}")
+             return error_msg, None

      run_button.click(
+         fn=safe_run_and_submit,
+         inputs=[login_button],
          outputs=[status_output, results_table]
      )

  if __name__ == "__main__":
+     print("\n" + "-"*30 + " App Starting " + "-"*30)
+     # Check for SPACE_HOST and SPACE_ID at startup for information
+     space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
+
+     if space_host_startup:
+         print(f"✅ SPACE_HOST found: {space_host_startup}")
+         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
+     else:
+         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
+
+     if space_id_startup:  # Print repo URLs if SPACE_ID is found
+         print(f"✅ SPACE_ID found: {space_id_startup}")
+         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+     else:
+         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+
+     print("-"*(60 + len(" App Starting ")) + "\n")
+
+     print("Launching Gradio Interface for Advanced Agent Evaluation...")
+     # Fixed launch configuration to prevent HTTP protocol errors
+     demo.launch(
+         debug=False,  # Disable debug mode to prevent extra logging that can cause issues
+         share=False,
+         server_name="0.0.0.0",
+         server_port=7860,
+         show_error=True,
+         quiet=False,
+         # Disable features that can cause HTTP protocol issues
+         prevent_thread_lock=False,
+         show_tips=False
+     )
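
The in-app disclaimer above suggests caching the answers and submitting them in a separate action (or answering the questions asynchronously) to avoid the long wait on a single button click. Below is a minimal sketch of the caching idea; it is not part of this commit. The helper names `answer_all` and `submit_cached`, and the `answers_cache.json` path, are assumptions for illustration, while the question/answer payload format follows the code above.

# Hypothetical sketch (not part of this commit): run the agent once, cache the
# answers to disk, then submit the cached answers in a separate, fast action.
import json
import requests

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
CACHE_FILE = "answers_cache.json"  # assumed local cache location

def answer_all(agent) -> list[dict]:
    """Run the agent over all questions and cache the answers to disk."""
    questions = requests.get(f"{DEFAULT_API_URL}/questions", timeout=15).json()
    answers = []
    for item in questions:
        # Same structured dict as AdvancedAgent.__call__ returns above
        result = agent(item["question"])
        answers.append({
            "task_id": item["task_id"],
            "model_answer": result["model_answer"],
            "reasoning_trace": result["reasoning_trace"],
        })
    with open(CACHE_FILE, "w") as f:
        json.dump(answers, f)
    return answers

def submit_cached(username: str, agent_code: str) -> dict:
    """Submit previously cached answers without re-running the agent."""
    with open(CACHE_FILE) as f:
        answers = json.load(f)
    payload = {"username": username, "agent_code": agent_code, "answers": answers}
    response = requests.post(f"{DEFAULT_API_URL}/submit", json=payload, timeout=60)
    response.raise_for_status()
    return response.json()

Wiring these two helpers to separate Gradio buttons would keep the submit click short, since the slow agent run happens only in the first step.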