CultriX committed on
Commit
c3e36e2
·
verified ·
1 Parent(s): bbe6fe0

Update run.py

Browse files
Files changed (1) hide show
  1. run.py +31 -11
run.py CHANGED
@@ -186,6 +186,8 @@ def run_agent_with_streaming(agent, question, stream_callback=None):
186
  def create_gradio_interface():
187
  """Create Gradio interface with streaming support"""
188
  import gradio as gr
 
 
189
 
190
  def process_question(question, model_id, hf_token, serpapi_key, custom_api_endpoint,
191
  custom_api_key, search_provider, search_api_key, custom_search_url):
@@ -194,9 +196,9 @@ def create_gradio_interface():
194
  agent = create_agent(
195
  model_id=model_id,
196
  hf_token=hf_token,
197
- openai_api_key=None, # Add if needed
198
  serpapi_key=serpapi_key,
199
- api_endpoint=None, # Add if needed
200
  custom_api_endpoint=custom_api_endpoint,
201
  custom_api_key=custom_api_key,
202
  search_provider=search_provider,
@@ -204,22 +206,40 @@ def create_gradio_interface():
204
  custom_search_url=custom_search_url,
205
  )
206
 
207
- # Stream output
208
- full_output = []
 
209
 
210
  def stream_callback(text):
211
- full_output.append(text)
212
- return "".join(full_output)
213
 
214
- # Generator function for streaming
215
- def generate():
216
  try:
217
  answer = run_agent_with_streaming(agent, question, stream_callback)
218
- yield "".join(full_output) + f"\n\n**FINAL ANSWER:** {answer}"
219
  except Exception as e:
220
- yield "".join(full_output) + f"\n\n**ERROR:** {str(e)}"
 
 
221
 
222
- return generate()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
223
 
224
  # Create Gradio interface
225
  with gr.Blocks(title="Streaming Agent Chat") as demo:
 
186
  def create_gradio_interface():
187
  """Create Gradio interface with streaming support"""
188
  import gradio as gr
189
+ import time
190
+ import threading
191
 
192
  def process_question(question, model_id, hf_token, serpapi_key, custom_api_endpoint,
193
  custom_api_key, search_provider, search_api_key, custom_search_url):
 
196
  agent = create_agent(
197
  model_id=model_id,
198
  hf_token=hf_token,
199
+ openai_api_key=None,
200
  serpapi_key=serpapi_key,
201
+ api_endpoint=None,
202
  custom_api_endpoint=custom_api_endpoint,
203
  custom_api_key=custom_api_key,
204
  search_provider=search_provider,
 
206
  custom_search_url=custom_search_url,
207
  )
208
 
209
+ # Shared state for streaming
210
+ output_buffer = []
211
+ is_complete = False
212
 
213
  def stream_callback(text):
214
+ output_buffer.append(text)
 
215
 
216
+ def run_agent_async():
217
+ nonlocal is_complete
218
  try:
219
  answer = run_agent_with_streaming(agent, question, stream_callback)
220
+ output_buffer.append(f"\n\n**FINAL ANSWER:** {answer}")
221
  except Exception as e:
222
+ output_buffer.append(f"\n\n**ERROR:** {str(e)}")
223
+ finally:
224
+ is_complete = True
225
 
226
+ # Start agent in background thread
227
+ agent_thread = threading.Thread(target=run_agent_async)
228
+ agent_thread.start()
229
+
230
+ # Generator that yields updates
231
+ last_length = 0
232
+ while not is_complete or agent_thread.is_alive():
233
+ current_output = "".join(output_buffer)
234
+ if len(current_output) > last_length:
235
+ yield current_output
236
+ last_length = len(current_output)
237
+ time.sleep(0.1) # Small delay to prevent excessive updates
238
+
239
+ # Final yield to ensure everything is captured
240
+ final_output = "".join(output_buffer)
241
+ if len(final_output) > last_length:
242
+ yield final_output
243
 
244
  # Create Gradio interface
245
  with gr.Blocks(title="Streaming Agent Chat") as demo: