Update app.py
app.py CHANGED
@@ -195,10 +195,6 @@ def process_image(image_input):
         fn_index=2 # Function index if the server has multiple functions
     )
 
-    # Clean up the temporary file if created
-    if not isinstance(image_input, str) or isinstance(image_input, str) and 'tmp' in image_path:
-        os.remove(image_path)
-
     return result
 
 
@@ -381,8 +377,7 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
     # Process image input
     if image_input is not None:
        image_text = process_image(image_input)
-        combined_text += "\n\n**Image Input:**\n" + image_text
-
+        combined_text += "\n\n**Image Input:**\n" + image_text
 
     # Process audio input
     if audio_input is not None:
@@ -396,7 +391,12 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
     # Check if combined text is empty
     if not combined_text.strip():
         return "Error: Please provide some input (text, audio, or image)."
-
+
+    # Append the original image description in Markdown
+    if image_text: # Changed to image_text
+        markdown_output += "\n### Original Image Description\n"
+        markdown_output += image_text + "\n" # Changed to image_text
+
     # Use the text to query Vectara
     vectara_response_json = query_vectara(combined_text)
 
@@ -405,6 +405,7 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
     summary = vectara_response.get('summary', 'No summary available')
     sources_info = vectara_response.get('sources', [])
 
+
     # Format Vectara response in Markdown
     markdown_output = "### Vectara Response Summary\n"
     markdown_output += f"* **Summary**: {summary}\n"
@@ -412,11 +413,6 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
     for source in sources_info:
         markdown_output += f"* {source}\n"
 
-    # Append the original image description in Markdown
-    if image_description:
-        markdown_output += "\n### Original Image Description\n"
-        markdown_output += image_description + "\n"
-
     # Process the summary with OpenAI
     final_response = process_summary_with_stablemed(summary)
 
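For orientation, here is a minimal, self-contained sketch of the block this commit relocates: the image description is now appended right after the empty-input check and is keyed on `image_text` (the `process_image` result) rather than the old `image_description`. The placeholder values and the initial `markdown_output = ""` below are assumptions added only so the snippet runs on its own; they are not lines from app.py.

```python
# Sketch of the relocated Markdown-append block (placeholders marked below).

image_text = "Example description returned by process_image()."  # placeholder value
markdown_output = ""  # assumed initial value for this standalone example

# Append the original image description in Markdown (moved before the Vectara query)
if image_text:
    markdown_output += "\n### Original Image Description\n"
    markdown_output += image_text + "\n"

print(markdown_output)
# Expected output:
#
# ### Original Image Description
# Example description returned by process_image().
```

In app.py this block is followed by `vectara_response_json = query_vectara(combined_text)`, as shown in the hunk above.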