panda992 committed on
Commit 63a3038 · 1 Parent(s): 7f1fcf1

Update GHAPA UI and README: multilingual detection, backend error handling, and full project description

Files changed (2)
  1. README.md +32 -10
  2. app.py +67 -41
README.md CHANGED
@@ -7,17 +7,39 @@ sdk: "gradio"
  app_file: "app.py"
  pinned: false
  secrets:
- - MODAL_BACKEND_URL
  ---

- # 🧠 GHAPA: Intelligent Health AI Assistant (Powered by Modal ⚡)
- This is the user interface for the GHAPA multi-agent system. It provides a simple, responsive UI that communicates with a powerful, GPU-accelerated backend hosted on Modal Labs.

- This decoupled architecture ensures that the user interface remains lightweight and fast, while all computationally intensive AI tasks are handled by a scalable, on-demand backend.

- ### How to Use
- Ask your health question in **English, Spanish, Hindi, or French**. The system will:
- 1. Auto-detect your language.
- 2. Send the query to the Modal backend.
- 3. Receive a trusted answer generated from PubMed articles.
- 4. Display the result, usually within seconds.
  app_file: "app.py"
  pinned: false
  secrets:
+ - MODAL_BACKEND_URL
  ---

+ # 🧠 GHAPA: Global Health AI Prompt Assistant (Powered by Modal ⚡)

+ **GHAPA** is a multilingual biomedical assistant that provides trusted, citable health information across global languages. It combines real-time hybrid search with domain-specific LLM reasoning in a modular Retrieval-Augmented Generation (RAG) pipeline.

+ ## 🌍 Key Features
+
+ - 🌐 **Multilingual Q&A**: Accepts questions in **English**, **Hindi**, **Spanish**, and **French**, with automatic language detection and translation.
+ - 🔍 **Hybrid Search**: Retrieves biomedical data from **PubMed** and general health insights via **DuckDuckGo**, ensuring broad yet reliable context.
+ - 📚 **Citable Results**: Every answer includes a direct **source link** and **context snippet**.
+ - 🧠 **Biomedical Reasoning**: Uses **PMC-LLaMA**, a large language model fine-tuned on medical literature, for precise and grounded answers.
+ - ⚙️ **Serverless Backend**: All heavy processing runs on scalable GPU backends via [Modal](https://modal.com), decoupled from the lightweight Gradio UI.
+ - 💡 **Minimal UI**: Clean, responsive interface built using [Gradio](https://www.gradio.app), hosted on [Hugging Face Spaces](https://huggingface.co/spaces).
+
+ ---
+
+ ## 🛠 Architecture Overview
+
+ ```plaintext
+ User Input (any language)
+ ↓
+ Language Detection & Translation
+ ↓
+ Hybrid Search (PubMed + DuckDuckGo)
+ ↓
+ Unified Ranking (SentenceTransformer)
+ ↓
+ Prompt Construction with Top Contexts
+ ↓
+ PMC-LLaMA Generative Reasoning Engine
+ ↓
+ Back-Translation & Markdown Output
+ ↓
+ Final Answer + Cited Sources
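
For context, the ranking stage in the diagram above can be approximated with a sentence-embedding model: embed the (translated) question and each retrieved snippet, score them by cosine similarity, and keep the top hits. The sketch below is a minimal illustration only, assuming the `langdetect` and `sentence-transformers` packages and an illustrative `all-MiniLM-L6-v2` model; `rank_contexts` and the other names are hypothetical and not taken from the GHAPA backend, whose code is not part of this commit.

```python
# Illustrative sketch only -- not code from the GHAPA backend.
from langdetect import detect                     # hypothetical choice for language ID
from sentence_transformers import SentenceTransformer, util

ranker = SentenceTransformer("all-MiniLM-L6-v2")  # assumed model; the real one is not named in this diff

def rank_contexts(question: str, snippets: list[str], top_k: int = 3):
    """Score retrieved PubMed/DuckDuckGo snippets against the question, keep the best ones."""
    q_emb = ranker.encode(question, convert_to_tensor=True)
    s_emb = ranker.encode(snippets, convert_to_tensor=True)
    scores = util.cos_sim(q_emb, s_emb)[0]         # one cosine-similarity score per snippet
    order = scores.argsort(descending=True)[:top_k]
    return [(snippets[int(i)], float(scores[int(i)])) for i in order]

if __name__ == "__main__":
    print(detect("¿Cuáles son los síntomas del dengue?"))   # typically "es"
    print(rank_contexts("What are the symptoms of dengue?",
                        ["Dengue fever causes high fever and joint pain.",
                         "Malaria is transmitted by Anopheles mosquitoes."]))
```

Scoring PubMed and DuckDuckGo snippets with the same embedding model is what lets the two sources be merged into a single ranked context list before prompt construction.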
app.py CHANGED
@@ -5,58 +5,84 @@ import requests
  import os

  # --- Configuration ---
- # This securely retrieves the backend URL from the Hugging Face Space secret.
  MODAL_BACKEND_URL = os.getenv("MODAL_BACKEND_URL")

- # A critical check to ensure the secret is set. The app will not start without it.
- if not MODAL_BACKEND_URL:
-     raise ValueError("The MODAL_BACKEND_URL secret is not set in your Hugging Face Space! Please add it in the Settings tab.")

- # This function is the bridge between your Gradio UI and your Modal backend.
- def call_modal_backend(user_input: str):
-     """
-     Takes the user's input, sends it to the Modal backend via an HTTP POST
-     request, and returns the final markdown response.
-     """
-     if not user_input or not user_input.strip():
-         return "Please enter a question to get started."
-
-     # Prepare the request headers and payload
      headers = {"Content-Type": "application/json"}
-     payload = {"user_input": user_input}

      try:
-         # Make the POST request to the Modal serverless function
-         response = requests.post(MODAL_BACKEND_URL, headers=headers, json=payload, timeout=300)
-
-         # Raise an exception for bad HTTP status codes (like 404 or 500)
          response.raise_for_status()
-
-         # Parse the JSON response from the backend
-         result = response.json()
-
-         # Safely get the final markdown. If the key is missing, return an error.
-         return result.get("final_markdown", "### ❗ Error\nReceived an unexpected response format from the backend.")
-
-     except requests.exceptions.Timeout:
-         return "### ❗ Error: The request timed out. The AI backend may be busy or starting up. Please try again in a moment."
      except requests.exceptions.RequestException as e:
-         # Handle any other network errors (e.g., connection failed)
-         return f"### ❗ Error: Could not connect to the AI backend.\n**Details**: {e}"

- # --- Gradio User Interface ---
- with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}") as demo:
-     gr.Markdown("""# 🧠 GHAPA: Intelligent Health AI Assistant (Powered by Modal ⚡)
-     Ask your health question in **English, Spanish, Hindi, or French**. The system fetches trusted PubMed answers at high speed.""")

      with gr.Row():
-         inp = gr.Textbox(label="Enter your question", placeholder="e.g., 'What are the symptoms of dengue?'", scale=4)
-         btn = gr.Button("Submit", variant="primary", scale=1)
-
-     o5 = gr.Markdown(label="Answer & Explanation")

-     # The click function is now simple and calls our backend function.
-     btn.click(fn=call_modal_backend, inputs=[inp], outputs=[o5])

  if __name__ == "__main__":
-     demo.launch()
  import os

  # --- Configuration ---
  MODAL_BACKEND_URL = os.getenv("MODAL_BACKEND_URL")

+ # --- Backend Communication ---
+ def call_modal_backend(user_input):
+     if not MODAL_BACKEND_URL:
+         return {
+             "final_markdown": "### ❗ Configuration Error\n**Reason**: `MODAL_BACKEND_URL` is not set. Please configure the secret in your Hugging Face Space settings.",
+             "detected_lang": "Unknown"
+         }

      headers = {"Content-Type": "application/json"}
+     data = {"user_input": user_input}

      try:
+         response = requests.post(MODAL_BACKEND_URL, headers=headers, json=data, timeout=300)
          response.raise_for_status()
+         return response.json()
      except requests.exceptions.RequestException as e:
+         return {
+             "final_markdown": f"### ❗ Network Error\n**Reason**: Could not connect to the backend API.\n**Details**: {str(e)}",
+             "detected_lang": "Error"
+         }

+ # --- Gradio UI Definition ---
+ with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important;}") as demo:
+     gr.Markdown("""
+     # 🧠 GHAPA: Global Health AI Prompt Assistant
+     _Ask trusted health questions in **English**, **Hindi**, **Spanish**, or **French**._
+     """)

      with gr.Row():
+         inp = gr.Textbox(
+             label="❓ Enter your health question",
+             placeholder="e.g., What are the symptoms of dengue?",
+             scale=4
+         )
+         btn = gr.Button("🔍 Submit", variant="primary", scale=1)
+
+     lang_detected = gr.Markdown("🌐 Language detected: _waiting..._", visible=False)
+     output = gr.Markdown(label="✅ Answer & Explanation")
+     backend_result = gr.State()
+
+     def start_processing(user_input):
+         if not user_input.strip():
+             return {
+                 inp: gr.update(),
+                 btn: gr.update(),
+                 output: "Please enter a question before submitting.",
+                 lang_detected: gr.update(visible=False),
+             }
+         return {
+             inp: gr.update(interactive=False),
+             btn: gr.update(interactive=False, value="⏳ Processing..."),
+             output: "",
+             lang_detected: gr.update(visible=True, value="🌐 Detecting language and processing..."),
+         }
+
+     def finish_processing(response_json):
+         return {
+             inp: gr.update(interactive=True),
+             btn: gr.update(interactive=True, value="🔍 Submit"),
+             output: response_json.get("final_markdown", "Error: No response from backend."),
+             lang_detected: gr.update(visible=True, value=f"🌐 Language detected: **{response_json.get('detected_lang', 'Unknown')}**"),
+         }

+     btn.click(
+         fn=start_processing,
+         inputs=[inp],
+         outputs=[inp, btn, output, lang_detected]
+     ).then(
+         fn=call_modal_backend,
+         inputs=[inp],
+         outputs=[backend_result]
+     ).then(
+         fn=finish_processing,
+         inputs=[backend_result],
+         outputs=[inp, btn, output, lang_detected]
+     )

  if __name__ == "__main__":
+     demo.launch()
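
For reference, `call_modal_backend` above posts `{"user_input": ...}` and renders whatever comes back under the `final_markdown` and `detected_lang` keys; the chained `btn.click(...).then(...).then(...)` wiring exists so the textbox and button lock immediately while that slower request runs. Below is a minimal stand-in for the backend side of this contract, assuming FastAPI purely for illustration; it is not the actual GHAPA backend, which is a Modal deployment whose code is not in this diff.

```python
# Hypothetical stand-in, shown only to document the JSON contract the UI relies on.
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class Query(BaseModel):
    user_input: str

@app.post("/")
def answer(query: Query) -> dict:
    # A real implementation would run language detection, hybrid search,
    # unified ranking, and PMC-LLaMA generation before responding.
    return {
        "final_markdown": f"### Answer\n_You asked:_ {query.user_input}",
        "detected_lang": "English",
    }
```

Any backend that accepts this payload and returns those two keys will work with the Gradio UI unchanged.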