Tonic committed
Commit ff4e34f
Parent: 32cbfb2

Update app.py

Files changed (1)
  1. app.py +52 -5
app.py CHANGED
@@ -8,8 +8,38 @@ from decouple import Config
 config = Config('.env')
 
 
-def query_vectara(question):
-    user_message = question
+def process_image_with_openai(image):
+    image_data = convert_image_to_required_format(image)
+    openai_api_key = config('OPENAI_API_KEY') # Make sure to have this in your .env file
+
+    data_payload = {
+        "model": "gpt-4-vision-preview",
+        "messages": [
+            {
+                "role": "user",
+                "content": image_data
+            }
+        ],
+        "max_tokens": 300
+    }
+
+    response = requests.post(
+        "https://api.openai.com/v1/chat/completions",
+        headers={
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {openai_api_key}"
+        },
+        json=data_payload
+    )
+
+    if response.status_code == 200:
+        return response.json()['choices'][0]['message']['content']
+    else:
+        raise Exception(f"OpenAI Error: {response.status_code}")
+
+
+def query_vectara(text):
+    user_message = text
 
     # Read authentication parameters from the .env file
     CUSTOMER_ID = config('CUSTOMER_ID')
@@ -132,10 +162,27 @@ def convert_to_markdown(vectara_response_json):
         return f"{markdown_summary}**Sources:**\n{markdown_sources}"
     else:
         return "No data found in the response."
-
+# Main function to handle the Gradio interface logic
+def process_and_query(text, image):
+    try:
+        # If an image is provided, process it with OpenAI and use the response as the text query for Vectara
+        if image is not None:
+            text = process_image_with_openai(image)
+
+        # Now, use the text (either provided by the user or obtained from OpenAI) to query Vectara
+        vectara_response_json = query_vectara(text)
+        markdown_output = convert_to_markdown(vectara_response_json)
+        return markdown_output
+    except Exception as e:
+        return str(e)
+
+# Define the Gradio interface
 iface = gr.Interface(
-    fn=lambda text: convert_to_markdown(query_vectara(text)),
-    inputs=[gr.Textbox(label="Input Text")],
+    fn=process_and_query,
+    inputs=[
+        gr.Textbox(label="Input Text", optional=True),
+        gr.Image(label="Upload Image", optional=True)
+    ],
     outputs=[gr.Markdown(label="Output Text")],
     title="👋🏻Welcome to ⚕🗣️😷MultiMed - Access Chat ⚕🗣️😷",
     description = '''
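
Note: the new process_image_with_openai function calls convert_image_to_required_format, which lives elsewhere in app.py and is not shown in this diff. As a rough, purely illustrative sketch (not part of this commit), such a helper might base64-encode the uploaded image and wrap it in the vision-style message content that the gpt-4-vision-preview chat endpoint accepts, assuming the Gradio image input returns a PIL image (type="pil") and using a placeholder prompt text:

import base64
import io

def convert_image_to_required_format(image):
    # Hypothetical helper: encode a PIL image as a base64 data URL inside
    # the "content" list format accepted by the gpt-4-vision-preview endpoint.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url",
         "image_url": {"url": f"data:image/png;base64,{encoded}"}},
    ]

Whatever the real helper returns is passed straight through as the user message "content" in data_payload, which is why process_image_with_openai sends image_data to the API unchanged.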