Update app.py
app.py CHANGED
@@ -4,18 +4,16 @@ from chainlit.input_widget import Switch
 import google.generativeai as gemini_client
 from qdrant_client import QdrantClient
 
-import vanna
-from vanna.vannadb import VannaDB_VectorStore
-from vanna.google import GoogleGeminiChat
-
 import textwrap
 import os
+import requests
+import json
 
 #API KEY
 GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
 QDRANT_URL = os.environ.get("QDRANT_URL")
 QDRANT_API_KEY = os.environ.get("QDRANT_API_KEY")
-
+VANNA_API_URL = os.environ.get("VANNA_API_URL")
 VANNA_API_KEY = os.environ.get("VANNA_API_KEY")
 
 #CONNECTION
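The hunk above drops the local vanna package imports in favor of plain requests/json and adds a VANNA_API_URL setting next to the existing keys. A minimal sketch (not part of the commit; the variable list is taken from the assignments above) of a startup check that fails fast when one of these environment variables is unset:

import os

# Hypothetical fail-fast check; every name below is read with os.environ.get() in app.py.
REQUIRED_VARS = ("GEMINI_API_KEY", "QDRANT_URL", "QDRANT_API_KEY", "VANNA_API_URL", "VANNA_API_KEY")

missing = [name for name in REQUIRED_VARS if not os.environ.get(name)]
if missing:
    raise RuntimeError(f"Missing required environment variables: {', '.join(missing)}")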
@@ -25,22 +23,45 @@ client = QdrantClient(
 )
 gemini_client.configure(api_key=GEMINI_API_KEY)
 
-
-
-
-
-
+def call_vanna_api(message):
+    headers = {
+        "Content-Type": "application/json",
+        "VANNA-API-KEY": VANNA_API_KEY
+    }
+    data = {
+        "message": message,
+        "user_email": "[email protected]",  # Replace with the user's email
+        "agent_id": "landw",
+        "acceptable_responses": ["text"],
+    }
 
-# Error handling for session disconnection
-async def handle_request(self, scope, receive, send):
     try:
-
-
-
-
-
+        response = requests.post(VANNA_API_URL, headers=headers, data=json.dumps(data), stream=True)
+        response.raise_for_status()
+
+        results = []
+        for line in response.iter_lines():
+            if line:
+                decoded_line = line.decode('utf-8')
+                if decoded_line.startswith("data:"):
+                    data_string = decoded_line[5:].strip()
+                    try:
+                        data = json.loads(data_string)
+                        if data['type'] == 'text':
+                            results.append(data['text'])  # Append the text response to the results list
+                        elif data['type'] == 'end':
+                            # Return only the last 'text' type response
+                            return results[-1] if results else "No valid text response found"
+                        elif data['type'] == 'error':
+                            results.append(f"**Error:**\n{data['error']}")
+                    except json.JSONDecodeError as e:
+                        results.append(f"Error decoding JSON: {e} - Original data: {data_string}")
+        return "\n\n".join(results)
+    except requests.exceptions.RequestException as e:
+        return f"An error occurred: {e}"
+    except Exception as e:
+        return f"An unexpected error occurred: {e}"
 
-vn = MyVanna()
 
 @cl.set_chat_profiles
 async def chat_profile():
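In the new helper, the Vanna endpoint's reply is read as a stream of server-sent-event style lines: each line starting with "data:" carries a JSON object with a 'type' field ('text', 'end', or 'error'), and the last 'text' payload seen before 'end' becomes the chat answer. The sketch below replays that parsing logic against a made-up stream; the sample payload shapes are an assumption based only on the branches handled above, not on Vanna's documented wire format.

import json

# Hypothetical event lines shaped like the payloads the loop in call_vanna_api() handles.
sample_stream = [
    b'data: {"type": "text", "text": "SELECT COUNT(*) FROM pegawai;"}',
    b'data: {"type": "text", "text": "There are 1,234 rows in pegawai."}',
    b'data: {"type": "end"}',
]

results = []
for line in sample_stream:
    decoded = line.decode("utf-8")
    if decoded.startswith("data:"):
        payload = json.loads(decoded[5:].strip())
        if payload["type"] == "text":
            results.append(payload["text"])
        elif payload["type"] == "end":
            break

# Mirrors the 'end' branch above: only the last text event is returned to the user.
print(results[-1] if results else "No valid text response found")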
@@ -106,52 +127,10 @@ async def main(message: cl.Message):
     if cl.user_session.get("chat_profile") == "Databot LAN":
         human_query = message.content
 
-
-
-        def gen_query(human_query: str):
-            sql_query = vn.generate_sql(human_query)
-            return sql_query
-
-        def execute_query(query):
-            df = vn.run_sql(query)
-
-            return df
-
-        def human_answer(human_query,df):
-            answer = vn.generate_summary(human_query, df)
-
-            return answer
-
-        def plot(human_query, sql, df):
-            plotly_code = vn.generate_plotly_code(question=human_query, sql=sql, df=df)
-            fig = vn.get_plotly_figure(plotly_code=plotly_code, df=df)
+        # Call the Vanna AI API to get a response
+        vanna_response = call_vanna_api(human_query)
 
-
-            return elements
-
-
-        settings = await cl.ChatSettings(
-            [
-                Switch(
-                    id="is_chart",
-                    label="Tampilkan grafik",
-                    initial=False
-                )
-            ]
-        ).send()
-
-        sql_query = gen_query(human_query)
-        df = execute_query(sql_query)
-
-        answer_human = human_answer(human_query, df)
-
-        if settings["is_chart"]:
-            elements = plot(human_query, sql_query, df)
-            await cl.Message(
-                content=human_query, elements=elements, author="Databot LANRI").send()
-
-        await cl.Message(
-            content=f"{answer_human}").send()
+        await cl.Message(content=vanna_response).send()
 
     elif cl.user_session.get("chat_profile") == "Infobot LAN":
         ask = message.content
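The rewritten Databot branch above delegates everything to call_vanna_api(), which is built on the blocking requests library but is invoked directly inside the async Chainlit handler. A hedged sketch, not part of the commit, of how that call could be pushed to a worker thread with asyncio.to_thread (Python 3.9+) so one slow Vanna response does not stall other chat sessions; the @cl.on_message decorator on main() is assumed from the usual Chainlit pattern:

import asyncio
import chainlit as cl

@cl.on_message
async def main(message: cl.Message):
    if cl.user_session.get("chat_profile") == "Databot LAN":
        # Run the synchronous HTTP call in a thread so the event loop stays responsive.
        vanna_response = await asyncio.to_thread(call_vanna_api, message.content)
        await cl.Message(content=vanna_response).send()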
@@ -187,7 +166,7 @@ async def main(message: cl.Message):
         prompt = make_prompt(question=ask,
                              relevant_passage = search_result[0].payload['answer'],
                              data_source = search_result[0].payload['pasal'] + ' ' + search_result[0].payload['peraturan'])
-        model = gemini_client.GenerativeModel('models/gemini-
+        model = gemini_client.GenerativeModel('models/gemini-2.0-flash')
         answer = model.generate_content(prompt)
 
         await cl.Message(
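The final hunk pins the Infobot model to 'models/gemini-2.0-flash'. As an optional sanity check (not in the commit), the google-generativeai client can list the models visible to the configured key before the bot starts answering:

import os
import google.generativeai as gemini_client

gemini_client.configure(api_key=os.environ.get("GEMINI_API_KEY"))

# Collect models that support generateContent and confirm the pinned one is among them.
available = [
    m.name
    for m in gemini_client.list_models()
    if "generateContent" in m.supported_generation_methods
]
assert "models/gemini-2.0-flash" in available, "gemini-2.0-flash is not visible to this API key"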