mirxakamran893 committed on
Commit
e370f3a
·
verified Β·
1 Parent(s): 0527279

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -33
app.py CHANGED
@@ -1,58 +1,53 @@
1
- from fastapi import FastAPI
2
- from pydantic import BaseModel
3
  import requests
4
  import os
5
  import faiss
6
  import numpy as np
7
  import json
8
  from sentence_transformers import SentenceTransformer
 
9
 
10
- # βœ… Load your texts and vector index
11
  with open("texts.json", "r", encoding="utf-8") as f:
12
  texts = json.load(f)
13
 
14
  index = faiss.read_index("faiss_index.bin")
15
  embed_model = SentenceTransformer("all-MiniLM-L6-v2")
16
 
17
- # βœ… Setup your API key and model name
18
- API_KEY = os.environ.get("OPENROUTER_API_KEY") # Set this in environment or .env file
19
  MODEL = "qwen/qwen-2.5-coder-32b-instruct:free"
20
 
21
- # βœ… Define FastAPI app
22
- app = FastAPI()
23
-
24
- # βœ… Request schema
25
- class ChatRequest(BaseModel):
26
- message: str
27
- history: list = [] # Optional, not used in current logic
28
-
29
- # βœ… Function to get top-k similar chunks from FAISS
30
- def get_context(query, top_k=5):
31
  query_vec = embed_model.encode([query])
32
  D, I = index.search(np.array(query_vec), top_k)
 
 
33
  return "\n".join([texts[i] for i in I[0]])
34
 
35
- # βœ… Endpoint
36
- @app.post("/chat")
37
- def chat_api(data: ChatRequest):
38
- user_message = data.message
39
- context = get_context(user_message)
 
 
 
 
 
40
 
41
  messages = [
42
  {
43
  "role": "system",
44
- "content": f"You are a helpful coding assistant. Use the following context to answer the question:\n{context}"
45
- },
46
- {
47
- "role": "user",
48
- "content": user_message
49
  }
50
  ]
51
 
52
- headers = {
53
- "Authorization": f"Bearer {API_KEY}",
54
- "Content-Type": "application/json"
55
- }
 
56
 
57
  payload = {
58
  "model": MODEL,
@@ -62,9 +57,44 @@ def chat_api(data: ChatRequest):
62
  try:
63
  response = requests.post("https://openrouter.ai/api/v1/chat/completions", headers=headers, json=payload)
64
  response.raise_for_status()
65
- result = response.json()
66
- reply = result["choices"][0]["message"]["content"]
67
  except Exception as e:
68
- reply = f"❌ Error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
- return {"reply": reply}
 
1
+ import gradio as gr
 
2
  import requests
3
  import os
4
  import faiss
5
  import numpy as np
6
  import json
7
  from sentence_transformers import SentenceTransformer
8
+ from datetime import datetime
9
 
10
# ✅ Load context and model
# Knowledge-base chunks (one string per FAISS vector, same ordering as the index).
with open("texts.json", "r", encoding="utf-8") as f:
    texts = json.load(f)

# Pre-built vector index and the sentence-embedding model used to query it.
# NOTE(review): embeddings must come from the same model that built the index —
# confirm faiss_index.bin was produced with all-MiniLM-L6-v2.
index = faiss.read_index("faiss_index.bin")
embed_model = SentenceTransformer("all-MiniLM-L6-v2")

# OpenRouter credentials and chat model (set OPENROUTER_API_KEY in the environment;
# API_KEY is None if unset, which will fail at request time, not at import time).
API_KEY = os.environ.get("OPENROUTER_API_KEY")
MODEL = "qwen/qwen-2.5-coder-32b-instruct:free"
19
 
20
# ✅ Get context
def get_context(query, top_k=5, threshold=0.45):
    """Return the top-k indexed text chunks joined by newlines, or None when
    every retrieved score falls below *threshold*.

    NOTE(review): this gate assumes index scores are similarities (higher =
    closer), e.g. an inner-product/cosine FAISS index. If faiss_index.bin is
    an L2 index, lower scores mean closer matches and the comparison is
    inverted — confirm against the index-building script.
    """
    embedding = embed_model.encode([query])
    scores, ids = index.search(np.array(embedding), top_k)
    if all(s < threshold for s in scores[0]):
        return None
    return "\n".join(texts[chunk_id] for chunk_id in ids[0])
27
 
28
+ # βœ… Chat logic
29
+ def chat_fn(message, history):
30
+ context = get_context(message)
31
+ if context is None:
32
+ return history + [(message, "❌ Sorry! I cannot answer that.")], gr.update(visible=True)
33
+
34
+ headers = {
35
+ "Authorization": f"Bearer {API_KEY}",
36
+ "Content-Type": "application/json"
37
+ }
38
 
39
  messages = [
40
  {
41
  "role": "system",
42
+ "content": f"You are a helpful assistant. Only answer using this context:\n{context}\nIf the answer is not in the context, reply with 'Sorry! I cannot answer that.'"
 
 
 
 
43
  }
44
  ]
45
 
46
+ for user, assistant in history:
47
+ messages.append({"role": "user", "content": user})
48
+ messages.append({"role": "assistant", "content": assistant})
49
+
50
+ messages.append({"role": "user", "content": message})
51
 
52
  payload = {
53
  "model": MODEL,
 
57
  try:
58
  response = requests.post("https://openrouter.ai/api/v1/chat/completions", headers=headers, json=payload)
59
  response.raise_for_status()
60
+ reply = response.json()["choices"][0]["message"]["content"]
 
61
  except Exception as e:
62
+ reply = f"❌ Error: {e}"
63
+
64
+ return history + [(message, reply)], gr.update(visible=True)
65
+
66
# ✅ Export chat
def export_logs(history):
    """Write the (user, assistant) chat history to a timestamped UTF-8 .txt
    file in the working directory and return the filename."""
    stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    out_name = f"chat_log_{stamp}.txt"
    turns = [f"You: {question}\nBot: {answer}" for question, answer in history]
    with open(out_name, "w", encoding="utf-8") as log_file:
        log_file.write("\n\n".join(turns))
    return out_name
74
+
75
# ✅ Gradio UI with typing animation + auto-scroll + export button
with gr.Blocks(css=".footer {display: none !important;}") as demo:
    # height= replaces the removed Chatbot.style() call (deprecated, then
    # deleted in modern Gradio releases).
    chatbot = gr.Chatbot(height=500)
    state = gr.State([])   # conversation history as (user, assistant) pairs
    pending = gr.State("") # message stashed between the two submit steps
    with gr.Row():
        msg = gr.Textbox(placeholder="Type your message and press enter...", scale=8)
        export_btn = gr.Button("Export Chat", scale=1, visible=False)

    typing_indicator = gr.Textbox(value="", visible=False, interactive=False, show_label=False)

    def user_send(message, history):
        # Clear the textbox, stash the message, and show a typing placeholder.
        # The original re-read `msg` in the .then() step, but by then it had
        # already been cleared to "" — the model always got an empty message.
        # The original also called chatbot.update(...) on the component
        # instance inside the handler, which is a no-op; returning the new
        # value is what updates the UI.
        return "", message, history + [(message, "⏳ ...")], gr.update(visible=False)

    def complete_chat(message, history):
        # chat_fn returns (new_history, export-button update). Also write the
        # new history back into `state`: the original never updated it, so the
        # model saw no prior turns and exported logs were always empty.
        new_history, btn_update = chat_fn(message, history)
        return new_history, new_history, btn_update

    msg.submit(user_send, [msg, state], [msg, pending, chatbot, export_btn]).then(
        complete_chat, [pending, state], [chatbot, state, export_btn]
    )

    export_btn.click(fn=export_logs, inputs=[state], outputs=gr.File(label="Download Chat Log"))

demo.launch()