# app.py
from datetime import datetime
import time

import gradio as gr
from llama_index.core import VectorStoreIndex, Settings
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.together import TogetherLLM
from qdrant_client import QdrantClient

# === Qdrant Config ===
# NOTE: the Qdrant and Together credentials in this file are hardcoded for the
# demo; in a real deployment they would normally come from environment
# variables or Space secrets rather than source code.
QDRANT_API_KEY = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3MiOiJtIn0.9Pj8v4ACpX3m5U3SZUrG_jzrjGF-T41J5icZ6EPMxnc"
QDRANT_URL = "https://d36718f0-be68-4040-b276-f1f39bc1aeb9.us-east4-0.gcp.cloud.qdrant.io"
COLLECTION_NAME = "demo-chatbot"

# === Embedding & LLM Setup ===
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
llm = TogetherLLM(
    model="meta-llama/Llama-3-8b-chat-hf",
    api_key="a36246d65d8290f43667350b364c5b6bb8562eb50a4b947eec5bd7e79f2dffc6",
    temperature=0.3,
    max_tokens=1024,
    top_p=0.7
)
Settings.llm = llm
Settings.embed_model = embed_model
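
# Settings.llm and Settings.embed_model act as llama_index's global defaults,
# so the index and query engine built below pick up these models without any
# further configuration.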

# === Qdrant Integration ===
qdrant_client = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
vector_store = QdrantVectorStore(
    client=qdrant_client,
    collection_name=COLLECTION_NAME
)
# === Build Index ===
index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = index.as_query_engine(similarity_top_k=5)
# === Enhanced RAG Chain with References ===
def rag_chain(query: str, include_sources: bool = True) -> str:
    response = query_engine.query(query)
    response_text = str(response)
    if include_sources:
        references = get_clickable_references_from_response(response)
        if references:
            response_text += "\n\n🔗 **Sources:**\n" + "\n".join(references)
    return response_text
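
# Illustrative call (the question is made up, not taken from the original app):
#   rag_chain("What app development services does ImageOnline offer?")
# returns the LLM answer, followed by a "🔗 **Sources:**" block with at most
# two reference links when include_sources is True.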
# === Clickable Reference Links (top-2 from response nodes) ===
def get_clickable_references_from_response(response, max_refs: int = 2):
    seen = set()
    links = []
    for node in response.source_nodes:
        metadata = node.node.metadata
        section = metadata.get("section", "Unknown")
        source = metadata.get("source", "Unknown")
        key = (section, source)
        if key not in seen:
            seen.add(key)
            if source.startswith("http"):
                links.append(f"- [{section}]({source})")
            else:
                links.append(f"- {section}: {source}")
        if len(links) >= max_refs:
            break
    return links
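
# For a node carrying hypothetical metadata
# {"section": "Web Development", "source": "https://example.com/web-development"}
# the helper emits "- [Web Development](https://example.com/web-development)";
# non-URL sources fall back to the plain "- section: source" form.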
# Chat handler
def chat_interface(message, history):
    history = history or []
    message = message.strip()
    if not message:
        raise ValueError("Please enter a valid question.")
    timestamp_user = datetime.now().strftime("%H:%M:%S")
    user_msg = f"🧑 **You**\n{message}\n\n⏱️ {timestamp_user}"
    bot_msg = "⏳ _Bot is typing..._"
    history.append((user_msg, bot_msg))
    try:
        time.sleep(0.5)
        answer = rag_chain(message)  # already includes references
        full_response = answer.strip()
        timestamp_bot = datetime.now().strftime("%H:%M:%S")
        bot_msg = f"🤖 **Bot**\n{full_response}\n\n⏱️ {timestamp_bot}"
        history[-1] = (user_msg, bot_msg)
    except Exception as e:
        timestamp_bot = datetime.now().strftime("%H:%M:%S")
        error_msg = f"🤖 **Bot**\n⚠️ {str(e)}\n\n⏱️ {timestamp_bot}"
        history[-1] = (user_msg, error_msg)
    return history, history, ""
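
# The three return values line up with outputs=[chatbot, state, msg] in the
# event wiring below: the refreshed chat display, the persisted state, and an
# empty string that clears the input box after each turn.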
# Gradio UI
def launch_gradio():
    with gr.Blocks(css="""
        .gr-button {
            background-color: orange !important;
            color: white !important;
            font-weight: bold;
            border-radius: 6px !important;
            border: 1px solid darkorange !important;
        }
        .gr-button:hover {
            background-color: darkorange !important;
        }
        .gr-textbox textarea {
            border: 2px solid orange !important;
            border-radius: 6px !important;
            padding: 0.75rem !important;
            font-size: 1rem;
        }
    """) as demo:
        gr.Markdown("# 💬 ImageOnline RAG Chatbot")
        gr.Markdown("Welcome! Ask about Website Designing, Web Development, App Development, About Us, Digital Marketing, etc.")
        chatbot = gr.Chatbot()
        state = gr.State([])
        with gr.Row(equal_height=True):
            msg = gr.Textbox(
                placeholder="Ask your question here...",
                show_label=False,
                scale=9
            )
            send_btn = gr.Button("🚀 Send", scale=1)
        msg.submit(chat_interface, inputs=[msg, state], outputs=[chatbot, state, msg])
        send_btn.click(chat_interface, inputs=[msg, state], outputs=[chatbot, state, msg])
        with gr.Row():
            clear_btn = gr.Button("🧹 Clear Chat")
            clear_btn.click(fn=lambda: ([], []), outputs=[chatbot, state])
    return demo

# Launch
demo = launch_gradio()
demo.launch()
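
# The default launch() settings are typically enough on a hosted Space; for a
# self-hosted container you might pass options such as
# demo.launch(server_name="0.0.0.0", server_port=7860). Those values are an
# assumption for illustration, not part of the original app.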