import os
from io import BytesIO

import requests
import streamlit as st
from groq import Groq
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from PyPDF2 import PdfReader

# The Groq client reads its API key from the GROQ_API_KEY environment variable.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
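
# Assumed environment (a sketch, not pinned versions): something like
#   pip install streamlit groq langchain langchain-community faiss-cpu \
#       sentence-transformers PyPDF2 requests
# plus GROQ_API_KEY exported in the shell.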

# Google Drive share links for the PDFs to index. Each must be a standard
# "file/d/<id>/view" share URL so the file id can be parsed out of it.
drive_links = [
    "https://drive.google.com/file/d/1JPf0XvDhn8QoDOlZDrxCOpu4WzKFESNz/view?usp=sharing"
]


def download_pdf_from_drive(drive_link):
    """Download a publicly shared Google Drive PDF into memory."""
    # Share links look like .../file/d/<file_id>/view?...; pull out the id.
    file_id = drive_link.split('/d/')[1].split('/')[0]
    download_url = f"https://drive.google.com/uc?id={file_id}&export=download"
    response = requests.get(download_url, timeout=60)
    if response.status_code == 200:
        return BytesIO(response.content)
    raise Exception("Failed to download the PDF file from Google Drive.")
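
# Note: for files above Google Drive's virus-scan size threshold,
# uc?export=download returns an HTML confirmation page instead of the PDF
# bytes; the third-party gdown package handles that case if it comes up.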


def extract_text_from_pdf(pdf_stream):
    """Concatenate the extracted text of every page in the PDF."""
    pdf_reader = PdfReader(pdf_stream)
    text = ""
    for page in pdf_reader.pages:
        # extract_text() can return None on image-only pages, so guard it.
        text += page.extract_text() or ""
    return text


def chunk_text(text, chunk_size=500, chunk_overlap=50):
    """Split text into overlapping chunks sized for embedding."""
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap
    )
    return text_splitter.split_text(text)
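
# The 500-character chunks with 50-character overlap are a common starting
# point rather than tuned values: larger chunks keep more context per hit,
# smaller ones retrieve more precisely.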


def create_embeddings_and_store(chunks):
    """Embed the chunks and index them in an in-memory FAISS store."""
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vector_db = FAISS.from_texts(chunks, embedding=embeddings)
    return vector_db
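
# Optional: cache the index across Streamlit reruns so the embeddings are not
# rebuilt on every widget interaction. A minimal sketch, assuming a recent
# Streamlit with st.cache_resource; build_index is a hypothetical wrapper:
#
#     @st.cache_resource(show_spinner=False)
#     def build_index(chunks_tuple):
#         return create_embeddings_and_store(list(chunks_tuple))
#
#     vector_db = build_index(tuple(all_chunks))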


def query_vector_db(query, vector_db):
    """Retrieve the top-matching chunks and answer the query with them as context."""
    # Pull the three chunks closest to the query in embedding space.
    docs = vector_db.similarity_search(query, k=3)
    context = "\n".join(doc.page_content for doc in docs)

    # Hand the retrieved context to the model as a system message.
    chat_completion = client.chat.completions.create(
        messages=[
            {"role": "system", "content": f"Use the following context:\n{context}"},
            {"role": "user", "content": query},
        ],
        model="llama3-8b-8192",
    )
    return chat_completion.choices[0].message.content
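
# k=3 keeps the prompt compact; raise it if answers miss relevant passages,
# keeping the model's 8192-token context window in mind.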


st.title("RAG-Based Chatbot (Preloaded Documents)")
st.write("Processing the document links...")

all_chunks = []

for link in drive_links:
    try:
        # Download, extract, and chunk each document, pooling the chunks.
        pdf_stream = download_pdf_from_drive(link)
        text = extract_text_from_pdf(pdf_stream)
        chunks = chunk_text(text)
        all_chunks.extend(chunks)
    except Exception as e:
        st.error(f"Error processing link {link}: {e}")

if all_chunks:
    vector_db = create_embeddings_and_store(all_chunks)
    st.write("Data is ready!")

    # The query UI lives inside this branch so vector_db is always defined
    # when it is used.
    user_query = st.text_input("Enter your query:")
    if user_query:
        response = query_vector_db(user_query, vector_db)
        st.write("Response from LLM:")
        st.write(response)
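
# Launch with: streamlit run <this_file>.py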