# aklai
# Added models folder
# e33d078
import gradio as gr
import os
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.runnables import RunnableParallel
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_chroma import Chroma
from langchain_community.llms import GPT4All
from huggingface_hub import hf_hub_download
# Location and filename of the quantized LLM weights.
model_path = "models"
model_name = "bling-phi-3.gguf"
# Fetch the GGUF weights from the Hugging Face Hub (no-op if already cached locally).
hf_hub_download(repo_id="llmware/bling-phi-3-gguf", filename=model_name, local_dir=model_path)
# Build the model path from the same constants used for the download so the two
# can never drift apart (previously hard-coded as "./models/bling-phi-3.gguf").
llm = GPT4All(model=os.path.join(model_path, model_name))
# Initialize embedding model "all-MiniLM-L6-v2"; must match the model used to build the index.
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
# Load the existing ChromaDB database persisted at ./chroma_db
vector_store = Chroma(persist_directory="./chroma_db", embedding_function=embedding_model)
# Prompt in the <human>/<bot> chat format expected by the bling family of models.
template = """<human>: {context} \n {question} \n<bot>:"""
prompt = PromptTemplate.from_template(template)
# Define a new chain to return both the answer and the sources
# Retrieval stage: fan the incoming question out to the vector-store retriever
# (producing the context documents) while passing the raw question through.
_retrieval = RunnableParallel(
    {
        "context": vector_store.as_retriever(),
        "question": RunnablePassthrough(),
    }
)

def _collect_sources(inputs):
    # Pull the source name recorded in each retrieved document's metadata,
    # falling back to "Unknown" when a document carries no source.
    return [doc.metadata.get("source", "Unknown") for doc in inputs["context"]]

# Full chain: retrieve, then produce both the LLM answer and the source list.
qa_chain_with_sources = _retrieval | {
    "answer": prompt | llm | StrOutputParser(),
    "sources": _collect_sources,
}
# Function to call a RAG LLM query
def rag_query(query, history):
    """Answer one chat turn using the RAG chain and list the cited sources.

    Parameters
    ----------
    query : str
        The user's question.
    history : list
        Prior chat messages supplied by gr.ChatInterface; unused here, but
        required by the ChatInterface callback signature.

    Returns
    -------
    str
        The LLM answer followed by a de-duplicated, sorted list of sources.
    """
    # Invoke the chain: returns {"answer": str, "sources": list[str]}
    response = qa_chain_with_sources.invoke(query)
    answer = response["answer"]
    # set() de-duplicates but iterates in arbitrary order; sort so the source
    # list is deterministic across calls instead of shuffling between runs.
    unique_sources = sorted(set(response["sources"]))
    # Format answer + sources for display in the chat window
    return f"Answer: {answer}\n\nSources:\n" + "\n".join(unique_sources)
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    fn=rag_query,  # Function to call for generating responses
    title="WEHI Student Intern Chatbot Demo",
    type='messages',  # use the openai-style {"role": ..., "content": ...} history format
    description="Ask questions related to your WEHI internship and get answers with sources.",
    # Clickable example questions shown beneath the chat box.
    examples=[
        "What flexibility is there for the internship?",
        "What are the key things to do before the weekly meetings?",
        "How do I tackle complex and ambiguous projects?",
        "What happens over Easter break at WEHI?",
        "What are the tasks for the REDMANE Data Ingestion team?",
        "When is the final presentation due?",
        "What is Nectar?",
        "Is the internship remote or in person?"
    ],
)
# Start the Gradio web server (blocks until the app is stopped).
demo.launch()