|
""" |
|
import json |
|
import logging |
|
import os |
|
import re |
|
import sys |
|
|
|
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings |
|
from sentence_transformers import SentenceTransformer |
|
from langchain.text_splitter import CharacterTextSplitter |
|
#from langchain.embeddings import OpenAIEmbeddings |
|
from langchain.vectorstores import Chroma |
|
from langchain.document_loaders import PyPDFLoader |
|
from fastapi.encoders import jsonable_encoder |
|
from dotenv import load_dotenv |
|
|
|
#load_dotenv() |
|
#logging.basicConfig(level=logging.DEBUG) |
|
|
|
ABS_PATH = os.path.dirname(os.path.abspath(__file__)) |
|
DB_DIR = os.path.join(ABS_PATH, "db") |
|
|
|
vectorstore = None |
|
#embedding_function |
|
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") |
|
|
|
def replace_newlines_and_spaces(text): |
|
# Replace all newline characters with spaces |
|
text = text.replace("\n", " ") |
|
# Replace multiple spaces with a single space |
|
text = re.sub(r'\s+', ' ', text) |
|
return text |
|
|
|
|
|
def get_documents(): |
|
return PyPDFLoader("AI-smart-water-management-systems.pdf").load() |
|
|
|
|
|
def init_chromadb(): |
|
# Delete existing index directory and recreate the directory |
|
if os.path.exists(DB_DIR): |
|
import shutil |
|
shutil.rmtree(DB_DIR, ignore_errors=True) |
|
os.mkdir(DB_DIR) |
|
|
|
documents = [] |
|
for num, doc in enumerate(get_documents()): |
|
doc.page_content = replace_newlines_and_spaces(doc.page_content) |
|
documents.append(doc) |
|
|
|
# Split the documents into chunks |
|
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) |
|
texts = text_splitter.split_documents(documents) |
|
|
|
# Select which embeddings we want to use |
|
#embeddings = OpenAIEmbeddings() |
|
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") |
|
|
|
# Create the vectorestore to use as the index |
|
vectorstore = Chroma.from_documents(texts, embeddings, persist_directory=DB_DIR) |
|
#vectorstore.persist() |
|
|
|
#print(vectorstore) |
|
#vectorstore = None |
|
|
|
#db = vectorstore |
|
#db.get() |
|
#print(len(db.get()["ids"])) |
|
|
|
# Print the list of source files |
|
for x in range(len(vectorstore.get()["ids"])): |
|
# print(db.get()["metadatas"][x]) |
|
doc = vectorstore.get()["metadatas"][x] |
|
source = doc["source"] |
|
print("Source {x} :: ",source) |
|
|
|
def query_chromadb(): |
|
if not os.path.exists(DB_DIR): |
|
raise Exception(f"{DB_DIR} does not exist, nothing can be queried") |
|
|
|
# Select which embeddings we want to use |
|
#embeddings = OpenAIEmbeddings() |
|
#embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") |
|
|
|
# Load Vector store from local disk |
|
#vectorstore = Chroma(persist_directory=DB_DIR, embedding_function=embeddings) |
|
#vectorstore.persist() |
|
|
|
result = vectorstore.similarity_search_with_score(query="how to use AI in water conservation?", k=4) |
|
|
|
jsonable_result = jsonable_encoder(result) |
|
print(json.dumps(jsonable_result, indent=2)) |
|
|
|
def main(): |
|
init_chromadb() |
|
|
|
if __name__ == '__main__': |
|
main() |
|
""" |
|
import chromadb |
|
from llama_index.vector_stores.chroma import ChromaVectorStore |
|
|
|
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, PromptTemplate |
|
from llama_index.core.indices.vector_store.retrievers import VectorIndexAutoRetriever |
|
from llama_index.core.query_engine import RetrieverQueryEngine |
|
from llama_index.embeddings.huggingface import HuggingFaceEmbedding |
|
|
|
|
|
|
|
# --- Connect to the Chroma server and set up the vector store --------------
# NOTE(review): assumes a Chroma server is listening on localhost:8080 with
# SSL disabled — confirm against the deployment. Port is an int, not a str.
chroma_client = chromadb.HttpClient(host="localhost", port=8080, ssl=False)
chroma_collection = chroma_client.get_or_create_collection("example_collection")

# Local sentence-transformer model used to embed both documents and queries.
embed_model = HuggingFaceEmbedding(model_name="all-MiniLM-L6-v2")

# Wrap the Chroma collection so llama_index can use it as its vector store,
# and expose it through a StorageContext for index construction.
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# --- Load the source PDFs --------------------------------------------------
# The original built a list of raw dicts mixing placeholder text/embeddings
# with undefined LangChain loaders (DirectoryLoader/PyPDFLoader were never
# imported). llama_index indexes Document objects, which SimpleDirectoryReader
# produces directly from each PDF directory.
documents = []
for source_dir in ("/content/cricket", "/content/fifa"):
    documents.extend(
        SimpleDirectoryReader(input_dir=source_dir, required_exts=[".pdf"]).load_data()
    )

# --- Build the index -------------------------------------------------------
# VectorStoreIndex.from_documents is a classmethod: the original called it on
# an already-constructed (empty) instance and discarded the returned index, so
# the documents never reached the store. Build the index in a single step.
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    embed_model=embed_model,
)

# --- Query -----------------------------------------------------------------
# VectorIndexAutoRetriever requires a VectorStoreInfo metadata schema, which
# the original never supplied (TypeError). The index's standard retriever
# provides the same retrieve-then-synthesize flow without that schema.
retriever = index.as_retriever(similarity_top_k=4)
query_engine = RetrieverQueryEngine.from_args(retriever)

response = query_engine.query("Your query here")
print(response)