# NOTE(review): removed a Hugging Face "Spaces: Sleeping" page banner captured
# during extraction — it was not part of the source file.
# function.py
import openai

from langchain.embeddings import OpenAIEmbeddings
from pinecone import Pinecone, ServerlessSpec
class Obnoxious_Agent:
    """Screens user queries for obnoxious/uncomfortable content via a yes/no LLM call."""

    def __init__(self, client) -> None:
        """Store the OpenAI-style client and install the default yes/no prompt."""
        self.client = client
        self.prompt = "Determine if the following query is uncomfortable (returns only Yes or No):"

    def set_prompt(self, prompt):
        """Replace the classification prompt with *prompt*."""
        self.prompt = prompt

    def extract_action(self, response_text) -> bool:
        """Interpret the model reply: any case-insensitive 'yes' counts as obnoxious."""
        return "yes" in response_text.lower()

    def check_query(self, query):
        """Ask the model whether *query* is obnoxious; return True on a 'Yes' verdict."""
        combined = "\n".join([self.prompt, f"Query: {query}"])
        # Legacy openai<1.0 ChatCompletion interface, consistent with the rest of the file.
        reply = self.client.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": combined}],
            max_tokens=10,
        )
        return self.extract_action(reply["choices"][0]["message"]["content"])
class Query_Agent:
    """Retrieves the most similar documents from a Pinecone index for a text query."""

    def __init__(self, pinecone_index, openai_client, embeddings) -> None:
        """Keep references to the vector index, the OpenAI client, and the embedder."""
        self.pinecone_index = pinecone_index
        self.client = openai_client
        self.embeddings = embeddings

    def query_vector_store(self, query, k=5):
        """Embed *query* and return the top-*k* Pinecone matches, metadata included."""
        embedded = self.embeddings.embed_query(query)
        return self.pinecone_index.query(
            vector=embedded,
            top_k=k,
            include_metadata=True,
        )
class Answering_Agent:
    """Generates an answer to a user query grounded in retrieved documents and chat history."""

    def __init__(self, openai_client) -> None:
        """Store the OpenAI-style client used for chat completions."""
        self.client = openai_client

    def generate_response(self, query, docs, conv_history, k=5):
        """Build a grounded prompt and return the model's answer.

        Args:
            query: The user's question.
            docs: Iterable of match dicts; each may carry ``metadata['text']``
                (a missing ``metadata`` or ``text`` degrades to an empty string).
            conv_history: List of prior dialog strings.
            k: Unused; retained for backward compatibility with existing callers.

        Returns:
            The model's reply with surrounding whitespace stripped.
        """
        # Comprehension instead of the original manual append loop (ruff PERF401).
        docs_text = "\n".join(doc.get("metadata", {}).get("text", "") for doc in docs)
        history_text = "\n".join(conv_history)
        full_prompt = (
            "Answer a user query based on the following related documents and dialog history.\n"
            f"Related documents:\n{docs_text}\n"
            f"Conversation history:\n{history_text}\n"
            f"User query: {query}\nAnswer:"
        )
        # Legacy openai<1.0 ChatCompletion interface, consistent with the rest of the file.
        response = self.client.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": full_prompt}],
            max_tokens=150,
        )
        return response["choices"][0]["message"]["content"].strip()
class Relevant_Documents_Agent:
    """Judges whether retrieved documents are relevant to the user's query."""

    def __init__(self, openai_client) -> None:
        """Store the OpenAI-style client used for chat completions."""
        self.client = openai_client

    def get_relevance(self, conversation) -> str:
        """Ask the model for a 'Yes'/'No' relevance verdict on *conversation*.

        Returns the model's (stripped) reply, which the prompt constrains to
        'Yes' or 'No'.
        """
        judging_prompt = (
            "You are a highly skilled assistant. Please determine if the returned documents "
            "are directly relevant to the user's query. Respond with 'Yes' if you believe the "
            "documents are relevant, or 'No' if you believe they are not.\n\n"
            "Context:\n"
            f"{conversation}\n\n"
            "Please respond with 'Yes' or 'No' only."
        )
        completion = self.client.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": judging_prompt}],
            max_tokens=10,
        )
        return completion["choices"][0]["message"]["content"].strip()
class Head_Agent:
    """Wires together the OpenAI/Pinecone backends and all sub-agents."""

    def __init__(self, openai_key, pinecone_key, pinecone_index_name) -> None:
        """Configure API clients and instantiate every sub-agent.

        Args:
            openai_key: OpenAI API key (set globally on the ``openai`` module).
            pinecone_key: Pinecone API key.
            pinecone_index_name: Name of the Pinecone index to query.
        """
        openai.api_key = openai_key
        self.openai_client = openai
        self.pinecone_index = Pinecone(api_key=pinecone_key).Index(pinecone_index_name)
        self.embeddings = OpenAIEmbeddings(openai_api_key=openai_key)
        # Every sub-agent shares the same module-level OpenAI client.
        self.obnoxious_agent = Obnoxious_Agent(self.openai_client)
        self.query_agent = Query_Agent(self.pinecone_index, self.openai_client, self.embeddings)
        self.answering_agent = Answering_Agent(self.openai_client)
        self.relevant_agent = Relevant_Documents_Agent(self.openai_client)
        self.conv_history = []  # running list of dialog strings