# Hugging Face Spaces app — video game recommendation chatbot (RAG over Chroma + Cohere).
# NOTE(review): the exported header recorded build errors for this Space; the source
# below was recovered from a table export and reformatted.
import os

import cohere
import gradio as gr
from langchain.prompts import ChatPromptTemplate
from langchain_cohere import ChatCohere
from langchain_community.document_loaders import JSONLoader
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_huggingface import HuggingFaceEmbeddings
# --- Retrieval setup: reopen the persisted Chroma index and build the RAG chain ---

# Sentence-transformer embeddings; must be the same model that was used when
# the Chroma index was originally built, or similarity search is meaningless.
embedding_function = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

persist_directory = "./chroma_langchain_db"

# Load the vector store that was previously persisted to disk.
vectordb_loaded = Chroma(
    persist_directory=persist_directory,
    embedding_function=embedding_function,
)

# MMR retrieval: fetch 500 candidates, keep the 300 most relevant/diverse.
# NOTE(review): k=300 is very large for a chat context window — confirm the
# downstream prompt can actually hold this many documents.
retriever = vectordb_loaded.as_retriever(
    search_type="mmr",
    search_kwargs={'k': 300, 'fetch_k': 500},
)

template = """Search for games based on the query while using the following context:
{context}
Query: {query}
"""
prompt = ChatPromptTemplate.from_template(template)

COHERE_API_KEY = os.getenv("COHERE_API_KEY")
# Fix: the key was read but never handed to ChatCohere, which silently fell
# back to its own env lookup; pass it explicitly, consistent with ClientV2 below.
model = ChatCohere(cohere_api_key=COHERE_API_KEY)

# RAG chain: retrieve context for the query, fill the prompt template,
# invoke the chat model, and parse the reply down to a plain string.
chain = (
    {"context": retriever, "query": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

# Separate low-level Cohere client used for the multi-turn chat in respond().
client = cohere.ClientV2(COHERE_API_KEY)
COHERE_MODEL = "command-r-plus"
def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens,
    temperature,
    top_p,
):
    """Gradio chat handler: ground a system prompt via the RAG chain, then chat.

    Args:
        message: Latest user message from the ChatInterface.
        history: Prior (user, assistant) exchanges supplied by Gradio.
        max_tokens: Cap on generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The assistant's full reply text as a single chunk (no token streaming).
    """
    # Run retrieval first so its answer can be embedded in the system prompt
    # as grounding context for the recommendation task.
    query = message
    retrieved_response = chain.invoke(query)
    system_message = f"""
You are a friendly video game recommendation expert chatbot.
Your task is to help parents and guardians to find appropriate video games for their children.
Extract the child's age, preferred genre and multiplayer preference.
After you extracted the information you need, you should:
- Suggest 5 video games that fit the given criteria.
- If no games exactly match the genre, suggest similar alternatives.
Use the following information to generate your suggestions:
{retrieved_response}
If you don't find enough games in the info you are given, then use your own knowledge.
Only suggest video games that exist. Do NOT make up game titles.
### Response Format:
Game 1:
- Name: [Game Title from the information you are given]
- Genres: [List of genres from the information you are given]
- Themes: [List of themes from the information you are given]
- Age Rating: [Age rating by PEGI from the information you are given in a format like for example: PEGI 3]
- Game Modes: [List of game modes from the information you are given]
- Platforms: [List of platforms from the information you are given]
- Summary: [Summary of the game from the information you are given]
- The reasons why you recommend the game
and so on.
Format the response as a clear, structured and easily understandable list.
After you gave your recommendations ask the user for feedback. Are they satisfied with the results or not? Do they have any questions about the given games?
If they are not satisfied, then give the user the options of receiving more recommendations or changing their preferences.
If they have questions about a game/games then provide the user with real information about the game/games.
If they are satisfied and have no questions, then tell them that you were very happy to help and end the conversation.
"""
    messages = [{"role": "system", "content": system_message}]
    # Replay the Gradio history as alternating user/assistant turns,
    # skipping empty slots (Gradio may leave one side blank).
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})
    response = client.chat(
        messages=messages,
        model=COHERE_MODEL,
        temperature=temperature,
        max_tokens=max_tokens,
        p=top_p,  # fix: top_p was accepted from the UI but never forwarded
    )
    yield response.message.content[0].text
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""

# Sliders shown under the chat box; Gradio passes their values to respond()
# positionally after (message, history), in the order listed below.
_max_tokens_slider = gr.Slider(
    minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"
)
_temperature_slider = gr.Slider(
    minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"
)
_top_p_slider = gr.Slider(
    minimum=0.1,
    maximum=1.0,
    value=0.95,
    step=0.05,
    label="Top-p (nucleus sampling)",
)

demo = gr.ChatInterface(
    respond,
    additional_inputs=[_max_tokens_slider, _temperature_slider, _top_p_slider],
)

if __name__ == "__main__":
    demo.launch()