# KnowledgeQuest / app.py
import gradio as gr
from groq import Groq
import os
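
# Read the Groq API key from the environment; os.getenv returns None if no
# variable named "KnowledgeQuest" is set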
api_key = os.getenv("KnowledgeQuest")
# Initialize the Groq client with the API key
client = Groq(api_key=api_key)

# List to maintain the conversation history, seeded with the system prompt
conversation_history = [
    {"role": "system", "content": "You are an experienced fact-checker and fact curator with over 30 years of experience in gathering and verifying facts. You will present a fact or false claim and ask the user to verify whether it is true or false. The fact or false claim must be concise, and you will award points for each correct answer."}
]

# Function to count tokens (rough approximation: whitespace-separated word count)
def count_tokens(messages):
    return sum(len(message["content"].split()) for message in messages)
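# Example (illustrative): count_tokens([{"role": "user", "content": "two words"}]) == 2.
# Word counts understate true token counts, so treat the budget below as a rough bound.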

# Function to get the initial LLM output and start the conversation
def start_trivia_game():
    # Initial message to start the game
    initial_message = "You will present a fact or false claim randomly and ask me to verify if it is true or false. The fact or false claim must be concise. On inputting 'stop', show the total score and percentage score concisely. Let the questions be a mix of facts and false claims randomly presented."
    # Add the initial message to the conversation history
    conversation_history.append({"role": "user", "content": initial_message})
    # Get a streaming completion from the LLM for the initial question
    completion = client.chat.completions.create(
        model="llama-3.1-70b-versatile",
        messages=conversation_history,
        temperature=0.6,
        max_tokens=8000,
        top_p=1,
        stream=True,
        stop=None,
    )
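    # Accumulate the streamed response below; `delta.content` can be None on
    # some chunks, so `or ""` substitutes an empty string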
    llm_output = ""
    for chunk in completion:
        llm_output += chunk.choices[0].delta.content or ""
    # Add the assistant's response to the conversation history
    conversation_history.append({"role": "assistant", "content": llm_output})
    return llm_output

# Function to handle the user's response and continue the conversation
def continue_trivia_game(user_response):
    # Add the user's response to the conversation history
    conversation_history.append({"role": "user", "content": user_response})

    # Token limit management: trim the oldest exchanges once the history exceeds
    # the budget. Named max_history_tokens to avoid confusion with the API's
    # max_tokens parameter below.
    max_history_tokens = 2048  # Token budget for the conversation history (example value)
    current_tokens = count_tokens(conversation_history)
    # Keep at least the system message, one full exchange, and the latest user
    # message, so the loop cannot run forever or pop past the system message
    while current_tokens > max_history_tokens and len(conversation_history) > 3:
        # Remove the oldest user/assistant pair (index 0 holds the system message)
        conversation_history.pop(1)
        conversation_history.pop(1)
        current_tokens = count_tokens(conversation_history)
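    # Note: trimming whole pairs keeps the transcript coherent, but any running
    # score the model was tracking in dropped turns is lost along with them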

    # Get a streaming completion from the LLM for the user's response
    try:
        completion = client.chat.completions.create(
            model="llama-3.1-70b-versatile",
            messages=conversation_history,
            temperature=0.6,
            max_tokens=8000,
            top_p=1,
            stream=True,
            stop=None,
        )
        llm_output = ""
        for chunk in completion:
            llm_output += chunk.choices[0].delta.content or ""
        # Add the assistant's response to the conversation history
        conversation_history.append({"role": "assistant", "content": llm_output})
        return llm_output
    except Exception as e:
        # Check for a rate-limit error in the message text
        if "rate_limit_exceeded" in str(e):
            return "You've reached the maximum number of requests. Please wait a few minutes before trying again."
        else:
            return f"An error occurred. Try again in 10 minutes: {str(e)}"

# Start the game once at startup so the first question is ready when the UI loads
initial_output = start_trivia_game()

# Build the interface with gr.Blocks
with gr.Blocks() as demo:
    # Title and description
    gr.Markdown(
        "# Knowledge Quest\n"
        "A trivia game that tests your knowledge: verify whether the given claims are true or false. "
        "Points are awarded for each correct answer. Type 'Stop' to end the game and see your score. "
        "Bored of this? Type 'Start a new game' and the system will initiate a new fact-based game."
    )
    # LLM output textbox
    llm_output = gr.Textbox(label="LLM Output", placeholder="", lines=10, value=initial_output)
    # User response textbox
    user_response = gr.Textbox(label="Your Response", placeholder="Type your response here", lines=3)
    # Button to submit the user's response and update the LLM output
    submit_button = gr.Button("Submit")

    # Callback to update the LLM output upon submission
    def update_llm_output(user_input):
        return continue_trivia_game(user_input)

    # Wire the button: send the user's response, display the model's reply
    submit_button.click(fn=update_llm_output, inputs=user_response, outputs=llm_output)

# Launch the Gradio app
demo.launch()