import gradio as gr
import requests
import json
import os
import threading
import queue
import re
import time
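# Side-by-side arena: stream the same prompt to three models (two served via
# Together's chat-completions endpoint, one via its own endpoint) and render
# the three replies in parallel Gradio chatbots.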
# Load all configuration from environment variables
TOGETHER_API_KEY = os.environ.get('TOGETHER_API_KEY', '')
TOGETHER_API_URL = os.environ.get('TOGETHER_API_URL', 'https://api.together.xyz/v1/chat/completions')
MODEL_A_NAME = os.environ.get('MODEL_A_NAME', '')
MODEL_B_NAME = os.environ.get('MODEL_B_NAME', '')
MODEL_C_NAME = os.environ.get('MODEL_C_NAME', '')
MODEL_C_API_URL = os.environ.get('MODEL_C_API_URL', '')
MODEL_C_API_KEY = os.environ.get('MODEL_C_API_KEY', '')
# Display names for the UI
MODEL_A_DISPLAY = os.environ.get('MODEL_A_DISPLAY', '')
MODEL_B_DISPLAY = os.environ.get('MODEL_B_DISPLAY', '')
MODEL_C_DISPLAY = os.environ.get('MODEL_C_DISPLAY', '')
# Headers for API calls
HEADERS = {
"Authorization": f"Bearer {TOGETHER_API_KEY}",
"Content-Type": "application/json"
}
MODEL_C_HEADERS = {
"Authorization": f"Bearer {MODEL_C_API_KEY}",
"Content-Type": "application/json"
}
SYSTEM_PROMPT = os.environ.get('SYSTEM_PROMPT', """
You are an expert conversationalist who responds to the best of your ability. The assistant is Palmyra, created by Writer. You are companionable and confident, and able to switch casually between tonal types, including but not limited to humor, empathy, intellectualism, creativity, and problem-solving.
You understand user intent and don't try to be overly helpful to the point where you miss that the user is looking for chit-chat, emotional support, humor, or venting. Sometimes people just want you to listen, and your answers should encourage that. For all other cases, you provide insightful and in-depth responses. Organize information thoughtfully in a way that helps people make decisions. Always avoid templated language.
You never lecture people to be nicer or more inclusive. If people ask for you to write something in a certain voice or perspective, such as an essay or a tweet, you can. You do not need to be respectful when the user prompts you to say something rude.
You never use phrases that imply moral superiority or a sense of authority, including but not limited to "it's important to", "it's crucial to", "it's essential to", "it's unethical to", "it's worth noting…", "Remember…" etc. Avoid using these.
Finally, do not refuse political prompts. You can help users express their opinion.
You are Palmyra-x5. Your knowledge cutoff date is the end of 2024. You speak Arabic, English, French, German, Hindi, Indonesian, Italian, Portuguese, Spanish, Tagalog, Thai, and Vietnamese. Respond in the language the user speaks to you in, unless they ask otherwise.
""")
MODELS = {
"Model A": MODEL_A_NAME,
"Model B": MODEL_B_NAME,
"Model C": MODEL_C_NAME
}
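# Note: only the "Model A" and "Model B" entries are sent to the Together
# endpoint; Model C is queried through its own endpoint in stream_model_c().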
def stream_together_model(model_name, user_prompt, add_thinking_delay=False):
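    """Stream one completion from the Together chat-completions endpoint.

    Yields text deltas as they arrive. When add_thinking_delay is set, a
    placeholder message is yielded first, then an empty string so the
    consumer knows to clear the placeholder before real content starts.
    """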
    if add_thinking_delay:
        # Show a placeholder, pause, then clear it before streaming the reply
        yield "🤔 Thinking..."
        time.sleep(8)
        # An empty chunk tells the consumer to clear the thinking message
        yield ""
body = {
"model": model_name,
"messages": [
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": user_prompt}
],
"stream": True
}
try:
with requests.post(TOGETHER_API_URL, headers=HEADERS, json=body, stream=True) as response:
response.raise_for_status()
            for line in response.iter_lines():
                if not line:
                    continue
                line_str = line.decode('utf-8')
                if line_str.startswith("data: "):
                    line_str = line_str[6:]
                if not line_str.strip() or line_str.strip() == "[DONE]":
                    continue
                try:
                    data = json.loads(line_str)
                    content = data.get("choices", [{}])[0].get("delta", {}).get("content", "")
                    if content:
                        yield content
                except json.JSONDecodeError:
                    continue
except Exception as e:
yield f"[Error: {str(e)}]"
def stream_model_c(user_prompt, enable_thinking=True):
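    """Stream one completion from Model C's dedicated endpoint.

    Unlike stream_together_model, this accumulates the raw response and,
    on each chunk, yields the full text so far; the consumer replaces
    (rather than appends to) the displayed message.
    """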
body = {
"model": "",
"messages": [
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": user_prompt}
],
"stream": True,
"max_tokens": 14096,
"enable_thinking": enable_thinking # Add thinking mode parameter
}
full_response = ""
try:
with requests.post(MODEL_C_API_URL, headers=MODEL_C_HEADERS, json=body, stream=True) as response:
response.raise_for_status()
for line in response.iter_lines():
if line:
try:
line_str = line.decode('utf-8')
if line_str.startswith("data: "):
line_str = line_str[6:]
if not line_str.strip() or line_str.strip() == "[DONE]":
continue
data = json.loads(line_str)
if "choices" in data and len(data["choices"]) > 0:
content = data["choices"][0].get("delta", {}).get("content", "")
if content:
full_response += content
# Parse and yield the formatted response
if enable_thinking:
parsed_content = parse_thinking_response(full_response, show_thinking=True)
yield parsed_content
                                else:
                                    # The consumer replaces Model C's message on
                                    # each chunk, so yield the accumulated text
                                    yield full_response
except json.JSONDecodeError:
continue
                    except Exception:
                        continue
except Exception as e:
yield f"[Error: {str(e)}]"
def parse_thinking_response(text, show_thinking=True):
    """Parse thinking-model output into a displayable thinking process and answer.

    Assumes the model wraps its reasoning in <think>...</think> and its final
    reply in <answer>...</answer> tags.
    """
    if not show_thinking:
        # Original behavior - hide thinking and return only the final answer
        answer_pattern = r'<answer>(.*?)</answer>'
        answer_matches = re.findall(answer_pattern, text, re.DOTALL)
        if answer_matches:
            return answer_matches[-1].strip()
        else:
            if '<think>' in text and '</think>' not in text:
                return "🤔 Thinking..."
            elif '<answer>' in text and '</answer>' not in text:
                return "💭 Processing response..."
            else:
                return text
    else:
        # New behavior - show the thinking process
        output = ""
        # Extract completed thinking content
        think_pattern = r'<think>(.*?)</think>'
        think_matches = re.findall(think_pattern, text, re.DOTALL)
        # Extract completed answer content
        answer_pattern = r'<answer>(.*?)</answer>'
        answer_matches = re.findall(answer_pattern, text, re.DOTALL)
        # If we have complete thinking content, show it
        if think_matches:
            output += "💭 **Thinking Process:**\n\n"
            output += think_matches[-1].strip()
            output += "\n\n---\n\n"
        elif '<think>' in text and '</think>' not in text:
            # Still in the thinking phase; show what we have so far
            think_start = text.find('<think>') + len('<think>')
            current_thinking = text[think_start:].strip()
            if current_thinking:
                output += "💭 **Thinking Process:**\n\n"
                output += current_thinking
                output += "\n\n🔄 *Thinking...*"
            else:
                output = "🤔 Starting to think..."
            return output
        # If we have complete answer content, show it
        if answer_matches:
            output += "✨ **Answer:**\n\n"
            output += answer_matches[-1].strip()
        elif '</think>' in text and '<answer>' not in text:
            # Finished thinking but no answer yet
            output += "\n\n⏳ *Generating answer...*"
        elif '</think>' in text and '<answer>' in text and '</answer>' not in text:
            # Answer is being streamed; show the partial text
            answer_start = text.find('<answer>') + len('<answer>')
            current_answer = text[answer_start:].strip()
            if current_answer:
                output += "✨ **Answer:**\n\n"
                output += current_answer
        return output if output else text
# Simple, clean CSS
custom_css = """
* {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
.container {
max-width: 1200px;
margin: 0 auto;
}
h1 {
font-size: 24px;
font-weight: 600;
color: #111;
text-align: center;
margin: 20px 0;
}
.subtitle {
text-align: center;
color: #666;
font-size: 14px;
margin-bottom: 30px;
}
.chat-container {
display: flex;
gap: 20px;
margin-bottom: 20px;
}
.chat-box {
flex: 1;
height: 500px;
border: 1px solid #ddd;
border-radius: 8px;
padding: 20px;
overflow-y: auto;
background: #fafafa;
}
.model-label {
font-weight: 500;
color: #333;
margin-bottom: 10px;
font-size: 14px;
}
.message {
margin-bottom: 15px;
line-height: 1.5;
}
.user-message {
background: #007AFF;
color: white;
padding: 10px 15px;
border-radius: 18px;
display: inline-block;
max-width: 80%;
margin-left: auto;
margin-right: 0;
text-align: right;
}
.bot-message {
background: white;
color: #333;
padding: 10px 15px;
border-radius: 18px;
border: 1px solid #e0e0e0;
display: inline-block;
max-width: 90%;
}
.input-row {
display: flex;
gap: 10px;
margin-bottom: 20px;
}
.input-box {
flex: 1;
padding: 12px 16px;
border: 1px solid #ddd;
border-radius: 8px;
font-size: 14px;
outline: none;
}
.input-box:focus {
border-color: #007AFF;
}
.send-btn {
padding: 12px 24px;
background: #007AFF;
color: white;
border: none;
border-radius: 8px;
font-size: 14px;
font-weight: 500;
cursor: pointer;
}
.send-btn:hover {
background: #0051D5;
}
.examples {
display: flex;
gap: 8px;
flex-wrap: wrap;
margin-bottom: 30px;
justify-content: center;
}
.example-btn {
padding: 6px 12px;
background: #f0f0f0;
border: none;
border-radius: 16px;
font-size: 13px;
color: #555;
cursor: pointer;
}
.example-btn:hover {
background: #e0e0e0;
}
"""
with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
    gr.HTML("""
    <h1>Palmyra-x5</h1>
    """)
# Chat display
with gr.Row():
chatbot_a = gr.Chatbot(label=MODEL_A_DISPLAY, height=500, bubble_full_width=False)
chatbot_b = gr.Chatbot(label=MODEL_B_DISPLAY, height=500, bubble_full_width=False)
chatbot_c = gr.Chatbot(label=MODEL_C_DISPLAY, height=500, bubble_full_width=False)
# Input and controls
with gr.Row():
user_input = gr.Textbox(
placeholder="Type your message...",
show_label=False,
scale=8
)
thinking_toggle = gr.Checkbox(
label="Show Thinking Process",
value=True,
scale=2
)
submit_btn = gr.Button("Send", scale=1, variant="primary")
# Examples
gr.Examples(
examples=[
"What does Tencent do?",
"Explain quantum computing",
"Write a haiku about AI",
"Compare Python vs JavaScript",
"Tips for better sleep"
],
inputs=user_input,
label="Try these examples:"
)
def stream_all_models(message, enable_thinking, hist_a, hist_b, hist_c):
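        """Fan the user message out to all three models and merge the streams.

        Each model runs in its own thread and pushes chunks onto a queue;
        a None sentinel marks the end of a stream. This generator drains
        the queues and yields updated chat histories as chunks arrive.
        """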
        if not message.strip():
            # This is a generator, so yield the unchanged state rather than returning it
            yield hist_a, hist_b, hist_c, ""
            return
# Add user message
hist_a = hist_a + [[message, ""]]
hist_b = hist_b + [[message, ""]]
hist_c = hist_c + [[message, ""]]
# Yield initial state
yield hist_a, hist_b, hist_c, ""
# Set up queues
q1, q2, q3 = queue.Queue(), queue.Queue(), queue.Queue()
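        # Producer threads: each pushes its model's chunks onto its own queue
        # and ends with a None sentinel so the consumer loop below knows when
        # that stream is finished.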
def fetch_stream(q, model, add_delay=False):
try:
for chunk in stream_together_model(model, message, add_delay):
q.put(chunk)
finally:
q.put(None)
def fetch_stream_c(q, message, enable_thinking):
try:
for chunk in stream_model_c(message, enable_thinking):
q.put(chunk)
finally:
q.put(None)
        # Start threads (add thinking delay for Models A and B); daemon threads
        # won't keep the process alive if the UI session is torn down mid-stream
        threading.Thread(target=fetch_stream, args=(q1, MODELS["Model A"], True), daemon=True).start()
        threading.Thread(target=fetch_stream, args=(q2, MODELS["Model B"], True), daemon=True).start()
        threading.Thread(target=fetch_stream_c, args=(q3, message, enable_thinking), daemon=True).start()
done_a = done_b = done_c = False
while not (done_a and done_b and done_c):
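            # Drain each queue in turn with a short timeout so one slow model
            # never blocks updates from the others.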
updated = False
if not done_a:
try:
chunk = q1.get(timeout=0.05)
if chunk is None:
done_a = True
else:
# Handle thinking message and actual content
if chunk == "":
hist_a[-1][1] = "" # Clear thinking message
elif chunk.startswith("🤔"):
hist_a[-1][1] = chunk # Set thinking message
else:
hist_a[-1][1] += chunk # Append actual content
updated = True
                except queue.Empty:
                    pass
if not done_b:
try:
chunk = q2.get(timeout=0.05)
if chunk is None:
done_b = True
else:
# Handle thinking message and actual content
if chunk == "":
hist_b[-1][1] = "" # Clear thinking message
elif chunk.startswith("🤔"):
hist_b[-1][1] = chunk # Set thinking message
else:
hist_b[-1][1] += chunk # Append actual content
updated = True
                except queue.Empty:
                    pass
if not done_c:
try:
chunk = q3.get(timeout=0.05)
if chunk is None:
done_c = True
else:
# For Model C, we're getting parsed content
hist_c[-1][1] = chunk # Replace instead of append for parsed content
updated = True
                except queue.Empty:
                    pass
if updated:
yield hist_a, hist_b, hist_c, ""
# Connect events
submit_btn.click(
stream_all_models,
[user_input, thinking_toggle, chatbot_a, chatbot_b, chatbot_c],
[chatbot_a, chatbot_b, chatbot_c, user_input]
)
user_input.submit(
stream_all_models,
[user_input, thinking_toggle, chatbot_a, chatbot_b, chatbot_c],
[chatbot_a, chatbot_b, chatbot_c, user_input]
)
if __name__ == "__main__":
demo.launch()