|
|
|
import os |
|
import time |
|
import traceback |
|
import openai |
|
import gradio as gr |
|
|
|
|
|
# Chat model sent with every completion request.
MODEL_NAME = "gpt-3.5-turbo"

# System prompt used when the UI's system-prompt textbox is left empty.
DEFAULT_SYSTEM_PROMPT = (

    "You are a kind, empathetic bilingual assistant (Urdu & English). "

    "Answer concisely and helpfully. If user writes in Urdu, reply in Urdu; "

    "if in English, reply in English. Preserve politeness and clarity."

)




# Read the key from the environment; when unset this is None, which makes
# respond() skip the API and use the local fallback replies instead.
openai.api_key = os.getenv("OPENAI_API_KEY")
|
|
|
|
|
def detect_language(text: str) -> str:
    """Return ``"ur"`` if *text* contains any Arabic-block character, else ``"en"``.

    Only the basic Arabic Unicode block (U+0600–U+06FF) is checked, which
    covers the core Urdu alphabet; an empty string is classified as English.
    """
    return "ur" if any("\u0600" <= char <= "\u06FF" for char in text) else "en"
|
|
|
def fallback_reply(user_message: str):
    """If no API key or API fails, return a helpful local reply.

    Picks one of three canned replies — in the same language as the user's
    message — rotating by the current wall-clock second so consecutive
    failures do not always show the identical sentence.
    """
    if detect_language(user_message) == "ur":
        canned = [
            "معذرت — ابھی سرور سے رابطہ ممکن نہیں۔ میں آپ کا پیغام نوٹ کر رہا ہوں: " + user_message,
            "میں فی الحال API سے جڑ نہیں پارہا، مگر بتائیں میں ابھی بھی سن رہا ہوں۔",
            "آپ نے کہا: " + user_message + " — جب سروس واپس آئے گی تو مفصل جواب دوں گا۔",
        ]
    else:
        canned = [
            "Sorry — I'm temporarily offline. Noting your message: " + user_message,
            "I can't reach the API right now, but I'm listening. Please try again shortly.",
            "You said: " + user_message + " — I'll respond fully once service is available.",
        ]
    # Time-based rotation: cheap pseudo-variety without any stored state.
    return canned[int(time.time()) % len(canned)]
|
|
|
def call_openai_chat(system_prompt, user_message, max_tokens=512, temperature=0.7, top_p=0.95):
    """
    Call OpenAI ChatCompletion and return the assistant's text.

    Uses the legacy (openai<1.0) ``ChatCompletion.create`` interface.
    Any API failure propagates to the caller as an exception.
    """
    resp = openai.ChatCompletion.create(
        model=MODEL_NAME,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message},
        ],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )

    choice = resp.choices[0]
    # Chat responses expose .message.content; if that shape is missing,
    # fall back to a completion-style .text attribute, else empty string.
    try:
        text = choice.message.content
    except Exception:
        text = choice.text if hasattr(choice, "text") else ""
    return text.strip()
|
|
|
|
|
def respond(message, chat_history, system_message, max_tokens, temperature, top_p):
    """
    Produce the next chat history for one user turn.

    Parameters
    ----------
    message : str | None
        User input string; blank/None input gets a gentle prompt instead.
    chat_history : list[tuple[str, str]] | None
        Prior (user, bot) turns; treated as empty when None.
    system_message : str | None
        Custom system prompt; falls back to DEFAULT_SYSTEM_PROMPT when
        empty or None.
    max_tokens, temperature, top_p :
        Numeric generation controls, coerced to int/float before the call.

    Returns
    -------
    list[tuple[str, str]]
        A NEW history list with the latest (user, bot) pair appended; the
        caller's list is never mutated in place.
    """
    if chat_history is None:
        chat_history = []

    # Reject blank input before any other work.
    if not message or not message.strip():
        return chat_history + [("", "Please type something or ask a question.")]

    # Guard against a None system_message — the original crashed on .strip().
    system_prompt = (system_message or "").strip() or DEFAULT_SYSTEM_PROMPT

    if openai.api_key:
        try:
            reply = call_openai_chat(
                system_prompt,
                message,
                max_tokens=int(max_tokens),
                temperature=float(temperature),
                top_p=float(top_p),
            )
        except Exception as e:
            # Log the failure, then degrade to a polite language-matched
            # apology instead of surfacing a traceback to the user.
            print("OpenAI call failed:", str(e))
            traceback.print_exc()
            reply = (
                "معذرت! سروس سے رابطے میں مسئلہ آیا۔ براہِ کرم بعد میں دوبارہ کوشش کریں۔"
                if detect_language(message) == "ur"
                else "Sorry! There was an issue contacting the service. Please try again later."
            )
    else:
        # No API key configured: answer locally so the UI stays usable.
        reply = fallback_reply(message)

    return chat_history + [(message, reply)]
|
|
|
|
|
# Gradio UI. Bug fixes versus the original wiring:
#   1. submit_click received history_state as input but the state was never
#      written back (it was not in the outputs), so every turn started from
#      an empty history. It now returns the new history for the state too.
#   2. Clear now resets history_state in addition to the visible widgets.
#   3. Removed the dead sync_state helper and the no-op chatbot.change hook.
with gr.Blocks(theme=gr.themes.Default()) as demo:
    gr.Markdown("<h2>🤝 What In Mind AI — Urdu & English Chatbot</h2>")
    gr.Markdown("A warm, bilingual assistant. If OpenAI key is not set, the bot uses a safe local fallback.")

    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(label="Chat")
            message = gr.Textbox(placeholder="Type in Urdu or English...", show_label=False)
            with gr.Row():
                send = gr.Button("Send")
                clear = gr.Button("Clear")

        with gr.Column(scale=1):
            gr.Markdown("### Settings")
            system_message = gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, label="System prompt (optional)")
            max_tokens = gr.Slider(minimum=64, maximum=1024, value=512, step=64, label="Max new tokens")
            temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.05, label="Temperature")
            top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus)")

    # Server-side copy of the conversation, fed back into respond() each turn.
    history_state = gr.State([])

    def submit_click(msg, history, sysmsg, max_tokens, temperature, top_p):
        """Run one chat turn; clear the textbox and sync chatbot + state."""
        new_hist = respond(msg, history, sysmsg, max_tokens, temperature, top_p)
        # Return new_hist twice: once for the visible Chatbot, once to
        # persist it in history_state for the next turn.
        return "", new_hist, new_hist

    turn_inputs = [message, history_state, system_message, max_tokens, temperature, top_p]
    turn_outputs = [message, chatbot, history_state]
    send.click(fn=submit_click, inputs=turn_inputs, outputs=turn_outputs)
    message.submit(fn=submit_click, inputs=turn_inputs, outputs=turn_outputs)
    # Clear must also reset the stored history, not just the widgets.
    clear.click(lambda: ([], "", []), [], [chatbot, message, history_state])

    gr.Markdown("---")
    gr.Markdown("**Note:** To enable OpenAI responses, add your OpenAI API key as a secret named `OPENAI_API_KEY` in your Hugging Face Space settings. Do not share your key publicly.")
|
|
|
|
|
if __name__ == "__main__":
    # Launch the Gradio server (blocking) when run as a script; importing
    # this module elsewhere only defines `demo` without starting it.
    demo.launch()