# -*- coding: utf-8 -*-
"""Simple pre-trained chatbot.

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1UgXple_p_R-0mq9p5vhOmFPo9cgdayJy

A hybrid chatbot: exact-match canned replies from ``rules``, falling back to
free-form generation with DialoGPT-small for anything else. Served via Gradio.
"""

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model/tokenizer once at import time (downloads on first run).
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Canned responses, matched against the lowercased/stripped user input.
rules = {
    "hi": "Hello! How can I help you?",
    "hello": "Hi there!",
    "how are you": "I'm just a bot, but I'm doing great! 😊",
    "good morning": "Good morning! Hope you have a great day!",
    "bye": "Goodbye! Have a nice day!",
    "what is your name": "I'm your friendly chatbot assistant!",
    "who are you": "I'm a chatbot here to chat with you!",
    "am zara": "Nice to meet you, Zara!",
    "thank you": "You're welcome! 😊",
}


def respond(user_input, history):
    """Produce the bot's reply and updated conversation history.

    Args:
        user_input: Raw text from the Gradio textbox.
        history: List of ``[user_msg, bot_msg]`` pairs, or ``None`` on the
            first turn (Gradio's ``gr.State`` starts out as ``None``).

    Returns:
        ``(history, history)`` — the same updated list twice, feeding both
        the Chatbot display and the State component.
    """
    if history is None:
        history = []

    user_input_clean = user_input.lower().strip()

    # Ignore empty/whitespace-only submissions instead of generating
    # from a degenerate prompt.
    if not user_input_clean:
        return history, history

    if user_input_clean in rules:
        bot_reply = rules[user_input_clean]
    else:
        # Rebuild the transcript as a "User:/Bot:" prompt for the model.
        # join() avoids quadratic string concatenation as history grows.
        turns = [f"\nUser: {u}\nBot: {b}" for u, b in history]
        turns.append(f"\nUser: {user_input}\nBot:")
        prompt = "".join(turns)

        # Truncate to the model's context window so long conversations
        # don't exceed DialoGPT's maximum sequence length.
        inputs = tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=tokenizer.model_max_length,
        )

        # Inference only — no gradients needed.
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=100,
                pad_token_id=tokenizer.eos_token_id,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
            )

        # Decode only the newly generated tokens (everything after the
        # prompt). Splitting the full decoded text on "Bot:" is unreliable:
        # the model often hallucinates extra "User:/Bot:" turns, and the
        # last "Bot:" may belong to one of those.
        prompt_len = inputs["input_ids"].shape[-1]
        new_tokens = outputs[0][prompt_len:]
        bot_reply = tokenizer.decode(new_tokens, skip_special_tokens=True)

        # If the model hallucinated a follow-up user turn, keep only the
        # bot's own reply.
        bot_reply = bot_reply.split("User:")[0].strip()
        if not bot_reply:
            bot_reply = "I'm not sure what to say — could you rephrase that?"

    history.append([user_input, bot_reply])
    return history, history


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type your message here...")
    state = gr.State()
    msg.submit(respond, inputs=[msg, state], outputs=[chatbot, state])

# Guard the launch so importing this module (e.g. for testing) doesn't
# start the server.
if __name__ == "__main__":
    demo.launch()