"""Gradio chat demo backed by a fine-tuned RoBERTa NER model.

Logs every exchange to ``chat_log.txt``, tracks per-request and average
response times, and can optionally report the pending-request queue size
from a background thread.
"""
import logging
import threading
import time
from queue import Queue

import gradio as gr

# --- Logging Setup ---
logging.basicConfig(
    filename="chat_log.txt",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)

# --- Model ---
# Load the Hub model once at import time.  The object returned by gr.load()
# is callable, so it serves directly as the chat backend below.
# NOTE(review): the original code called .launch() on this line, which
# started a separate demo and blocked the interpreter, so the chat UI
# defined below never ran — and the model was never used by chat_function.
ner_model = gr.load("models/Sevixdd/roberta-base-finetuned-ner")

# --- Queue and Metrics ---
chat_queue = Queue()   # pending user messages (observed by monitor_queue)
response_times = []    # per-request latency in seconds


def chat_function(message, history):
    """Handle one chat turn: run the NER model on *message* and return the result.

    Args:
        message: The user's input text.
        history: Prior turns supplied by gr.ChatInterface (unused here).

    Returns:
        The model's response, or a generic error string if processing fails.
    """
    try:
        start_time = time.time()  # record start time for metrics
        chat_queue.put(message)   # expose the backlog to monitor_queue
        logging.info("User: %s", message)

        response = ner_model(message)  # run the loaded NER pipeline
        chat_queue.get()               # this request is no longer pending

        logging.info("Bot: %s", response)

        # --- Metrics Calculation ---
        response_time = time.time() - start_time
        response_times.append(response_time)
        avg_response_time = sum(response_times) / len(response_times)
        logging.info(
            "Response Time: %.2f seconds, Avg: %.2f seconds",
            response_time,
            avg_response_time,
        )
        return response
    except Exception as e:
        # UI boundary: log the failure and show the user a generic message
        # instead of letting the exception propagate into Gradio.
        logging.error("Error in chat processing: %s", e)
        return "An error occurred. Please try again."


# --- Additional Monitoring (Optional) ---
def monitor_queue():
    """Log the chat queue size once a minute (run in a daemon thread)."""
    while True:
        logging.info("Queue Size: %d", chat_queue.qsize())
        time.sleep(60)  # check every 60 seconds


if __name__ == "__main__":
    # Uncomment to enable queue monitoring:
    # threading.Thread(target=monitor_queue, daemon=True).start()

    # --- Gradio Interface ---
    with gr.Blocks() as demo:
        gr.Markdown("## Chat with the Bot")
        chatbot = gr.ChatInterface(fn=chat_function)
    demo.launch()