import os

import gradio as gr
import requests

# Hugging Face Inference API endpoint for the DialoGPT-large conversational model
API_TOKEN = os.getenv("HF_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
API_URL = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-large"

# Conversation state echoed back by the API and reused on each request
past_user_inputs = []
generated_responses = []


def query(message, history):
    global past_user_inputs
    global generated_responses
    # The conversational task expects the running history plus the new message
    payload = {"inputs": {
        "past_user_inputs": past_user_inputs,
        "generated_responses": generated_responses,
        "text": message,
    }}
    try:
        response = requests.post(API_URL, headers=headers, json=payload)
        if response.status_code == 200:
            data = response.json()
            # Persist the updated history so the next turn keeps context
            past_user_inputs = data["conversation"]["past_user_inputs"]
            generated_responses = data["conversation"]["generated_responses"]
            return data["generated_text"]
        return "Sorry, the inference API did not complete successfully."
    except Exception:
        return "Sorry, internal error. Please refresh the page and try again."


demo = gr.ChatInterface(query).launch()