Spaces: Sleeping
| import uvicorn | |
| from fastapi import FastAPI | |
| from pydantic import BaseModel | |
| from transformers import AutoTokenizer, AutoModelForCausalLM | |
| import torch | |
| import threading | |
app = FastAPI()

# Fine-tuned DialoGPT checkpoint for mental-health support chat.
# Tokenizer and model are loaded once at import time so every request
# reuses the same in-memory weights (first load downloads from the Hub).
model_name = "daikooo/DialogGPT-finetune-mental-health-chatbot"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
class Message(BaseModel):
    """Request payload for the chat endpoint: one user utterance."""

    # Raw text of the user's message.
    message: str
@app.post("/predict")  # was never registered on the app, so the endpoint was unreachable
def predict(data: Message):
    """Generate an empathetic chatbot reply for a user's message.

    Args:
        data: Request body holding the user's ``message`` string.

    Returns:
        dict: ``{"reply": <generated text>}``.
    """
    user_input = data.message
    prompt = f"""You are a caring mental health support assistant. Please respond to the user's message with empathy and understanding.
User: {user_input}
You:"""
    # Tokenize via __call__ to also get an attention mask: DialoGPT has no
    # dedicated pad token (pad_token_id == eos_token_id), and generate()
    # cannot infer padding reliably without the mask.
    encoded = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # inference only — skip gradient bookkeeping
        output = model.generate(
            encoded["input_ids"],
            attention_mask=encoded["attention_mask"],
            # max_length counted the prompt tokens too, starving long inputs
            # of generation budget; max_new_tokens bounds only the reply.
            max_new_tokens=150,
            # temperature is silently ignored unless sampling is enabled.
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )
    result = tokenizer.decode(output[0], skip_special_tokens=True)
    # Keep only the text generated after the last "You:" marker.
    reply = result.split("You:")[-1].strip()
    return {"reply": reply}
def start():
    """Serve the FastAPI app with uvicorn on all interfaces, port 7860."""
    server_opts = {"host": "0.0.0.0", "port": 7860}
    uvicorn.run(app, **server_opts)


# Launch the server in a background thread at import time so the
# importing process (e.g. a notebook or Space runtime) is not blocked.
threading.Thread(target=start).start()