DearAI / app.py
namnam9252's picture
Update app.py
9a2cc54 verified
raw
history blame contribute delete
486 Bytes
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "TheBloke/MythoMax-L2-13B-GGUF"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def chat(message, history=None):
    """Generate one reply for the Gradio chat UI.

    gr.ChatInterface invokes its fn as fn(message, history), so the original
    single-parameter signature raised TypeError on every user message.
    `history` defaults to None (and is unused — the model sees only the
    current message) so direct chat("hi") calls remain backward-compatible.

    Args:
        message: The user's latest chat message.
        history: Prior (user, bot) turns supplied by ChatInterface; ignored.

    Returns:
        The newly generated text. `return_full_text=False` stops the
        text-generation pipeline from echoing the prompt back into the reply.
    """
    outputs = pipe(
        message,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,
    )
    return outputs[0]["generated_text"]
# Wire the chat function into a Gradio chat UI and start the web server.
# ChatInterface calls chat(message, history) once per user turn.
gr.ChatInterface(chat).launch()