import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# NOTE(review): the original id was "TheBloke/MythoMax-L2-13B-GGUF", a
# GGUF-quantized repo. `AutoModelForCausalLM.from_pretrained` cannot load a
# GGUF repo without a `gguf_file=` argument, and that repo ships no PyTorch
# weights, so model loading failed at startup. Load the base fp16 repo
# instead (swap in a quantized backend deliberately if memory is a concern).
model_id = "Gryphe/MythoMax-L2-13b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


def chat(message: str, history: list | None = None) -> str:
    """Generate one assistant reply for the Gradio ChatInterface.

    `gr.ChatInterface` always invokes its fn as ``fn(message, history)``;
    the original one-argument signature raised ``TypeError`` on the first
    message. ``history`` defaults to ``None`` so direct single-argument
    callers keep working.

    Args:
        message: The user's latest message, used as the generation prompt.
        history: Prior chat turns supplied by ChatInterface. Unused — each
            reply is generated from the latest message alone.

    Returns:
        The model's sampled continuation (up to 200 new tokens). With
        ``return_full_text=False`` the prompt is not echoed back.
    """
    outputs = pipe(
        message,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,  # don't repeat the user's prompt in the reply
    )
    return outputs[0]["generated_text"]


if __name__ == "__main__":
    # Guard launch so importing this module doesn't start a web server.
    gr.ChatInterface(chat).launch()