"""Gradio chat demo serving a 4-bit quantized Mistral agriculture model."""

import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)

MODEL_ID = "chanystrange/mistral-agri-merged_143"

# Load tokenizer and model once at import time so every request reuses them.
# NOTE: passing load_in_4bit=True directly to from_pretrained is deprecated;
# the supported API is a BitsAndBytesConfig quantization_config.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)


def chat(prompt: str) -> str:
    """Generate a sampled completion for *prompt*.

    Uses return_full_text=False so only the newly generated tokens are
    returned — otherwise the pipeline prepends the prompt and the UI
    echoes the user's input back.
    """
    output = generator(
        prompt,
        max_new_tokens=200,
        do_sample=True,
        return_full_text=False,
    )
    return output[0]["generated_text"]


demo = gr.Interface(fn=chat, inputs="text", outputs="text")

if __name__ == "__main__":
    # share=True publishes a public Gradio tunnel URL in addition to
    # binding locally on all interfaces (0.0.0.0) at port 7860.
    demo.launch(
        share=True,
        server_name="0.0.0.0",
        server_port=7860,
        show_api=True,
    )