import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "NousResearch/Llama-2-7b-chat-hf"

# Load the tokenizer and model; device_map="auto" lets Accelerate place weights across available devices
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


def extract_event(text):
    # Prompt the model to pull structured event fields out of free-form text
    prompt = f"""Extract the following fields from the input text:
- Event title
- Date (YYYY-MM-DD)
- Start Time (HH:MM)
- End Time (HH:MM)
- Duration
- Location
- People

Text: {text}

Return in JSON format.
"""
    # Greedy decoding (do_sample=False) keeps the extraction deterministic
    response = pipe(prompt, max_new_tokens=300, do_sample=False)[0]["generated_text"]
    return response


# Minimal Gradio UI around extract_event; the labels and title below are placeholders
demo = gr.Interface(
    fn=extract_event,
    inputs=gr.Textbox(lines=8, label="Input text"),
    outputs=gr.Textbox(label="Extracted event (JSON)"),
    title="Event Extractor",
)
demo.launch()
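

# Optional post-processing sketch, not part of the app above: by default the
# text-generation pipeline echoes the prompt back in "generated_text" (unless
# return_full_text=False is passed), so the JSON has to be cut out of the
# completion before it can be parsed. parse_event_json is an assumed helper
# shown only for illustration.
import json


def parse_event_json(generated_text, prompt):
    # Drop the echoed prompt, then grab the first {...} block from the completion
    completion = generated_text[len(prompt):] if generated_text.startswith(prompt) else generated_text
    start, end = completion.find("{"), completion.rfind("}")
    if start == -1 or end == -1:
        return None  # model did not return anything that looks like JSON
    try:
        return json.loads(completion[start:end + 1])
    except json.JSONDecodeError:
        return None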