import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

MODEL_NAME = "vinai/phobert-base-v2"

# Load PhoBERT with a 3-class classification head, then restore the fine-tuned weights
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=3)
model.load_state_dict(torch.load("best_model_state.bin", map_location=device))
model.to(device)
model.eval()

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# Class labels: 0 = "An toàn" (safe), 1 = "Tiêu cực" (negative), 2 = "Nguy cơ bạo lực" (risk of violence)
label_map = {0: "An toàn", 1: "Tiêu cực", 2: "Nguy cơ bạo lực"}


def predict(text):
    # Tokenize the (lowercased) input, padding/truncating to a fixed length of 128 tokens
    inputs = tokenizer.encode_plus(
        text.lower(),
        return_tensors="pt",
        max_length=128,
        padding="max_length",
        truncation=True,
    )
    input_ids = inputs["input_ids"].to(device)
    attention_mask = inputs["attention_mask"].to(device)

    # Run inference without gradient tracking and take the most probable class
    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
        probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
        pred = torch.argmax(probs, dim=1).item()

    # "độ tin cậy" = confidence
    return f"{label_map[pred]} (độ tin cậy: {probs[0][pred]:.2f})"


demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="PhoBERT - Phân tích cảm xúc học sinh",  # "PhoBERT - Student sentiment analysis"
)
demo.launch()
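# Note: "best_model_state.bin" is assumed here to be a PyTorch state dict produced by a
# separate fine-tuning run of this same PhoBERT classifier (the training script is not
# shown). A minimal sketch of how such a checkpoint would typically be written, so that
# the load_state_dict() call above can restore it:
#
#     torch.save(model.state_dict(), "best_model_state.bin")
#
# Saving only the state dict (rather than the whole model object) keeps the file
# compatible with re-instantiating the model via from_pretrained(), as done above.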