import os

# Install dependencies at runtime (handy in notebooks; a requirements.txt is the
# more typical setup on Hugging Face Spaces).
os.system('pip install transformers gradio torch')

import gradio as gr
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the fine-tuned Thai fake-news classifier and its tokenizer.
tokenizer = AutoTokenizer.from_pretrained("EXt1/mdeberta-v3-base-thai-fakenews")
model = AutoModelForSequenceClassification.from_pretrained("EXt1/mdeberta-v3-base-thai-fakenews")
model.eval()
|
|
def classify_fake_news(text):
    # Tokenize the input and run a single forward pass without tracking gradients.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    probs = F.softmax(logits, dim=1).cpu().numpy()[0]

    # Label order assumed to match the model's output indices (0 = real, 1 = fake).
    labels = ["Real News", "Fake News"]
    predicted_class = int(probs.argmax())

    label = labels[predicted_class]
    prob = float(probs[predicted_class]) * 100

    return label, f"{prob:.2f}%"
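# Quick local sanity check before launching the UI (the sample text below is
# illustrative only, not from the model's evaluation data):
# print(classify_fake_news("ตัวอย่างข้อความข่าว"))  # "sample news text" in Thai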
|
|
# classify_fake_news returns two values (label and confidence), so the interface
# needs two output components rather than a single "text" output.
gr.Interface(
    fn=classify_fake_news,
    inputs=gr.Textbox(lines=8, placeholder="Enter text here..."),
    outputs=[gr.Textbox(label="Prediction"), gr.Textbox(label="Confidence")],
    title="Thai Fake News Classification using mdeberta-v3-base",
    description="Classifies Thai news as Fake or Real with 91 percent accuracy using a fine-tuned mDeBERTa-v3 model",
).launch()
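# When running locally, launch(share=True) can be used instead to expose a
# temporary public URL for the demo.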