Spaces: Sleeping
| import gradio | |
| from transformers import AutoTokenizer, AutoModelForSequenceClassification | |
| import torch | |
| from scipy.special import softmax | |
# Cache for the (tokenizer, model) pair keyed by model name: loading BERT
# weights from the hub is slow, so it must happen once, not per request.
_MODEL_CACHE = {}


def _load_model(model_name):
    """Return a cached ``(tokenizer, model)`` pair for *model_name*, loading it on first use."""
    if model_name not in _MODEL_CACHE:
        _MODEL_CACHE[model_name] = (
            AutoTokenizer.from_pretrained(model_name),
            AutoModelForSequenceClassification.from_pretrained(model_name),
        )
    return _MODEL_CACHE[model_name]


def predict(text):
    """Classify a German comment as hate speech or not.

    Parameters:
        text: the comment string to classify.

    Returns:
        dict mapping the German label descriptions ("Kein Hasskommentar",
        "Hasskommentar") to their softmax probabilities.
    """
    model_name = "deepset/bert-base-german-cased-hatespeech-GermEval18Coarse"
    short_score_descriptions = {0: "Kein Hasskommentar", 1: "Hasskommentar"}
    # BUGFIX: the original re-downloaded/re-instantiated tokenizer and model
    # on every call; they are now loaded once and reused.
    tokenizer, model = _load_model(model_name)
    # Original wrote tokenizer(*([text],), ...) — an obfuscated tokenizer([text], ...).
    model_input = tokenizer([text], padding=True, return_tensors="pt")
    with torch.no_grad():
        output = model(**model_input)
    # output[0] is the logits tensor; take the single batch row and normalize.
    logits = softmax(output[0][0].detach().numpy()).tolist()
    return {short_score_descriptions[k]: v for k, v in enumerate(logits)}
# Wire the classifier into a minimal web UI: a single comment textbox in,
# a label widget showing the class probabilities out.
demo = gradio.Interface(
    fn=predict,
    title="Klassifikator deutschsprachiger Hasskommentare",
    inputs=[gradio.Textbox(label="Kommentar")],
    outputs=[gradio.Label(label="Klassifikation")],
)
demo.launch()