# test_1 / agent.py
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import torch

# ✅ Step 1: Emoji translation model (the model you trained yourself)
emoji_model_id = "jenniferhk008/roberta-hfl-emoji-aug3epoch"
emoji_tokenizer = AutoTokenizer.from_pretrained(emoji_model_id, trust_remote_code=True)

# Load in float16 on GPU; fall back to float32 on CPU, where half precision is poorly supported.
device = "cuda" if torch.cuda.is_available() else "cpu"
emoji_model = AutoModelForCausalLM.from_pretrained(
    emoji_model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)
emoji_model.eval()

# ✅ Step 2: Offensive-text detection model
classifier = pipeline(
    "text-classification",
    model="unitary/toxic-bert",
    device=0 if torch.cuda.is_available() else -1,
)


def classify_emoji_text(text: str):
    """
    Step 1: Translate the emoji in the text with the emoji model.
    Step 2: Run the classifier to decide whether the result is offensive.
    """
    # The prompt is kept in Chinese because the emoji model is Chinese-based.
    # It roughly says: "Decide whether the text below is offensive. 'Offensive'
    # here means personal attacks, insults, discrimination, hate speech, or
    # extremely vulgar content. Reply only 冒犯 (offensive) or 不冒犯 (not
    # offensive). The text follows:"
    prompt = f"""请判断下面的文本是否具有冒犯性。
这里的“冒犯性”主要指包含人身攻击、侮辱、歧视、仇恨言论或极端粗俗的内容。
如果文本具有冒犯性,请仅回复冒犯;如果不具有冒犯性,请仅回复不冒犯。
文本如下:
{text}
"""
    # Tokenise the prompt and generate greedily with the emoji model.
    inputs = emoji_tokenizer(prompt, return_tensors="pt").to(emoji_model.device)
    with torch.no_grad():
        output_ids = emoji_model.generate(**inputs, max_new_tokens=50, do_sample=False)
    decoded = emoji_tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # Keep only what follows "文本如下:", i.e. drop the echoed instructions.
    translated_text = decoded.strip().split("文本如下:")[-1].strip()

    # Run the toxicity classifier on the processed text.
    result = classifier(translated_text)[0]
    label = result["label"]
    score = result["score"]
    return translated_text, label, score
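

# A minimal usage sketch, assuming the models above loaded successfully;
# the sample sentence is a hypothetical input mixing Chinese text and emoji.
if __name__ == "__main__":
    sample = "今天真是太棒了😅🙃"
    processed, label, score = classify_emoji_text(sample)
    print(f"processed text: {processed}")
    print(f"toxic-bert label: {label} (score={score:.4f})")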