import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from sentence_transformers import SentenceTransformer, util
import torch
import torch.nn.functional as F

# Load SBERT model
sbert_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

# Load NLI model
nli_model_name = "tasksource/ModernBERT-base-nli"
nli_tokenizer = AutoTokenizer.from_pretrained(nli_model_name)
nli_model = AutoModelForSequenceClassification.from_pretrained(nli_model_name)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
nli_model.to(device)
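# The hard-coded label order used in check_entail() below (entailment / neutral /
# contradiction) is an assumption about this checkpoint; transformers
# sequence-classification configs normally expose the authoritative mapping, so it
# can be verified with:
# print(nli_model.config.id2label)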

# SBERT function
def compute_similarity(text1, text2):
    embeddings = sbert_model.encode([text1, text2], convert_to_tensor=True)
    similarity = float(util.pytorch_cos_sim(embeddings[0], embeddings[1])[0])
    interpretation = ""
    if similarity > 0.9:
        interpretation = "🟢 Very High Similarity"
    elif similarity > 0.75:
        interpretation = "🟡 Moderate Similarity"
    elif similarity > 0.5:
        interpretation = "🟠 Low Similarity"
    else:
        interpretation = "🔴 Very Low Similarity"
    return round(similarity, 4), interpretation
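# Illustrative call (not part of the app; the inputs are hypothetical and the
# thresholds above are heuristic):
#   score, verdict = compute_similarity("Dogs bark loudly.", "Canines bark a lot.")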

# NLI function
def check_entail(premise, hypothesis):
    inputs = nli_tokenizer(premise, hypothesis, return_tensors="pt", truncation=True, padding=True, max_length=512).to(device)
    with torch.no_grad():
        logits = nli_model(**inputs).logits
    probs = torch.softmax(logits, dim=-1)[0]
    label = ["entailment", "neutral", "contradiction"][torch.argmax(probs).item()]
    return label, {"entailment": float(probs[0]), "neutral": float(probs[1]), "contradiction": float(probs[2])}
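# Note: with truncation=True and max_length=512, essays longer than 512 tokens are
# judged only on their leading portion.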

def run_bi_direction(a, b):
    res1 = check_entail(a, b)
    res2 = check_entail(b, a)
    return res1[0], res1[1], res2[0], res2[1]
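# Hypothetical helper (an assumption, not wired into the UI below): two texts are
# commonly treated as paraphrases when entailment holds in both directions.
def is_mutual_entailment(a, b):
    return check_entail(a, b)[0] == "entailment" and check_entail(b, a)[0] == "entailment"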

# Build the interface
with gr.Blocks() as demo:
    gr.Markdown("# Essay Comparison Tool")

    with gr.Tab("Semantic Similarity (SBERT)"):
        a1 = gr.Textbox(label="Essay A", lines=8)
        b1 = gr.Textbox(label="Essay B", lines=8)
        sim_button = gr.Button("Compare Similarity")
        sim_score = gr.Textbox(label="Cosine Similarity (0–1)")
        sim_interpret = gr.Textbox(label="Interpretation")
        sim_button.click(fn=compute_similarity, inputs=[a1, b1], outputs=[sim_score, sim_interpret])

    with gr.Tab("Bidirectional Entailment (NLI)"):
        a2 = gr.Textbox(label="Essay A (Original)", lines=8)
        b2 = gr.Textbox(label="Essay B (Modified)", lines=8)
        nli_button = gr.Button("Run Entailment Check")
        ab_label = gr.Textbox(label="A → B Label")
        ab_scores = gr.JSON(label="A → B Scores")
        ba_label = gr.Textbox(label="B → A Label")
        ba_scores = gr.JSON(label="B → A Scores")
        nli_button.click(fn=run_bi_direction, inputs=[a2, b2], outputs=[ab_label, ab_scores, ba_label, ba_scores])

if __name__ == "__main__":
    demo.launch()
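# On Hugging Face Spaces the app is started automatically; running `python app.py`
# locally serves the same Gradio interface (by default on http://127.0.0.1:7860).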