# llm_interface.py

import requests

from llm_utils import build_prompt

EXPLAIN_API_URL = "http:///explain"  # ← Replace this with your actual backend URL


def explain_with_llm(latex_str):
    """Send a LaTeX string to the explanation backend and return its explanation text."""
    if not latex_str.strip():
        return "⚠️ No LaTeX input provided."

    prompt = build_prompt(latex_str)
    try:
        # Post the built prompt to the backend; time out rather than hang forever.
        response = requests.post(EXPLAIN_API_URL, json={"latex": prompt}, timeout=30)
        if response.status_code == 200:
            return response.json().get("explanation", "No explanation returned.")
        return f"❌ Error: {response.status_code} - {response.text}"
    except Exception as e:
        # Covers network failures as well as malformed JSON in the response body.
        return f"❌ Exception: {e}"
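

# Minimal usage sketch, assuming EXPLAIN_API_URL has been pointed at a live
# backend. The sample LaTeX input below is illustrative only, not part of the
# real application.
if __name__ == "__main__":
    sample = r"\int_0^1 x^2 \, dx"  # hypothetical input for a quick smoke test
    print(explain_with_llm(sample))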