Initial Commit
Browse files- agent.py +152 -0
- app.py +305 -0
- patterns.py +206 -0
- requirements.txt +14 -0
agent.py
ADDED
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, List, Optional, Union
|
2 |
+
import spacy
|
3 |
+
from transformers import AutoTokenizer, AutoModel
|
4 |
+
import torch
|
5 |
+
import numpy as np
|
6 |
+
import re
|
7 |
+
from patterns import (
|
8 |
+
PATRONES_AMBIGUEDAD_LEXICA,
|
9 |
+
PATRONES_AMBIGUEDAD_SINTACTICA,
|
10 |
+
SUGERENCIAS_MEJORA
|
11 |
+
)
|
12 |
+
|
13 |
+
class SemanticAnalyzer:
    """
    Computes contextual embeddings for Spanish text with a HuggingFace
    transformer and compares texts via cosine similarity.
    """

    def __init__(self, model_name: str = "PlanTL-GOB-ES/roberta-base-bne"):
        """
        Load the tokenizer and model weights for *model_name*.

        Args:
            model_name (str): HuggingFace model identifier.

        Raises:
            RuntimeError: if either the tokenizer or the model fails to load.
        """
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModel.from_pretrained(model_name)
        except Exception as e:
            raise RuntimeError(f"Error cargando el modelo {model_name}: {str(e)}")

    def get_embedding(self, texto: str) -> np.ndarray:
        """
        Embed *texto* as the mean of the model's last hidden states.

        Args:
            texto (str): Text to embed.

        Returns:
            np.ndarray: 1-D embedding vector.
        """
        encoded = self.tokenizer(texto, return_tensors="pt", padding=True, truncation=True)
        # Inference only — no gradients required.
        with torch.no_grad():
            salida = self.model(**encoded)
        # Mean-pool over the token axis, then drop the batch dimension.
        pooled = salida.last_hidden_state.mean(dim=1)
        return pooled.numpy()[0]

    def calcular_similitud(self, texto1: str, texto2: str) -> float:
        """
        Cosine similarity between the embeddings of two texts.

        Args:
            texto1 (str): First text.
            texto2 (str): Second text.

        Returns:
            float: Cosine-similarity score of the two embeddings.
        """
        vec_a = self.get_embedding(texto1)
        vec_b = self.get_embedding(texto2)
        producto = np.dot(vec_a, vec_b)
        normas = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
        return float(producto / normas)
|
61 |
+
class AmbiguityClassifier:
    """
    Detects ambiguities in user stories written in Spanish.

    Lexical and syntactic ambiguities are found with the regex catalogs from
    ``patterns.py``; each detected ambiguity type is mapped to improvement
    suggestions. Semantic-similarity comparison is delegated to
    SemanticAnalyzer.
    """

    def __init__(self, model_name: str = "PlanTL-GOB-ES/roberta-base-bne"):
        """
        Initialize the classifier.

        Args:
            model_name (str): HuggingFace model name forwarded to SemanticAnalyzer.

        Raises:
            RuntimeError: if the spaCy model ``es_core_news_sm`` is not installed.
        """
        try:
            # Loaded eagerly so a missing spaCy model fails fast at construction.
            self.nlp = spacy.load("es_core_news_sm")
        except OSError:
            raise RuntimeError("Es necesario instalar el modelo es_core_news_sm. Ejecute: python -m spacy download es_core_news_sm")

        self.semantic_analyzer = SemanticAnalyzer(model_name)

    @staticmethod
    def _buscar_patrones(texto: str, patrones: List[Dict[str, str]]) -> List[Dict[str, str]]:
        """Return ``{"tipo", "descripcion"}`` entries for every pattern matching *texto* (case-insensitive)."""
        return [
            {"tipo": patron["tipo"], "descripcion": patron["descripcion"]}
            for patron in patrones
            if re.search(patron["patron"], texto, re.IGNORECASE)
        ]

    def __call__(self, texto: str) -> Dict[str, Union[bool, List[str], float]]:
        """
        Analyze a user story for ambiguities.

        Args:
            texto (str): User story to analyze.

        Returns:
            Dict: ``tiene_ambiguedad`` (bool), ``ambiguedad_lexica`` and
            ``ambiguedad_sintactica`` (lists of descriptions), ``sugerencias``
            (list of suggestions) and ``score_ambiguedad`` (float in [0, 1]).
        """
        if not texto or not isinstance(texto, str):
            return {
                "tiene_ambiguedad": False,
                "ambiguedad_lexica": [],
                "ambiguedad_sintactica": [],
                "sugerencias": ["El texto está vacío o no es válido"],
                "score_ambiguedad": 0.0
            }

        # Fix: the previous version also parsed the text with spaCy here
        # (``self.nlp(texto.strip())``) but never used the parse, wasting a
        # full pipeline run per call; the unused parse has been removed.

        # Both catalogs share the same regex-matching logic.
        ambiguedades_lexicas = self._buscar_patrones(texto, PATRONES_AMBIGUEDAD_LEXICA)
        ambiguedades_sintacticas = self._buscar_patrones(texto, PATRONES_AMBIGUEDAD_SINTACTICA)

        # Collect improvement suggestions for every detected ambiguity type.
        sugerencias = []
        for ambiguedad in ambiguedades_lexicas + ambiguedades_sintacticas:
            sugerencias.extend(SUGERENCIAS_MEJORA.get(ambiguedad["tipo"], []))

        # Weighted score: syntactic findings weigh more than lexical ones;
        # normalized into [0, 1] (5 weighted points saturate the scale).
        score = len(ambiguedades_lexicas) * 0.4 + len(ambiguedades_sintacticas) * 0.6
        score_normalizado = min(1.0, score / 5.0)

        return {
            "tiene_ambiguedad": bool(ambiguedades_lexicas or ambiguedades_sintacticas),
            "ambiguedad_lexica": [amb["descripcion"] for amb in ambiguedades_lexicas],
            "ambiguedad_sintactica": [amb["descripcion"] for amb in ambiguedades_sintacticas],
            "sugerencias": sugerencias if sugerencias else ["No se encontraron ambigüedades"],
            "score_ambiguedad": round(score_normalizado, 2)
        }

    def analizar_similitud_semantica(self, texto1: str, texto2: str) -> float:
        """
        Compare the semantic similarity between two texts.

        Args:
            texto1 (str): First text.
            texto2 (str): Second text.

        Returns:
            float: Similarity score between 0 and 1.
        """
        return self.semantic_analyzer.calcular_similitud(texto1, texto2)
|
app.py
ADDED
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gradio as gr
|
3 |
+
import requests
|
4 |
+
import inspect
|
5 |
+
import pandas as pd
|
6 |
+
from agent import AmbiguityClassifier
|
7 |
+
import json
|
8 |
+
|
9 |
+
# (Keep Constants as is)
|
10 |
+
# --- Constants ---
|
11 |
+
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
12 |
+
|
13 |
+
# --- Basic Agent Definition ---
|
14 |
+
# ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
|
15 |
+
class BasicAgent:
    """Agent that runs AmbiguityClassifier on a user story and formats the result as a Spanish text report."""

    def __init__(self):
        print("BasicAgent initialized.")
        self.analizar_historia = AmbiguityClassifier()

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        try:
            informe = self.analizar_historia(question)

            # Assemble the report line by line, then join once at the end.
            lineas = []
            if informe["tiene_ambiguedad"]:
                lineas.append("Se encontraron las siguientes ambigüedades:")

                if informe["ambiguedad_lexica"]:
                    lineas.append("\nAmbigüedades léxicas:")
                    lineas.extend(f"- {amb}" for amb in informe["ambiguedad_lexica"])

                if informe["ambiguedad_sintactica"]:
                    lineas.append("\nAmbigüedades sintácticas:")
                    lineas.extend(f"- {amb}" for amb in informe["ambiguedad_sintactica"])

                lineas.append(f"\nScore de ambigüedad: {informe['score_ambiguedad']}")
                lineas.append("\nSugerencias de mejora:")
                lineas.extend(f"- {sug}" for sug in informe["sugerencias"])
            else:
                lineas.append("No se encontraron ambigüedades en la historia de usuario.")
                lineas.append(f"Score de ambigüedad: {informe['score_ambiguedad']}")

            return "\n".join(lineas)
        except Exception as e:
            error_msg = f"Error analizando la historia: {str(e)}"
            print(error_msg)
            return error_msg
|
54 |
+
|
55 |
+
def run_and_submit_all( profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.

    Args:
        profile: Gradio OAuth profile of the logged-in Hugging Face user, or
            None when nobody is logged in.

    Returns:
        tuple: (status message str, pandas DataFrame of per-question results
        or None on early failure).
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

    # Submission requires a logged-in user; bail out early otherwise.
    if profile:
        username= f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent ( modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
             print("Fetched questions list is empty.")
             return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    # Per-task failures are logged but do not abort the whole run.
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    # Each failure mode (HTTP error, timeout, network error, anything else)
    # returns a status message plus whatever results were logged so far.
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
|
174 |
+
|
175 |
+
# Module-level classifier shared by the Gradio callbacks below; building it
# here means the spaCy and transformer models are loaded once at import time.
classifier = AmbiguityClassifier()
|
177 |
+
|
178 |
+
def analyze_user_story(user_story: str) -> str:
    """Analyze one user story and return a human-readable report (in Spanish)."""
    if not user_story.strip():
        return "Por favor, ingrese una historia de usuario para analizar."

    # Run the shared module-level classifier.
    analisis = classifier(user_story)

    # Build the report sections, then join them into a single string.
    secciones = [
        f"📝 Historia analizada:\n{user_story}\n",
        f"🎯 Score de ambigüedad: {analisis['score_ambiguedad']}",
    ]

    if analisis['ambiguedad_lexica']:
        secciones.append("\n📚 Ambigüedades léxicas encontradas:")
        secciones.extend(f"• {amb}" for amb in analisis['ambiguedad_lexica'])

    if analisis['ambiguedad_sintactica']:
        secciones.append("\n🔍 Ambigüedades sintácticas encontradas:")
        secciones.extend(f"• {amb}" for amb in analisis['ambiguedad_sintactica'])

    if analisis['sugerencias']:
        secciones.append("\n💡 Sugerencias de mejora:")
        secciones.extend(f"• {sug}" for sug in analisis['sugerencias'])

    return "\n".join(secciones)
|
207 |
+
|
208 |
+
def analyze_multiple_stories(user_stories: str) -> str:
    """
    Analyze several user stories (one per line) and return a JSON report.

    Args:
        user_stories (str): Newline-separated user stories; blank lines are
            ignored.

    Returns:
        str: Pretty-printed JSON array with one entry per story, or a Spanish
        prompt message when the input is empty.
    """
    if not user_stories.strip():
        return "Por favor, ingrese al menos una historia de usuario para analizar."

    stories = [s.strip() for s in user_stories.split('\n') if s.strip()]
    all_results = []

    # Fix: the original iterated with enumerate(stories, 1) but never used
    # the index; iterate the stories directly.
    for story in stories:
        result = classifier(story)
        all_results.append({
            "historia": story,
            "score": result['score_ambiguedad'],
            "ambiguedades_lexicas": result['ambiguedad_lexica'],
            "ambiguedades_sintacticas": result['ambiguedad_sintactica'],
            "sugerencias": result['sugerencias']
        })

    # ensure_ascii=False keeps accented Spanish characters readable.
    return json.dumps(all_results, indent=2, ensure_ascii=False)
|
228 |
+
|
229 |
+
# Crear la interfaz
|
230 |
+
# Gradio UI: one tab for single-story analysis (plain-text report) and one
# for batch analysis (JSON report).
with gr.Blocks(title="Detector de Ambigüedades en Historias de Usuario") as demo:
    # Fix: the header text contained mojibake ("sint??cticas"); restored to
    # "sintácticas".
    gr.Markdown("""
    # 🔍 Detector de Ambigüedades en Historias de Usuario

    Esta herramienta analiza historias de usuario en busca de ambigüedades léxicas y sintácticas,
    proporcionando sugerencias para mejorarlas.

    ## 📝 Instrucciones:
    1. Ingrese una historia de usuario en el campo de texto
    2. Haga clic en "Analizar"
    3. Revise los resultados y las sugerencias de mejora
    """)

    with gr.Tab("Análisis Individual"):
        input_text = gr.Textbox(
            label="Historia de Usuario",
            placeholder="Como usuario quiero...",
            lines=3
        )
        analyze_btn = gr.Button("Analizar")
        output = gr.Textbox(
            label="Resultados del Análisis",
            lines=10
        )
        analyze_btn.click(
            analyze_user_story,
            inputs=[input_text],
            outputs=[output]
        )

    with gr.Tab("Análisis Múltiple"):
        input_stories = gr.Textbox(
            label="Historias de Usuario (una por línea)",
            placeholder="Como usuario quiero...\nComo administrador necesito...",
            lines=5
        )
        analyze_multi_btn = gr.Button("Analizar Todas")
        # NOTE(review): analyze_multiple_stories returns a JSON *string*;
        # confirm gr.JSON renders it as structured data and not as raw text.
        output_json = gr.JSON(label="Resultados del Análisis")
        analyze_multi_btn.click(
            analyze_multiple_stories,
            inputs=[input_stories],
            outputs=[output_json]
        )

    gr.Markdown("""
    ## 🚀 Ejemplos de Uso

    Pruebe con estas historias de usuario:
    - Como usuario quiero un sistema rápido y eficiente para gestionar mis tareas
    - El sistema debe permitir exportar varios tipos de archivos
    - Como administrador necesito acceder fácilmente a los reportes
    """)
|
282 |
+
|
283 |
+
if __name__ == "__main__":
    # Startup banner.
    guion = "-" * 30
    print("\n" + guion + " App Starting " + guion)

    # Surface Hugging Face Space environment info to ease remote debugging.
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)
|
patterns.py
ADDED
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Patrones comunes de ambigüedad en historias de usuario.
|
3 |
+
"""
|
4 |
+
|
5 |
+
# Catalog of ambiguity indicators grouped by category. "lexicos" and
# "semanticos" hold literal words/phrases; "sintacticos" and "contextuales"
# hold regular expressions.
AMBIGUITY_PATTERNS = {
    "lexicos": {
        # Words with several common meanings
        "fácil", "simple", "rápido", "eficiente",
        "mejor", "adecuado", "apropiado",
        "flexible", "dinámico", "intuitivo",
        "amigable", "óptimo", "robusto",

        # Ambiguous quantifiers
        "algunos", "varios", "muchos", "pocos",
        "más", "menos", "suficiente",
        "bastante", "demasiado", "aproximadamente",

        # Ambiguous temporal expressions
        "pronto", "rápidamente", "periódicamente",
        "regularmente", "ocasionalmente", "frecuentemente",
        "en tiempo real", "instantáneamente", "ágilmente",

        # Ambiguous qualifiers
        "moderno", "innovador", "avanzado",
        "inteligente", "sofisticado", "elegante",

        # Ambiguous usability terms.
        # Fix: "intuitivo" was listed twice in this set literal; the duplicate
        # in this group was removed (it remains listed above).
        "user-friendly", "natural",
        "seamless", "fluido", "sin problemas"
    },

    "sintacticos": {
        # Patterns for ambiguous sentence structure
        r"(.*?) y (.*?) con (.*?)",  # Ambiguous scope of "con"
        r"no (.*?) y (.*?)",  # Ambiguous scope of the negation
        r"(.*?) o (.*?) y (.*?)",  # Ambiguous logical operators
        r"(.*?) pero (.*?) si (.*?)",  # Ambiguous conditions
        r"(.*?) cuando (.*?) o (.*?)",  # Ambiguous temporality
        r"(.*?) excepto (.*?) y (.*?)",  # Ambiguous exclusions
        r"(.*?) antes de (.*?) y (.*?)",  # Ambiguous temporal sequence
        r"(.*?) después de (.*?) o (.*?)",  # Ambiguous conditional sequence
    },

    "semanticos": {
        # Phrases that usually signal ambiguity
        "si es posible",
        "cuando sea necesario",
        "si se requiere",
        "según corresponda",
        "como sea apropiado",
        "en caso de ser necesario",
        "dependiendo del caso",
        "si aplica",
        "cuando corresponda",
        "si es factible",
        "en la medida de lo posible",
        "siempre que sea posible"
    },

    "contextuales": {
        # Implicit dependencies
        r"(?i)similar a (.*?)",  # Vague references
        r"(?i)como en (.*?)",  # Ambiguous comparisons
        r"(?i)igual que (.*?)",  # Non-specific references

        # Assumed shared knowledge
        r"(?i)de la manera usual",
        r"(?i)como siempre",
        r"(?i)de forma estándar",

        # Ambiguous references
        r"(?i)esto",
        r"(?i)eso",
        r"(?i)aquello",
        r"(?i)lo mismo"
    }
}
|
78 |
+
|
79 |
+
# Technical terms that are NOT considered ambiguous in this context.
# NOTE(review): not among the names agent.py imports from this module.
TECHNICAL_TERMS = {
    # Authentication and security
    "OAuth", "autenticación", "autorización",
    "token", "JWT", "SSO", "2FA", "MFA",

    # Data and storage
    "base de datos", "SQL", "NoSQL", "cache",
    "índice", "backup", "restauración",

    # Frontend
    "responsive", "CSS", "HTML", "JavaScript",
    "React", "Angular", "Vue", "DOM",

    # Backend
    "API", "REST", "GraphQL", "webhook",
    "microservicio", "contenedor", "Docker",

    # Operations
    "logging", "monitoreo", "alertas",
    "deployment", "CI/CD", "pipeline",

    # Business terms
    "ROI", "KPI", "SLA", "métrica",
    "dashboard", "reporte", "análisis"
}
|
105 |
+
|
106 |
+
# Regexes that recognize common Spanish user-story phrasings. Each pattern is
# case-insensitive ((?i)) and captures up to three groups:
# (subject/role, action, optional purpose introduced by "para (que)").
# NOTE(review): not among the names agent.py imports from this module.
USER_STORY_PATTERNS = {
    # "Como <rol>, quiero <acción> [para (que) <beneficio>]"
    'estandar': r"(?i)^como\s+(.+?),?\s+quiero\s+(.+?)(?:\s+para\s+(?:que\s+)?(.+))?$",
    # "Un/Una/El/La <actor> puede/debe/debería <acción> [...]"
    'modal': r"(?i)^(?:un|una|el|la)\s+(.+?)\s+(?:puede|debe|debería)\s+(.+?)(?:\s+para\s+(?:que\s+)?(.+))?$",
    # Passive voice: "El/La/Los/Las <sujeto> debe(n)/debería(n) ser/estar <estado> [...]"
    'pasiva': r"(?i)^(?:el|la|los|las)\s+(.+?)\s+(?:debe|deben|debería|deberían)\s+(?:ser|estar)\s+(.+?)(?:\s+para\s+(?:que\s+)?(.+))?$",
    # Plural declarative: "Los/Las <sujetos> deben/deberían <acción> [...]"
    'declarativa': r"(?i)^(?:los|las)\s+(.+?)\s+(?:deben|deberían)\s+(.+?)(?:\s+para\s+(?:que\s+)?(.+))?$",
    # "Necesito/Necesitamos/Se necesita <algo> [...]"
    'necesidad': r"(?i)^(?:necesito|necesitamos|se\s+necesita)\s+(.+?)(?:\s+para\s+(?:que\s+)?(.+))?$",
    # "Deseo/Deseamos/Se desea <algo> [...]"
    'deseo': r"(?i)^(?:deseo|deseamos|se\s+desea)\s+(.+?)(?:\s+para\s+(?:que\s+)?(.+))?$"
}
|
115 |
+
|
116 |
+
# Patterns used to detect lexical ambiguities. Each entry couples a regex
# ("patron") with a machine-friendly type id ("tipo") and a human-readable
# Spanish description ("descripcion"). AmbiguityClassifier matches them
# case-insensitively against the raw user story; "tipo" keys into
# SUGERENCIAS_MEJORA below.
PATRONES_AMBIGUEDAD_LEXICA = [
    {
        # Subjective adjectives. NOTE(review): the (?![^{]*\}) lookahead seems
        # intended to skip words appearing inside {...} placeholders — confirm.
        "patron": r"\b(rápido|eficiente|fácil|simple|intuitivo|amigable|flexible|robusto)\b(?![^{]*\})",
        "tipo": "adjetivo_subjetivo",
        "descripcion": "Uso de adjetivos subjetivos que pueden interpretarse de diferentes maneras"
    },
    {
        # Vague quantifiers, unless immediately followed by an explicit
        # enumeration (e.g. "varios formatos como ...").
        "patron": r"\b(varios|algunos|muchos|pocos|diversos|múltiples)\b(?!\s+(?:formatos?|tipos?|archivos?|reportes?)\s+(?:como|:|\(|\{))",
        "tipo": "cuantificador_ambiguo",
        "descripcion": "Uso de cuantificadores ambiguos que no especifican una cantidad concreta"
    },
    {
        # Open-ended enumerations ("etc.", "entre otros", ...).
        "patron": r"\b(etc|etcétera|entre otros|y más|y otros)\b",
        "tipo": "enumeracion_incompleta",
        "descripcion": "Uso de expresiones que dejan la enumeración incompleta o abierta"
    },
    {
        # Generic system nouns not followed by a modal verb phrase.
        "patron": r"\b(sistema|aplicación|plataforma|herramienta|solución)\b(?!\s+(?:debe|debería|tiene que|ha de))",
        "tipo": "termino_generico",
        "descripcion": "Uso de términos genéricos que no especifican la funcionalidad concreta"
    }
]
|
139 |
+
|
140 |
+
# Patterns used to detect syntactic ambiguities. Same entry structure as
# PATRONES_AMBIGUEDAD_LEXICA: "patron" regex, "tipo" id (keys into
# SUGERENCIAS_MEJORA) and Spanish "descripcion".
PATRONES_AMBIGUEDAD_SINTACTICA = [
    {
        # Coordinating conjunctions with unclear scope. NOTE(review): the
        # lookbehind/lookahead exclusions (digits, braces, comparisons) appear
        # meant to skip formal/structured text — confirm.
        "patron": r"(?i)(?<![\w{])(y|o|y/o)(?!\s+(?:\d+|\{|\w+\s*[=:<>]))",
        "tipo": "coordinacion_ambigua",
        "descripcion": "Uso de coordinaciones que pueden crear ambigüedad en la interpretación"
    },
    {
        # Pronouns/relatives that may have several antecedents, unless followed
        # by a clarifying verb.
        "patron": r"(?i)\b(esto|eso|aquello|el cual|la cual|lo cual|que)\b(?!\s+(?:significa|implica|requiere|incluye))",
        "tipo": "referencia_ambigua",
        "descripcion": "Uso de referencias ambiguas que pueden tener múltiples antecedentes"
    },
    {
        # Temporal/conditional connectives without a concrete, quantified
        # complement nearby.
        "patron": r"(?i)\b(si|cuando|mientras|después|antes|luego)\b(?!\s+(?:el|la|los|las|se)\s+(?:\w+\s+){0,3}(?:\d+|específico|definido))",
        "tipo": "condicion_temporal_ambigua",
        "descripcion": "Uso de condiciones o referencias temporales ambiguas"
    },
    {
        # Modal verb governing two coordinated actions — unclear whether the
        # modal applies to both.
        "patron": r"(?i)(poder|deber|necesitar|querer)\s+\w+\s+(y|o)\s+\w+(?!\s+(?:en|durante|cada|por)\s+(?:\d+|un|una)\s+(?:segundo|minuto|hora)s?)",
        "tipo": "alcance_verbo_modal",
        "descripcion": "Ambigüedad en el alcance de verbos modales con múltiples acciones"
    }
]
|
163 |
+
|
164 |
+
# Improvement suggestions keyed by ambiguity type. Keys must match the "tipo"
# values declared in PATRONES_AMBIGUEDAD_LEXICA and
# PATRONES_AMBIGUEDAD_SINTACTICA above; AmbiguityClassifier extends its
# suggestion list with these entries for every detected type.
SUGERENCIAS_MEJORA = {
    "adjetivo_subjetivo": [
        "Especificar métricas o criterios medibles (ej: tiempo de respuesta en segundos)",
        "Definir valores concretos o rangos aceptables",
        "Usar términos más específicos y cuantificables"
    ],
    "cuantificador_ambiguo": [
        "Especificar cantidades exactas o rangos definidos",
        "Listar explícitamente los elementos o tipos",
        "Definir límites mínimos y máximos"
    ],
    "enumeracion_incompleta": [
        "Listar todos los elementos requeridos",
        "Especificar criterios de inclusión/exclusión",
        "Definir el alcance completo de la funcionalidad"
    ],
    "termino_generico": [
        "Especificar la funcionalidad concreta",
        "Describir las características técnicas específicas",
        "Detallar los componentes o módulos involucrados"
    ],
    "coordinacion_ambigua": [
        "Separar en historias de usuario independientes",
        "Usar listas numeradas o viñetas para clarificar",
        "Especificar la relación entre los elementos"
    ],
    "referencia_ambigua": [
        "Repetir el sustantivo al que se hace referencia",
        "Usar referencias específicas y directas",
        "Evitar pronombres ambiguos"
    ],
    "condicion_temporal_ambigua": [
        "Especificar intervalos de tiempo exactos",
        "Definir el orden preciso de las acciones",
        "Usar referencias temporales específicas (ej: cada 5 minutos)"
    ],
    "alcance_verbo_modal": [
        "Separar las acciones en requisitos independientes",
        "Especificar las condiciones para cada acción",
        "Definir la prioridad o secuencia de las acciones"
    ]
}
|
requirements.txt
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio>=5.25.2
requests>=2.31.0
spacy>=3.7.0
es-core-news-sm @ https://github.com/explosion/spacy-models/releases/download/es_core_news_sm-3.7.0/es_core_news_sm-3.7.0-py3-none-any.whl
pytest>=8.0.0
typing-extensions>=4.9.0
nltk>=3.8.1
pandas>=2.2.0
transformers>=4.30.0
torch>=2.0.0
numpy>=1.24.0
setuptools>=69.1.0
|