Spaces: Running
Commit 1139524 · 1 Parent(s): c70a5b7
new committ
Browse files
- app.py +213 -79
- model/generate.py +507 -77
- requirements.txt +0 -0
app.py
CHANGED
@@ -100,28 +100,97 @@ def ensure_initialized():
- def generate_test_cases_api(
- """Main API function for generating test cases"""
- "error":
- srs_text = srs_text.strip()
- if len(
- logger.warning(f"
- logger.info(f"🎯 Generating test cases for input ({len(
- test_cases = generate_test_cases(
@@ -141,8 +210,8 @@ def generate_test_cases_api(srs_text):
- model_algorithm = "Rule-based Template"
- model_reason = "Used rule-based generation
@@ -161,7 +230,9 @@ def generate_test_cases_api(srs_text):
- "model_reason": model_reason
@@ -184,38 +255,59 @@ def format_test_cases_output(result):
- formatted_output += "=" *
- formatted_output += "TEST CASES
- formatted_output += "=" *
- if isinstance(steps, list):
- formatted_output += f"   Steps:\n"
- formatted_output += f"   Steps: {steps}\n"
- formatted_output += f"   Expected Result: {tc.get('expected', 'N/A')}\n
- def gradio_generate_test_cases(
- result = generate_test_cases_api(
@@ -226,7 +318,7 @@ def get_system_status():
- "model_name": "Template-Based Generator",
@@ -246,9 +338,11 @@ Optimization: {model_info.get("optimization", "standard")}
- Version:
@@ -279,50 +373,71 @@ Environment: {"Hugging Face Spaces" if os.environ.get('SPACE_ID') else "Local"}
- with gr.Blocks(title="AI Test Case Generator", theme=gr.themes.Soft()) as app:
- # 🧪 AI Test Case Generator
- **Features:**
- label="
- placeholder="Enter your
- lines=
- max_lines=
- lines=
- max_lines=
- lines=
- max_lines=
@@ -330,7 +445,7 @@ with gr.Blocks(title="AI Test Case Generator", theme=gr.themes.Soft()) as app:
- lines=
@@ -339,82 +454,100 @@ with gr.Blocks(title="AI Test Case Generator", theme=gr.themes.Soft()) as app:
- lines=
- ## 🔌 API Endpoints
- This Gradio app
- ### Generate Test Cases
- "data": ["Your SRS text here"]
- "JSON output with test cases"
- ###
- ```python
- import requests
- response = requests.post(
-     "YOUR_SPACE_URL/api/predict",
-     json={"data": ["User login system requirements..."]}
- )
- result = response.json()
- test_cases = result["data"][1]  # JSON output
- ```
- ### Example Usage (cURL):
- ```bash
- curl -X POST "YOUR_SPACE_URL/api/predict" \\
-      -H "Content-Type: application/json" \\
-      -d '{"data":["Your SRS text here"]}'
- ```
- ## 📋 Response Format
- The API returns test cases in this format:
- "title": "Test Case Title",
- "count":
- "model_algorithm": "
- "model_reason": "
- inputs=[srs_input],
@@ -443,8 +576,9 @@ with gr.Blocks(title="AI Test Case Generator", theme=gr.themes.Soft()) as app:
- logger.info(f"🚀 Starting Gradio app on port {port}")
100 |
logger.warning("⚠️ Model initialization failed, using template mode")
|
101 |
_initialized = True
|
102 |
|
103 |
+
def read_uploaded_file(file_obj):
|
104 |
+
"""Read and extract text from uploaded file"""
|
105 |
+
if file_obj is None:
|
106 |
+
return ""
|
107 |
+
|
108 |
+
try:
|
109 |
+
file_path = file_obj.name
|
110 |
+
file_extension = os.path.splitext(file_path)[1].lower()
|
111 |
+
|
112 |
+
if file_extension in ['.txt', '.md']:
|
113 |
+
with open(file_path, 'r', encoding='utf-8') as f:
|
114 |
+
content = f.read()
|
115 |
+
elif file_extension in ['.doc', '.docx']:
|
116 |
+
try:
|
117 |
+
import docx
|
118 |
+
doc = docx.Document(file_path)
|
119 |
+
content = '\n'.join([paragraph.text for paragraph in doc.paragraphs])
|
120 |
+
except ImportError:
|
121 |
+
logger.warning("python-docx not available, trying to read as text")
|
122 |
+
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
123 |
+
content = f.read()
|
124 |
+
elif file_extension == '.pdf':
|
125 |
+
try:
|
126 |
+
import PyPDF2
|
127 |
+
with open(file_path, 'rb') as f:
|
128 |
+
reader = PyPDF2.PdfReader(f)
|
129 |
+
content = ''
|
130 |
+
for page in reader.pages:
|
131 |
+
content += page.extract_text() + '\n'
|
132 |
+
except ImportError:
|
133 |
+
logger.warning("PyPDF2 not available, cannot read PDF files")
|
134 |
+
return "❌ PDF support requires PyPDF2. Please install it or use text/Word files."
|
135 |
+
else:
|
136 |
+
# Try to read as plain text
|
137 |
+
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
138 |
+
content = f.read()
|
139 |
+
|
140 |
+
logger.info(f"📄 File read successfully: {len(content)} characters")
|
141 |
+
return content
|
142 |
+
|
143 |
+
except Exception as e:
|
144 |
+
logger.error(f"❌ Error reading file: {str(e)}")
|
145 |
+
return f"❌ Error reading file: {str(e)}"
|
146 |
+
|
147 |
+
def combine_inputs(prompt_text, uploaded_file):
|
148 |
+
"""Combine prompt text and uploaded file content"""
|
149 |
+
file_content = ""
|
150 |
+
|
151 |
+
if uploaded_file is not None:
|
152 |
+
file_content = read_uploaded_file(uploaded_file)
|
153 |
+
if file_content.startswith("❌"):
|
154 |
+
return file_content # Return error message
|
155 |
+
|
156 |
+
# Combine both inputs
|
157 |
+
combined_text = ""
|
158 |
+
|
159 |
+
if prompt_text and prompt_text.strip():
|
160 |
+
combined_text += "PROMPT:\n" + prompt_text.strip() + "\n\n"
|
161 |
+
|
162 |
+
if file_content and not file_content.startswith("❌"):
|
163 |
+
combined_text += "DOCUMENT CONTENT:\n" + file_content.strip()
|
164 |
+
|
165 |
+
if not combined_text.strip():
|
166 |
+
return "❌ Please provide either text input or upload a document."
|
167 |
+
|
168 |
+
return combined_text.strip()
|
169 |
+
|
170 |
# Initialize on startup
|
171 |
ensure_initialized()
|
172 |
|
173 |
@smart_memory_monitor
|
174 |
+
def generate_test_cases_api(prompt_text, uploaded_file):
|
175 |
+
"""Main API function for generating test cases with dual input support"""
|
176 |
+
|
177 |
+
# Combine inputs
|
178 |
+
combined_input = combine_inputs(prompt_text, uploaded_file)
|
179 |
+
|
180 |
+
if combined_input.startswith("❌"):
|
181 |
return {
|
182 |
+
"error": combined_input,
|
183 |
"test_cases": [],
|
184 |
"count": 0
|
185 |
}
|
|
|
|
|
186 |
|
187 |
+
if len(combined_input) > 8000:
|
188 |
+
logger.warning(f"Input text truncated from {len(combined_input)} to 8000 characters")
|
189 |
+
combined_input = combined_input[:8000]
|
190 |
|
191 |
try:
|
192 |
+
logger.info(f"🎯 Generating test cases for combined input ({len(combined_input)} chars)")
|
193 |
+
test_cases = generate_test_cases(combined_input)
|
194 |
|
195 |
if not test_cases or len(test_cases) == 0:
|
196 |
logger.error("No test cases generated")
|
|
|
210 |
generation_method = "template_mode"
|
211 |
|
212 |
if model_used == "Template-Based Generator":
|
213 |
+
model_algorithm = "Enhanced Rule-based Template"
|
214 |
+
model_reason = "Used enhanced rule-based generation with pattern matching and context analysis."
|
215 |
elif "distilgpt2" in model_used:
|
216 |
model_algorithm = "Transformer-based LM"
|
217 |
model_reason = "Used DistilGPT2 for balanced performance and memory efficiency."
|
|
|
230 |
"model_used": model_used,
|
231 |
"generation_method": generation_method,
|
232 |
"model_algorithm": model_algorithm,
|
233 |
+
"model_reason": model_reason,
|
234 |
+
"input_source": "Combined (Prompt + Document)" if (prompt_text and uploaded_file) else
|
235 |
+
"Document Upload" if uploaded_file else "Text Prompt"
|
236 |
}
|
237 |
|
238 |
except Exception as e:
|
|
|
255 |
|
256 |
# Format test cases for display
|
257 |
formatted_output = f"✅ Generated {result['count']} Test Cases\n\n"
|
258 |
+
formatted_output += f"📥 Input Source: {result.get('input_source', 'Unknown')}\n"
|
259 |
formatted_output += f"🤖 Model: {result['model_used']}\n"
|
260 |
formatted_output += f"🔧 Algorithm: {result['model_algorithm']}\n"
|
261 |
formatted_output += f"💡 Reason: {result['model_reason']}\n\n"
|
262 |
|
263 |
+
formatted_output += "=" * 60 + "\n"
|
264 |
+
formatted_output += "GENERATED TEST CASES\n"
|
265 |
+
formatted_output += "=" * 60 + "\n\n"
|
266 |
|
267 |
for i, tc in enumerate(test_cases, 1):
|
268 |
formatted_output += f"🔹 Test Case {i}:\n"
|
269 |
formatted_output += f" ID: {tc.get('id', f'TC_{i:03d}')}\n"
|
270 |
formatted_output += f" Title: {tc.get('title', 'N/A')}\n"
|
271 |
+
formatted_output += f" Priority: {tc.get('priority', 'Medium')}\n"
|
272 |
+
formatted_output += f" Category: {tc.get('category', 'Functional')}\n"
|
273 |
formatted_output += f" Description: {tc.get('description', 'N/A')}\n"
|
274 |
|
275 |
+
# Pre-conditions
|
276 |
+
preconditions = tc.get('preconditions', [])
|
277 |
+
if preconditions:
|
278 |
+
formatted_output += f" Pre-conditions:\n"
|
279 |
+
for j, precond in enumerate(preconditions, 1):
|
280 |
+
formatted_output += f" • {precond}\n"
|
281 |
+
|
282 |
+
# Test steps
|
283 |
steps = tc.get('steps', [])
|
284 |
+
if isinstance(steps, list) and steps:
|
285 |
+
formatted_output += f" Test Steps:\n"
|
286 |
for j, step in enumerate(steps, 1):
|
287 |
formatted_output += f" {j}. {step}\n"
|
288 |
else:
|
289 |
+
formatted_output += f" Test Steps: {steps if steps else 'N/A'}\n"
|
290 |
|
291 |
+
formatted_output += f" Expected Result: {tc.get('expected', 'N/A')}\n"
|
292 |
+
|
293 |
+
# Post-conditions
|
294 |
+
postconditions = tc.get('postconditions', [])
|
295 |
+
if postconditions:
|
296 |
+
formatted_output += f" Post-conditions:\n"
|
297 |
+
for postcond in postconditions:
|
298 |
+
formatted_output += f" • {postcond}\n"
|
299 |
+
|
300 |
+
formatted_output += f" Test Data: {tc.get('test_data', 'N/A')}\n"
|
301 |
+
formatted_output += "\n" + "-" * 40 + "\n\n"
|
302 |
|
303 |
# Return JSON for API access
|
304 |
json_output = json.dumps(result, indent=2)
|
305 |
|
306 |
return formatted_output, json_output
|
307 |
|
308 |
+
def gradio_generate_test_cases(prompt_text, uploaded_file):
|
309 |
"""Gradio interface function"""
|
310 |
+
result = generate_test_cases_api(prompt_text, uploaded_file)
|
311 |
return format_test_cases_output(result)
|
312 |
|
313 |
def get_system_status():
|
|
|
318 |
model_info = generator.get_model_info()
|
319 |
except Exception:
|
320 |
model_info = {
|
321 |
+
"model_name": "Enhanced Template-Based Generator",
|
322 |
"status": "template_mode",
|
323 |
"optimization": "memory_safe"
|
324 |
}
|
|
|
338 |
|
339 |
🚀 APPLICATION INFO
|
340 |
==================
|
341 |
+
Version: 2.0.0-enhanced-input
|
342 |
Environment: Hugging Face Spaces
|
343 |
Backend: Gradio
|
344 |
+
Features: Text Input + File Upload
|
345 |
+
Supported Files: .txt, .md, .doc, .docx, .pdf
|
346 |
"""
|
347 |
return status_info
|
348 |
|
|
|
373 |
Backend: Gradio
|
374 |
Threading: Enabled
|
375 |
Memory Monitoring: Active
|
376 |
+
Input Methods: Text + File Upload
|
377 |
+
File Support: TXT, MD, DOC, DOCX, PDF
|
378 |
"""
|
379 |
return detailed_info
|
380 |
except Exception as e:
|
381 |
return f"❌ Error getting model info: {str(e)}"
|
382 |
|
383 |
# Create Gradio interface
|
384 |
+
with gr.Blocks(title="AI Test Case Generator - Enhanced", theme=gr.themes.Soft()) as app:
|
385 |
gr.Markdown("""
|
386 |
+
# 🧪 AI Test Case Generator - Enhanced Edition
|
387 |
|
388 |
Generate comprehensive test cases from Software Requirements Specification (SRS) documents using AI models.
|
389 |
|
390 |
+
**New Features:**
|
391 |
+
- 📝 **Dual Input Support**: Text prompt AND/OR document upload
|
392 |
+
- 📄 **File Upload**: Support for .txt, .md, .doc, .docx, .pdf files
|
393 |
+
- 🎯 **Enhanced Test Cases**: More detailed and comprehensive test case generation
|
394 |
+
- 🔧 **Improved Templates**: Better rule-based fallback with pattern matching
|
395 |
+
- 📊 **Better Formatting**: Enhanced output with priorities, categories, and conditions
|
396 |
""")
|
397 |
|
398 |
with gr.Tab("🧪 Generate Test Cases"):
|
399 |
+
gr.Markdown("### Choose your input method: Enter text directly, upload a document, or use both!")
|
400 |
+
|
401 |
with gr.Row():
|
402 |
with gr.Column(scale=2):
|
403 |
+
# Text input
|
404 |
srs_input = gr.Textbox(
|
405 |
+
label="📝 Text Input (SRS, Requirements, or Prompt)",
|
406 |
+
placeholder="Enter your requirements, user stories, or specific prompt here...\n\nExample:\n- The system shall provide user authentication with username and password\n- Users should be able to login, logout, and reset passwords\n- The system should validate input and display appropriate error messages\n- Performance requirement: Login should complete within 3 seconds",
|
407 |
+
lines=8,
|
408 |
+
max_lines=15
|
409 |
)
|
410 |
|
411 |
+
# File upload
|
412 |
+
file_upload = gr.File(
|
413 |
+
label="📄 Upload Document (Optional)",
|
414 |
+
file_types=[".txt", ".md", ".doc", ".docx", ".pdf"],
|
415 |
+
type="filepath"
|
416 |
+
)
|
417 |
+
|
418 |
+
gr.Markdown("""
|
419 |
+
**💡 Tips:**
|
420 |
+
- Use **text input** for quick requirements or specific prompts
|
421 |
+
- Use **file upload** for complete SRS documents
|
422 |
+
- Use **both** to combine a specific prompt with a detailed document
|
423 |
+
- Supported formats: TXT, Markdown, Word (.doc/.docx), PDF
|
424 |
+
""")
|
425 |
+
|
426 |
generate_btn = gr.Button("🚀 Generate Test Cases", variant="primary", size="lg")
|
427 |
|
428 |
with gr.Column(scale=3):
|
429 |
output_display = gr.Textbox(
|
430 |
label="📋 Generated Test Cases",
|
431 |
+
lines=25,
|
432 |
+
max_lines=35,
|
433 |
interactive=False
|
434 |
)
|
435 |
|
436 |
with gr.Row():
|
437 |
json_output = gr.Textbox(
|
438 |
label="📄 JSON Output (for API use)",
|
439 |
+
lines=12,
|
440 |
+
max_lines=20,
|
441 |
interactive=False
|
442 |
)
|
443 |
|
|
|
445 |
with gr.Column():
|
446 |
status_display = gr.Textbox(
|
447 |
label="🏥 System Health & Status",
|
448 |
+
lines=18,
|
449 |
interactive=False
|
450 |
)
|
451 |
refresh_status_btn = gr.Button("🔄 Refresh Status", variant="secondary")
|
|
|
454 |
with gr.Column():
|
455 |
model_info_display = gr.Textbox(
|
456 |
label="🤖 Detailed Model Information",
|
457 |
+
lines=22,
|
458 |
interactive=False
|
459 |
)
|
460 |
refresh_model_btn = gr.Button("🔄 Refresh Model Info", variant="secondary")
|
461 |
|
462 |
with gr.Tab("📚 API Documentation"):
|
463 |
gr.Markdown("""
|
464 |
+
## 🔌 Enhanced API Endpoints
|
465 |
|
466 |
+
This Gradio app supports both text input and file upload through API:
|
467 |
|
468 |
+
### Generate Test Cases (Text Only)
|
469 |
**Endpoint:** `/api/predict`
|
470 |
**Method:** POST
|
471 |
**Body:**
|
472 |
```json
|
473 |
{
|
474 |
+
"data": ["Your SRS text here", null]
|
475 |
}
|
476 |
```
|
477 |
|
478 |
+
### Generate Test Cases (With File)
|
479 |
+
**Endpoint:** `/api/predict`
|
480 |
+
**Method:** POST (multipart/form-data)
|
481 |
+
- Upload file and include text in the data array
|
482 |
+
|
483 |
+
**Response Format:**
|
484 |
```json
|
485 |
{
|
486 |
"data": [
|
487 |
"Formatted test cases output",
|
488 |
+
"JSON output with enhanced test cases"
|
489 |
]
|
490 |
}
|
491 |
```
|
492 |
|
493 |
+
### Enhanced Test Case Structure
|
494 |
```json
|
495 |
{
|
496 |
"test_cases": [
|
497 |
{
|
498 |
"id": "TC_001",
|
499 |
+
"title": "Test Case Title",
|
500 |
+
"priority": "High/Medium/Low",
|
501 |
+
"category": "Functional/Security/Performance/UI",
|
502 |
+
"description": "Detailed test description",
|
503 |
+
"preconditions": ["Pre-condition 1", "Pre-condition 2"],
|
504 |
+
"steps": ["Step 1", "Step 2", "Step 3"],
|
505 |
+
"expected": "Expected result",
|
506 |
+
"postconditions": ["Post-condition 1"],
|
507 |
+
"test_data": "Required test data"
|
508 |
}
|
509 |
],
|
510 |
+
"count": 5,
|
511 |
"model_used": "distilgpt2",
|
512 |
+
"model_algorithm": "Enhanced Rule-based Template",
|
513 |
+
"model_reason": "Detailed selection reasoning...",
|
514 |
+
"input_source": "Combined (Prompt + Document)"
|
515 |
}
|
516 |
```
|
517 |
+
|
518 |
+
### Example Usage (Python with File):
|
519 |
+
```python
|
520 |
+
import requests
|
521 |
+
|
522 |
+
# Text only
|
523 |
+
response = requests.post(
|
524 |
+
"YOUR_SPACE_URL/api/predict",
|
525 |
+
json={"data": ["User login requirements...", None]}
|
526 |
+
)
|
527 |
+
|
528 |
+
# With file upload (requires multipart handling)
|
529 |
+
files = {'file': open('requirements.pdf', 'rb')}
|
530 |
+
data = {'data': json.dumps(["Additional prompt", "file_placeholder"])}
|
531 |
+
response = requests.post("YOUR_SPACE_URL/api/predict", files=files, data=data)
|
532 |
+
```
|
533 |
+
|
534 |
+
## 📋 Supported File Formats
|
535 |
+
- **Text Files**: .txt, .md
|
536 |
+
- **Word Documents**: .doc, .docx (requires python-docx)
|
537 |
+
- **PDF Files**: .pdf (requires PyPDF2)
|
538 |
+
- **Fallback**: Any text-readable format
|
539 |
+
|
540 |
+
## 🎯 Enhanced Features
|
541 |
+
- **Dual Input**: Combine text prompts with document uploads
|
542 |
+
- **Better Test Cases**: Includes priorities, categories, pre/post-conditions
|
543 |
+
- **Smart Parsing**: Automatically detects requirement types and generates appropriate tests
|
544 |
+
- **Memory Optimized**: Handles large documents efficiently
|
545 |
""")
|
546 |
|
547 |
# Event handlers
|
548 |
generate_btn.click(
|
549 |
fn=gradio_generate_test_cases,
|
550 |
+
inputs=[srs_input, file_upload],
|
551 |
outputs=[output_display, json_output]
|
552 |
)
|
553 |
|
|
|
576 |
if __name__ == "__main__":
|
577 |
port = int(os.environ.get("PORT", 7860))
|
578 |
|
579 |
+
logger.info(f"🚀 Starting Enhanced Gradio app on port {port}")
|
580 |
logger.info(f"🖥️ Environment: {'Hugging Face Spaces' if os.environ.get('SPACE_ID') else 'Local'}")
|
581 |
+
logger.info("📝 Features: Text Input + File Upload Support")
|
582 |
|
583 |
app.launch(
|
584 |
server_name="0.0.0.0",
|
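A minimal smoke test for the new dual-input signature in app.py, assuming the module can be imported directly and that model/generate.py falls back to template mode when no transformer model is available (the sample requirement text is illustrative):

```python
# Text-only call: the second argument (uploaded_file) is None, mirroring the
# Gradio wiring inputs=[srs_input, file_upload] added in this commit.
from app import generate_test_cases_api, format_test_cases_output

result = generate_test_cases_api(
    "The system shall provide user authentication with username and password.",
    None,
)

formatted, json_payload = format_test_cases_output(result)
print(result.get("input_source"))        # expected: "Text Prompt"
print(result.get("count"), "test cases generated")
```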
model/generate.py
CHANGED
@@ -5,6 +5,8 @@ import logging
@@ -21,6 +23,60 @@ MEMORY_OPTIMIZED_MODELS = [
@@ -60,60 +116,405 @@ def load_model_with_memory_optimization(model_name):
- def
- words = re.findall(r'\b\w+\b', text.lower())
- return [word for word in words if word in common_keywords]
- def
- def parse_generated_test_cases(generated_text):
@@ -125,11 +526,16 @@ def parse_generated_test_cases(generated_text):
- "id": f"
@@ -138,21 +544,27 @@ def parse_generated_test_cases(generated_text):
- "id": "
- "title": "Generated Test Case",
- def generate_with_ai_model(srs_text, tokenizer, model):
- prompt = f"""Generate test cases for this software requirement:
@@ -162,14 +574,14 @@ Test Cases:
- max_length=
- max_new_tokens=
@@ -186,7 +598,8 @@ Test Cases:
- def generate_with_fallback(srs_text):
@@ -197,17 +610,19 @@ def generate_with_fallback(srs_text):
- logger.warning(f"AI generation failed: {e}, falling back to templates")
- logger.info("⚠️ Using
- return test_cases, "Template-Based Generator", "rule-based", "
- def generate_test_cases(srs_text):
@@ -221,10 +636,10 @@ def get_generator():
- "model_name": self.model_name if self.model_name else "Template-Based Generator",
- "status": "loaded" if self.model else "
- "optimization": "
@@ -232,14 +647,15 @@ def get_generator():
@@ -248,15 +664,29 @@ def generate_test_cases_and_info(input_text):
- return "Selected due to low memory availability; DialoGPT-small provides conversational understanding in limited memory environments."
- return "Selected for its balance between performance and low memory usage. Ideal for small environments needing causal language modeling."
- return "Chosen for general-purpose text generation with moderate memory headroom."
- return "
- return "Model selected based on
5 |
import psutil
|
6 |
import re
|
7 |
import gc
|
8 |
+
import random
|
9 |
+
from typing import List, Dict, Any
|
10 |
|
11 |
# Initialize logger
|
12 |
logger = logging.getLogger(__name__)
|
|
|
23 |
# Singleton state
|
24 |
_generator_instance = None
|
25 |
|
26 |
+
# Enhanced pattern matching for comprehensive test case generation
|
27 |
+
REQUIREMENT_PATTERNS = {
|
28 |
+
'authentication': {
|
29 |
+
'keywords': ['login', 'authentication', 'signin', 'sign in', 'password', 'username', 'credential', 'auth'],
|
30 |
+
'priority': 'High',
|
31 |
+
'category': 'Security'
|
32 |
+
},
|
33 |
+
'authorization': {
|
34 |
+
'keywords': ['permission', 'role', 'access', 'privilege', 'authorize', 'admin', 'user level'],
|
35 |
+
'priority': 'High',
|
36 |
+
'category': 'Security'
|
37 |
+
},
|
38 |
+
'data_validation': {
|
39 |
+
'keywords': ['validate', 'validation', 'input', 'format', 'check', 'verify', 'constraint'],
|
40 |
+
'priority': 'High',
|
41 |
+
'category': 'Functional'
|
42 |
+
},
|
43 |
+
'database': {
|
44 |
+
'keywords': ['database', 'db', 'store', 'save', 'persist', 'record', 'data storage', 'crud'],
|
45 |
+
'priority': 'Medium',
|
46 |
+
'category': 'Functional'
|
47 |
+
},
|
48 |
+
'performance': {
|
49 |
+
'keywords': ['performance', 'speed', 'time', 'response', 'load', 'concurrent', 'scalability'],
|
50 |
+
'priority': 'Medium',
|
51 |
+
'category': 'Performance'
|
52 |
+
},
|
53 |
+
'ui_interface': {
|
54 |
+
'keywords': ['interface', 'ui', 'user interface', 'display', 'screen', 'form', 'button', 'menu'],
|
55 |
+
'priority': 'Medium',
|
56 |
+
'category': 'UI/UX'
|
57 |
+
},
|
58 |
+
'api': {
|
59 |
+
'keywords': ['api', 'endpoint', 'service', 'request', 'response', 'rest', 'http'],
|
60 |
+
'priority': 'High',
|
61 |
+
'category': 'Integration'
|
62 |
+
},
|
63 |
+
'error_handling': {
|
64 |
+
'keywords': ['error', 'exception', 'failure', 'invalid', 'incorrect', 'wrong'],
|
65 |
+
'priority': 'High',
|
66 |
+
'category': 'Error Handling'
|
67 |
+
},
|
68 |
+
'reporting': {
|
69 |
+
'keywords': ['report', 'export', 'generate', 'analytics', 'dashboard', 'chart'],
|
70 |
+
'priority': 'Medium',
|
71 |
+
'category': 'Reporting'
|
72 |
+
},
|
73 |
+
'security': {
|
74 |
+
'keywords': ['security', 'encrypt', 'secure', 'ssl', 'https', 'token', 'session'],
|
75 |
+
'priority': 'High',
|
76 |
+
'category': 'Security'
|
77 |
+
}
|
78 |
+
}
|
79 |
+
|
80 |
def get_optimal_model_for_memory():
|
81 |
"""Select the best model based on available memory."""
|
82 |
available_memory = psutil.virtual_memory().available / (1024 * 1024) # MB
|
|
|
116 |
logger.error(f"❌ Failed to load model {model_name}: {e}")
|
117 |
return None, None
|
118 |
|
119 |
+
def analyze_requirements(text: str) -> Dict[str, Any]:
|
120 |
+
"""Analyze requirements text to identify patterns and generate appropriate test cases"""
|
121 |
+
text_lower = text.lower()
|
122 |
+
detected_patterns = {}
|
123 |
+
|
124 |
+
for pattern_name, pattern_info in REQUIREMENT_PATTERNS.items():
|
125 |
+
matches = []
|
126 |
+
for keyword in pattern_info['keywords']:
|
127 |
+
if keyword in text_lower:
|
128 |
+
# Find context around the keyword
|
129 |
+
pattern = rf'.{{0,50}}{re.escape(keyword)}.{{0,50}}'
|
130 |
+
context_matches = re.findall(pattern, text_lower, re.IGNORECASE)
|
131 |
+
matches.extend(context_matches)
|
132 |
+
|
133 |
+
if matches:
|
134 |
+
detected_patterns[pattern_name] = {
|
135 |
+
'matches': matches[:3], # Limit to 3 matches
|
136 |
+
'priority': pattern_info['priority'],
|
137 |
+
'category': pattern_info['category']
|
138 |
+
}
|
139 |
+
|
140 |
+
return detected_patterns
|
141 |
+
|
142 |
+
def generate_authentication_tests(matches: List[str]) -> List[Dict]:
|
143 |
+
"""Generate comprehensive authentication test cases"""
|
144 |
+
base_tests = [
|
145 |
+
{
|
146 |
+
"title": "Valid User Login",
|
147 |
+
"description": "Verify that users can successfully log in with valid credentials",
|
148 |
+
"preconditions": ["User account exists", "Application is accessible"],
|
149 |
+
"steps": [
|
150 |
+
"Navigate to login page",
|
151 |
+
"Enter valid username",
|
152 |
+
"Enter valid password",
|
153 |
+
"Click login button"
|
154 |
+
],
|
155 |
+
"expected": "User is successfully authenticated and redirected to dashboard/home page",
|
156 |
+
"postconditions": ["User session is created", "User is logged in"],
|
157 |
+
"test_data": "Valid username: [email protected], Valid password: Test@123"
|
158 |
+
},
|
159 |
+
{
|
160 |
+
"title": "Invalid Username Login",
|
161 |
+
"description": "Verify that login fails with invalid username",
|
162 |
+
"preconditions": ["Application is accessible"],
|
163 |
+
"steps": [
|
164 |
+
"Navigate to login page",
|
165 |
+
"Enter invalid/non-existent username",
|
166 |
+
"Enter valid password format",
|
167 |
+
"Click login button"
|
168 |
+
],
|
169 |
+
"expected": "Login fails with appropriate error message 'Invalid credentials'",
|
170 |
+
"postconditions": ["User remains on login page", "Account security maintained"],
|
171 |
+
"test_data": "Valid username: [email protected], Invalid password: WrongPass123"
|
172 |
+
},
|
173 |
+
{
|
174 |
+
"title": "Empty Fields Login Attempt",
|
175 |
+
"description": "Verify validation when login attempted with empty fields",
|
176 |
+
"preconditions": ["Application is accessible"],
|
177 |
+
"steps": [
|
178 |
+
"Navigate to login page",
|
179 |
+
"Leave username field empty",
|
180 |
+
"Leave password field empty",
|
181 |
+
"Click login button"
|
182 |
+
],
|
183 |
+
"expected": "Validation errors displayed for required fields",
|
184 |
+
"postconditions": ["User remains on login page", "Form validation active"],
|
185 |
+
"test_data": "Username: (empty), Password: (empty)"
|
186 |
+
},
|
187 |
+
{
|
188 |
+
"title": "SQL Injection Attack Prevention",
|
189 |
+
"description": "Verify that login form prevents SQL injection attacks",
|
190 |
+
"preconditions": ["Application is accessible"],
|
191 |
+
"steps": [
|
192 |
+
"Navigate to login page",
|
193 |
+
"Enter SQL injection payload in username field",
|
194 |
+
"Enter any password",
|
195 |
+
"Click login button"
|
196 |
+
],
|
197 |
+
"expected": "Login fails safely without database compromise or error exposure",
|
198 |
+
"postconditions": ["System security maintained", "No unauthorized access"],
|
199 |
+
"test_data": "Username: admin'; DROP TABLE users; --, Password: anypass"
|
200 |
+
}
|
201 |
+
]
|
202 |
+
|
203 |
+
return base_tests
|
204 |
+
|
205 |
+
def generate_data_validation_tests(matches: List[str]) -> List[Dict]:
|
206 |
+
"""Generate comprehensive data validation test cases"""
|
207 |
+
return [
|
208 |
+
{
|
209 |
+
"title": "Valid Data Input Validation",
|
210 |
+
"description": "Verify system accepts valid data formats correctly",
|
211 |
+
"preconditions": ["Form/API endpoint is accessible", "User has appropriate permissions"],
|
212 |
+
"steps": [
|
213 |
+
"Access the input form/endpoint",
|
214 |
+
"Enter data in valid format",
|
215 |
+
"Submit the form/request",
|
216 |
+
"Verify data is accepted"
|
217 |
+
],
|
218 |
+
"expected": "Data is accepted and processed successfully with confirmation message",
|
219 |
+
"postconditions": ["Data is stored correctly", "User receives success feedback"],
|
220 |
+
"test_data": "Valid email: [email protected], Valid phone: +1-234-567-8900"
|
221 |
+
},
|
222 |
+
{
|
223 |
+
"title": "Invalid Data Format Rejection",
|
224 |
+
"description": "Verify system rejects invalid data formats",
|
225 |
+
"preconditions": ["Form/API endpoint is accessible"],
|
226 |
+
"steps": [
|
227 |
+
"Access the input form/endpoint",
|
228 |
+
"Enter data in invalid format",
|
229 |
+
"Submit the form/request",
|
230 |
+
"Verify validation error is shown"
|
231 |
+
],
|
232 |
+
"expected": "System rejects invalid data with clear error message",
|
233 |
+
"postconditions": ["Invalid data is not stored", "User guided to correct format"],
|
234 |
+
"test_data": "Invalid email: notanemail, Invalid phone: 123-abc-defg"
|
235 |
+
},
|
236 |
+
{
|
237 |
+
"title": "Boundary Value Testing",
|
238 |
+
"description": "Test data validation at boundary values",
|
239 |
+
"preconditions": ["System has defined data length/value limits"],
|
240 |
+
"steps": [
|
241 |
+
"Test with minimum allowed value",
|
242 |
+
"Test with maximum allowed value",
|
243 |
+
"Test with value just below minimum",
|
244 |
+
"Test with value just above maximum"
|
245 |
+
],
|
246 |
+
"expected": "Min/max values accepted, out-of-range values rejected appropriately",
|
247 |
+
"postconditions": ["Boundary validation working correctly"],
|
248 |
+
"test_data": "Min: 1, Max: 100, Below: 0, Above: 101"
|
249 |
+
},
|
250 |
+
{
|
251 |
+
"title": "Special Characters Handling",
|
252 |
+
"description": "Verify proper handling of special characters in input",
|
253 |
+
"preconditions": ["Input fields accept text data"],
|
254 |
+
"steps": [
|
255 |
+
"Enter text with special characters (!@#$%^&*)",
|
256 |
+
"Enter text with unicode characters (émañ)",
|
257 |
+
"Enter text with HTML tags (<script>)",
|
258 |
+
"Submit and verify handling"
|
259 |
+
],
|
260 |
+
"expected": "Special characters handled safely without breaking functionality",
|
261 |
+
"postconditions": ["Data integrity maintained", "No XSS vulnerabilities"],
|
262 |
+
"test_data": "Special: Test!@#$, Unicode: Café, HTML: <b>test</b>"
|
263 |
+
}
|
264 |
]
|
|
|
|
|
265 |
|
266 |
+
def generate_performance_tests(matches: List[str]) -> List[Dict]:
|
267 |
+
"""Generate comprehensive performance test cases"""
|
268 |
+
return [
|
269 |
+
{
|
270 |
+
"title": "Response Time Under Normal Load",
|
271 |
+
"description": "Verify system response time meets requirements under normal usage",
|
272 |
+
"preconditions": ["System is running in production-like environment", "Normal user load"],
|
273 |
+
"steps": [
|
274 |
+
"Execute typical user operations",
|
275 |
+
"Measure response times for key functions",
|
276 |
+
"Record average response times",
|
277 |
+
"Compare against SLA requirements"
|
278 |
+
],
|
279 |
+
"expected": "All operations complete within specified time limits (e.g., <3 seconds)",
|
280 |
+
"postconditions": ["Performance baseline established"],
|
281 |
+
"test_data": "Target: <3 sec for page loads, <1 sec for API calls"
|
282 |
+
},
|
283 |
+
{
|
284 |
+
"title": "Load Testing with Multiple Users",
|
285 |
+
"description": "Test system performance with concurrent users",
|
286 |
+
"preconditions": ["Load testing tools configured", "Test environment ready"],
|
287 |
+
"steps": [
|
288 |
+
"Simulate 100 concurrent users",
|
289 |
+
"Execute common user workflows",
|
290 |
+
"Monitor system resources (CPU, memory)",
|
291 |
+
"Measure response times and error rates"
|
292 |
+
],
|
293 |
+
"expected": "System maintains acceptable performance with <5% error rate",
|
294 |
+
"postconditions": ["Load capacity documented", "Performance bottlenecks identified"],
|
295 |
+
"test_data": "Concurrent users: 100, Duration: 30 minutes"
|
296 |
+
},
|
297 |
+
{
|
298 |
+
"title": "Memory Usage Optimization",
|
299 |
+
"description": "Verify system memory usage remains within acceptable limits",
|
300 |
+
"preconditions": ["System monitoring tools available"],
|
301 |
+
"steps": [
|
302 |
+
"Monitor memory usage during normal operations",
|
303 |
+
"Execute memory-intensive operations",
|
304 |
+
"Check for memory leaks over extended periods",
|
305 |
+
"Verify garbage collection effectiveness"
|
306 |
+
],
|
307 |
+
"expected": "Memory usage stays within allocated limits, no memory leaks detected",
|
308 |
+
"postconditions": ["Memory optimization verified"],
|
309 |
+
"test_data": "Memory limit: 512MB, Test duration: 2 hours"
|
310 |
+
}
|
311 |
+
]
|
312 |
|
313 |
+
def generate_api_tests(matches: List[str]) -> List[Dict]:
|
314 |
+
"""Generate comprehensive API test cases"""
|
315 |
+
return [
|
316 |
+
{
|
317 |
+
"title": "Valid API Request Processing",
|
318 |
+
"description": "Verify API correctly processes valid requests",
|
319 |
+
"preconditions": ["API endpoint is accessible", "Valid authentication token available"],
|
320 |
+
"steps": [
|
321 |
+
"Send GET/POST request with valid parameters",
|
322 |
+
"Include proper authentication headers",
|
323 |
+
"Verify response status code",
|
324 |
+
"Validate response data structure"
|
325 |
+
],
|
326 |
+
"expected": "API returns 200 OK with expected data format",
|
327 |
+
"postconditions": ["Request logged", "Data processed correctly"],
|
328 |
+
"test_data": "Endpoint: /api/users, Method: GET, Auth: Bearer token123"
|
329 |
+
},
|
330 |
+
{
|
331 |
+
"title": "Invalid API Request Handling",
|
332 |
+
"description": "Verify API properly handles invalid requests",
|
333 |
+
"preconditions": ["API endpoint is accessible"],
|
334 |
+
"steps": [
|
335 |
+
"Send request with invalid parameters",
|
336 |
+
"Send request with missing required fields",
|
337 |
+
"Send malformed JSON in request body",
|
338 |
+
"Verify error responses"
|
339 |
+
],
|
340 |
+
"expected": "API returns appropriate error codes (400, 422) with descriptive messages",
|
341 |
+
"postconditions": ["Errors logged appropriately", "System remains stable"],
|
342 |
+
"test_data": "Invalid param: user_id='invalid', Missing: required field 'name'"
|
343 |
+
},
|
344 |
+
{
|
345 |
+
"title": "API Authentication and Authorization",
|
346 |
+
"description": "Test API security and access controls",
|
347 |
+
"preconditions": ["API requires authentication"],
|
348 |
+
"steps": [
|
349 |
+
"Send request without authentication token",
|
350 |
+
"Send request with invalid/expired token",
|
351 |
+
"Send request with valid token but insufficient permissions",
|
352 |
+
"Verify security responses"
|
353 |
+
],
|
354 |
+
"expected": "Unauthorized requests return 401/403 with security maintained",
|
355 |
+
"postconditions": ["Security audit trail created"],
|
356 |
+
"test_data": "Valid token: Bearer abc123, Invalid: Bearer expired456"
|
357 |
+
}
|
358 |
+
]
|
359 |
|
360 |
+
def generate_error_handling_tests(matches: List[str]) -> List[Dict]:
|
361 |
+
"""Generate comprehensive error handling test cases"""
|
362 |
+
return [
|
363 |
+
{
|
364 |
+
"title": "Graceful Error Message Display",
|
365 |
+
"description": "Verify system displays user-friendly error messages",
|
366 |
+
"preconditions": ["Error conditions can be triggered"],
|
367 |
+
"steps": [
|
368 |
+
"Trigger various error conditions",
|
369 |
+
"Verify error messages are displayed",
|
370 |
+
"Check that messages are user-friendly",
|
371 |
+
"Ensure no technical details exposed"
|
372 |
+
],
|
373 |
+
"expected": "Clear, helpful error messages shown without exposing system internals",
|
374 |
+
"postconditions": ["User experience maintained during errors"],
|
375 |
+
"test_data": "Error scenarios: network timeout, invalid input, server error"
|
376 |
+
},
|
377 |
+
{
|
378 |
+
"title": "System Recovery After Errors",
|
379 |
+
"description": "Test system's ability to recover from error states",
|
380 |
+
"preconditions": ["System can be put into error state"],
|
381 |
+
"steps": [
|
382 |
+
"Trigger system error condition",
|
383 |
+
"Verify error is handled gracefully",
|
384 |
+
"Attempt normal operations after error",
|
385 |
+
"Verify system functionality restored"
|
386 |
+
],
|
387 |
+
"expected": "System recovers fully and continues normal operation",
|
388 |
+
"postconditions": ["System stability maintained", "No data corruption"],
|
389 |
+
"test_data": "Recovery scenarios: database disconnect, memory overflow"
|
390 |
+
}
|
391 |
+
]
|
392 |
|
393 |
+
def generate_template_based_test_cases(srs_text: str) -> List[Dict]:
|
394 |
+
"""Generate comprehensive template-based test cases using pattern analysis"""
|
395 |
+
detected_patterns = analyze_requirements(srs_text)
|
396 |
+
all_test_cases = []
|
397 |
+
|
398 |
+
# Generate specific test cases based on detected patterns
|
399 |
+
for pattern_name, pattern_data in detected_patterns.items():
|
400 |
+
if pattern_name == 'authentication':
|
401 |
+
tests = generate_authentication_tests(pattern_data['matches'])
|
402 |
+
elif pattern_name == 'data_validation':
|
403 |
+
tests = generate_data_validation_tests(pattern_data['matches'])
|
404 |
+
elif pattern_name == 'performance':
|
405 |
+
tests = generate_performance_tests(pattern_data['matches'])
|
406 |
+
elif pattern_name == 'api':
|
407 |
+
tests = generate_api_tests(pattern_data['matches'])
|
408 |
+
elif pattern_name == 'error_handling':
|
409 |
+
tests = generate_error_handling_tests(pattern_data['matches'])
|
410 |
+
else:
|
411 |
+
# Generate generic tests for other patterns
|
412 |
+
tests = generate_generic_tests(pattern_name, pattern_data)
|
413 |
+
|
414 |
+
# Add pattern-specific metadata to each test
|
415 |
+
for i, test in enumerate(tests):
|
416 |
+
test['id'] = f"TC_{pattern_name.upper()}_{i+1:03d}"
|
417 |
+
test['priority'] = pattern_data['priority']
|
418 |
+
test['category'] = pattern_data['category']
|
419 |
+
|
420 |
+
all_test_cases.extend(tests)
|
421 |
+
|
422 |
+
# If no specific patterns detected, generate generic functional tests
|
423 |
+
if not all_test_cases:
|
424 |
+
all_test_cases = generate_generic_functional_tests(srs_text)
|
425 |
+
|
426 |
+
# Limit to reasonable number of test cases
|
427 |
+
return all_test_cases[:12]
|
428 |
+
|
429 |
+
def generate_generic_tests(pattern_name: str, pattern_data: Dict) -> List[Dict]:
|
430 |
+
"""Generate generic test cases for unspecified patterns"""
|
431 |
+
return [
|
432 |
+
{
|
433 |
+
"title": f"{pattern_name.replace('_', ' ').title()} - Positive Test",
|
434 |
+
"description": f"Verify {pattern_name.replace('_', ' ')} functionality works correctly",
|
435 |
+
"preconditions": ["System is accessible", "User has required permissions"],
|
436 |
+
"steps": [
|
437 |
+
f"Access {pattern_name.replace('_', ' ')} feature",
|
438 |
+
"Perform valid operation",
|
439 |
+
"Verify expected behavior"
|
440 |
+
],
|
441 |
+
"expected": f"{pattern_name.replace('_', ' ').title()} functionality works as expected",
|
442 |
+
"postconditions": ["System state is valid"],
|
443 |
+
"test_data": "Valid test data as per requirements"
|
444 |
+
},
|
445 |
+
{
|
446 |
+
"title": f"{pattern_name.replace('_', ' ').title()} - Negative Test",
|
447 |
+
"description": f"Verify {pattern_name.replace('_', ' ')} handles invalid scenarios",
|
448 |
+
"preconditions": ["System is accessible"],
|
449 |
+
"steps": [
|
450 |
+
f"Access {pattern_name.replace('_', ' ')} feature",
|
451 |
+
"Perform invalid operation",
|
452 |
+
"Verify error handling"
|
453 |
+
],
|
454 |
+
"expected": f"Invalid {pattern_name.replace('_', ' ')} operation handled gracefully",
|
455 |
+
"postconditions": ["System remains stable"],
|
456 |
+
"test_data": "Invalid test data to trigger error conditions"
|
457 |
+
}
|
458 |
+
]
|
459 |
|
460 |
+
def generate_generic_functional_tests(srs_text: str) -> List[Dict]:
|
461 |
+
"""Generate generic functional test cases when no specific patterns are detected"""
|
462 |
+
return [
|
463 |
+
{
|
464 |
+
"id": "TC_FUNC_001",
|
465 |
+
"title": "Basic System Functionality",
|
466 |
+
"priority": "High",
|
467 |
+
"category": "Functional",
|
468 |
+
"description": "Verify core system functionality works as specified",
|
469 |
+
"preconditions": ["System is deployed and accessible", "Test environment is configured"],
|
470 |
+
"steps": [
|
471 |
+
"Access the system/application",
|
472 |
+
"Navigate through main features",
|
473 |
+
"Execute primary use cases",
|
474 |
+
"Verify all functions work correctly"
|
475 |
+
],
|
476 |
+
"expected": "All core functionality operates according to requirements",
|
477 |
+
"postconditions": ["System demonstrates full functionality"],
|
478 |
+
"test_data": "Standard test data set as defined in requirements"
|
479 |
+
},
|
480 |
+
{
|
481 |
+
"id": "TC_FUNC_002",
|
482 |
+
"title": "Input Validation and Processing",
|
483 |
+
"priority": "High",
|
484 |
+
"category": "Functional",
|
485 |
+
"description": "Test system's ability to validate and process various inputs",
|
486 |
+
"preconditions": ["System accepts user input"],
|
487 |
+
"steps": [
|
488 |
+
"Enter valid data in all input fields",
|
489 |
+
"Submit data and verify processing",
|
490 |
+
"Enter invalid data and verify rejection",
|
491 |
+
"Test boundary conditions"
|
492 |
+
],
|
493 |
+
"expected": "Valid data processed correctly, invalid data rejected with appropriate messages",
|
494 |
+
"postconditions": ["Data integrity maintained"],
|
495 |
+
"test_data": "Mix of valid, invalid, and boundary test data"
|
496 |
+
},
|
497 |
+
{
|
498 |
+
"id": "TC_FUNC_003",
|
499 |
+
"title": "System Integration and Workflow",
|
500 |
+
"priority": "Medium",
|
501 |
+
"category": "Integration",
|
502 |
+
"description": "Verify end-to-end workflow and system integration",
|
503 |
+
"preconditions": ["All system components are integrated"],
|
504 |
+
"steps": [
|
505 |
+
"Execute complete business workflow",
|
506 |
+
"Verify data flow between components",
|
507 |
+
"Test system integration points",
|
508 |
+
"Validate end-to-end functionality"
|
509 |
+
],
|
510 |
+
"expected": "Complete workflow executes successfully with proper data flow",
|
511 |
+
"postconditions": ["Workflow completion confirmed"],
|
512 |
+
"test_data": "Complete dataset for end-to-end testing"
|
513 |
+
}
|
514 |
+
]
|
515 |
|
516 |
+
def parse_generated_test_cases(generated_text: str) -> List[Dict]:
|
517 |
+
"""Parse AI-generated text into structured test cases"""
|
518 |
lines = generated_text.split('\n')
|
519 |
test_cases = []
|
520 |
current_case = {}
|
|
|
526 |
if current_case:
|
527 |
test_cases.append(current_case)
|
528 |
current_case = {
|
529 |
+
"id": f"TC_AI_{case_counter:03d}",
|
530 |
"title": line,
|
531 |
+
"priority": "Medium",
|
532 |
+
"category": "Functional",
|
533 |
"description": line,
|
534 |
+
"preconditions": ["System is accessible"],
|
535 |
+
"steps": ["Execute the test procedure"],
|
536 |
+
"expected": "Test should pass according to requirements",
|
537 |
+
"postconditions": ["System state verified"],
|
538 |
+
"test_data": "As specified in requirements"
|
539 |
}
|
540 |
case_counter += 1
|
541 |
|
|
|
544 |
|
545 |
if not test_cases:
|
546 |
return [{
|
547 |
+
"id": "TC_AI_001",
|
548 |
+
"title": "AI Generated Test Case",
|
549 |
+
"priority": "Medium",
|
550 |
+
"category": "Functional",
|
551 |
+
"description": "Auto-generated test case based on AI analysis",
|
552 |
+
"preconditions": ["System meets specified requirements"],
|
553 |
+
"steps": ["Review requirements", "Execute test procedure", "Verify results"],
|
554 |
+
"expected": "Requirements should be met as specified",
|
555 |
+
"postconditions": ["Test completion verified"],
|
556 |
+
"test_data": "Test data as defined in requirements"
|
557 |
}]
|
558 |
|
559 |
return test_cases
|
560 |
|
561 |
+
def generate_with_ai_model(srs_text: str, tokenizer, model) -> List[Dict]:
|
562 |
+
"""Generate test cases using AI model"""
|
563 |
+
max_input_length = 300
|
564 |
if len(srs_text) > max_input_length:
|
565 |
srs_text = srs_text[:max_input_length]
|
566 |
|
567 |
+
prompt = f"""Generate comprehensive test cases for this software requirement:
|
568 |
{srs_text}
|
569 |
|
570 |
Test Cases:
|
|
|
574 |
inputs = tokenizer.encode(
|
575 |
prompt,
|
576 |
return_tensors="pt",
|
577 |
+
max_length=200,
|
578 |
truncation=True
|
579 |
)
|
580 |
|
581 |
with torch.no_grad():
|
582 |
outputs = model.generate(
|
583 |
inputs,
|
584 |
+
max_new_tokens=150,
|
585 |
num_return_sequences=1,
|
586 |
temperature=0.7,
|
587 |
do_sample=True,
|
|
|
598 |
logger.error(f"❌ AI generation failed: {e}")
|
599 |
raise
|
600 |
|
601 |
+
def generate_with_fallback(srs_text: str):
|
602 |
+
"""Generate test cases with AI model fallback to enhanced templates"""
|
603 |
model_name = get_optimal_model_for_memory()
|
604 |
|
605 |
if model_name:
|
|
|
610 |
reason = get_algorithm_reason(model_name)
|
611 |
return test_cases, model_name, "transformer (causal LM)", reason
|
612 |
except Exception as e:
|
613 |
+
logger.warning(f"AI generation failed: {e}, falling back to enhanced templates")
|
614 |
|
615 |
+
logger.info("⚠️ Using enhanced template-based generation")
|
616 |
test_cases = generate_template_based_test_cases(srs_text)
|
617 |
+
return test_cases, "Enhanced Template-Based Generator", "pattern-matching + rule-based", "Enhanced template generation with comprehensive pattern analysis and structured test case creation"
|
618 |
|
619 |
# ✅ Function exposed to app.py
|
620 |
+
def generate_test_cases(srs_text: str) -> List[Dict]:
|
621 |
+
"""Main function to generate test cases"""
|
622 |
return generate_with_fallback(srs_text)[0]
|
623 |
|
624 |
def get_generator():
|
625 |
+
"""Get generator instance"""
|
626 |
global _generator_instance
|
627 |
if _generator_instance is None:
|
628 |
class Generator:
|
|
|
636 |
def get_model_info(self):
|
637 |
mem = psutil.Process().memory_info().rss / 1024 / 1024
|
638 |
return {
|
639 |
+
"model_name": self.model_name if self.model_name else "Enhanced Template-Based Generator",
|
640 |
+
"status": "loaded" if self.model else "enhanced_template_mode",
|
641 |
"memory_usage": f"{mem:.1f}MB",
|
642 |
+
"optimization": "low_memory_enhanced"
|
643 |
}
|
644 |
|
645 |
_generator_instance = Generator()
|
|
|
647 |
return _generator_instance
|
648 |
|
649 |
def monitor_memory():
|
650 |
+
"""Monitor and manage memory usage"""
|
651 |
mem = psutil.Process().memory_info().rss / 1024 / 1024
|
652 |
logger.info(f"Memory usage: {mem:.1f}MB")
|
653 |
if mem > 450:
|
654 |
gc.collect()
|
655 |
logger.info("Memory cleanup triggered")
|
656 |
|
657 |
+
def generate_test_cases_and_info(input_text: str) -> Dict[str, Any]:
|
658 |
+
"""Generate test cases with full information"""
|
659 |
test_cases, model_name, algorithm_used, reason = generate_with_fallback(input_text)
|
660 |
return {
|
661 |
"model": model_name,
|
|
|
664 |
"test_cases": test_cases
|
665 |
}
|
666 |
|
667 |
+
def get_algorithm_reason(model_name: str) -> str:
|
668 |
+
"""Get explanation for algorithm selection"""
|
669 |
if model_name == "microsoft/DialoGPT-small":
|
670 |
+
return "Selected due to low memory availability; DialoGPT-small provides conversational understanding in limited memory environments with enhanced context processing."
|
671 |
elif model_name == "distilgpt2":
|
672 |
+
return "Selected for its balance between performance and low memory usage. Ideal for small environments needing causal language modeling with good text generation quality."
|
673 |
elif model_name == "gpt2":
|
674 |
+
return "Chosen for general-purpose text generation with moderate memory headroom and superior language understanding capabilities."
|
675 |
elif model_name is None:
|
676 |
+
return "Enhanced template-based generation selected due to memory constraints. Uses comprehensive pattern matching, requirement analysis, and structured test case templates for robust test coverage."
|
677 |
else:
|
678 |
+
return "Model selected based on optimal tradeoff between memory usage, language generation capability, and test case quality requirements." fails with appropriate error message 'Invalid credentials'",
|
requirements.txt
CHANGED
Binary files a/requirements.txt and b/requirements.txt differ
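The binary diff hides the new contents of requirements.txt, but the file-reading path added in app.py imports docx and PyPDF2 only inside try/except blocks, so the most likely additions are these optional parsers; a hedged guess, package names assumed and versions left unpinned:

```
python-docx
PyPDF2
```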