brickfrog committed
Commit d6f5eba · verified · 1 Parent(s): 993126d

Upload folder using huggingface_hub

Files changed (2)
  1. app.py +626 -335
  2. requirements.txt +2 -0
app.py CHANGED
@@ -20,6 +20,8 @@ import json
 import tempfile
 from pathlib import Path
 import pandas as pd
 
 
 class Step(BaseModel):
@@ -194,6 +196,76 @@ def structured_output_completion(
         raise
 
 
 def generate_cards_batch(
     client, model, topic, num_cards, system_prompt, generate_cloze=False, batch_size=3
 ):
@@ -319,30 +391,29 @@ GENERATION_MODES = [
 def generate_cards(
     api_key_input,
     subject,
-    model_name="gpt-4.1-mini",
     topic_number=1,
     cards_per_topic=2,
     preference_prompt="assume I'm a beginner",
     generate_cloze=False,
 ):
-    logger.info(f"Starting card generation for subject: {subject}")
     logger.debug(
-        f"Parameters: topics={topic_number}, cards_per_topic={cards_per_topic}, cloze={generate_cloze}"
     )
 
-    # Input validation
     if not api_key_input:
         logger.warning("No API key provided")
         raise gr.Error("OpenAI API key is required")
     if not api_key_input.startswith("sk-"):
         logger.warning("Invalid API key format")
         raise gr.Error("Invalid API key format. OpenAI keys should start with 'sk-'")
-    if not subject.strip():
-        logger.warning("No subject provided")
-        raise gr.Error("Subject is required")
-
-    gr.Info("🚀 Starting card generation...")
 
     try:
         logger.debug("Initializing OpenAI client")
         client = OpenAI(api_key=api_key_input)
@@ -353,101 +424,268 @@ def generate_cards(
353
  model = model_name
354
  flattened_data = []
355
  total = 0
356
-
357
  progress_tracker = gr.Progress(track_tqdm=True)
358
-
359
- system_prompt = f"""
360
- You are an expert educator in {subject}, creating an optimized learning sequence.
361
- Your goal is to:
362
- 1. Break down the subject into logical concepts
363
- 2. Identify prerequisites and learning outcomes
364
- 3. Generate cards that build upon each other
365
- 4. Address and correct common misconceptions
366
- 5. Include verification steps to minimize hallucinations
367
- 6. Provide a recommended study order
368
-
369
- For explanations and examples:
370
- - Keep explanations in plain text
371
- - Format code examples with triple backticks (```)
372
- - Separate conceptual examples from code examples
373
- - Use clear, concise language
374
-
375
- Keep in mind the user's preferences: {preference_prompt}
376
- """
377
-
378
- topic_prompt = f"""
379
- Generate the top {topic_number} important subjects to know about {subject} in
380
- order of ascending difficulty. Return your response as a JSON object with the following structure:
381
- {{
382
- "topics": [
383
- {{
384
- "name": "topic name",
385
- "difficulty": "beginner/intermediate/advanced",
386
- "description": "brief description"
387
- }}
388
- ]
389
- }}
390
- """
391
 
392
  try:
393
- logger.info("Generating topics...")
394
- topics_response = structured_output_completion(
395
- client, model, {"type": "json_object"}, system_prompt, topic_prompt
396
- )
397
-
398
- if not topics_response or "topics" not in topics_response:
399
- logger.error("Invalid topics response format")
400
- raise gr.Error("Failed to generate topics. Please try again.")
401
-
402
- topics = topics_response["topics"]
403
 
404
- gr.Info(f"✨ Generated {len(topics)} topics successfully!")
405
 
406
- # Generate cards for each topic
407
- for i, topic in enumerate(
408
- progress_tracker.tqdm(topics, desc="Generating cards")
409
- ):
410
  try:
411
- cards = generate_cards_batch(
412
- client,
413
- model,
414
- topic["name"],
415
- cards_per_topic,
416
- system_prompt,
417
- generate_cloze=generate_cloze,
418
- batch_size=3,
419
  )
420
-
421
- if cards:
422
- for card_index, card in enumerate(cards, start=1):
423
- index = f"{i + 1}.{card_index}"
424
- metadata = card.metadata or {}
425
-
426
- row = [
427
- index,
428
- topic["name"],
429
- card.card_type,
430
- card.front.question,
431
- card.back.answer,
432
- card.back.explanation,
433
- card.back.example,
434
- metadata.get("prerequisites", []),
435
- metadata.get("learning_outcomes", []),
436
- metadata.get("misconceptions", []),
437
- metadata.get("difficulty", "beginner"),
438
- ]
439
- flattened_data.append(row)
440
- total += 1
441
-
442
- gr.Info(f"✅ Generated {len(cards)} cards for {topic['name']}")
443
-
444
- except Exception as e:
445
  logger.error(
446
- f"Failed to generate cards for topic {topic['name']}: {str(e)}"
447
  )
448
- gr.Warning(f"Failed to generate cards for '{topic['name']}'")
449
- continue
451
  final_html = f"""
452
  <div style="text-align: center">
453
  <p>✅ Generation complete!</p>
@@ -455,7 +693,6 @@ def generate_cards(
             </div>
         """
 
-        # Convert to DataFrame with all columns
         df = pd.DataFrame(
             flattened_data,
             columns=[
@@ -472,12 +709,15 @@ def generate_cards(
                 "Difficulty",
             ],
         )
-
         return df, final_html, total
 
     except Exception as e:
         logger.error(f"Card generation failed: {str(e)}", exc_info=True)
-        raise gr.Error(f"Card generation failed: {str(e)}")
 
 
 # Update the BASIC_MODEL definition with enhanced CSS/HTML
@@ -1059,7 +1299,7 @@ with gr.Blocks(
     css="""
     #footer {display:none !important}
     .tall-dataframe {min-height: 500px !important}
-    .contain {max-width: 95% !important; margin: auto;}
     .output-cards {border-radius: 8px; box-shadow: 0 4px 6px -1px rgba(0,0,0,0.1);}
     .hint-text {font-size: 0.9em; color: #666; margin-top: 4px;}
     .export-group > .gradio-group { margin-bottom: 0 !important; padding-bottom: 5px !important; }
@@ -1072,146 +1312,175 @@ with gr.Blocks(
1072
  #### Generate comprehensive Anki flashcards using AI.
1073
  """)
1074
 
1075
- with gr.Row():
1076
- with gr.Column(scale=1):
1077
- gr.Markdown("### Configuration")
1078
-
1079
- # Add mode selection
1080
- generation_mode = gr.Radio(
1081
- choices=["subject", "path"],
1082
- value="subject",
1083
- label="Generation Mode",
1084
- info="Choose how you want to generate content",
1085
- )
1086
-
1087
- # Create containers for different modes
1088
- with gr.Group() as subject_mode:
1089
- subject = gr.Textbox(
1090
- label="Subject",
1091
- placeholder="Enter the subject, e.g., 'Basic SQL Concepts'",
1092
- info="The topic you want to generate flashcards for",
1093
- )
1094
-
1095
- with gr.Group(visible=False) as path_mode:
1096
- description = gr.Textbox(
1097
- label="Learning Goal",
1098
- placeholder="Paste a job description or describe what you want to learn...",
1099
- info="We'll break this down into learnable subjects",
1100
- lines=5,
1101
- )
1102
- analyze_button = gr.Button(
1103
- "Analyze & Break Down", variant="secondary"
1104
- )
1105
-
1106
- # Common settings
1107
- api_key_input = gr.Textbox(
1108
- label="OpenAI API Key",
1109
- type="password",
1110
- placeholder="Enter your OpenAI API key",
1111
- value=os.getenv("OPENAI_API_KEY", ""),
1112
- info="Your OpenAI API key starting with 'sk-'",
1113
- )
1114
-
1115
- # Generation Button
1116
- generate_button = gr.Button("Generate Cards", variant="primary")
1117
-
1118
- # Advanced Settings in Accordion
1119
- with gr.Accordion("Advanced Settings", open=False):
1120
- model_choice = gr.Dropdown(
1121
- choices=["gpt-4.1", "gpt-4.1-nano"], # Corrected choices
1122
- value="gpt-4.1-nano", # Changed default to nano as it's faster/cheaper
1123
- label="Model Selection",
1124
- info="Select the AI model to use for generation",
1125
- )
1126
-
1127
- # Add tooltip/description for models
1128
- model_info = gr.Markdown("""
1129
- **Model Information:**
1130
- - **gpt-4.1**: Highest quality, slower generation
1131
- - **gpt-4.1-nano**: Optimized for speed and lower cost
1132
- """) # Corrected descriptions
1133
-
1134
- topic_number = gr.Slider(
1135
- label="Number of Topics",
1136
- minimum=2,
1137
- maximum=20,
1138
- step=1,
1139
- value=2,
1140
- info="How many distinct topics to cover within the subject",
1141
- )
1142
- cards_per_topic = gr.Slider(
1143
- label="Cards per Topic",
1144
- minimum=2,
1145
- maximum=30,
1146
- step=1,
1147
- value=3,
1148
- info="How many flashcards to generate for each topic",
1149
- )
1150
- preference_prompt = gr.Textbox(
1151
- label="Learning Preferences",
1152
- placeholder="e.g., 'Assume I'm a beginner' or 'Focus on practical examples'",
1153
- info="Customize how the content is presented",
1154
- lines=3,
1155
- )
1156
- generate_cloze_checkbox = gr.Checkbox(
1157
- label="Generate Cloze Cards (Experimental)",
1158
- value=False,
1159
- info="Allow the AI to generate fill-in-the-blank style cards where appropriate.",
1160
  )
1161
 
1162
- # Right column - add a new container for learning path results
1163
- with gr.Column(scale=2):
1164
- with gr.Group(visible=False) as path_results:
1165
- gr.Markdown("### Learning Path Analysis")
1166
- subjects_list = gr.Dataframe(
1167
- headers=["Subject", "Prerequisites", "Time Estimate"],
1168
- label="Recommended Subjects",
1169
- interactive=False,
1170
  )
1171
- learning_order = gr.Markdown("### Recommended Learning Order")
1172
- projects = gr.Markdown("### Suggested Projects")
1173
 
1174
- # Replace generate_selected with use_subjects
1175
- use_subjects = gr.Button(
1176
- "Use These Subjects ℹ️", # Added info emoji to button text
1177
- variant="primary",
1178
- )
1179
- gr.Markdown(
1180
- "*Click to copy subjects to main input for card generation*",
1181
- elem_classes="hint-text",
1182
- )
1183
 
1184
- # Existing output components
1185
- with gr.Group() as cards_output:
1186
- gr.Markdown("### Generated Cards")
1187
 
1188
- # Output Format Documentation
1189
- with gr.Accordion("Output Format", open=False):
1190
- gr.Markdown("""
1191
- The generated cards include:
1192
-
1193
- * **Index**: Unique identifier for each card
1194
- * **Topic**: The specific subtopic within your subject
1195
- * **Card_Type**: Type of card (basic or cloze)
1196
- * **Question**: Clear, focused question for the flashcard front
1197
- * **Answer**: Concise core answer
1198
- * **Explanation**: Detailed conceptual explanation
1199
- * **Example**: Practical implementation or code example
1200
- * **Prerequisites**: Required knowledge for this concept
1201
- * **Learning Outcomes**: What you should understand after mastering this card
1202
- * **Common Misconceptions**: Incorrect assumptions debunked with explanations
1203
- * **Difficulty**: Concept complexity level for optimal study sequencing
1204
-
1205
- Export options:
1206
- - **CSV**: Raw data for custom processing
1207
- - **Anki Deck**: Ready-to-use deck with formatted cards and metadata
1208
- """)
1209
-
1210
- # Add near the output format documentation
1211
- with gr.Accordion("Example Card Format", open=False):
1212
- gr.Code(
1213
- label="Example Card",
1214
- value="""
 
 
1215
  {
1216
  "front": {
1217
  "question": "What is a PRIMARY KEY constraint in SQL?"
@@ -1231,64 +1500,56 @@ with gr.Blocks(
1231
  "difficulty": "beginner"
1232
  }
1233
  }
1234
- """,
1235
- language="json",
1236
- )
1237
-
1238
- # Dataframe Output
1239
- output = gr.Dataframe(
1240
- value=example_data,
1241
- headers=[
1242
- "Index",
1243
- "Topic",
1244
- "Card_Type",
1245
- "Question",
1246
- "Answer",
1247
- "Explanation",
1248
- "Example",
1249
- "Prerequisites",
1250
- "Learning_Outcomes",
1251
- "Common_Misconceptions",
1252
- "Difficulty",
1253
- ],
1254
- interactive=True,
1255
- elem_classes="tall-dataframe",
1256
- wrap=True,
1257
- column_widths=[
1258
- 50,
1259
- 100,
1260
- 80,
1261
- 200,
1262
- 200,
1263
- 250,
1264
- 200,
1265
- 150,
1266
- 150,
1267
- 150,
1268
- 100,
1269
- ],
1270
  )
1271
 
1272
- # Export Controls
1273
- with gr.Group(elem_classes="export-group"):
1274
- gr.Markdown("#### Export Generated Cards")
1275
- with gr.Row():
1276
- export_csv_button = gr.Button(
1277
- "Export to CSV", variant="secondary"
1278
- )
1279
- export_anki_button = gr.Button(
1280
- "Export to Anki Deck (.apkg)", variant="secondary"
1281
- )
1282
- # Re-wrap File components in an invisible Row
1283
- with gr.Row(visible=False):
1284
- download_csv = gr.File(
1285
- label="Download CSV", interactive=False, visible=False
1286
- )
1287
- download_anki = gr.File(
1288
- label="Download Anki Deck",
1289
- interactive=False,
1290
- visible=False,
1291
- )
1292
 
1293
  # Add near the top of the Blocks
1294
  with gr.Row():
@@ -1297,62 +1558,69 @@ with gr.Blocks(
1297
  label="Total Cards Generated", value=0, visible=False
1298
  )
1299
 
1300
- # Add JavaScript to handle mode switching
1301
  def update_mode_visibility(mode):
1302
- """Update component visibility based on selected mode and clear values"""
1303
  is_subject = mode == "subject"
1304
  is_path = mode == "path"
 
 
1305
 
1306
- # Clear values when switching modes
1307
- if is_path:
1308
- subject.value = "" # Clear subject when switching to path mode
1309
- else:
1310
- description.value = (
1311
- "" # Clear description when switching to subject mode
1312
- )
1313
 
1314
  return {
1315
  subject_mode: gr.update(visible=is_subject),
1316
  path_mode: gr.update(visible=is_path),
 
 
1317
  path_results: gr.update(visible=is_path),
1318
- cards_output: gr.update(visible=not is_path),
1319
- subject: gr.update(value="") if is_path else gr.update(),
1320
- description: gr.update(value="") if not is_path else gr.update(),
1321
- output: gr.update(value=None), # Clear previous output
1322
  progress: gr.update(value="", visible=False),
1323
  total_cards: gr.update(value=0, visible=False),
1324
  }
1325
 
1326
- # Update the mode switching handler to include all components that need clearing
1327
  generation_mode.change(
1328
  fn=update_mode_visibility,
1329
  inputs=[generation_mode],
1330
  outputs=[
1331
  subject_mode,
1332
  path_mode,
 
 
1333
  path_results,
1334
  cards_output,
1335
  subject,
1336
  description,
 
 
1337
  output,
 
 
 
1338
  progress,
1339
  total_cards,
1340
  ],
1341
  )
1342
 
1343
- # Add handler for path analysis
1344
  analyze_button.click(
1345
  fn=analyze_learning_path,
1346
  inputs=[api_key_input, description, model_choice],
1347
  outputs=[subjects_list, learning_order, projects],
1348
  )
1349
 
1350
- # Add this function to handle copying subjects to main input
1351
  def use_selected_subjects(subjects_df):
1352
- """Copy selected subjects to main input and switch to subject mode"""
1353
  if subjects_df is None or subjects_df.empty:
1354
  gr.Warning("No subjects available to copy from Learning Path analysis.")
1355
- # Return updates for all relevant output components to avoid errors
1356
  return (
1357
  gr.update(),
1358
  gr.update(),
@@ -1363,51 +1631,74 @@ with gr.Blocks(
1363
  gr.update(),
1364
  gr.update(),
1365
  gr.update(),
1366
  )
1367
 
1368
  subjects = subjects_df["Subject"].tolist()
1369
  combined_subject = ", ".join(subjects)
1370
- suggested_topics = min(
1371
- len(subjects) + 1, 20
1372
- ) # Suggest topics = num subjects + 1
1373
-
1374
- # Return updates for relevant components
1375
- return (
1376
- "subject", # Set mode to subject
1377
- gr.update(visible=True), # Show subject_mode group
1378
- gr.update(visible=False), # Hide path_mode group
1379
- gr.update(visible=False), # Hide path_results group
1380
- gr.update(visible=True), # Show cards_output group
1381
- combined_subject, # Update subject textbox value
1382
- suggested_topics, # Update topic_number slider value
1383
- # Update preference prompt
1384
- "Focus on connections between these subjects and their practical applications.",
1385
- example_data, # Reset output to example data - THIS NOW WORKS
1386
- )
1387
 
1388
- # Correct the outputs for the use_subjects click handler
1389
  use_subjects.click(
1390
  fn=use_selected_subjects,
1391
- inputs=[subjects_list], # Only needs the dataframe
1392
- outputs=[ # Match the return tuple of the function
1393
  generation_mode,
1394
- subject_mode, # Group visibility
1395
- path_mode, # Group visibility
1396
- path_results, # Group visibility
1397
- cards_output, # Group visibility
1398
- subject, # Component value
1399
- topic_number, # Component value
1400
- preference_prompt, # Component value
1401
- output, # Component value
1402
  ],
1403
  )
1404
 
1405
- # Simplified event handlers
1406
  generate_button.click(
1407
  fn=generate_cards,
1408
  inputs=[
1409
  api_key_input,
1410
  subject,
 
 
 
1411
  model_choice,
1412
  topic_number,
1413
  cards_per_topic,
 
20
  import tempfile
21
  from pathlib import Path
22
  import pandas as pd
23
+ import requests
24
+ from bs4 import BeautifulSoup, FeatureNotFound
25
 
26
 
27
  class Step(BaseModel):
 
196
  raise
197
 
198
 
199
+ def fetch_webpage_text(url: str) -> str:
200
+ """Fetches and extracts main text content from a URL."""
201
+ try:
202
+ logger.info(f"Fetching content from URL: {url}")
203
+ headers = {
204
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
205
+ }
206
+ response = requests.get(url, headers=headers, timeout=15) # Added timeout
207
+ response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
208
+
209
+ logger.debug(f"Parsing HTML content for {url}")
210
+ # Use lxml for speed if available, fallback to html.parser
211
+ try:
212
+ soup = BeautifulSoup(response.text, "lxml")
213
+ except (ImportError, FeatureNotFound):  # bs4 raises FeatureNotFound when the lxml parser is missing
214
+ logger.warning("lxml not found, using html.parser instead.")
215
+ soup = BeautifulSoup(response.text, "html.parser")
216
+
217
+ # Remove script and style elements
218
+ for script_or_style in soup(["script", "style"]):
219
+ script_or_style.extract()
220
+
221
+ # Attempt to find main content tags
222
+ main_content = soup.find("main")
223
+ if not main_content:
224
+ main_content = soup.find("article")
225
+
226
+ # If specific tags found, use their text, otherwise fallback to body
227
+ if main_content:
228
+ text = main_content.get_text()
229
+ logger.debug(f"Extracted text from <{main_content.name}> tag.")
230
+ else:
231
+ body = soup.find("body")
232
+ if body:
233
+ text = body.get_text()
234
+ logger.debug("Extracted text from <body> tag (fallback).")
235
+ else:
236
+ text = "" # No body tag found?
237
+ logger.warning(f"Could not find <body> tag in {url}")
238
+
239
+ # Break into lines and remove leading/trailing space on each
240
+ lines = (line.strip() for line in text.splitlines())
241
+ # Break multi-headlines into a line each
242
+ chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
243
+ # Drop blank lines
244
+ text = "\n".join(chunk for chunk in chunks if chunk)
245
+
246
+ if not text:
247
+ logger.warning(f"Could not extract meaningful text from {url}")
248
+ raise ValueError("Could not extract text content from the URL.")
249
+
250
+ logger.info(
251
+ f"Successfully extracted text from {url} (Length: {len(text)} chars)"
252
+ )
253
+ return text
254
+
255
+ except requests.exceptions.RequestException as e:
256
+ logger.error(f"Network error fetching URL {url}: {e}")
257
+ raise ConnectionError(f"Could not fetch URL: {e}")
258
+ except Exception as e:
259
+ logger.error(f"Error processing URL {url}: {e}", exc_info=True)
260
+ # Re-raise specific internal errors or a general one
261
+ if isinstance(e, (ValueError, ConnectionError)):
262
+ raise e
263
+ else:
264
+ raise RuntimeError(
265
+ f"An unexpected error occurred while processing the URL: {e}"
266
+ )
267
+
268
+
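A quick way to sanity-check the new fetch_webpage_text helper outside the Gradio app is a small standalone call; this is only a sketch (the URL is a placeholder), and the exception types mirror the ones the function raises above.

```python
# Sketch: exercise fetch_webpage_text on its own (placeholder URL).
if __name__ == "__main__":
    try:
        text = fetch_webpage_text("https://example.com")
        print(f"Extracted {len(text)} characters")
        print(text[:300])  # preview the start of the cleaned text
    except (ConnectionError, ValueError, RuntimeError) as err:
        # network failures, empty pages, and unexpected parse errors all surface here
        print(f"Could not extract content: {err}")
```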
269
  def generate_cards_batch(
270
  client, model, topic, num_cards, system_prompt, generate_cloze=False, batch_size=3
271
  ):
 
391
  def generate_cards(
392
  api_key_input,
393
  subject,
394
+ generation_mode,
395
+ source_text,
396
+ url_input,
397
+ model_name="gpt-4.1-nano",
398
  topic_number=1,
399
  cards_per_topic=2,
400
  preference_prompt="assume I'm a beginner",
401
  generate_cloze=False,
402
  ):
403
+ logger.info(f"Starting card generation in {generation_mode} mode")
404
  logger.debug(
405
+ f"Parameters: mode={generation_mode}, topics={topic_number}, cards_per_topic={cards_per_topic}, cloze={generate_cloze}"
406
  )
407
 
408
+ # --- Common Setup ---
409
  if not api_key_input:
410
  logger.warning("No API key provided")
411
  raise gr.Error("OpenAI API key is required")
412
  if not api_key_input.startswith("sk-"):
413
  logger.warning("Invalid API key format")
414
  raise gr.Error("Invalid API key format. OpenAI keys should start with 'sk-'")
415
 
416
+ # Moved client initialization up
417
  try:
418
  logger.debug("Initializing OpenAI client")
419
  client = OpenAI(api_key=api_key_input)
 
424
  model = model_name
425
  flattened_data = []
426
  total = 0
 
427
  progress_tracker = gr.Progress(track_tqdm=True)
428
+ # ---------------------
429
 
430
  try:
431
+ page_text_for_generation = "" # Initialize variable to hold text for AI
432
 
433
+ # --- Web Mode --- (Fetch text first)
434
+ if generation_mode == "web":
435
+ logger.info("Generation mode: Web")
436
+ if not url_input or not url_input.strip():
437
+ logger.warning("No URL provided for web generation mode.")
438
+ raise gr.Error("URL is required for 'From Web' mode.")
439
 
440
+ gr.Info(f"🕸️ Fetching content from {url_input}...")
 
 
 
441
  try:
442
+ page_text_for_generation = fetch_webpage_text(url_input)
443
+ gr.Info(
444
+ f"✅ Successfully fetched text (approx. {len(page_text_for_generation)} chars). Starting AI generation..."
445
  )
446
+ except (ConnectionError, ValueError, RuntimeError) as e:
447
+ logger.error(f"Failed to fetch or process URL {url_input}: {e}")
448
+ raise gr.Error(
449
+ f"Failed to get content from URL: {e}"
450
+ ) # Display fetch error to user
451
+ except Exception as e: # Catch any other unexpected errors during fetch
452
  logger.error(
453
+ f"Unexpected error fetching URL {url_input}: {e}", exc_info=True
454
  )
455
+ raise gr.Error("An unexpected error occurred fetching the URL.")
456
+
457
+ # --- Text Mode --- (Use provided text)
458
+ elif generation_mode == "text":
459
+ logger.info("Generation mode: Text Input")
460
+ if not source_text or not source_text.strip():
461
+ logger.warning("No source text provided for text generation mode.")
462
+ raise gr.Error("Source text is required for 'From Text' mode.")
463
+ page_text_for_generation = source_text # Use the input text directly
464
+ gr.Info("🚀 Starting card generation from text...")
465
+
466
+ # --- Generation from Text/Web Content ---
467
+ if generation_mode == "text" or generation_mode == "web":
468
+ # Shared logic for generating cards from fetched/provided text
469
+ text_system_prompt = f"""
470
+ You are an expert educator specializing in extracting key information and creating flashcards from provided text.
471
+ Your goal is to generate clear, concise, and accurate flashcards based *only* on the text given by the user.
472
+ Focus on the most important concepts, definitions, facts, or processes mentioned.
473
+ Generate {cards_per_topic} cards.
474
+ Adhere to the user's learning preferences: {preference_prompt}
475
+ Use the specified JSON output format.
476
+ For explanations and examples:
477
+ - Keep explanations in plain text
478
+ - Format code examples with triple backticks (```)
479
+ - Separate conceptual examples from code examples
480
+ - Use clear, concise language
481
+ """
482
+ json_structure_prompt = """
483
+ Return your response as a JSON object with the following structure:
484
+ {
485
+ "cards": [
486
+ {
487
+ "card_type": "basic or cloze",
488
+ "front": {
489
+ "question": "question text (potentially with {{c1::cloze syntax}})"
490
+ },
491
+ "back": {
492
+ "answer": "concise answer or full text for cloze",
493
+ "explanation": "detailed explanation",
494
+ "example": "practical example"
495
+ },
496
+ "metadata": {
497
+ "prerequisites": ["list", "of", "prerequisites"],
498
+ "learning_outcomes": ["list", "of", "outcomes"],
499
+ "misconceptions": ["list", "of", "misconceptions"],
500
+ "difficulty": "beginner/intermediate/advanced"
501
+ }
502
+ }
503
+ // ... more cards
504
+ ]
505
+ }
506
+ """
507
+ cloze_instruction = ""
508
+ if generate_cloze:
509
+ cloze_instruction = """
510
+ Where appropriate, generate Cloze deletion cards.
511
+ - For Cloze cards, set "card_type" to "cloze".
512
+ - Format the question field using Anki's cloze syntax (e.g., "The capital of France is {{{{c1::Paris}}}}.").
513
+ - The "answer" field should contain the full, non-cloze text or specific context for the cloze.
514
+ - For standard question/answer cards, set "card_type" to "basic".
515
+ """
516
+ text_user_prompt = f"""
517
+ Generate {cards_per_topic} flashcards based *only* on the following text:
518
+ --- TEXT START ---
519
+ {page_text_for_generation}
520
+ --- TEXT END ---
521
+ {cloze_instruction}
522
+ {json_structure_prompt}
523
+ """
524
+ response = structured_output_completion(
525
+ client,
526
+ model,
527
+ {"type": "json_object"},
528
+ text_system_prompt,
529
+ text_user_prompt,
530
+ )
531
+ if not response or "cards" not in response:
532
+ logger.error("Invalid cards response format from text generation.")
533
+ raise gr.Error("Failed to generate cards from text. Please try again.")
534
+
535
+ # Process the cards (similar to generate_cards_batch processing)
536
+ cards_data = response["cards"]
537
+ topic_name = "From Web" if generation_mode == "web" else "From Text"
538
+ for card_index, card_data in enumerate(cards_data, start=1):
539
+ if "front" not in card_data or "back" not in card_data:
540
+ logger.warning(
541
+ f"Skipping card due to missing front/back data: {card_data}"
542
+ )
543
+ continue
544
+ if "question" not in card_data["front"]:
545
+ logger.warning(
546
+ f"Skipping card due to missing question: {card_data}"
547
+ )
548
+ continue
549
+ if (
550
+ "answer" not in card_data["back"]
551
+ or "explanation" not in card_data["back"]
552
+ or "example" not in card_data["back"]
553
+ ):
554
+ logger.warning(
555
+ f"Skipping card due to missing answer/explanation/example: {card_data}"
556
+ )
557
+ continue
558
+
559
+ card = Card(
560
+ card_type=card_data.get("card_type", "basic"),
561
+ front=CardFront(**card_data["front"]),
562
+ back=CardBack(**card_data["back"]),
563
+ metadata=card_data.get("metadata", {}),
564
+ )
565
+ metadata = card.metadata or {}
566
+ row = [
567
+ f"1.{card_index}",
568
+ topic_name, # Use dynamic topic name
569
+ card.card_type,
570
+ card.front.question,
571
+ card.back.answer,
572
+ card.back.explanation,
573
+ card.back.example,
574
+ metadata.get("prerequisites", []),
575
+ metadata.get("learning_outcomes", []),
576
+ metadata.get("misconceptions", []),
577
+ metadata.get("difficulty", "beginner"),
578
+ ]
579
+ flattened_data.append(row)
580
+ total += 1
581
+ gr.Info(f"✅ Generated {total} cards from the provided content.")
582
+
583
+ # --- Subject Mode --- (Existing logic)
584
+ elif generation_mode == "subject":
585
+ logger.info(f"Generating cards for subject: {subject}")
586
+ if not subject or not subject.strip():
587
+ logger.warning("No subject provided for subject generation mode.")
588
+ raise gr.Error("Subject is required for 'Single Subject' mode.")
589
+
590
+ gr.Info("🚀 Starting card generation for subject...")
591
+
592
+ # Note: system_prompt uses subject variable
593
+ system_prompt = f"""
594
+ You are an expert educator in {subject}, creating an optimized learning sequence.
595
+ Your goal is to:
596
+ 1. Break down the subject into logical concepts
597
+ 2. Identify prerequisites and learning outcomes
598
+ 3. Generate cards that build upon each other
599
+ 4. Address and correct common misconceptions
600
+ 5. Include verification steps to minimize hallucinations
601
+ 6. Provide a recommended study order
602
+
603
+ For explanations and examples:
604
+ - Keep explanations in plain text
605
+ - Format code examples with triple backticks (```)
606
+ - Separate conceptual examples from code examples
607
+ - Use clear, concise language
608
+
609
+ Keep in mind the user's preferences: {preference_prompt}
610
+ """
611
+
612
+ topic_prompt = f"""
613
+ Generate the top {topic_number} important subjects to know about {subject} in
614
+ order of ascending difficulty. Return your response as a JSON object with the following structure:
615
+ {{
616
+ "topics": [
617
+ {{
618
+ "name": "topic name",
619
+ "difficulty": "beginner/intermediate/advanced",
620
+ "description": "brief description"
621
+ }}
622
+ ]
623
+ }}
624
+ """
625
 
626
+ logger.info("Generating topics...")
627
+ topics_response = structured_output_completion(
628
+ client, model, {"type": "json_object"}, system_prompt, topic_prompt
629
+ )
630
+
631
+ if not topics_response or "topics" not in topics_response:
632
+ logger.error("Invalid topics response format")
633
+ raise gr.Error("Failed to generate topics. Please try again.")
634
+
635
+ topics = topics_response["topics"]
636
+ gr.Info(f"✨ Generated {len(topics)} topics successfully!")
637
+
638
+ # Generate cards for each topic
639
+ for i, topic in enumerate(
640
+ progress_tracker.tqdm(topics, desc="Generating cards")
641
+ ):
642
+ try:
643
+ # Re-use the system_prompt defined above for topic generation
644
+ cards = generate_cards_batch(
645
+ client,
646
+ model,
647
+ topic["name"],
648
+ cards_per_topic,
649
+ system_prompt, # Use the same system prompt
650
+ generate_cloze=generate_cloze,
651
+ batch_size=3,
652
+ )
653
+
654
+ if cards:
655
+ for card_index, card in enumerate(cards, start=1):
656
+ index = f"{i + 1}.{card_index}"
657
+ metadata = card.metadata or {}
658
+
659
+ row = [
660
+ index,
661
+ topic["name"],
662
+ card.card_type,
663
+ card.front.question,
664
+ card.back.answer,
665
+ card.back.explanation,
666
+ card.back.example,
667
+ metadata.get("prerequisites", []),
668
+ metadata.get("learning_outcomes", []),
669
+ metadata.get("misconceptions", []),
670
+ metadata.get("difficulty", "beginner"),
671
+ ]
672
+ flattened_data.append(row)
673
+ total += 1
674
+
675
+ gr.Info(f"✅ Generated {len(cards)} cards for {topic['name']}")
676
+
677
+ except Exception as e:
678
+ logger.error(
679
+ f"Failed to generate cards for topic {topic['name']}: {str(e)}"
680
+ )
681
+ gr.Warning(f"Failed to generate cards for '{topic['name']}'")
682
+ continue
683
+ else:
684
+ # Handle other modes or invalid mode if necessary
685
+ logger.error(f"Invalid generation mode: {generation_mode}")
686
+ raise gr.Error(f"Unsupported generation mode: {generation_mode}")
687
+
688
+ # --- Common Completion Logic ---
689
  final_html = f"""
690
  <div style="text-align: center">
691
  <p>✅ Generation complete!</p>
 
693
  </div>
694
  """
695
 
 
696
  df = pd.DataFrame(
697
  flattened_data,
698
  columns=[
 
709
  "Difficulty",
710
  ],
711
  )
 
712
  return df, final_html, total
713
 
714
  except Exception as e:
715
  logger.error(f"Card generation failed: {str(e)}", exc_info=True)
716
+ # Check if e is already a gr.Error
717
+ if isinstance(e, gr.Error):
718
+ raise e
719
+ else:
720
+ raise gr.Error(f"Card generation failed: {str(e)}")
721
 
722
 
723
  # Update the BASIC_MODEL definition with enhanced CSS/HTML
 
1299
  css="""
1300
  #footer {display:none !important}
1301
  .tall-dataframe {min-height: 500px !important}
1302
+ .contain {max-width: 100% !important; margin: auto;}
1303
  .output-cards {border-radius: 8px; box-shadow: 0 4px 6px -1px rgba(0,0,0,0.1);}
1304
  .hint-text {font-size: 0.9em; color: #666; margin-top: 4px;}
1305
  .export-group > .gradio-group { margin-bottom: 0 !important; padding-bottom: 5px !important; }
 
1312
  #### Generate comprehensive Anki flashcards using AI.
1313
  """)
1314
 
1315
+ # Configuration Section in an Accordion
1316
+ with gr.Accordion("Configuration Settings", open=True):
1317
+ # Create a row to hold two columns for settings
1318
+ with gr.Row():
1319
+ # Column 1: Basic settings
1320
+ with gr.Column(scale=1):
1321
+ # Add mode selection
1322
+ generation_mode = gr.Radio(
1323
+ choices=[
1324
+ ("Single Subject", "subject"),
1325
+ ("Learning Path", "path"),
1326
+ ("From Text", "text"),
1327
+ ("From Web", "web"),
1328
+ ],
1329
+ value="subject",
1330
+ label="Generation Mode",
1331
+ info="Choose how you want to generate content",
1332
  )
1333
 
1334
+ # Create containers for different modes
1335
+ with gr.Group() as subject_mode:
1336
+ subject = gr.Textbox(
1337
+ label="Subject",
1338
+ placeholder="Enter the subject, e.g., 'Basic SQL Concepts'",
1339
+ info="The topic you want to generate flashcards for",
1340
+ )
1341
+
1342
+ with gr.Group(visible=False) as path_mode:
1343
+ description = gr.Textbox(
1344
+ label="Learning Goal",
1345
+ placeholder="Paste a job description or describe what you want to learn...",
1346
+ info="We'll break this down into learnable subjects",
1347
+ lines=5,
1348
+ )
1349
+ analyze_button = gr.Button(
1350
+ "Analyze & Break Down", variant="secondary"
1351
+ )
1352
+
1353
+ # Add group for text input mode
1354
+ with gr.Group(visible=False) as text_mode:
1355
+ source_text = gr.Textbox(
1356
+ label="Source Text",
1357
+ placeholder="Paste the text you want to generate cards from here...",
1358
+ info="The AI will extract key information from this text to create cards.",
1359
+ lines=15,
1360
+ )
1361
+
1362
+ # Add group for web input mode
1363
+ with gr.Group(visible=False) as web_mode:
1364
+ url_input = gr.Textbox(
1365
+ label="Web Page URL",
1366
+ placeholder="Paste the URL of the page you want to generate cards from...",
1367
+ info="The AI will attempt to extract content from this URL.",
1368
+ )
1369
+
1370
+ # Common settings moved inside the accordion, in column 1
1371
+ api_key_input = gr.Textbox(
1372
+ label="OpenAI API Key",
1373
+ type="password",
1374
+ placeholder="Enter your OpenAI API key",
1375
+ value=os.getenv("OPENAI_API_KEY", ""),
1376
+ info="Your OpenAI API key starting with 'sk-'",
1377
  )
 
 
1378
 
1379
+ # Column 2: Advanced settings accordion
1380
+ with gr.Column(scale=1):
1381
+ # Advanced Settings Accordion moved inside the main accordion, in column 2
1382
+ with gr.Accordion("Advanced Settings", open=False):
1383
+ model_choice = gr.Dropdown(
1384
+ choices=["gpt-4.1", "gpt-4.1-nano"], # Corrected choices
1385
+ value="gpt-4.1-nano", # Changed default to nano as it's faster/cheaper
1386
+ label="Model Selection",
1387
+ info="Select the AI model to use for generation",
1388
+ )
1389
+
1390
+ # Add tooltip/description for models
1391
+ model_info = gr.Markdown(
1392
+ """
1393
+ **Model Information:**
1394
+ - **gpt-4.1**: Highest quality, slower generation
1395
+ - **gpt-4.1-nano**: Optimized for speed and lower cost
1396
+ """ # Corrected descriptions
1397
+ )
1398
+
1399
+ topic_number = gr.Slider(
1400
+ label="Number of Topics",
1401
+ minimum=2,
1402
+ maximum=20,
1403
+ step=1,
1404
+ value=2,
1405
+ info="How many distinct topics to cover within the subject",
1406
+ )
1407
+ cards_per_topic = gr.Slider(
1408
+ label="Cards per Topic",
1409
+ minimum=2,
1410
+ maximum=30,
1411
+ step=1,
1412
+ value=3,
1413
+ info="How many flashcards to generate for each topic",
1414
+ )
1415
+ preference_prompt = gr.Textbox(
1416
+ label="Learning Preferences",
1417
+ placeholder="e.g., 'Assume I'm a beginner' or 'Focus on practical examples'",
1418
+ info="Customize how the content is presented",
1419
+ lines=3,
1420
+ )
1421
+ generate_cloze_checkbox = gr.Checkbox(
1422
+ label="Generate Cloze Cards (Experimental)",
1423
+ value=False,
1424
+ info="Allow the AI to generate fill-in-the-blank style cards where appropriate.",
1425
+ )
1426
+ # End of Advanced Settings Accordion
1427
+ # End of Row containing settings columns
1428
+ # End of Configuration Settings Accordion
1429
+
1430
+ # Generation Button moved outside the Accordion
1431
+ generate_button = gr.Button("Generate Cards", variant="primary")
1432
+
1433
+ # Output Area remains below the button
1434
+ with gr.Group(
1435
+ visible=False
1436
+ ) as path_results: # Initial visibility controlled by mode
1437
+ gr.Markdown("### Learning Path Analysis")
1438
+ subjects_list = gr.Dataframe(
1439
+ headers=["Subject", "Prerequisites", "Time Estimate"],
1440
+ label="Recommended Subjects",
1441
+ interactive=False,
1442
+ )
1443
+ learning_order = gr.Markdown("### Recommended Learning Order")
1444
+ projects = gr.Markdown("### Suggested Projects")
1445
 
1446
+ use_subjects = gr.Button(
1447
+ "Use These Subjects ℹ️",
1448
+ variant="primary",
1449
+ )
1450
+ gr.Markdown(
1451
+ "*Click to copy subjects to main input for card generation*",
1452
+ elem_classes="hint-text",
1453
+ )
1454
 
1455
+ with gr.Group() as cards_output: # Initial visibility controlled by mode
1456
+ gr.Markdown("### Generated Cards")
1457
+
1458
+ # Output Format Documentation (can stay here)
1459
+ with gr.Accordion("Output Format", open=False):
1460
+ gr.Markdown("""
1461
+ The generated cards include:
1462
+
1463
+ * **Index**: Unique identifier for each card
1464
+ * **Topic**: The specific subtopic within your subject
1465
+ * **Card_Type**: Type of card (basic or cloze)
1466
+ * **Question**: Clear, focused question for the flashcard front
1467
+ * **Answer**: Concise core answer
1468
+ * **Explanation**: Detailed conceptual explanation
1469
+ * **Example**: Practical implementation or code example
1470
+ * **Prerequisites**: Required knowledge for this concept
1471
+ * **Learning Outcomes**: What you should understand after mastering this card
1472
+ * **Common Misconceptions**: Incorrect assumptions debunked with explanations
1473
+ * **Difficulty**: Concept complexity level for optimal study sequencing
1474
+
1475
+ Export options:
1476
+ - **CSV**: Raw data for custom processing
1477
+ - **Anki Deck**: Ready-to-use deck with formatted cards and metadata
1478
+ """)
1479
+
1480
+ with gr.Accordion("Example Card Format", open=False):
1481
+ gr.Code(
1482
+ label="Example Card",
1483
+ value="""
1484
  {
1485
  "front": {
1486
  "question": "What is a PRIMARY KEY constraint in SQL?"
 
1500
  "difficulty": "beginner"
1501
  }
1502
  }
1503
+ """,
1504
+ language="json",
1505
  )
1506
 
1507
+ output = gr.Dataframe(
1508
+ value=example_data,
1509
+ headers=[
1510
+ "Index",
1511
+ "Topic",
1512
+ "Card_Type",
1513
+ "Question",
1514
+ "Answer",
1515
+ "Explanation",
1516
+ "Example",
1517
+ "Prerequisites",
1518
+ "Learning_Outcomes",
1519
+ "Common_Misconceptions",
1520
+ "Difficulty",
1521
+ ],
1522
+ interactive=True,
1523
+ elem_classes="tall-dataframe",
1524
+ wrap=True,
1525
+ column_widths=[
1526
+ 50,
1527
+ 100,
1528
+ 80,
1529
+ 200,
1530
+ 200,
1531
+ 250,
1532
+ 200,
1533
+ 150,
1534
+ 150,
1535
+ 150,
1536
+ 100,
1537
+ ],
1538
+ )
1539
+
1540
+ with gr.Group(elem_classes="export-group"):
1541
+ gr.Markdown("#### Export Generated Cards")
1542
+ with gr.Row():
1543
+ export_csv_button = gr.Button("Export to CSV", variant="secondary")
1544
+ export_anki_button = gr.Button(
1545
+ "Export to Anki Deck (.apkg)", variant="secondary"
1546
+ )
1547
+ with gr.Row(): # Row containing File components is now visible
1548
+ download_csv = gr.File(label="Download CSV", interactive=False)
1549
+ download_anki = gr.File(
1550
+ label="Download Anki Deck",
1551
+ interactive=False,
1552
+ )
1553
 
1554
  # Add near the top of the Blocks
1555
  with gr.Row():
 
1558
  label="Total Cards Generated", value=0, visible=False
1559
  )
1560
 
1561
+ # Adjust JavaScript handler for mode switching
1562
  def update_mode_visibility(mode):
 
1563
  is_subject = mode == "subject"
1564
  is_path = mode == "path"
1565
+ is_text = mode == "text"
1566
+ is_web = mode == "web"
1567
 
1568
+ subject_val = subject.value if is_subject else ""
1569
+ description_val = description.value if is_path else ""
1570
+ text_val = source_text.value if is_text else ""
1571
+ url_val = url_input.value if is_web else ""
 
 
 
1572
 
1573
  return {
1574
  subject_mode: gr.update(visible=is_subject),
1575
  path_mode: gr.update(visible=is_path),
1576
+ text_mode: gr.update(visible=is_text),
1577
+ web_mode: gr.update(visible=is_web),
1578
  path_results: gr.update(visible=is_path),
1579
+ cards_output: gr.update(visible=is_subject or is_text or is_web),
1580
+ subject: gr.update(value=subject_val),
1581
+ description: gr.update(value=description_val),
1582
+ source_text: gr.update(value=text_val),
1583
+ url_input: gr.update(value=url_val),
1584
+ output: gr.update(value=None),
1585
+ subjects_list: gr.update(value=None),
1586
+ learning_order: gr.update(value=""),
1587
+ projects: gr.update(value=""),
1588
  progress: gr.update(value="", visible=False),
1589
  total_cards: gr.update(value=0, visible=False),
1590
  }
1591
 
 
1592
  generation_mode.change(
1593
  fn=update_mode_visibility,
1594
  inputs=[generation_mode],
1595
  outputs=[
1596
  subject_mode,
1597
  path_mode,
1598
+ text_mode,
1599
+ web_mode,
1600
  path_results,
1601
  cards_output,
1602
  subject,
1603
  description,
1604
+ source_text,
1605
+ url_input,
1606
  output,
1607
+ subjects_list,
1608
+ learning_order,
1609
+ projects,
1610
  progress,
1611
  total_cards,
1612
  ],
1613
  )
1614
 
 
1615
  analyze_button.click(
1616
  fn=analyze_learning_path,
1617
  inputs=[api_key_input, description, model_choice],
1618
  outputs=[subjects_list, learning_order, projects],
1619
  )
1620
 
 
1621
  def use_selected_subjects(subjects_df):
 
1622
  if subjects_df is None or subjects_df.empty:
1623
  gr.Warning("No subjects available to copy from Learning Path analysis.")
 
1624
  return (
1625
  gr.update(),
1626
  gr.update(),
 
1631
  gr.update(),
1632
  gr.update(),
1633
  gr.update(),
1634
+ gr.update(),
1635
+ gr.update(),
1636
+ gr.update(),
1637
+ gr.update(),
1638
+ gr.update(),
1639
+ gr.update(),
1640
  )
1641
 
1642
  subjects = subjects_df["Subject"].tolist()
1643
  combined_subject = ", ".join(subjects)
1644
+ suggested_topics = min(len(subjects) + 1, 20)
1645
+
1646
+ return {
1647
+ generation_mode: "subject",
1648
+ subject_mode: gr.update(visible=True),
1649
+ path_mode: gr.update(visible=False),
1650
+ text_mode: gr.update(visible=False),
1651
+ web_mode: gr.update(visible=False),
1652
+ path_results: gr.update(visible=False),
1653
+ cards_output: gr.update(visible=True),
1654
+ subject: combined_subject,
1655
+ description: "",
1656
+ source_text: "",
1657
+ url_input: "",
1658
+ topic_number: suggested_topics,
1659
+ preference_prompt: "Focus on connections between these subjects and their practical applications.",
1660
+ output: example_data,
1661
+ subjects_list: subjects_df,
1662
+ learning_order: gr.update(),
1663
+ projects: gr.update(),
1664
+ progress: gr.update(visible=False),
1665
+ total_cards: gr.update(visible=False),
1666
+ }
1667
 
 
1668
  use_subjects.click(
1669
  fn=use_selected_subjects,
1670
+ inputs=[subjects_list],
1671
+ outputs=[
1672
  generation_mode,
1673
+ subject_mode,
1674
+ path_mode,
1675
+ text_mode,
1676
+ web_mode,
1677
+ path_results,
1678
+ cards_output,
1679
+ subject,
1680
+ description,
1681
+ source_text,
1682
+ url_input,
1683
+ topic_number,
1684
+ preference_prompt,
1685
+ output,
1686
+ subjects_list,
1687
+ learning_order,
1688
+ projects,
1689
+ progress,
1690
+ total_cards,
1691
  ],
1692
  )
1693
 
 
1694
  generate_button.click(
1695
  fn=generate_cards,
1696
  inputs=[
1697
  api_key_input,
1698
  subject,
1699
+ generation_mode,
1700
+ source_text,
1701
+ url_input,
1702
  model_choice,
1703
  topic_number,
1704
  cards_per_topic,
requirements.txt CHANGED
@@ -1,6 +1,7 @@
 aiofiles==23.2.1
 annotated-types==0.7.0
 anyio==4.9.0
+beautifulsoup4==4.12.3
 cached-property==2.0.1
 certifi==2025.1.31
 charset-normalizer==3.4.1
@@ -22,6 +23,7 @@ huggingface-hub==0.30.2
 idna==3.10
 jinja2==3.1.6
 jiter==0.9.0
+lxml==5.2.2
 markdown-it-py==3.0.0
 markupsafe==2.1.5
 mdurl==0.1.2
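The two new pins support the URL mode added above: beautifulsoup4 does the HTML parsing in fetch_webpage_text, and lxml is the faster parser it tries first. A minimal sketch of that parser-selection pattern, independent of the app:

```python
from bs4 import BeautifulSoup, FeatureNotFound

html = "<html><body><main><h1>Title</h1><p>Some text.</p></main></body></html>"
try:
    soup = BeautifulSoup(html, "lxml")         # fast C-backed parser from the lxml pin
except FeatureNotFound:
    soup = BeautifulSoup(html, "html.parser")  # stdlib fallback, no extra dependency

print(soup.find("main").get_text(" ", strip=True))  # -> "Title Some text."
```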