GreenGoat committed on
Commit
6a32c68
·
verified ·
1 Parent(s): c0981e5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -11
app.py CHANGED
@@ -45,7 +45,32 @@ except Exception as e:
45
 
46
  # Model setup
47
  sd15_name = 'stablediffusionapi/realistic-vision-v51'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 
49
 
50
  print(f"Using device: {device}")
51
  print("Loading models...")
@@ -354,11 +379,17 @@ def process_relight(input_fg, input_bg, prompt, image_width, image_height, num_s
354
  try:
355
  # Input validation
356
  if input_fg is None:
357
- raise ValueError("Please upload a foreground image")
 
 
358
  if input_bg is None and bg_source == "Use Background Image":
359
- raise ValueError("Please upload a background image or choose a lighting direction")
 
 
360
  if not prompt.strip():
361
- raise ValueError("Please enter a prompt")
 
 
362
 
363
  print(f"Processing with device: {device}")
364
  print(f"Input shapes - FG: {input_fg.shape}, BG: {input_bg.shape if input_bg is not None else 'None'}")
@@ -392,25 +423,46 @@ def process_relight(input_fg, input_bg, prompt, image_width, image_height, num_s
392
  print("Reduced num_samples to 1 for CPU processing")
393
 
394
  print("Running background removal...")
395
- input_fg, matting = run_rmbg(input_fg)
 
 
 
 
 
 
396
 
397
  print("Starting main processing...")
398
- results, extra_images = process(input_fg, input_bg, prompt, image_width, image_height, num_samples, seed, steps, a_prompt, n_prompt, cfg, highres_scale, highres_denoise, bg_source)
 
 
 
 
 
 
 
 
399
 
400
  print("Converting results...")
401
- results = [(x * 255.0).clip(0, 255).astype(np.uint8) for x in results]
 
 
 
 
 
 
402
 
403
  print("Processing completed successfully!")
404
  return results + extra_images
405
 
 
 
 
406
  except Exception as e:
407
- print(f"Error in process_relight: {str(e)}")
 
408
  import traceback
409
  traceback.print_exc()
410
- # Return error image
411
- error_img = np.zeros((512, 512, 3), dtype=np.uint8)
412
- error_img[:, :] = [255, 0, 0] # Red error image
413
- return [error_img]
414
 
415
  # Quick prompts for easy testing
416
  quick_prompts = [
 
# Model setup
sd15_name = 'stablediffusionapi/realistic-vision-v51'

# Better CUDA detection and debugging
print("=== GPU Detection Debug ===")
print(f"PyTorch version: {torch.__version__}")
print(f"CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"CUDA version: {torch.version.cuda}")
    print(f"GPU count: {torch.cuda.device_count()}")
    print(f"Current GPU: {torch.cuda.current_device()}")
    print(f"GPU name: {torch.cuda.get_device_name()}")
else:
    print("CUDA not available - checking reasons...")
    # Probe for GPU hardware with nvidia-smi to distinguish "no GPU present"
    # from "PyTorch installed without CUDA support".
    try:
        import subprocess
        result = subprocess.run(['nvidia-smi'], capture_output=True, text=True)
        if result.returncode == 0:
            print("nvidia-smi works, GPU hardware detected")
            print("Issue might be with PyTorch CUDA installation")
        else:
            print("nvidia-smi failed, no GPU hardware detected")
    except OSError:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. OSError covers FileNotFoundError (command missing)
        # and other launch failures, which is all this probe can expect.
        print("nvidia-smi command not found")

# Fall back to CPU when CUDA is unavailable; all model loading below
# uses this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Selected device: {device}")
print("=== End GPU Debug ===")

print(f"Using device: {device}")
print("Loading models...")
 
379
  try:
380
  # Input validation
381
  if input_fg is None:
382
+ error_msg = "Please upload a foreground image"
383
+ print(error_msg)
384
+ raise gr.Error(error_msg)
385
  if input_bg is None and bg_source == "Use Background Image":
386
+ error_msg = "Please upload a background image or choose a lighting direction"
387
+ print(error_msg)
388
+ raise gr.Error(error_msg)
389
  if not prompt.strip():
390
+ error_msg = "Please enter a prompt"
391
+ print(error_msg)
392
+ raise gr.Error(error_msg)
393
 
394
  print(f"Processing with device: {device}")
395
  print(f"Input shapes - FG: {input_fg.shape}, BG: {input_bg.shape if input_bg is not None else 'None'}")
 
423
  print("Reduced num_samples to 1 for CPU processing")
424
 
425
  print("Running background removal...")
426
+ try:
427
+ input_fg, matting = run_rmbg(input_fg)
428
+ print("Background removal completed successfully")
429
+ except Exception as e:
430
+ print(f"Background removal failed: {e}")
431
+ # Continue without background removal
432
+ matting = np.ones((input_fg.shape[0], input_fg.shape[1]), dtype=np.float32)
433
 
434
  print("Starting main processing...")
435
+ try:
436
+ results, extra_images = process(input_fg, input_bg, prompt, image_width, image_height, num_samples, seed, steps, a_prompt, n_prompt, cfg, highres_scale, highres_denoise, bg_source)
437
+ print("Main processing completed successfully")
438
+ except Exception as e:
439
+ error_msg = f"❌ Processing failed: {str(e)}"
440
+ print(error_msg)
441
+ import traceback
442
+ traceback.print_exc()
443
+ raise gr.Error(error_msg)
444
 
445
  print("Converting results...")
446
+ try:
447
+ results = [(x * 255.0).clip(0, 255).astype(np.uint8) for x in results]
448
+ print("Results converted successfully")
449
+ except Exception as e:
450
+ error_msg = f"❌ Result conversion failed: {str(e)}"
451
+ print(error_msg)
452
+ raise gr.Error(error_msg)
453
 
454
  print("Processing completed successfully!")
455
  return results + extra_images
456
 
457
+ except gr.Error:
458
+ # Re-raise Gradio errors to show them in the UI
459
+ raise
460
  except Exception as e:
461
+ error_msg = f" Unexpected error: {str(e)}"
462
+ print(error_msg)
463
  import traceback
464
  traceback.print_exc()
465
+ raise gr.Error(error_msg)
 
 
 
466
 
467
  # Quick prompts for easy testing
468
  quick_prompts = [