seawolf2357 committed
Commit a095d8e · verified · 1 Parent(s): 1d2e120

Update app.py

Files changed (1)
  1. app.py +232 -31
app.py CHANGED
@@ -9,26 +9,35 @@ from diffusers import EulerAncestralDiscreteScheduler
 import torch
 from compel import Compel, ReturnedEmbeddingsType
 import gc
+import os
 
 # Check if CUDA is available
 print(f"CUDA available: {torch.cuda.is_available()}")
 if torch.cuda.is_available():
     print(f"CUDA device: {torch.cuda.get_device_name(0)}")
 
-# Initialize the pipeline with CPU first to avoid CUDA errors during model loading
+# Initialize the pipeline ONCE at startup
+print("Loading pipeline...")
 pipe = StableDiffusionXLPipeline.from_pretrained(
     "dhead/wai-nsfw-illustrious-sdxl-v140-sdxl",
     torch_dtype=torch.float16,
     variant="fp16",
     use_safetensors=True,
-    device_map=None,  # Don't auto-assign devices
-    low_cpu_mem_usage=True  # Optimize CPU memory usage
+    low_cpu_mem_usage=True
 )
 
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-# Initialize Compel for long prompt processing (will be moved to GPU when needed)
-compel = None  # Will be initialized in the GPU function
+# Enable memory efficient attention if available
+if hasattr(pipe, "enable_model_cpu_offload"):
+    pipe.enable_model_cpu_offload()
+elif hasattr(pipe, "enable_attention_slicing"):
+    pipe.enable_attention_slicing()
+
+print("Pipeline loaded successfully!")
+
+# Initialize Compel for long prompt processing
+compel = None
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1216
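Note: the hunk above switches to a single module-level load with fp16 weights and optional CPU offload. Below is a minimal standalone sketch of that loading pattern; the model ID and options come from the diff, while the final generation call is illustrative only and not part of this commit.

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    "dhead/wai-nsfw-illustrious-sdxl-v140-sdxl",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
    low_cpu_mem_usage=True,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Model CPU offload (requires accelerate) keeps only the active sub-module on the GPU;
# attention slicing is the lighter fallback when offload is unavailable.
if hasattr(pipe, "enable_model_cpu_offload"):
    pipe.enable_model_cpu_offload()
elif hasattr(pipe, "enable_attention_slicing"):
    pipe.enable_attention_slicing()

# Illustrative single generation to verify the setup (not taken from app.py).
image = pipe("a lighthouse at dusk", num_inference_steps=28, guidance_scale=7.0).images[0]
image.save("smoke_test.png")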
@@ -40,45 +49,50 @@ def initialize_compel():
     """Initialize Compel with the current pipeline's components"""
     global compel
     if compel is None:
-        compel = Compel(
-            tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
-            text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
-            returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
-            requires_pooled=[False, True],
-            truncate_long_prompts=False
-        )
+        try:
+            compel = Compel(
+                tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
+                text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
+                returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+                requires_pooled=[False, True],
+                truncate_long_prompts=False
+            )
+        except Exception as e:
+            print(f"Failed to initialize Compel: {e}")
+            compel = None
     return compel
 
 def process_long_prompt(prompt, negative_prompt=""):
     """Simple long prompt processing using Compel"""
     try:
         comp = initialize_compel()
+        if comp is None:
+            return None, None
         conditioning, pooled = comp([prompt, negative_prompt])
         return conditioning, pooled
     except Exception as e:
         print(f"Long prompt processing failed: {e}, falling back to standard processing")
         return None, None
 
-@spaces.GPU(duration=60)  # Add duration parameter for longer generation times
+@spaces.GPU(duration=90)  # Increased duration for stability
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-    # Move pipeline to GPU inside the GPU-decorated function
-    global pipe
-    pipe = pipe.to("cuda")
-
-    # Ensure all components are on GPU with correct dtype
-    pipe.text_encoder = pipe.text_encoder.to(dtype=torch.float16)
-    pipe.text_encoder_2 = pipe.text_encoder_2.to(dtype=torch.float16)
-    pipe.vae = pipe.vae.to(dtype=torch.float16)
-    pipe.unet = pipe.unet.to(dtype=torch.float16)
-
-    use_long_prompt = len(prompt.split()) > 60 or len(prompt) > 300
+    try:
+        # Move pipeline to GPU inside the GPU-decorated function
+        pipe.to("cuda")
 
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
+        # Ensure all components are on GPU with correct dtype
+        pipe.text_encoder = pipe.text_encoder.to(dtype=torch.float16)
+        pipe.text_encoder_2 = pipe.text_encoder_2.to(dtype=torch.float16)
+        pipe.vae = pipe.vae.to(dtype=torch.float16)
+        pipe.unet = pipe.unet.to(dtype=torch.float16)
+
+        use_long_prompt = len(prompt.split()) > 60 or len(prompt) > 300
+
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
 
-    generator = torch.Generator(device="cuda").manual_seed(seed)
-
-    try:
+        generator = torch.Generator(device="cuda").manual_seed(seed)
+
         # Try long prompt processing first if prompt is long
         if use_long_prompt:
             print("Using long prompt processing...")
@@ -96,6 +110,11 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
                 height=height,
                 generator=generator
             ).images[0]
+
+            # Clear GPU cache
+            torch.cuda.empty_cache()
+            gc.collect()
+
             return output_image
 
         # Fall back to standard processing
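Note: the long-prompt branch is cut off by the hunk boundary above, so only the tail of the pipe(...) call is visible. For reference, Compel's documented SDXL pattern feeds the returned embeddings to the pipeline roughly as shown below; this is a sketch of typical Compel usage, not the literal code from app.py.

# conditioning holds the prompt and negative-prompt embeddings stacked in one batch,
# pooled holds the corresponding pooled embeddings (requires_pooled=[False, True]).
conditioning, pooled = process_long_prompt(prompt, negative_prompt)
output_image = pipe(
    prompt_embeds=conditioning[0:1],
    pooled_prompt_embeds=pooled[0:1],
    negative_prompt_embeds=conditioning[1:2],
    negative_pooled_prompt_embeds=pooled[1:2],
    guidance_scale=guidance_scale,
    num_inference_steps=num_inference_steps,
    width=width,
    height=height,
    generator=generator,
).images[0]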
@@ -109,13 +128,195 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
             generator=generator
         ).images[0]
 
+        # Clear GPU cache
+        torch.cuda.empty_cache()
+        gc.collect()
+
         return output_image
+
     except RuntimeError as e:
-        print(f"Error during generation: {e}")
+        print(f"Runtime error during generation: {e}")
+        torch.cuda.empty_cache()
+        gc.collect()
         # Return a blank image with error message
         error_img = Image.new('RGB', (width, height), color=(50, 50, 50))
         return error_img
     except Exception as e:
         print(f"Unexpected error: {e}")
+        torch.cuda.empty_cache()
+        gc.collect()
         error_img = Image.new('RGB', (width, height), color=(100, 0, 0))
-        return error_img
+        return error_img
+
+css = """
+/* Main container styling */
+#col-container {
+    margin: 0 auto;
+    max-width: 1024px;
+}
+/* Gradient background for the entire app */
+.gradio-container {
+    background: linear-gradient(135deg, #667eea 0%, #764ba2 25%, #f093fb 50%, #f5576c 75%, #ffc947 100%);
+    min-height: 100vh;
+}
+/* Main block styling with semi-transparent background */
+.contain {
+    background: rgba(255, 255, 255, 0.95);
+    border-radius: 20px;
+    padding: 20px;
+    box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.37);
+    backdrop-filter: blur(4px);
+    border: 1px solid rgba(255, 255, 255, 0.18);
+}
+/* Input field styling */
+.gr-text-input {
+    background: rgba(255, 255, 255, 0.9) !important;
+    border: 2px solid rgba(102, 126, 234, 0.3) !important;
+    border-radius: 10px !important;
+}
+/* Button styling */
+.gr-button {
+    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
+    border: none !important;
+    color: white !important;
+    font-weight: bold !important;
+    transition: all 0.3s ease !important;
+}
+.gr-button:hover {
+    transform: translateY(-2px);
+    box-shadow: 0 5px 15px rgba(102, 126, 234, 0.4);
+}
+/* Accordion styling */
+.gr-accordion {
+    background: rgba(255, 255, 255, 0.8) !important;
+    border-radius: 10px !important;
+    margin-top: 10px !important;
+}
+/* Result image container */
+.gr-image {
+    border-radius: 15px !important;
+    box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1) !important;
+}
+/* Slider styling */
+.gr-slider {
+    background: rgba(255, 255, 255, 0.8) !important;
+}
+/* Additional styling for headers */
+h1, h2, h3 {
+    color: #333 !important;
+    text-align: center;
+}
+/* Markdown text styling */
+.gr-markdown {
+    text-align: center;
+    margin-bottom: 20px;
+}
+"""
+
+print("Building Gradio interface...")
+
+# Build the Gradio interface
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown(
+            """
+            # 🎨 Stable Diffusion XL Image Generator
+            ### Create stunning AI-generated images with advanced controls
+            """
+        )
+
+        # Badge section
+        gr.HTML(
+            """
+            <div style="display: flex; justify-content: center; align-items: center; gap: 20px; margin: 20px 0;">
+                <a href="https://huggingface.co/spaces/Heartsync/Wan-2.2-ADULT" target="_blank">
+                    <img src="https://img.shields.io/static/v1?label=T2I%20%26%20TI2V&message=Wan-2.2-ADULT&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge">
+                </a>
+                <a href="https://huggingface.co/spaces/Heartsync/PornHUB" target="_blank">
+                    <img src="https://img.shields.io/static/v1?label=T2I%20&message=PornHUB&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge">
+                </a>
+                <a href="https://huggingface.co/spaces/Heartsync/Hentai-Adult" target="_blank">
+                    <img src="https://img.shields.io/static/v1?label=T2I%20&message=Hentai-Adult&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge">
+                </a>
+            </div>
+            """
+        )
+
+        with gr.Row():
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt (long prompts are automatically supported)",
+                container=False,
+                value=DEFAULT_PROMPT
+            )
+
+            run_button = gr.Button("Run", scale=0)
+
+        result = gr.Image(format="png", label="Result", show_label=False)
+
+        with gr.Accordion("Advanced Settings", open=False):
+            negative_prompt = gr.Text(
+                label="Negative prompt",
+                max_lines=1,
+                placeholder="Enter a negative prompt",
+                value="monochrome, (low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn,"
+            )
+
+            seed = gr.Slider(
+                label="Seed",
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                value=0,
+            )
+
+            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+            with gr.Row():
+                width = gr.Slider(
+                    label="Width",
+                    minimum=256,
+                    maximum=MAX_IMAGE_SIZE,
+                    step=32,
+                    value=1024,
+                )
+
+                height = gr.Slider(
+                    label="Height",
+                    minimum=256,
+                    maximum=MAX_IMAGE_SIZE,
+                    step=32,
+                    value=MAX_IMAGE_SIZE,
+                )
+
+            with gr.Row():
+                guidance_scale = gr.Slider(
+                    label="Guidance scale",
+                    minimum=0.0,
+                    maximum=20.0,
+                    step=0.1,
+                    value=7,
+                )
+
+                num_inference_steps = gr.Slider(
+                    label="Number of inference steps",
+                    minimum=1,
+                    maximum=28,
+                    step=1,
+                    value=28,
+                )
+
+    # Connect the run button to the inference function
+    run_button.click(
+        fn=infer,
+        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs=[result]
+    )
+
+print("Starting Gradio app...")
+
+# Launch the app - CRITICAL: This must be at the module level for Spaces
+demo.queue(max_size=20)
+demo.launch()
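Once the Space is running, the endpoint can also be exercised from a script with gradio_client. This is a sketch only: the Space id below is a placeholder and the endpoint name "/infer" is an assumption (Gradio usually derives it from the function name); the positional arguments follow the inputs list wired to run_button.click above.

from gradio_client import Client

client = Client("owner/space-name")  # placeholder Space id, not the real one
result = client.predict(
    "a scenic mountain lake, highly detailed",  # prompt (illustrative)
    "low quality, worst quality",               # negative_prompt
    0,        # seed
    True,     # randomize_seed
    1024,     # width
    1216,     # height
    7.0,      # guidance_scale
    28,       # num_inference_steps
    api_name="/infer",  # assumed endpoint name
)
print(result)  # local path to the generated image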