seawolf2357 committed
Commit 1d2e120 (verified)
Parent: cea97f2

Update app.py

Files changed (1): app.py (+46, -209)
app.py CHANGED
@@ -8,37 +8,27 @@ from diffusers import StableDiffusionXLPipeline
 from diffusers import EulerAncestralDiscreteScheduler
 import torch
 from compel import Compel, ReturnedEmbeddingsType
-#import os
-#from gradio_client import Client
-#client = Client("dhead/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl", hf_token=os.getenv("HUGGING_FACE_TOKEN"))
+import gc
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+# Check if CUDA is available
+print(f"CUDA available: {torch.cuda.is_available()}")
+if torch.cuda.is_available():
+    print(f"CUDA device: {torch.cuda.get_device_name(0)}")
 
-# Make sure to use torch.float16 consistently throughout the pipeline
+# Initialize the pipeline on CPU first to avoid CUDA errors during model loading
 pipe = StableDiffusionXLPipeline.from_pretrained(
     "dhead/wai-nsfw-illustrious-sdxl-v140-sdxl",
     torch_dtype=torch.float16,
-    variant="fp16",  # Explicitly use fp16 variant
-    use_safetensors=True  # Use safetensors if available
+    variant="fp16",
+    use_safetensors=True,
+    device_map=None,  # Don't auto-assign devices
+    low_cpu_mem_usage=True  # Optimize CPU memory usage
 )
 
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-pipe.to(device)
 
-# Force all components to use the same dtype
-pipe.text_encoder.to(torch.float16)
-pipe.text_encoder_2.to(torch.float16)
-pipe.vae.to(torch.float16)
-pipe.unet.to(torch.float16)
-
-# Added: Initialize Compel for long prompt processing
-compel = Compel(
-    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
-    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
-    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
-    requires_pooled=[False, True],
-    truncate_long_prompts=False
-)
+# Initialize Compel for long prompt processing (will be moved to GPU when needed)
+compel = None  # Will be initialized in the GPU function
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1216
@@ -46,28 +36,50 @@ MAX_IMAGE_SIZE = 1216
 # Default prompt
 DEFAULT_PROMPT = "Detailed illustration, realistic style, portrait of a beautiful Japanese woman, wearing an elegant traditional Japanese uniform, neatly tailored with intricate patterns and subtle textures, serene expression, soft natural lighting, standing gracefully in a traditional Japanese garden with cherry blossom petals gently falling in the background, cinematic quality, ultra-detailed, high-resolution, warm tones"
 
-# Added: Simple long prompt processing function
+def initialize_compel():
+    """Initialize Compel with the current pipeline's components"""
+    global compel
+    if compel is None:
+        compel = Compel(
+            tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
+            text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
+            returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+            requires_pooled=[False, True],
+            truncate_long_prompts=False
+        )
+    return compel
+
 def process_long_prompt(prompt, negative_prompt=""):
     """Simple long prompt processing using Compel"""
     try:
-        conditioning, pooled = compel([prompt, negative_prompt])
+        comp = initialize_compel()
+        conditioning, pooled = comp([prompt, negative_prompt])
         return conditioning, pooled
     except Exception as e:
         print(f"Long prompt processing failed: {e}, falling back to standard processing")
         return None, None
-
-@spaces.GPU
+
+@spaces.GPU(duration=60)  # Add duration parameter for longer generation times
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-    # Changed: Remove the 60-word limit warning and add long prompt check
+    # Move pipeline to GPU inside the GPU-decorated function
+    global pipe
+    pipe = pipe.to("cuda")
+
+    # Ensure all components are on GPU with correct dtype
+    pipe.text_encoder = pipe.text_encoder.to(dtype=torch.float16)
+    pipe.text_encoder_2 = pipe.text_encoder_2.to(dtype=torch.float16)
+    pipe.vae = pipe.vae.to(dtype=torch.float16)
+    pipe.unet = pipe.unet.to(dtype=torch.float16)
+
     use_long_prompt = len(prompt.split()) > 60 or len(prompt) > 300
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    generator = torch.Generator(device=device).manual_seed(seed)
+    generator = torch.Generator(device="cuda").manual_seed(seed)
 
     try:
-        # Added: Try long prompt processing first if prompt is long
+        # Try long prompt processing first if prompt is long
         if use_long_prompt:
             print("Using long prompt processing...")
             conditioning, pooled = process_long_prompt(prompt, negative_prompt)
@@ -101,184 +113,9 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     except RuntimeError as e:
         print(f"Error during generation: {e}")
         # Return a blank image with error message
-        error_img = Image.new('RGB', (width, height), color=(0, 0, 0))
+        error_img = Image.new('RGB', (width, height), color=(50, 50, 50))
         return error_img
-
-
-css = """
-/* Main container styling */
-#col-container {
-    margin: 0 auto;
-    max-width: 1024px;
-}
-
-/* Gradient background for the entire app */
-.gradio-container {
-    background: linear-gradient(135deg, #667eea 0%, #764ba2 25%, #f093fb 50%, #f5576c 75%, #ffc947 100%);
-    min-height: 100vh;
-}
-
-/* Main block styling with semi-transparent background */
-.contain {
-    background: rgba(255, 255, 255, 0.95);
-    border-radius: 20px;
-    padding: 20px;
-    box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.37);
-    backdrop-filter: blur(4px);
-    border: 1px solid rgba(255, 255, 255, 0.18);
-}
-
-/* Input field styling */
-.gr-text-input {
-    background: rgba(255, 255, 255, 0.9) !important;
-    border: 2px solid rgba(102, 126, 234, 0.3) !important;
-    border-radius: 10px !important;
-}
-
-/* Button styling */
-.gr-button {
-    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
-    border: none !important;
-    color: white !important;
-    font-weight: bold !important;
-    transition: all 0.3s ease !important;
-}
-
-.gr-button:hover {
-    transform: translateY(-2px);
-    box-shadow: 0 5px 15px rgba(102, 126, 234, 0.4);
-}
-
-/* Accordion styling */
-.gr-accordion {
-    background: rgba(255, 255, 255, 0.8) !important;
-    border-radius: 10px !important;
-    margin-top: 10px !important;
-}
-
-/* Result image container */
-.gr-image {
-    border-radius: 15px !important;
-    box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1) !important;
-}
-
-/* Slider styling */
-.gr-slider {
-    background: rgba(255, 255, 255, 0.8) !important;
-}
-
-/* Additional styling for headers */
-h1, h2, h3 {
-    color: #333 !important;
-    text-align: center;
-}
-
-/* Markdown text styling */
-.gr-markdown {
-    text-align: center;
-    margin-bottom: 20px;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
-
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(
-            """
-            # 🎨 Stable Diffusion XL Image Generator
-            ### Create stunning AI-generated images with advanced controls
-            """
-        )
-
-        # Badge section
-        gr.HTML(
-            """
-            <div style="display: flex; justify-content: center; align-items: center; gap: 20px; margin: 20px 0;">
-                <a href="https://huggingface.co/spaces/Heartsync/Wan-2.2-ADULT" target="_blank">
-                    <img src="https://img.shields.io/static/v1?label=T2I%20%26%20TI2V&message=Wan-2.2-ADULT&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge">
-                </a>
-                <a href="https://huggingface.co/spaces/Heartsync/PornHUB" target="_blank">
-                    <img src="https://img.shields.io/static/v1?label=T2I%20&message=PornHUB&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge">
-                </a>
-                <a href="https://huggingface.co/spaces/Heartsync/Hentai-Adult" target="_blank">
-                    <img src="https://img.shields.io/static/v1?label=T2I%20&message=Hentai-Adult&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge">
-                </a>
-            </div>
-            """
-        )
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt (long prompts are automatically supported)",
-                container=False,
-                value=DEFAULT_PROMPT  # Set default prompt
-            )
-
-            run_button = gr.Button("Run", scale=0)
-
-        result = gr.Image(format="png", label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                # value="bad quality,worst quality,worst detail,sketch,censor,"
-                value="monochrome, (low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn,"
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=MAX_IMAGE_SIZE,
-                )
-
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=20.0,
-                    step=0.1,
-                    value=7,
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=28,
-                    step=1,
-                    value=28,
-                )
-
-    run_button.click(
-        fn=infer,
-        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs=[result]
-    )
-
-demo.queue().launch()
+    except Exception as e:
+        print(f"Unexpected error: {e}")
+        error_img = Image.new('RGB', (width, height), color=(100, 0, 0))
+        return error_img
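
Note: the last hunk elides the middle of infer() (old lines 74-100 / new lines 86-112), where the Compel embeddings returned by process_long_prompt() are actually consumed. For orientation only, here is a minimal hypothetical sketch of how such SDXL embeddings are typically passed to a diffusers StableDiffusionXLPipeline call; the keyword arguments are standard diffusers parameters, but the Space's real code is not shown in this diff and may differ.

# Hypothetical sketch, NOT the elided code from app.py.
# compel([prompt, negative_prompt]) batches both prompts, so the returned
# tensors already share one sequence length; row 0 is the positive prompt,
# row 1 the negative prompt.
conditioning, pooled = process_long_prompt(prompt, negative_prompt)
if conditioning is not None:
    image = pipe(
        prompt_embeds=conditioning[0:1],            # positive text embeddings
        negative_prompt_embeds=conditioning[1:2],   # negative text embeddings
        pooled_prompt_embeds=pooled[0:1],           # SDXL pooled embeddings
        negative_pooled_prompt_embeds=pooled[1:2],  # SDXL negative pooled embeddings
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
    ).images[0]
else:
    # Fall back to plain string prompts if Compel processing failed
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
    ).images[0]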