Update app.py
app.py CHANGED
@@ -87,7 +87,7 @@ pipe.to(device)
 
 pipe.enable_vae_slicing()
 pipe.enable_vae_tiling()
-
+
 @spaces.GPU(duration=120)
 def generate_image(
     upload_images,
@@ -165,22 +165,23 @@ def generate_image(
     if start_merge_step > 30:
         start_merge_step = 30
     print(start_merge_step)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    with torch.no_grad(), torch.inference_mode():
+        images = pipe(
+            prompt=prompt,
+            width=output_w,
+            height=output_h,
+            input_id_images=input_id_images,
+            negative_prompt=negative_prompt,
+            num_images_per_prompt=num_outputs,
+            num_inference_steps=num_steps,
+            start_merge_step=start_merge_step,
+            generator=generator,
+            guidance_scale=guidance_scale,
+            id_embeds=id_embeds,
+            image=sketch_image,
+            adapter_conditioning_scale=adapter_conditioning_scale,
+            adapter_conditioning_factor=adapter_conditioning_factor,
+        ).images
     return images, gr.update(visible=True)
 
 def swap_to_gallery(images):
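The functional change in this commit is that the pipeline call inside generate_image is now wrapped in torch.no_grad() and torch.inference_mode(), which turns off autograd bookkeeping during generation and reduces GPU memory use. Below is a minimal, self-contained sketch of the same pattern, not the Space's exact code: the checkpoint id and prompt are placeholders, and a plain SDXL pipeline stands in for the app's PhotoMaker-style pipeline, which also takes the input_id_images, start_merge_step, id_embeds, and adapter arguments visible in the diff.

# Minimal sketch of the pattern added above; placeholder model id and prompt.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder checkpoint
    torch_dtype=torch.float16,
).to("cuda")

# The same VAE memory savers that app.py enables before generation.
pipe.enable_vae_slicing()
pipe.enable_vae_tiling()

# Wrapping the call in no_grad()/inference_mode() disables gradient
# tracking, so the denoising loop allocates less memory.
with torch.no_grad(), torch.inference_mode():
    images = pipe(
        prompt="a photo of an astronaut riding a horse",  # placeholder prompt
        num_inference_steps=30,
        guidance_scale=7.5,
    ).images

Note that torch.inference_mode() already implies no-grad behavior; keeping both context managers, as the diff does, is harmless but redundant.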