Spaces: Running on Zero
Commit · 072d8d2
1 Parent(s): dbf5021
Try to run on ZERO
app.py CHANGED
@@ -1,11 +1,17 @@
-import random
 import cv2
 import numpy as np
 import torch
 import gradio as gr
+import random
+import spaces
 
 from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline
 
+DESCRIPTION='''
+This uses code lifted almost verbatim from
+[Outpainting II - Differential Diffusion](https://huggingface.co/blog/OzzyGT/outpainting-differential-diffusion).
+'''
+
 xlp_kwargs = {
     'custom_pipeline': 'pipeline_stable_diffusion_xl_differential_img2img'
 }
@@ -17,9 +23,12 @@ if torch.cuda.is_available():
 else:
     device = 'cpu'
     device_dtype = torch.float32
+    DESCRIPTION+='''
 
-
+This Space appears to be running on a CPU; it will take hours to get results. You may [duplicate this space](https://huggingface.co/spaces/clinteroni/outpainting-demo?duplicate=true) and pay for an upgraded runtime instead.
+'''
 
+xlp_kwargs['torch_dtype'] = device_dtype
 
 def merge_images(original, new_image, offset, direction):
     if direction in ["left", "right"]:
@@ -173,54 +182,56 @@ def image_resize(image, new_size=1024):
 
     return image
 
+@spaces.GPU
+def outpaint(pil_image, direction='right', times_to_expand=4):
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+
+    pipeline = StableDiffusionXLPipeline.from_pretrained(
+        "stabilityai/stable-diffusion-xl-base-1.0",
+        **xlp_kwargs
+    ).to(device)
+    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
+        pipeline.scheduler.config, use_karras_sigmas=True)
+
+    pipeline.load_ip_adapter(
+        "h94/IP-Adapter",
+        subfolder="sdxl_models",
+        weight_name=[
+            "ip-adapter-plus_sdxl_vit-h.safetensors",
+        ],
+        image_encoder_folder="models/image_encoder",
+    )
+    pipeline.set_ip_adapter_scale(0.1)
+
+    def generate_image(prompt, negative_prompt, image, mask, ip_adapter_image, seed: int = None):
+        if seed is None:
+            seed = random.randint(0, 2**32 - 1)
 
-pipeline = StableDiffusionXLPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-1.0",
-    **xlp_kwargs
-).to(device)
-pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
-    pipeline.scheduler.config, use_karras_sigmas=True)
+        generator = torch.Generator(device="cpu").manual_seed(seed)
 
-pipeline
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    generator = torch.Generator(device="cpu").manual_seed(seed)
-
-    image = pipeline(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        width=1024,
-        height=1024,
-        guidance_scale=4.0,
-        num_inference_steps=25,
-        original_image=image,
-        image=image,
-        strength=1.0,
-        map=mask,
-        generator=generator,
-        ip_adapter_image=[ip_adapter_image],
-        output_type="np",
-    ).images[0]
-
-    image = (image * 255).astype(np.uint8)
-    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        image = pipeline(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            width=1024,
+            height=1024,
+            guidance_scale=4.0,
+            num_inference_steps=25,
+            original_image=image,
+            image=image,
+            strength=1.0,
+            map=mask,
+            generator=generator,
+            ip_adapter_image=[ip_adapter_image],
+            output_type="np",
+        ).images[0]
 
-
+        image = (image * 255).astype(np.uint8)
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+        return image
 
 
-def outpaint(pil_image, direction='right', times_to_expand=4):
     prompt = ""
     negative_prompt = ""
    inpaint_mask_color = 50  # lighter use more of the Telea inpainting
@@ -272,14 +283,8 @@ gradio_app = gr.Interface(
     ],
     outputs=[gr.Image(label="Processed Image")],
     title="Outpainting with differential diffusion demo",
-    description='''
-# Outpainting with differential diffusion demo
-This uses code lifted almost verbatim from
-[Outpainting II - Differential Diffusion](https://huggingface.co/blog/OzzyGT/outpainting-differential-diffusion).
-
-If this Space is running on a CPU, it will take hours to get results. You may [duplicate this space](https://huggingface.co/spaces/clinteroni/outpainting-demo?duplicate=true) and pay for an upgraded runtime instead.
-'''
+    description=DESCRIPTION
 )
 
 if __name__ == "__main__":
-    gradio_app.launch()
+    gradio_app.queue(max_size=20).launch()