Izac committed on
Commit 1e92513 · verified · 1 Parent(s): 2144f7f

Update app.py

Files changed (1)
  1. app.py +101 -27
app.py CHANGED
@@ -1,16 +1,25 @@
 import spaces
+
 import os
 import requests
 import time
+
 import torch
+
 from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, DDIMScheduler
+from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
 from diffusers.models import AutoencoderKL
+from diffusers.models.attention_processor import AttnProcessor2_0
+
 from PIL import Image
 import cv2
 import numpy as np
+
 from RealESRGAN import RealESRGAN
+
 import gradio as gr
 from gradio_imageslider import ImageSlider
+
 from huggingface_hub import hf_hub_download
 
 USE_TORCH_COMPILE = False
@@ -30,6 +39,7 @@ def download_models():
         "CONTROLNET": ("lllyasviel/ControlNet-v1-1", "control_v11f1e_sd15_tile.pth", "models/ControlNet"),
         "VAE": ("stabilityai/sd-vae-ft-mse-original", "vae-ft-mse-840000-ema-pruned.safetensors", "models/VAE"),
     }
+
     for model, (repo_id, filename, local_dir) in models.items():
        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
 
@@ -54,8 +64,7 @@ class LazyLoadPipeline:
         print("Starting to load the pipeline...")
         self.pipe = self.setup_pipeline()
         print(f"Moving pipeline to device: {device}")
-        self.pipe.to(device=device, dtype=torch.float16)
-
+        self.pipe.to(device)
         if USE_TORCH_COMPILE:
             print("Compiling the model...")
             self.pipe.unet = torch.compile(self.pipe.unet, mode="reduce-overhead", fullgraph=True)
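Since every component is now created with torch_dtype=torch.float16 (see the next hunk), a plain pipe.to(device) is sufficient; the old call redundantly forced the dtype again during the move. A minimal sanity check, assuming the pipeline built by setup_pipeline():

# Illustrative sketch, not part of the commit: with the dtype fixed at load
# time, a plain device move leaves the fp16 weights untouched.
import torch

pipe = lazy_pipe.pipe                    # pipeline built by setup_pipeline()
assert pipe.unet.dtype == torch.float16  # set via torch_dtype at load time
pipe.to(device)                          # moves weights only; dtype unchanged
assert pipe.unet.dtype == torch.float16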
@@ -63,49 +72,89 @@ class LazyLoadPipeline:
     @timer_func
     def setup_pipeline(self):
         print("Setting up the pipeline...")
-
-        # Load ControlNet model correctly
         controlnet = ControlNetModel.from_single_file(
-            "models/ControlNet/control_v11f1e_sd15_tile.pth"
-        ).to(device=device, dtype=torch.float16)
-
+            "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
+        )
+        safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
         model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
         pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
             model_path,
             controlnet=controlnet,
             torch_dtype=torch.float16,
             use_safetensors=True,
-            safety_checker=None
-        ).to(device=device, dtype=torch.float16)
-
-        # Load VAE
+            safety_checker=safety_checker
+        )
         vae = AutoencoderKL.from_single_file(
-            "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors"
-        ).to(device=device, dtype=torch.float16)
+            "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
+            torch_dtype=torch.float16
+        )
         pipe.vae = vae
-
-        # Load textual inversions and Lora
         pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
         pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
         pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
         pipe.fuse_lora(lora_scale=0.5)
         pipe.load_lora_weights("models/Lora/more_details.safetensors")
-        pipe.fuse_lora(lora_scale=1.0)
-
+        pipe.fuse_lora(lora_scale=1.)
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
         pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
-
         return pipe
 
     def __call__(self, *args, **kwargs):
         return self.pipe(*args, **kwargs)
 
+class LazyRealESRGAN:
+    def __init__(self, device, scale):
+        self.device = device
+        self.scale = scale
+        self.model = None
+
+    def load_model(self):
+        if self.model is None:
+            self.model = RealESRGAN(self.device, scale=self.scale)
+            self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
+    def predict(self, img):
+        self.load_model()
+        return self.model.predict(img)
+
+lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
+lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
+
+@timer_func
+def resize_and_upscale(input_image, resolution):
+    scale = 2 if resolution <= 2048 else 4
+    input_image = input_image.convert("RGB")
+    W, H = input_image.size
+    k = float(resolution) / min(H, W)
+    H = int(round(H * k / 64.0)) * 64
+    W = int(round(W * k / 64.0)) * 64
+    img = input_image.resize((W, H), resample=Image.LANCZOS)
+    if scale == 2:
+        img = lazy_realesrgan_x2.predict(img)
+    else:
+        img = lazy_realesrgan_x4.predict(img)
+    return img
+
+@timer_func
+def create_hdr_effect(original_image, hdr):
+    if hdr == 0:
+        return original_image
+    cv_original = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
+    factors = [1.0 - 0.9 * hdr, 1.0 - 0.7 * hdr, 1.0 - 0.45 * hdr,
+               1.0 - 0.25 * hdr, 1.0, 1.0 + 0.2 * hdr,
+               1.0 + 0.4 * hdr, 1.0 + 0.6 * hdr, 1.0 + 0.8 * hdr]
+    images = [cv2.convertScaleAbs(cv_original, alpha=factor) for factor in factors]
+    merge_mertens = cv2.createMergeMertens()
+    hdr_image = merge_mertens.process(images)
+    hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
+    return Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
+
 lazy_pipe = LazyLoadPipeline()
 lazy_pipe.load()
 
 def prepare_image(input_image, resolution, hdr):
-    input_image = input_image.convert("RGB")
-    return input_image
+    condition_image = resize_and_upscale(input_image, resolution)
+    condition_image = create_hdr_effect(condition_image, hdr)
+    return condition_image
 
 @spaces.GPU
 @timer_func
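Two of the additions above are worth a gloss. resize_and_upscale snaps both sides to multiples of 64 before the Real-ESRGAN pass, which keeps the latent dimensions friendly for the SD 1.5 UNet and VAE. A quick numeric check of that rounding, with illustrative values only:

# e.g. a 1280x720 input at resolution=512 snaps to 896x512 before the 2x upscale
W, H, resolution = 1280, 720, 512
k = float(resolution) / min(H, W)   # 512 / 720 ≈ 0.711
H2 = int(round(H * k / 64.0)) * 64  # round(8.00)  * 64 = 512
W2 = int(round(W * k / 64.0)) * 64  # round(14.22) * 64 = 896
print(W2, H2)                       # 896 512, both multiples of 64

create_hdr_effect is Mertens exposure fusion: it fakes an exposure bracket by rescaling the source with cv2.convertScaleAbs, then merges the stack. A self-contained sketch of the same idea, using a stand-in image:

import cv2
import numpy as np

img = np.full((64, 64, 3), 128, np.uint8)        # stand-in for the real input
stack = [cv2.convertScaleAbs(img, alpha=a) for a in (0.5, 1.0, 1.5)]
fused = cv2.createMergeMertens().process(stack)  # float32, roughly in [0, 1]
out = np.clip(fused * 255, 0, 255).astype(np.uint8)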
@@ -116,7 +165,7 @@ def gradio_process_image(input_image, resolution, num_inference_steps, strength,
     condition_image = prepare_image(input_image, resolution, hdr)
 
     prompt = "masterpiece, best quality, highres"
-    negative_prompt = "low quality, ugly, blurry, lowres, bad anatomy, bad hands, cropped, worst quality"
+    negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
 
     options = {
         "prompt": prompt,
@@ -135,9 +184,19 @@ def gradio_process_image(input_image, resolution, num_inference_steps, strength,
     result = lazy_pipe(**options).images[0]
     print("Image processing completed successfully")
 
-    return [np.array(input_image), np.array(result)]
+    # Convert input_image and result to numpy arrays
+    input_array = np.array(input_image)
+    result_array = np.array(result)
+
+    return [input_array, result_array]
 
-title = """<h1 align="center">Image Upscaler with Tile Controlnet</h1>"""
+title = """<h1 align="center">Image Upscaler with Tile Controlnet</h1>
+<p align="center">The main ideas come from</p>
+<p><center>
+<a href="https://github.com/philz1337x/clarity-upscaler" target="_blank">[philz1337x]</a>
+<a href="https://github.com/BatouResearch/controlnet-tile-upscale" target="_blank">[Pau-Lozano]</a>
+</center></p>
+"""
 
 with gr.Blocks() as demo:
     gr.HTML(title)
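The handler now returns a [before, after] pair of numpy arrays, which is the shape ImageSlider(type="numpy") in the UI below expects. A minimal sketch of that contract, with stand-in images:

import numpy as np
from PIL import Image

before = np.array(Image.new("RGB", (64, 64)))  # stand-in: user upload
after = np.array(Image.new("RGB", (64, 64)))   # stand-in: pipeline output
pair = [before, after]                         # what the slider receives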
@@ -149,11 +208,26 @@ with gr.Blocks() as demo:
         output_slider = ImageSlider(label="Before / After", type="numpy")
         with gr.Accordion("Advanced Options", open=False):
             resolution = gr.Slider(minimum=256, maximum=2048, value=512, step=256, label="Resolution")
-            num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Inference Steps")
+            num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
             strength = gr.Slider(minimum=0, maximum=1, value=0.4, step=0.01, label="Strength")
             hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
             guidance_scale = gr.Slider(minimum=0, maximum=20, value=3, step=0.5, label="Guidance Scale")
 
-    run_button.click(fn=gradio_process_image, inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale], outputs=output_slider)
-
-demo.launch(share=True)
+    run_button.click(fn=gradio_process_image,
+                     inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
+                     outputs=output_slider)
+
+    # Add examples with all required inputs
+    gr.Examples(
+        examples=[
+            ["image1.jpg", 512, 20, 0.4, 0, 3],
+            ["image2.png", 512, 20, 0.4, 0, 3],
+            ["image3.png", 512, 20, 0.4, 0, 3],
+        ],
+        inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
+        outputs=output_slider,
+        fn=gradio_process_image,
+        cache_examples=True,
+    )
+
+demo.launch(share=True)
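One caveat on the new gr.Examples block: with cache_examples=True, Gradio runs gradio_process_image on every example at startup, so image1.jpg, image2.png, and image3.png must ship with the Space or the build fails. A small guard, not in the commit, could filter out missing files first:

import os

# keep only the example images that actually exist next to app.py
example_files = ["image1.jpg", "image2.png", "image3.png"]
examples = [[f, 512, 20, 0.4, 0, 3] for f in example_files if os.path.exists(f)]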