Fabrice-TIERCELIN committed on
Commit a047c88 · verified · 1 Parent(s): 8dde991

Upload 4 files

Files changed (4)
  1. CKPT_PTH.py +1 -1
  2. README.md +18 -11
  3. app.py +1037 -526
  4. requirements.txt +46 -21
CKPT_PTH.py CHANGED
@@ -1,2 +1,2 @@
- SDXL_CLIP1_PATH = 'openai/clip-vit-large-patch14'
  SDXL_CLIP2_CKPT_PTH = 'laion_CLIP-ViT-bigG-14-laion2B-39B-b160k/open_clip_pytorch_model.bin'
+ SDXL_CLIP1_PATH = 'openai/clip-vit-large-patch14'
  SDXL_CLIP2_CKPT_PTH = 'laion_CLIP-ViT-bigG-14-laion2B-39B-b160k/open_clip_pytorch_model.bin'
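Both constants point at the CLIP text encoders SUPIR expects; the laion checkpoint is downloaded into laion_CLIP-ViT-bigG-14-laion2B-39B-b160k by the new app.py below. A minimal sketch of how such paths are typically consumed follows — the actual loading happens inside the SUPIR package via its YAML config, so these calls are illustrative assumptions, not this commit's code:

from transformers import CLIPTextModel, CLIPTokenizer
import open_clip

from CKPT_PTH import SDXL_CLIP1_PATH, SDXL_CLIP2_CKPT_PTH

# First SDXL text encoder: resolved as a Hugging Face repo id.
tokenizer = CLIPTokenizer.from_pretrained(SDXL_CLIP1_PATH)
text_encoder = CLIPTextModel.from_pretrained(SDXL_CLIP1_PATH)

# Second SDXL text encoder: a local open_clip checkpoint file.
clip_bigG, _, preprocess = open_clip.create_model_and_transforms(
    "ViT-bigG-14", pretrained=SDXL_CLIP2_CKPT_PTH
)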
README.md CHANGED
@@ -1,14 +1,21 @@
  ---
- title: FramePack F1 + V2V + EF
- emoji: 👽
- colorFrom: pink
- colorTo: gray
  sdk: gradio
- sdk_version: 5.29.0
  app_file: app.py
- pinned: true
- license: apache-2.0
- short_description: fast video generation from images & text
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
  ---
+ title: SUPIR Image Upscaler
  sdk: gradio
+ emoji: 📷
+ sdk_version: 4.38.1
  app_file: app.py
+ license: mit
+ colorFrom: blue
+ colorTo: pink
+ tags:
+ - Upscaling
+ - Restoring
+ - Image-to-Image
+ - Image-2-Image
+ - Img-to-Img
+ - Img-2-Img
+ - language models
+ - LLMs
+ short_description: Restore blurred or small images with prompt
+ suggested_hardware: zero-a10g
+ ---
app.py CHANGED
@@ -1,526 +1,1037 @@
1
- from diffusers_helper.hf_login import login
2
-
3
- import os
4
-
5
- os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
6
-
7
- import gradio as gr
8
- import torch
9
- import traceback
10
- import einops
11
- import safetensors.torch as sf
12
- import numpy as np
13
- import math
14
- import spaces
15
-
16
- from PIL import Image
17
- from diffusers import AutoencoderKLHunyuanVideo
18
- from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
19
- from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
20
- from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
21
- from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
22
- from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
23
- from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
24
- from diffusers_helper.thread_utils import AsyncStream, async_run
25
- from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
26
- from transformers import SiglipImageProcessor, SiglipVisionModel
27
- from diffusers_helper.clip_vision import hf_clip_vision_encode
28
- from diffusers_helper.bucket_tools import find_nearest_bucket
29
-
30
-
31
- free_mem_gb = get_cuda_free_memory_gb(gpu)
32
- high_vram = free_mem_gb > 80
33
-
34
- print(f'Free VRAM {free_mem_gb} GB')
35
- print(f'High-VRAM Mode: {high_vram}')
36
-
37
- text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
38
- text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
39
- tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
40
- tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
41
- vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
42
-
43
- feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
44
- image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
45
-
46
- # quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
47
- # transformer = HunyuanVideoTransformer3DModelPacked.from_single_file("https://huggingface.co/sirolim/FramePack_F1_I2V_FP8/resolve/main/FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", torch_dtype=torch.bfloat16)
48
- # transformer = HunyuanVideoTransformer3DModelPacked.from_single_file('sirolim/FramePack_F1_I2V_FP8', "FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", use_safetensors=True, torch_dtype=torch.bfloat16).cpu()
49
- transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()
50
-
51
- vae.eval()
52
- text_encoder.eval()
53
- text_encoder_2.eval()
54
- image_encoder.eval()
55
- transformer.eval()
56
-
57
- if not high_vram:
58
- vae.enable_slicing()
59
- vae.enable_tiling()
60
-
61
- transformer.high_quality_fp32_output_for_inference = True
62
- print('transformer.high_quality_fp32_output_for_inference = True')
63
-
64
- transformer.to(dtype=torch.bfloat16)
65
- vae.to(dtype=torch.float16)
66
- image_encoder.to(dtype=torch.float16)
67
- text_encoder.to(dtype=torch.float16)
68
- text_encoder_2.to(dtype=torch.float16)
69
-
70
- vae.requires_grad_(False)
71
- text_encoder.requires_grad_(False)
72
- text_encoder_2.requires_grad_(False)
73
- image_encoder.requires_grad_(False)
74
- transformer.requires_grad_(False)
75
-
76
- if not high_vram:
77
- # DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
78
- DynamicSwapInstaller.install_model(transformer, device=gpu)
79
- DynamicSwapInstaller.install_model(text_encoder, device=gpu)
80
- else:
81
- text_encoder.to(gpu)
82
- text_encoder_2.to(gpu)
83
- image_encoder.to(gpu)
84
- vae.to(gpu)
85
- transformer.to(gpu)
86
-
87
- stream = AsyncStream()
88
-
89
- outputs_folder = './outputs/'
90
- os.makedirs(outputs_folder, exist_ok=True)
91
-
92
- examples = [
93
- ["img_examples/1.png", "The girl dances gracefully, with clear movements, full of charm.",],
94
- ["img_examples/2.jpg", "The man dances flamboyantly, swinging his hips and striking bold poses with dramatic flair."],
95
- ["img_examples/3.png", "The woman dances elegantly among the blossoms, spinning slowly with flowing sleeves and graceful hand movements."],
96
- ]
97
-
98
- input_image_debug_value = None
99
- prompt_debug_value = None
100
- total_second_length_debug_value = None
101
-
102
- def generate_examples(input_image, prompt):
103
-
104
- t2v=False
105
- n_prompt=""
106
- seed=31337
107
- total_second_length=5
108
- latent_window_size=9
109
- steps=25
110
- cfg=1.0
111
- gs=10.0
112
- rs=0.0
113
- gpu_memory_preservation=6
114
- use_teacache=True
115
- mp4_crf=16
116
-
117
- global stream
118
-
119
- # assert input_image is not None, 'No input image!'
120
- if t2v:
121
- default_height, default_width = 640, 640
122
- input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
123
- print("No input image provided. Using a blank white image.")
124
-
125
- yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
126
-
127
- stream = AsyncStream()
128
-
129
- async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
130
-
131
- output_filename = None
132
-
133
- while True:
134
- flag, data = stream.output_queue.next()
135
-
136
- if flag == 'file':
137
- output_filename = data
138
- yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
139
-
140
- if flag == 'progress':
141
- preview, desc, html = data
142
- yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
143
-
144
- if flag == 'end':
145
- yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
146
- break
147
-
148
-
149
-
150
- @torch.no_grad()
151
- def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
152
- total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
153
- total_latent_sections = int(max(round(total_latent_sections), 1))
154
-
155
- job_id = generate_timestamp()
156
-
157
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
158
-
159
- try:
160
- # Clean GPU
161
- if not high_vram:
162
- unload_complete_models(
163
- text_encoder, text_encoder_2, image_encoder, vae, transformer
164
- )
165
-
166
- # Text encoding
167
-
168
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
169
-
170
- if not high_vram:
171
- fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
172
- load_model_as_complete(text_encoder_2, target_device=gpu)
173
-
174
- llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
175
-
176
- if cfg == 1:
177
- llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
178
- else:
179
- llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
180
-
181
- llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
182
- llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
183
-
184
- # Processing input image
185
-
186
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))
187
-
188
- H, W, C = input_image.shape
189
- height, width = find_nearest_bucket(H, W, resolution=640)
190
- input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)
191
-
192
- Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
193
-
194
- input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
195
- input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]
196
-
197
- # VAE encoding
198
-
199
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))
200
-
201
- if not high_vram:
202
- load_model_as_complete(vae, target_device=gpu)
203
-
204
- start_latent = vae_encode(input_image_pt, vae)
205
-
206
- # CLIP Vision
207
-
208
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
209
-
210
- if not high_vram:
211
- load_model_as_complete(image_encoder, target_device=gpu)
212
-
213
- image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
214
- image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
215
-
216
- # Dtype
217
-
218
- llama_vec = llama_vec.to(transformer.dtype)
219
- llama_vec_n = llama_vec_n.to(transformer.dtype)
220
- clip_l_pooler = clip_l_pooler.to(transformer.dtype)
221
- clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
222
- image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
223
-
224
- # Sampling
225
-
226
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
227
-
228
- rnd = torch.Generator("cpu").manual_seed(seed)
229
-
230
- history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
231
- history_pixels = None
232
-
233
- history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
234
- total_generated_latent_frames = 1
235
-
236
- for section_index in range(total_latent_sections):
237
- if stream.input_queue.top() == 'end':
238
- stream.output_queue.push(('end', None))
239
- return
240
-
241
- print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
242
-
243
- if not high_vram:
244
- unload_complete_models()
245
- move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
246
-
247
- if use_teacache:
248
- transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
249
- else:
250
- transformer.initialize_teacache(enable_teacache=False)
251
-
252
- def callback(d):
253
- preview = d['denoised']
254
- preview = vae_decode_fake(preview)
255
-
256
- preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
257
- preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
258
-
259
- if stream.input_queue.top() == 'end':
260
- stream.output_queue.push(('end', None))
261
- raise KeyboardInterrupt('User ends the task.')
262
-
263
- current_step = d['i'] + 1
264
- percentage = int(100.0 * current_step / steps)
265
- hint = f'Sampling {current_step}/{steps}'
266
- desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
267
- stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
268
- return
269
-
270
- indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
271
- clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
272
- clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
273
-
274
- clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
275
- clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
276
-
277
- generated_latents = sample_hunyuan(
278
- transformer=transformer,
279
- sampler='unipc',
280
- width=width,
281
- height=height,
282
- frames=latent_window_size * 4 - 3,
283
- real_guidance_scale=cfg,
284
- distilled_guidance_scale=gs,
285
- guidance_rescale=rs,
286
- # shift=3.0,
287
- num_inference_steps=steps,
288
- generator=rnd,
289
- prompt_embeds=llama_vec,
290
- prompt_embeds_mask=llama_attention_mask,
291
- prompt_poolers=clip_l_pooler,
292
- negative_prompt_embeds=llama_vec_n,
293
- negative_prompt_embeds_mask=llama_attention_mask_n,
294
- negative_prompt_poolers=clip_l_pooler_n,
295
- device=gpu,
296
- dtype=torch.bfloat16,
297
- image_embeddings=image_encoder_last_hidden_state,
298
- latent_indices=latent_indices,
299
- clean_latents=clean_latents,
300
- clean_latent_indices=clean_latent_indices,
301
- clean_latents_2x=clean_latents_2x,
302
- clean_latent_2x_indices=clean_latent_2x_indices,
303
- clean_latents_4x=clean_latents_4x,
304
- clean_latent_4x_indices=clean_latent_4x_indices,
305
- callback=callback,
306
- )
307
-
308
- total_generated_latent_frames += int(generated_latents.shape[2])
309
- history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
310
-
311
- if not high_vram:
312
- offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
313
- load_model_as_complete(vae, target_device=gpu)
314
-
315
- real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
316
-
317
- if history_pixels is None:
318
- history_pixels = vae_decode(real_history_latents, vae).cpu()
319
- else:
320
- section_latent_frames = latent_window_size * 2
321
- overlapped_frames = latent_window_size * 4 - 3
322
-
323
- current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
324
- history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
325
-
326
- if not high_vram:
327
- unload_complete_models()
328
-
329
- output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
330
-
331
- save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
332
-
333
- print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
334
-
335
- stream.output_queue.push(('file', output_filename))
336
- except:
337
- traceback.print_exc()
338
-
339
- if not high_vram:
340
- unload_complete_models(
341
- text_encoder, text_encoder_2, image_encoder, vae, transformer
342
- )
343
-
344
- stream.output_queue.push(('end', None))
345
- return
346
-
347
- def get_duration(input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
348
- global total_second_length_debug_value
349
-
350
- if total_second_length_debug_value is not None:
351
- return total_second_length_debug_value * 60
352
- return total_second_length * 60
353
-
354
- @spaces.GPU(duration=get_duration)
355
- def process(input_image, prompt,
356
- t2v=False,
357
- n_prompt="",
358
- seed=31337,
359
- total_second_length=5,
360
- latent_window_size=9,
361
- steps=25,
362
- cfg=1.0,
363
- gs=10.0,
364
- rs=0.0,
365
- gpu_memory_preservation=6,
366
- use_teacache=True,
367
- mp4_crf=16
368
- ):
369
- global stream, input_image_debug_value, prompt_debug_value, total_second_length_debug_value
370
-
371
- if input_image_debug_value is not None or prompt_debug_value is not None or total_second_length_debug_value is not None:
372
- input_image = input_image_debug_value
373
- prompt = prompt_debug_value
374
- total_second_length = total_second_length_debug_value
375
- input_image_debug_value = prompt_debug_value = total_second_length_debug_value = None
376
-
377
- # assert input_image is not None, 'No input image!'
378
- if t2v:
379
- default_height, default_width = 640, 640
380
- input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
381
- print("No input image provided. Using a blank white image.")
382
-
383
- yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
384
-
385
- stream = AsyncStream()
386
-
387
- async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
388
-
389
- output_filename = None
390
-
391
- while True:
392
- flag, data = stream.output_queue.next()
393
-
394
- if flag == 'file':
395
- output_filename = data
396
- yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
397
-
398
- if flag == 'progress':
399
- preview, desc, html = data
400
- yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
401
-
402
- if flag == 'end':
403
- yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
404
- break
405
-
406
-
407
- def end_process():
408
- stream.input_queue.push('end')
409
-
410
-
411
- css = make_progress_bar_css()
412
- block = gr.Blocks(css=css).queue()
413
- with block:
414
- gr.Markdown('# FramePack Essentials | Experimentation in Progress')
415
- gr.Markdown(f"""### Space is constantly being tinkered with, expect downtime and errors.
416
- """)
417
- with gr.Row():
418
- with gr.Column():
419
- input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
420
- prompt = gr.Textbox(label="Prompt", value='')
421
- t2v = gr.Checkbox(label="do text-to-video", value=False)
422
-
423
- with gr.Row():
424
- start_button = gr.Button(value="Start Generation")
425
- end_button = gr.Button(value="End Generation", interactive=False)
426
-
427
- total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=5, value=2, step=0.1)
428
- with gr.Group():
429
- with gr.Accordion("Advanced settings", open=False):
430
- use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
431
-
432
- n_prompt = gr.Textbox(label="Negative Prompt", value="") # Not used
433
- seed = gr.Number(label="Seed", value=31337, precision=0)
434
-
435
-
436
- latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1) # Should not change
437
- steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Changing this value is not recommended.')
438
-
439
- cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01) # Should not change
440
- gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01, info='Changing this value is not recommended; 3=blurred motions& & unsharped; 10 focus motion')
441
- rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01) # Should not change
442
-
443
- gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
444
-
445
- mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
446
-
447
- with gr.Accordion("Debug", open=False):
448
- input_image_debug = gr.Image(type="numpy", label="Image Debug", height=320)
449
- prompt_debug = gr.Textbox(label="Prompt Debug", value='')
450
- total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (Seconds) Debug", minimum=1, maximum=120, value=5, step=0.1)
451
-
452
- with gr.Column():
453
- preview_image = gr.Image(label="Next Latents", height=200)
454
- result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
455
- progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
456
- progress_bar = gr.HTML('', elem_classes='no-generating-animation')
457
-
458
- gr.HTML('<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>')
459
-
460
- ips = [input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
461
- start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
462
- end_button.click(fn=end_process)
463
-
464
- # gr.Examples(
465
- # examples,
466
- # inputs=[input_image, prompt],
467
- # outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
468
- # fn=generate_examples,
469
- # cache_examples=True
470
- # )
471
-
472
- with gr.Row(visible=False):
473
- gr.Examples(
474
- examples = [
475
- [
476
- "./img_examples/Example1.png", # input_image
477
- "View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
478
- False, # t2v
479
- "", # n_prompt
480
- 42, # seed
481
- 1, # total_second_length
482
- 9, # latent_window_size
483
- 25, # steps
484
- 1.0, # cfg
485
- 10.0, # gs
486
- 0.0, # rs
487
- 6, # gpu_memory_preservation
488
- True, # use_teacache
489
- 16 # mp4_crf
490
- ],
491
- ],
492
- run_on_click = True,
493
- fn = process,
494
- inputs = ips,
495
- outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
496
- cache_examples = True,
497
- )
498
-
499
-
500
- def handle_field_debug_change(input_image_debug_data, prompt_debug_data, total_second_length_debug_data):
501
- global input_image_debug_value, prompt_debug_value, total_second_length_debug_value
502
- input_image_debug_value = input_image_debug_data
503
- prompt_debug_value = prompt_debug_data
504
- total_second_length_debug_value = total_second_length_debug_data
505
- return []
506
-
507
- input_image_debug.upload(
508
- fn=handle_field_debug_change,
509
- inputs=[input_image_debug, prompt_debug, total_second_length_debug],
510
- outputs=[]
511
- )
512
-
513
- prompt_debug.change(
514
- fn=handle_field_debug_change,
515
- inputs=[input_image_debug, prompt_debug, total_second_length_debug],
516
- outputs=[]
517
- )
518
-
519
- total_second_length_debug.change(
520
- fn=handle_field_debug_change,
521
- inputs=[input_image_debug, prompt_debug, total_second_length_debug],
522
- outputs=[]
523
- )
524
-
525
-
526
- block.launch(ssr_mode=False)
1
+ import os
2
+ import gradio as gr
3
+ import argparse
4
+ import numpy as np
5
+ import torch
6
+ import einops
7
+ import copy
8
+ import math
9
+ import time
10
+ import random
11
+ import spaces
12
+ import re
13
+ import uuid
14
+
15
+ from gradio_imageslider import ImageSlider
16
+ from PIL import Image
17
+ from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype, create_SUPIR_model, load_QF_ckpt
18
+ from huggingface_hub import hf_hub_download
19
+ from pillow_heif import register_heif_opener
20
+
21
+ register_heif_opener()
22
+
23
+ max_64_bit_int = np.iinfo(np.int32).max
24
+
25
+ hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
26
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="sd_xl_base_1.0_0.9vae.safetensors", local_dir="yushan777_SUPIR")
27
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0F.ckpt", local_dir="yushan777_SUPIR")
28
+ hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0Q.ckpt", local_dir="yushan777_SUPIR")
29
+ hf_hub_download(repo_id="RunDiffusion/Juggernaut-XL-Lightning", filename="Juggernaut_RunDiffusionPhoto2_Lightning_4Steps.safetensors", local_dir="RunDiffusion_Juggernaut-XL-Lightning")
30
+
31
+ parser = argparse.ArgumentParser()
32
+ parser.add_argument("--opt", type=str, default='options/SUPIR_v0.yaml')
33
+ parser.add_argument("--ip", type=str, default='127.0.0.1')
34
+ parser.add_argument("--port", type=int, default='6688')
35
+ parser.add_argument("--no_llava", action='store_true', default=True)#False
36
+ parser.add_argument("--use_image_slider", action='store_true', default=False)#False
37
+ parser.add_argument("--log_history", action='store_true', default=False)
38
+ parser.add_argument("--loading_half_params", action='store_true', default=False)#False
39
+ parser.add_argument("--use_tile_vae", action='store_true', default=True)#False
40
+ parser.add_argument("--encoder_tile_size", type=int, default=512)
41
+ parser.add_argument("--decoder_tile_size", type=int, default=64)
42
+ parser.add_argument("--load_8bit_llava", action='store_true', default=False)
43
+ args = parser.parse_args()
44
+
45
+ if torch.cuda.device_count() > 0:
46
+ SUPIR_device = 'cuda:0'
47
+
48
+ # Load SUPIR
49
+ model, default_setting = create_SUPIR_model(args.opt, SUPIR_sign='Q', load_default_setting=True)
50
+ if args.loading_half_params:
51
+ model = model.half()
52
+ if args.use_tile_vae:
53
+ model.init_tile_vae(encoder_tile_size=args.encoder_tile_size, decoder_tile_size=args.decoder_tile_size)
54
+ model = model.to(SUPIR_device)
55
+ model.first_stage_model.denoise_encoder_s1 = copy.deepcopy(model.first_stage_model.denoise_encoder)
56
+ model.current_model = 'v0-Q'
57
+ ckpt_Q, ckpt_F = load_QF_ckpt(args.opt)
58
+
59
+ def check_upload(input_image):
60
+ if input_image is None:
61
+ raise gr.Error("Please provide an image to restore.")
62
+ return gr.update(visible = True)
63
+
64
+ def update_seed(is_randomize_seed, seed):
65
+ if is_randomize_seed:
66
+ return random.randint(0, max_64_bit_int)
67
+ return seed
68
+
69
+ def reset():
70
+ return [
71
+ None,
72
+ 0,
73
+ None,
74
+ None,
75
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
76
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
77
+ 1,
78
+ 1024,
79
+ 1,
80
+ 2,
81
+ 50,
82
+ -1.0,
83
+ 1.,
84
+ default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0,
85
+ True,
86
+ random.randint(0, max_64_bit_int),
87
+ 5,
88
+ 1.003,
89
+ "Wavelet",
90
+ "fp32",
91
+ "fp32",
92
+ 1.0,
93
+ True,
94
+ False,
95
+ default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0,
96
+ 0.,
97
+ "v0-Q",
98
+ "input",
99
+ 179
100
+ ]
101
+
102
+ def check_and_update(input_image):
103
+ if input_image is None:
104
+ raise gr.Error("Please provide an image to restore.")
105
+ return gr.update(visible = True)
106
+
107
+ @spaces.GPU(duration=420)
108
+ def stage1_process(
109
+ input_image,
110
+ gamma_correction,
111
+ diff_dtype,
112
+ ae_dtype
113
+ ):
114
+ print('stage1_process ==>>')
115
+ if torch.cuda.device_count() == 0:
116
+ gr.Warning('Set this space to GPU config to make it work.')
117
+ return None, None
118
+ torch.cuda.set_device(SUPIR_device)
119
+ LQ = HWC3(np.array(Image.open(input_image)))
120
+ LQ = fix_resize(LQ, 512)
121
+ # stage1
122
+ LQ = np.array(LQ) / 255 * 2 - 1
123
+ LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
124
+
125
+ model.ae_dtype = convert_dtype(ae_dtype)
126
+ model.model.dtype = convert_dtype(diff_dtype)
127
+
128
+ LQ = model.batchify_denoise(LQ, is_stage1=True)
129
+ LQ = (LQ[0].permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().round().clip(0, 255).astype(np.uint8)
130
+ # gamma correction
131
+ LQ = LQ / 255.0
132
+ LQ = np.power(LQ, gamma_correction)
133
+ LQ *= 255.0
134
+ LQ = LQ.round().clip(0, 255).astype(np.uint8)
135
+ print('<<== stage1_process')
136
+ return LQ, gr.update(visible = True)
137
+
138
+ def stage2_process_example(*args, **kwargs):
139
+ [result_slider, result_gallery, restore_information, reset_btn] = restore_in_Xmin(*args, **kwargs)
140
+ return [result_slider, restore_information, reset_btn]
141
+
142
+ def stage2_process(*args, **kwargs):
143
+ try:
144
+ return restore_in_Xmin(*args, **kwargs)
145
+ except Exception as e:
146
+ # NO_GPU_MESSAGE_INQUEUE
147
+ print("gradio.exceptions.Error 'No GPU is currently available for you after 60s'")
148
+ print('str(type(e)): ' + str(type(e))) # <class 'gradio.exceptions.Error'>
149
+ print('str(e): ' + str(e)) # You have exceeded your GPU quota...
150
+ try:
151
+ print('e.message: ' + e.message) # No GPU is currently available for you after 60s
152
+ except Exception as e2:
153
+ print('Failure')
154
+ if str(e).startswith("No GPU is currently available for you after 60s"):
155
+ print('Exception identified!!!')
156
+ #if str(type(e)) == "<class 'gradio.exceptions.Error'>":
157
+ #print('Exception of name ' + type(e).__name__)
158
+ raise e
159
+
160
+ def restore_in_Xmin(
161
+ noisy_image,
162
+ rotation,
163
+ denoise_image,
164
+ prompt,
165
+ a_prompt,
166
+ n_prompt,
167
+ num_samples,
168
+ min_size,
169
+ downscale,
170
+ upscale,
171
+ edm_steps,
172
+ s_stage1,
173
+ s_stage2,
174
+ s_cfg,
175
+ randomize_seed,
176
+ seed,
177
+ s_churn,
178
+ s_noise,
179
+ color_fix_type,
180
+ diff_dtype,
181
+ ae_dtype,
182
+ gamma_correction,
183
+ linear_CFG,
184
+ linear_s_stage2,
185
+ spt_linear_CFG,
186
+ spt_linear_s_stage2,
187
+ model_select,
188
+ output_format,
189
+ allocation
190
+ ):
191
+ print("noisy_image:\n" + str(noisy_image))
192
+ print("denoise_image:\n" + str(denoise_image))
193
+ print("rotation: " + str(rotation))
194
+ print("prompt: " + str(prompt))
195
+ print("a_prompt: " + str(a_prompt))
196
+ print("n_prompt: " + str(n_prompt))
197
+ print("num_samples: " + str(num_samples))
198
+ print("min_size: " + str(min_size))
199
+ print("downscale: " + str(downscale))
200
+ print("upscale: " + str(upscale))
201
+ print("edm_steps: " + str(edm_steps))
202
+ print("s_stage1: " + str(s_stage1))
203
+ print("s_stage2: " + str(s_stage2))
204
+ print("s_cfg: " + str(s_cfg))
205
+ print("randomize_seed: " + str(randomize_seed))
206
+ print("seed: " + str(seed))
207
+ print("s_churn: " + str(s_churn))
208
+ print("s_noise: " + str(s_noise))
209
+ print("color_fix_type: " + str(color_fix_type))
210
+ print("diff_dtype: " + str(diff_dtype))
211
+ print("ae_dtype: " + str(ae_dtype))
212
+ print("gamma_correction: " + str(gamma_correction))
213
+ print("linear_CFG: " + str(linear_CFG))
214
+ print("linear_s_stage2: " + str(linear_s_stage2))
215
+ print("spt_linear_CFG: " + str(spt_linear_CFG))
216
+ print("spt_linear_s_stage2: " + str(spt_linear_s_stage2))
217
+ print("model_select: " + str(model_select))
218
+ print("GPU time allocation: " + str(allocation) + " min")
219
+ print("output_format: " + str(output_format))
220
+
221
+ input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", noisy_image)
222
+
223
+ if input_format not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']:
224
+ gr.Warning('Invalid image format. Please first convert into *.png, *.webp, *.jpg, *.jpeg, *.gif, *.bmp or *.heic.')
225
+ return None, None, None, None
226
+
227
+ if output_format == "input":
228
+ if noisy_image is None:
229
+ output_format = "png"
230
+ else:
231
+ output_format = input_format
232
+ print("final output_format: " + str(output_format))
233
+
234
+ if prompt is None:
235
+ prompt = ""
236
+
237
+ if a_prompt is None:
238
+ a_prompt = ""
239
+
240
+ if n_prompt is None:
241
+ n_prompt = ""
242
+
243
+ if prompt != "" and a_prompt != "":
244
+ a_prompt = prompt + ", " + a_prompt
245
+ else:
246
+ a_prompt = prompt + a_prompt
247
+ print("Final prompt: " + str(a_prompt))
248
+
249
+ denoise_image = np.array(Image.open(noisy_image if denoise_image is None else denoise_image))
250
+
251
+ if rotation == 90:
252
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
253
+ elif rotation == 180:
254
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
255
+ denoise_image = np.array(list(zip(*denoise_image[::-1])))
256
+ elif rotation == -90:
257
+ denoise_image = np.array(list(zip(*denoise_image))[::-1])
258
+
259
+ if 1 < downscale:
260
+ input_height, input_width, input_channel = denoise_image.shape
261
+ denoise_image = np.array(Image.fromarray(denoise_image).resize((input_width // downscale, input_height // downscale), Image.LANCZOS))
262
+
263
+ denoise_image = HWC3(denoise_image)
264
+
265
+ if torch.cuda.device_count() == 0:
266
+ gr.Warning('Set this space to GPU config to make it work.')
267
+ return [noisy_image, denoise_image], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = [denoise_image]), None, gr.update(visible=True)
268
+
269
+ if model_select != model.current_model:
270
+ print('load ' + model_select)
271
+ if model_select == 'v0-Q':
272
+ model.load_state_dict(ckpt_Q, strict=False)
273
+ elif model_select == 'v0-F':
274
+ model.load_state_dict(ckpt_F, strict=False)
275
+ model.current_model = model_select
276
+
277
+ model.ae_dtype = convert_dtype(ae_dtype)
278
+ model.model.dtype = convert_dtype(diff_dtype)
279
+
280
+ return restore_on_gpu(
281
+ noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
282
+ )
283
+
284
+ def get_duration(
285
+ noisy_image,
286
+ input_image,
287
+ prompt,
288
+ a_prompt,
289
+ n_prompt,
290
+ num_samples,
291
+ min_size,
292
+ downscale,
293
+ upscale,
294
+ edm_steps,
295
+ s_stage1,
296
+ s_stage2,
297
+ s_cfg,
298
+ randomize_seed,
299
+ seed,
300
+ s_churn,
301
+ s_noise,
302
+ color_fix_type,
303
+ diff_dtype,
304
+ ae_dtype,
305
+ gamma_correction,
306
+ linear_CFG,
307
+ linear_s_stage2,
308
+ spt_linear_CFG,
309
+ spt_linear_s_stage2,
310
+ model_select,
311
+ output_format,
312
+ allocation
313
+ ):
314
+ return allocation
315
+
316
+ @spaces.GPU(duration=get_duration)
317
+ def restore_on_gpu(
318
+ noisy_image,
319
+ input_image,
320
+ prompt,
321
+ a_prompt,
322
+ n_prompt,
323
+ num_samples,
324
+ min_size,
325
+ downscale,
326
+ upscale,
327
+ edm_steps,
328
+ s_stage1,
329
+ s_stage2,
330
+ s_cfg,
331
+ randomize_seed,
332
+ seed,
333
+ s_churn,
334
+ s_noise,
335
+ color_fix_type,
336
+ diff_dtype,
337
+ ae_dtype,
338
+ gamma_correction,
339
+ linear_CFG,
340
+ linear_s_stage2,
341
+ spt_linear_CFG,
342
+ spt_linear_s_stage2,
343
+ model_select,
344
+ output_format,
345
+ allocation
346
+ ):
347
+ start = time.time()
348
+ print('restore ==>>')
349
+
350
+ torch.cuda.set_device(SUPIR_device)
351
+
352
+ with torch.no_grad():
353
+ input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size)
354
+ LQ = np.array(input_image) / 255.0
355
+ LQ = np.power(LQ, gamma_correction)
356
+ LQ *= 255.0
357
+ LQ = LQ.round().clip(0, 255).astype(np.uint8)
358
+ LQ = LQ / 255 * 2 - 1
359
+ LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
360
+ captions = ['']
361
+
362
+ samples = model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
363
+ s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
364
+ num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
365
+ use_linear_CFG=linear_CFG, use_linear_control_scale=linear_s_stage2,
366
+ cfg_scale_start=spt_linear_CFG, control_scale_start=spt_linear_s_stage2)
367
+
368
+ x_samples = (einops.rearrange(samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().round().clip(
369
+ 0, 255).astype(np.uint8)
370
+ results = [x_samples[i] for i in range(num_samples)]
371
+ torch.cuda.empty_cache()
372
+
373
+ # All the results have the same size
374
+ input_height, input_width, input_channel = np.array(input_image).shape
375
+ result_height, result_width, result_channel = np.array(results[0]).shape
376
+
377
+ print('<<== restore')
378
+ end = time.time()
379
+ secondes = int(end - start)
380
+ minutes = math.floor(secondes / 60)
381
+ secondes = secondes - (minutes * 60)
382
+ hours = math.floor(minutes / 60)
383
+ minutes = minutes - (hours * 60)
384
+ information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
385
+ "If you don't get the image you wanted, add more details in the « Image description ». " + \
386
+ "Wait " + str(allocation) + " min before a new run to avoid quota penalty or use another computer. " + \
387
+ "The image" + (" has" if len(results) == 1 else "s have") + " been generated in " + \
388
+ ((str(hours) + " h, ") if hours != 0 else "") + \
389
+ ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
390
+ str(secondes) + " sec. " + \
391
+ "The new image resolution is " + str(result_width) + \
392
+ " pixels large and " + str(result_height) + \
393
+ " pixels high, so a resolution of " + f'{result_width * result_height:,}' + " pixels."
394
+ print(information)
395
+ try:
396
+ print("Initial resolution: " + f'{input_width * input_height:,}')
397
+ print("Final resolution: " + f'{result_width * result_height:,}')
398
+ print("edm_steps: " + str(edm_steps))
399
+ print("num_samples: " + str(num_samples))
400
+ print("downscale: " + str(downscale))
401
+ print("Estimated minutes: " + f'{(((result_width * result_height**(1/1.75)) * input_width * input_height * (edm_steps**(1/2)) * (num_samples**(1/2.5)))**(1/2.5)) / 25000:,}')
402
+ except Exception as e:
403
+ print('Exception of Estimation')
404
+
405
+ # Only one image can be shown in the slider
406
+ return [noisy_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True), gr.update(visible=True)
407
+
408
+ def load_and_reset(param_setting):
409
+ print('load_and_reset ==>>')
410
+ if torch.cuda.device_count() == 0:
411
+ gr.Warning('Set this space to GPU config to make it work.')
412
+ return None, None, None, None, None, None, None, None, None, None, None, None, None, None
413
+ edm_steps = default_setting.edm_steps
414
+ s_stage2 = 1.0
415
+ s_stage1 = -1.0
416
+ s_churn = 5
417
+ s_noise = 1.003
418
+ a_prompt = 'Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - ' \
419
+ 'realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore ' \
420
+ 'detailing, hyper sharpness, perfect without deformations.'
421
+ n_prompt = 'painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, ' \
422
+ '3D render, unreal engine, blurring, dirty, messy, worst quality, low quality, frames, watermark, ' \
423
+ 'signature, jpeg artifacts, deformed, lowres, over-smooth'
424
+ color_fix_type = 'Wavelet'
425
+ spt_linear_s_stage2 = 0.0
426
+ linear_s_stage2 = False
427
+ linear_CFG = True
428
+ if param_setting == "Quality":
429
+ s_cfg = default_setting.s_cfg_Quality
430
+ spt_linear_CFG = default_setting.spt_linear_CFG_Quality
431
+ model_select = "v0-Q"
432
+ elif param_setting == "Fidelity":
433
+ s_cfg = default_setting.s_cfg_Fidelity
434
+ spt_linear_CFG = default_setting.spt_linear_CFG_Fidelity
435
+ model_select = "v0-F"
436
+ else:
437
+ raise NotImplementedError
438
+ gr.Info('The parameters are reset.')
439
+ print('<<== load_and_reset')
440
+ return edm_steps, s_cfg, s_stage2, s_stage1, s_churn, s_noise, a_prompt, n_prompt, color_fix_type, linear_CFG, \
441
+ linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select
442
+
443
+ def log_information(result_gallery):
444
+ print('log_information')
445
+ if result_gallery is not None:
446
+ for i, result in enumerate(result_gallery):
447
+ print(result[0])
448
+
449
+ def on_select_result(result_slider, result_gallery, evt: gr.SelectData):
450
+ print('on_select_result')
451
+ if result_gallery is not None:
452
+ for i, result in enumerate(result_gallery):
453
+ print(result[0])
454
+ return [result_slider[0], result_gallery[evt.index][0]]
455
+
456
+ title_html = """
457
+ <h1><center>SUPIR</center></h1>
458
+ <big><center>Upscale your images up to x10 for free, without an account or watermark, and download the result</center></big>
459
+ <center><big><big>🤸<big><big><big><big><big><big>🤸</big></big></big></big></big></big></big></big></center>
460
+
461
+ <p>This is an online demo of SUPIR, which practices model scaling for photo-realistic image restoration.
462
+ The content added by SUPIR is <b><u>imagination, not real-world information</u></b>.
463
+ SUPIR is intended for beauty and illustration only.
464
+ Most runs last a few minutes.
465
+ If you want to upscale AI-generated images, note that the <i>PixArt Sigma</i> space can directly generate 5984x5984 images.
466
+ Due to Gradio issues, the generated image is slightly less saturated than the original.
467
+ Please leave a <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">message in the discussion</a> if you encounter issues.
468
+ You can also use <a href="https://huggingface.co/spaces/gokaygokay/AuraSR">AuraSR</a> to upscale x4.
469
+
470
+ <p><center><a href="https://arxiv.org/abs/2401.13627">Paper</a> &emsp; <a href="http://supir.xpixel.group/">Project Page</a> &emsp; <a href="https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai">Local Install Guide</a></center></p>
471
+ <p><center><a style="display:inline-block" href='https://github.com/Fanghua-Yu/SUPIR'><img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/Fanghua-Yu/SUPIR?style=social"></a></center></p>
472
+ """
473
+
474
+
475
+ claim_md = """
476
+ ## **Piracy**
477
+ The images are not stored, but the logs are kept for one month.
478
+ ## **How to get SUPIR**
479
+ You can get SUPIR on HuggingFace by [duplicating this space](https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true) and setting a GPU.
480
+ You can also install SUPIR on your computer following [this tutorial](https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai).
481
+ You can install _Pinokio_ on your computer and then install _SUPIR_ into it. It should be quite easy if you have an Nvidia GPU.
482
+ ## **Terms of use**
483
+ By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. Please submit feedback to us if you get any inappropriate result! We will collect it to keep improving our models. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
484
+ ## **License**
485
+ The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/Fanghua-Yu/SUPIR) of SUPIR.
486
+ """
487
+
488
+ # Gradio interface
489
+ with gr.Blocks() as interface:
490
+ if torch.cuda.device_count() == 0:
491
+ with gr.Row():
492
+ gr.HTML("""
493
+ <p style="background-color: red;"><big><big><big><b>⚠️To use SUPIR, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>
494
+
495
+ You can't use SUPIR directly here because this space runs on a CPU, which is not enough for SUPIR. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">feedback</a> if you have issues.
496
+ </big></big></big></p>
497
+ """)
498
+ gr.HTML(title_html)
499
+
500
+ input_image = gr.Image(label="Input (*.png, *.webp, *.jpeg, *.jpg, *.gif, *.bmp, *.heic)", show_label=True, type="filepath", height=600, elem_id="image-input")
501
+ rotation = gr.Radio([["No rotation", 0], ["⤵ Rotate +90°", 90], ["↩ Return 180°", 180], ["⤴ Rotate -90°", -90]], label="Orientation correction", info="Will apply the following rotation before restoring the image; the AI needs a good orientation to understand the content", value=0, interactive=True, visible=False)
502
+ with gr.Group():
503
+ prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; you can write in any language", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
504
+ prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/badayvedat/LLaVA'"'>LlaVa space</a> to auto-generate the description of your image.")
505
+ upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8], ["x9", 9], ["x10", 10]], label="Upscale factor", info="Resolution x1 to x10", value=2, interactive=True)
506
+ output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extension", value="input", interactive=True)
507
+ allocation = gr.Slider(label="GPU allocation time (in seconds)", info="lower=May abort run, higher=Quota penalty for next runs; only useful for ZeroGPU", value=179, minimum=59, maximum=320, step=1)
508
+
509
+ with gr.Accordion("Pre-denoising (optional)", open=False):
510
+ gamma_correction = gr.Slider(label="Gamma Correction", info = "lower=lighter, higher=darker", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
511
+ denoise_button = gr.Button(value="Pre-denoise")
512
+ denoise_image = gr.Image(label="Denoised image", show_label=True, type="filepath", sources=[], interactive = False, height=600, elem_id="image-s1")
513
+ denoise_information = gr.HTML(value="If present, the denoised image will be used for the restoration instead of the input image.", visible=False)
514
+
515
+ with gr.Accordion("Advanced options", open=False):
516
+ a_prompt = gr.Textbox(label="Additional image description",
517
+ info="Completes the main image description",
518
+ value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R '
519
+ 'camera, hyper detailed photo - realistic maximum detail, 32k, Color '
520
+ 'Grading, ultra HD, extreme meticulous detailing, skin pore detailing, clothing fabric detailing, '
521
+ 'hyper sharpness, perfect without deformations.',
522
+ lines=3)
523
+ n_prompt = gr.Textbox(label="Negative image description",
524
+ info="Disambiguate by listing what the image does NOT represent",
525
+ value='painting, oil painting, illustration, drawing, art, sketch, anime, '
526
+ 'cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, '
527
+ 'worst quality, low quality, frames, watermark, signature, jpeg artifacts, '
528
+ 'deformed, lowres, over-smooth',
529
+ lines=3)
530
+ edm_steps = gr.Slider(label="Steps", info="lower=faster, higher=more details; too many steps create a checker effect", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1)
531
+ num_samples = gr.Slider(label="Num Samples", info="Number of generated results", minimum=1, maximum=4 if not args.use_image_slider else 1
532
+ , value=1, step=1)
533
+ min_size = gr.Slider(label="Minimum size", info="Minimum height, minimum width of the result", minimum=32, maximum=4096, value=1024, step=32)
534
+ downscale = gr.Radio([["/1", 1], ["/2", 2], ["/3", 3], ["/4", 4], ["/5", 5], ["/6", 6], ["/7", 7], ["/8", 8], ["/9", 9], ["/10", 10]], label="Pre-downscale factor", info="Reducing the blurred image reduces the process time", value=1, interactive=True)
535
+ with gr.Row():
536
+ with gr.Column():
537
+ model_select = gr.Radio([["💃 Quality (v0-Q)", "v0-Q"], ["🎯 Fidelity (v0-F)", "v0-F"]], label="Model Selection", info="Pretrained model", value="v0-Q",
538
+ interactive=True)
539
+ with gr.Column():
540
+ color_fix_type = gr.Radio([["None", "None"], ["AdaIn (improve as a photo)", "AdaIn"], ["Wavelet (for JPEG artifacts)", "Wavelet"]], label="Color-Fix Type", info="AdaIn=Improve following a style, Wavelet=For JPEG artifacts", value="AdaIn",
541
+ interactive=True)
542
+ s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0,
543
+ value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)
544
+ s_stage2 = gr.Slider(label="Restoring Guidance Strength", minimum=0., maximum=1., value=1., step=0.05)
545
+ s_stage1 = gr.Slider(label="Pre-denoising Guidance Strength", minimum=-1.0, maximum=6.0, value=-1.0, step=1.0)
546
+ s_churn = gr.Slider(label="S-Churn", minimum=0, maximum=40, value=5, step=1)
547
+ s_noise = gr.Slider(label="S-Noise", minimum=1.0, maximum=1.1, value=1.003, step=0.001)
548
+ with gr.Row():
549
+ with gr.Column():
550
+ linear_CFG = gr.Checkbox(label="Linear CFG", value=True)
551
+ spt_linear_CFG = gr.Slider(label="CFG Start", minimum=1.0,
552
+ maximum=9.0, value=default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.5)
553
+ with gr.Column():
554
+ linear_s_stage2 = gr.Checkbox(label="Linear Restoring Guidance", value=False)
555
+ spt_linear_s_stage2 = gr.Slider(label="Guidance Start", minimum=0.,
556
+ maximum=1., value=0., step=0.05)
557
+ with gr.Column():
558
+ diff_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["fp16 (medium)", "fp16"], ["bf16 (speed)", "bf16"]], label="Diffusion Data Type", value="fp32",
559
+ interactive=True)
560
+ with gr.Column():
561
+ ae_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["bf16 (speed)", "bf16"]], label="Auto-Encoder Data Type", value="fp32",
562
+ interactive=True)
563
+ randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
564
+ seed = gr.Slider(label="Seed", minimum=0, maximum=max_64_bit_int, step=1, randomize=True)
565
+ with gr.Group():
566
+ param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Presetting", value = "Quality")
567
+ restart_button = gr.Button(value="Apply presetting")
568
+
569
+ with gr.Column():
570
+ diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id = "process_button")
571
+ reset_btn = gr.Button(value="🧹 Reinit page", variant="stop", elem_id="reset_button", visible = False)
572
+
573
+ warning = gr.HTML(value = "<center><big>Your computer must <u>not</u> enter standby mode.</big><br/>On Chrome, you can force a tab to stay alive in <code>chrome://discards/</code></center>", visible = False)
574
+ restore_information = gr.HTML(value = "Restart the process to get another result.", visible = False)
575
+ result_slider = ImageSlider(label = 'Comparator', show_label = False, interactive = False, elem_id = "slider1", show_download_button = False)
576
+ result_gallery = gr.Gallery(label = 'Downloadable results', show_label = True, interactive = False, elem_id = "gallery1")
577
+
578
+ gr.Examples(
579
+ examples = [
580
+ [
581
+ "./Examples/Example1.png",
582
+ 0,
583
+ None,
584
+ "Group of people, walking, happy, in the street, photorealistic, 8k, extremely detailled",
585
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
586
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
587
+ 2,
588
+ 1024,
589
+ 1,
590
+ 8,
591
+ 100,
592
+ -1,
593
+ 1,
594
+ 7.5,
595
+ False,
596
+ 42,
597
+ 5,
598
+ 1.003,
599
+ "AdaIn",
600
+ "fp16",
601
+ "bf16",
602
+ 1.0,
603
+ True,
604
+ 4,
605
+ False,
606
+ 0.,
607
+ "v0-Q",
608
+ "input",
609
+ 179
610
+ ],
611
+ [
612
+ "./Examples/Example2.jpeg",
613
+ 0,
614
+ None,
615
+ "La cabeza de un gato atigrado, en una casa, fotorrealista, 8k, extremadamente detallada",
616
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
617
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
618
+ 1,
619
+ 1024,
620
+ 1,
621
+ 1,
622
+ 200,
623
+ -1,
624
+ 1,
625
+ 7.5,
626
+ False,
627
+ 42,
628
+ 5,
629
+ 1.003,
630
+ "Wavelet",
631
+ "fp16",
632
+ "bf16",
633
+ 1.0,
634
+ True,
635
+ 4,
636
+ False,
637
+ 0.,
638
+ "v0-Q",
639
+ "input",
640
+ 179
641
+ ],
642
+ [
643
+ "./Examples/Example3.webp",
644
+ 0,
645
+ None,
646
+ "A red apple",
647
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
648
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
649
+ 1,
650
+ 1024,
651
+ 1,
652
+ 1,
653
+ 200,
654
+ -1,
655
+ 1,
656
+ 7.5,
657
+ False,
658
+ 42,
659
+ 5,
660
+ 1.003,
661
+ "Wavelet",
662
+ "fp16",
663
+ "bf16",
664
+ 1.0,
665
+ True,
666
+ 4,
667
+ False,
668
+ 0.,
669
+ "v0-Q",
670
+ "input",
671
+ 179
672
+ ],
673
+ [
674
+ "./Examples/Example3.webp",
675
+ 0,
676
+ None,
677
+ "A red marble",
678
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
679
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
680
+ 1,
681
+ 1024,
682
+ 1,
683
+ 1,
684
+ 200,
685
+ -1,
686
+ 1,
687
+ 7.5,
688
+ False,
689
+ 42,
690
+ 5,
691
+ 1.003,
692
+ "Wavelet",
693
+ "fp16",
694
+ "bf16",
695
+ 1.0,
696
+ True,
697
+ 4,
698
+ False,
699
+ 0.,
700
+ "v0-Q",
701
+ "input",
702
+ 179
703
+ ],
704
+ ],
705
+ run_on_click = True,
706
+ fn = stage2_process,
707
+ inputs = [
708
+ input_image,
709
+ rotation,
710
+ denoise_image,
711
+ prompt,
712
+ a_prompt,
713
+ n_prompt,
714
+ num_samples,
715
+ min_size,
716
+ downscale,
717
+ upscale,
718
+ edm_steps,
719
+ s_stage1,
720
+ s_stage2,
721
+ s_cfg,
722
+ randomize_seed,
723
+ seed,
724
+ s_churn,
725
+ s_noise,
726
+ color_fix_type,
727
+ diff_dtype,
728
+ ae_dtype,
729
+ gamma_correction,
730
+ linear_CFG,
731
+ linear_s_stage2,
732
+ spt_linear_CFG,
733
+ spt_linear_s_stage2,
734
+ model_select,
735
+ output_format,
736
+ allocation
737
+ ],
738
+ outputs = [
739
+ result_slider,
740
+ result_gallery,
741
+ restore_information,
742
+ reset_btn
743
+ ],
744
+ cache_examples = False,
745
+ )
746
+
747
+ with gr.Row(visible=False):
748
+ gr.Examples(
749
+ examples = [
750
+ [
751
+ "./Examples/Example1.png",
752
+ 0,
753
+ None,
754
+ "Group of people, walking, happy, in the street, photorealistic, 8k, extremely detailled",
755
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
756
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
757
+ 2,
758
+ 1024,
759
+ 1,
760
+ 8,
761
+ 100,
762
+ -1,
763
+ 1,
764
+ 7.5,
765
+ False,
766
+ 42,
767
+ 5,
768
+ 1.003,
769
+ "AdaIn",
770
+ "fp16",
771
+ "bf16",
772
+ 1.0,
773
+ True,
774
+ 4,
775
+ False,
776
+ 0.,
777
+ "v0-Q",
778
+ "input",
779
+ 179
780
+ ],
781
+ [
782
+ "./Examples/Example2.jpeg",
783
+ 0,
784
+ None,
785
+ "La cabeza de un gato atigrado, en una casa, fotorrealista, 8k, extremadamente detallada",
786
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
787
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
788
+ 1,
789
+ 1024,
790
+ 1,
791
+ 1,
792
+ 200,
793
+ -1,
794
+ 1,
795
+ 7.5,
796
+ False,
797
+ 42,
798
+ 5,
799
+ 1.003,
800
+ "Wavelet",
801
+ "fp16",
802
+ "bf16",
803
+ 1.0,
804
+ True,
805
+ 4,
806
+ False,
807
+ 0.,
808
+ "v0-Q",
809
+ "input",
810
+ 179
811
+ ],
812
+ [
813
+ "./Examples/Example3.webp",
814
+ 0,
815
+ None,
816
+ "A red apple",
817
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
818
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
819
+ 1,
820
+ 1024,
821
+ 1,
822
+ 1,
823
+ 200,
824
+ -1,
825
+ 1,
826
+ 7.5,
827
+ False,
828
+ 42,
829
+ 5,
830
+ 1.003,
831
+ "Wavelet",
832
+ "fp16",
833
+ "bf16",
834
+ 1.0,
835
+ True,
836
+ 4,
837
+ False,
838
+ 0.,
839
+ "v0-Q",
840
+ "input",
841
+ 179
842
+ ],
843
+ [
844
+ "./Examples/Example3.webp",
845
+ 0,
846
+ None,
847
+ "A red marble",
848
+ "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
849
+ "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, pixel, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
850
+ 1,
851
+ 1024,
852
+ 1,
853
+ 1,
854
+ 200,
855
+ -1,
856
+ 1,
857
+ 7.5,
858
+ False,
859
+ 42,
860
+ 5,
861
+ 1.003,
862
+ "Wavelet",
863
+ "fp16",
864
+ "bf16",
865
+ 1.0,
866
+ True,
867
+ 4,
868
+ False,
869
+ 0.,
870
+ "v0-Q",
871
+ "input",
872
+ 179
873
+ ],
874
+ ],
875
+ run_on_click = True,
876
+ fn = stage2_process_example,
877
+ inputs = [
878
+ input_image,
879
+ rotation,
880
+ denoise_image,
881
+ prompt,
882
+ a_prompt,
883
+ n_prompt,
884
+ num_samples,
885
+ min_size,
886
+ downscale,
887
+ upscale,
888
+ edm_steps,
889
+ s_stage1,
890
+ s_stage2,
891
+ s_cfg,
892
+ randomize_seed,
893
+ seed,
894
+ s_churn,
895
+ s_noise,
896
+ color_fix_type,
897
+ diff_dtype,
898
+ ae_dtype,
899
+ gamma_correction,
900
+ linear_CFG,
901
+ linear_s_stage2,
902
+ spt_linear_CFG,
903
+ spt_linear_s_stage2,
904
+ model_select,
905
+ output_format,
906
+ allocation
907
+ ],
908
+ outputs = [
909
+ result_slider,
910
+ restore_information,
911
+ reset_btn
912
+ ],
913
+ cache_examples = "lazy",
914
+ )
915
+
916
+ with gr.Row():
917
+ gr.Markdown(claim_md)
918
+
919
+ input_image.upload(fn = check_upload, inputs = [
920
+ input_image
921
+ ], outputs = [
922
+ rotation
923
+ ], queue = False, show_progress = False)
924
+
925
+ denoise_button.click(fn = check_and_update, inputs = [
926
+ input_image
927
+ ], outputs = [warning], queue = False, show_progress = False).success(fn = stage1_process, inputs = [
928
+ input_image,
929
+ gamma_correction,
930
+ diff_dtype,
931
+ ae_dtype
932
+ ], outputs=[
933
+ denoise_image,
934
+ denoise_information
935
+ ])
936
+
937
+ diffusion_button.click(fn = update_seed, inputs = [
938
+ randomize_seed,
939
+ seed
940
+ ], outputs = [
941
+ seed
942
+ ], queue = False, show_progress = False).then(fn = check_and_update, inputs = [
943
+ input_image
944
+ ], outputs = [warning], queue = False, show_progress = False).success(fn=stage2_process, inputs = [
945
+ input_image,
946
+ rotation,
947
+ denoise_image,
948
+ prompt,
949
+ a_prompt,
950
+ n_prompt,
951
+ num_samples,
952
+ min_size,
953
+ downscale,
954
+ upscale,
955
+ edm_steps,
956
+ s_stage1,
957
+ s_stage2,
958
+ s_cfg,
959
+ randomize_seed,
960
+ seed,
961
+ s_churn,
962
+ s_noise,
963
+ color_fix_type,
964
+ diff_dtype,
965
+ ae_dtype,
966
+ gamma_correction,
967
+ linear_CFG,
968
+ linear_s_stage2,
969
+ spt_linear_CFG,
970
+ spt_linear_s_stage2,
971
+ model_select,
972
+ output_format,
973
+ allocation
974
+ ], outputs = [
975
+ result_slider,
976
+ result_gallery,
977
+ restore_information,
978
+ reset_btn
979
+ ]).success(fn = log_information, inputs = [
980
+ result_gallery
981
+ ], outputs = [], queue = False, show_progress = False)
982
+
983
+ result_gallery.change(on_select_result, [result_slider, result_gallery], result_slider)
984
+ result_gallery.select(on_select_result, [result_slider, result_gallery], result_slider)
985
+
986
+ restart_button.click(fn = load_and_reset, inputs = [
987
+ param_setting
988
+ ], outputs = [
989
+ edm_steps,
990
+ s_cfg,
991
+ s_stage2,
992
+ s_stage1,
993
+ s_churn,
994
+ s_noise,
995
+ a_prompt,
996
+ n_prompt,
997
+ color_fix_type,
998
+ linear_CFG,
999
+ linear_s_stage2,
1000
+ spt_linear_CFG,
1001
+ spt_linear_s_stage2,
1002
+ model_select
1003
+ ])
1004
+
1005
+ reset_btn.click(fn = reset, inputs = [], outputs = [
1006
+ input_image,
1007
+ rotation,
1008
+ denoise_image,
1009
+ prompt,
1010
+ a_prompt,
1011
+ n_prompt,
1012
+ num_samples,
1013
+ min_size,
1014
+ downscale,
1015
+ upscale,
1016
+ edm_steps,
1017
+ s_stage1,
1018
+ s_stage2,
1019
+ s_cfg,
1020
+ randomize_seed,
1021
+ seed,
1022
+ s_churn,
1023
+ s_noise,
1024
+ color_fix_type,
1025
+ diff_dtype,
1026
+ ae_dtype,
1027
+ gamma_correction,
1028
+ linear_CFG,
1029
+ linear_s_stage2,
1030
+ spt_linear_CFG,
1031
+ spt_linear_s_stage2,
1032
+ model_select,
1033
+ output_format,
1034
+ allocation
1035
+ ], queue = False, show_progress = False)
1036
+
1037
+ interface.queue(10).launch()
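
For reference, the diffusion_button wiring above chains a fast seed update and an input check ahead of the heavy stage2_process call, using Gradio's event chaining so the expensive step only fires once the cheaper steps succeed. Below is a minimal, self-contained sketch of that .click(...).success(...) pattern; the functions and components are illustrative stand-ins, not part of the committed app.

import gradio as gr

def quick_check(text):
    # Stand-in for the cheap pre-steps (update_seed / check_and_update above)
    if not text:
        raise gr.Error("Please provide an input first")
    return text.strip()

def heavy_step(text):
    # Stand-in for the long-running stage2_process call
    return f"processed: {text}"

with gr.Blocks() as demo:
    box = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    run = gr.Button("Run")
    # .success() only runs if the previous event finished without raising,
    # which is how the heavy step is gated behind the checks above.
    run.click(fn=quick_check, inputs=box, outputs=box, queue=False).success(
        fn=heavy_step, inputs=box, outputs=out)

demo.queue(max_size=10).launch()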
requirements.txt CHANGED
@@ -1,23 +1,48 @@
1
- accelerate==1.6.0
2
- diffusers==0.33.1
3
- transformers==4.46.2
4
  sentencepiece==0.2.0
5
- pillow==11.1.0
6
- av==12.1.0
7
- numpy==1.26.2
8
- scipy==1.12.0
9
- requests==2.31.0
10
- torchsde==0.2.6
11
- torch>=2.0.0
12
- torchvision
13
  torchaudio
14
- einops
15
- opencv-contrib-python
16
- safetensors
17
- huggingface_hub
18
- spaces
19
- decord
20
- imageio_ffmpeg
21
- sageattention
22
- xformers
23
- bitsandbytes
1
+ pydantic==2.10.6
2
+ fastapi==0.115.8
3
+ gradio_imageslider==0.0.20
4
+ gradio_client==1.7.0
5
+ numpy==1.26.4
6
+ requests==2.32.3
7
  sentencepiece==0.2.0
8
+ tokenizers==0.19.1
9
+ torchvision==0.18.1
10
+ uvicorn==0.30.1
11
+ wandb==0.17.4
12
+ httpx==0.27.0
13
+ transformers==4.42.4
14
+ accelerate==0.32.1
15
+ scikit-learn==1.5.1
16
+ einops==0.8.0
17
+ einops-exts==0.0.4
18
+ timm==1.0.7
19
+ openai-clip==1.0.1
20
+ fsspec==2024.6.1
21
+ kornia==0.7.3
22
+ matplotlib==3.9.1
23
+ ninja==1.11.1.1
24
+ omegaconf==2.3.0
25
+ opencv-python==4.10.0.84
26
+ pandas==2.2.2
27
+ pillow==10.4.0
28
+ pytorch-lightning==2.3.3
29
+ PyYAML==6.0.1
30
+ scipy==1.14.0
31
+ tqdm==4.66.4
32
+ triton==2.3.1
33
+ urllib3==2.2.2
34
+ webdataset==0.2.86
35
+ xformers==0.0.27
36
+ facexlib==0.3.0
37
+ k-diffusion==0.1.1.post1
38
+ diffusers==0.30.0
39
+ pillow-heif==0.18.0
40
+
41
+ open-clip-torch==2.24.0
42
+
43
  torchaudio
44
+ easydict==1.13
45
+ fairscale==0.4.13
46
+ torchsde==0.2.6
47
+ huggingface_hub==0.23.3
48
+ gradio
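
After installing the pins above (pip install -r requirements.txt), a quick runtime check can confirm that the resolver actually kept the versions this Space expects. A minimal sketch using the standard-library importlib.metadata follows; the packages listed are only a sample of the pins and can be extended as needed.

from importlib.metadata import PackageNotFoundError, version

# Spot-check a few pins from requirements.txt against the installed environment.
expected = {
    "diffusers": "0.30.0",
    "transformers": "4.42.4",
    "accelerate": "0.32.1",
    "xformers": "0.0.27",
}

for name, want in expected.items():
    try:
        got = version(name)
        status = "OK" if got == want else f"MISMATCH (installed {got})"
    except PackageNotFoundError:
        status = "MISSING"
    print(f"{name}=={want}: {status}")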