LPDoctor committed (verified)
Commit e995083 · Parent: 57721b8

revert commit 65ec9a5

Files changed (1): app.py (+6 -19)
app.py CHANGED
@@ -7,8 +7,6 @@ import numpy as np
 import PIL
 import spaces
 import torch
-
-import diffusers
 from diffusers.models import ControlNetModel
 from diffusers.utils import load_image
 from insightface.app import FaceAnalysis
@@ -55,11 +53,6 @@ pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
     safety_checker=None,
     feature_extractor=None,
 )
-
-# load and disable LCM
-pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
-pipe.disable_lora()
-
 pipe.cuda()
 pipe.load_ip_adapter_instantid(face_adapter)
 pipe.image_proj_model.to("cuda")
@@ -214,14 +207,9 @@ def generate_image(
     seed,
     progress=gr.Progress(track_tqdm=True),
 ):
-
     if prompt is None:
         prompt = "a person"
 
-    # LCM Sceduler Callback
-    pipe.scheduler = diffusers.LCMScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_lora()
-
     # apply the style template
     prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
 
@@ -296,7 +284,6 @@ title = r"""
 
 description = r"""
 <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/InstantID/InstantID' target='_blank'><b>InstantID: Zero-shot Identity-Preserving Generation in Seconds</b></a>.<br>
-
 How to use:<br>
 1. Upload a person image. For multiple person images, we will only detect the biggest face. Make sure face is not too small and not significantly blocked or blurred.
 2. (Optionally) upload another person image as reference pose. If not uploaded, we will use the first person image to extract landmarks. If you use a cropped face at step1, it is recommeneded to upload it to extract a new pose.
@@ -383,17 +370,17 @@ with gr.Blocks(css=css) as demo:
         )
         num_steps = gr.Slider(
             label="Number of sample steps",
-            minimum=2,
-            maximum=20,
+            minimum=20,
+            maximum=100,
             step=1,
-            value=4,
+            value=30,
         )
         guidance_scale = gr.Slider(
             label="Guidance scale",
-            minimum=1.0,
-            maximum=2.0,
+            minimum=0.1,
+            maximum=10.0,
             step=0.1,
-            value=1.0,
+            value=5,
         )
         seed = gr.Slider(
             label="Seed",