Spaces:
Running
on
Zero
Running
on
Zero
adaface-neurips
committed on
Commit
•
973f661
1
Parent(s):
b402b3c
add infos of each input field
Browse files- README2.md +1 -1
- app.py +15 -5
- requirements.txt +2 -0
README2.md
CHANGED
@@ -8,7 +8,7 @@ Please refer to our NeurIPS 2024 submission for more details about AdaFace:
|
|
8 |
**AdaFace: A Versatile Face Encoder for Zero-Shot Diffusion Model Personalization**
|
9 |
</br>
|
10 |
|
11 |
-
[![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97
|
12 |
|
13 |
This pipeline uses 4 pretrained models: [Stable Diffusion V1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [AnimateDiff v3](https://github.com/guoyww/animatediff), [ID-Animator](https://github.com/ID-Animator/ID-Animator) and [AdaFace](https://huggingface.co/adaface-neurips/adaface).
|
14 |
|
|
|
8 |
**AdaFace: A Versatile Face Encoder for Zero-Shot Diffusion Model Personalization**
|
9 |
</br>
|
10 |
|
11 |
+
[![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-yellow)](https://huggingface.co/spaces/adaface-neurips/adaface-animate)
|
12 |
|
13 |
This pipeline uses 4 pretrained models: [Stable Diffusion V1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [AnimateDiff v3](https://github.com/guoyww/animatediff), [ID-Animator](https://github.com/ID-Animator/ID-Animator) and [AdaFace](https://huggingface.co/adaface-neurips/adaface).
|
14 |
|
app.py
CHANGED
@@ -229,13 +229,14 @@ with gr.Blocks(css=css) as demo:
|
|
229 |
❗️❗️❗️**Tips:**
|
230 |
- You can upload one or more subject images for generating ID-specific video.
|
231 |
- Try different parameter combinations for the best generation quality.
|
|
|
232 |
"""
|
233 |
)
|
234 |
|
235 |
with gr.Row():
|
236 |
with gr.Column():
|
237 |
files = gr.File(
|
238 |
-
label="Drag
|
239 |
file_types=["image"],
|
240 |
file_count="multiple"
|
241 |
)
|
@@ -245,7 +246,7 @@ with gr.Blocks(css=css) as demo:
|
|
245 |
remove_and_reupload = gr.ClearButton(value="Remove and upload subject images", components=files, size="sm")
|
246 |
|
247 |
init_img_files = gr.File(
|
248 |
-
label="
|
249 |
file_types=["image"],
|
250 |
file_count="multiple"
|
251 |
)
|
@@ -258,6 +259,7 @@ with gr.Blocks(css=css) as demo:
|
|
258 |
|
259 |
init_image_strength = gr.Slider(
|
260 |
label="Init Image Strength",
|
|
|
261 |
minimum=0,
|
262 |
maximum=3,
|
263 |
step=0.25,
|
@@ -265,6 +267,7 @@ with gr.Blocks(css=css) as demo:
|
|
265 |
)
|
266 |
init_image_final_weight = gr.Slider(
|
267 |
label="Final Weight of the Init Image",
|
|
|
268 |
minimum=0,
|
269 |
maximum=0.25,
|
270 |
step=0.025,
|
@@ -277,11 +280,12 @@ with gr.Blocks(css=css) as demo:
|
|
277 |
gen_init = gr.Button(value="Generate 3 new init images")
|
278 |
|
279 |
prompt = gr.Textbox(label="Prompt",
|
280 |
-
|
281 |
-
placeholder="
|
282 |
|
283 |
image_embed_scale = gr.Slider(
|
284 |
label="Image Embedding Scale",
|
|
|
285 |
minimum=0,
|
286 |
maximum=2,
|
287 |
step=0.1,
|
@@ -289,6 +293,7 @@ with gr.Blocks(css=css) as demo:
|
|
289 |
)
|
290 |
attn_scale = gr.Slider(
|
291 |
label="Attention Processor Scale",
|
|
|
292 |
minimum=0,
|
293 |
maximum=2,
|
294 |
step=0.1,
|
@@ -296,6 +301,7 @@ with gr.Blocks(css=css) as demo:
|
|
296 |
)
|
297 |
adaface_id_cfg_scale = gr.Slider(
|
298 |
label="AdaFace Embedding ID CFG Scale",
|
|
|
299 |
minimum=0.5,
|
300 |
maximum=6,
|
301 |
step=0.25,
|
@@ -307,12 +313,15 @@ with gr.Blocks(css=css) as demo:
|
|
307 |
with gr.Accordion(open=False, label="Advanced Options"):
|
308 |
video_length = gr.Slider(
|
309 |
label="video_length",
|
|
|
310 |
minimum=16,
|
311 |
maximum=21,
|
312 |
step=1,
|
313 |
value=16,
|
314 |
)
|
315 |
-
is_adaface_enabled = gr.Checkbox(label="Enable AdaFace",
|
|
|
|
|
316 |
adaface_ckpt_path = gr.Textbox(
|
317 |
label="AdaFace ckpt Path",
|
318 |
placeholder=args.adaface_ckpt_path,
|
@@ -321,6 +330,7 @@ with gr.Blocks(css=css) as demo:
|
|
321 |
|
322 |
adaface_power_scale = gr.Slider(
|
323 |
label="AdaFace Embedding Power Scale",
|
|
|
324 |
minimum=0.7,
|
325 |
maximum=1.3,
|
326 |
step=0.1,
|
|
|
229 |
❗️❗️❗️**Tips:**
|
230 |
- You can upload one or more subject images for generating ID-specific video.
|
231 |
- Try different parameter combinations for the best generation quality.
|
232 |
+
- Technical explanations and demo videos: [Readme](https://huggingface.co/spaces/adaface-neurips/adaface-animate/blob/main/README2.md).
|
233 |
"""
|
234 |
)
|
235 |
|
236 |
with gr.Row():
|
237 |
with gr.Column():
|
238 |
files = gr.File(
|
239 |
+
label="Drag / Select 1 or more photos of a person's face",
|
240 |
file_types=["image"],
|
241 |
file_count="multiple"
|
242 |
)
|
|
|
246 |
remove_and_reupload = gr.ClearButton(value="Remove and upload subject images", components=files, size="sm")
|
247 |
|
248 |
init_img_files = gr.File(
|
249 |
+
label="[Optional] Select 1 image for initialization, or generate 3 images with the button below and select 1",
|
250 |
file_types=["image"],
|
251 |
file_count="multiple"
|
252 |
)
|
|
|
259 |
|
260 |
init_image_strength = gr.Slider(
|
261 |
label="Init Image Strength",
|
262 |
+
info="How much the init image should influence each frame. 0: no influence (scenes are more dynamic), 3: strongest influence (scenes are more static).",
|
263 |
minimum=0,
|
264 |
maximum=3,
|
265 |
step=0.25,
|
|
|
267 |
)
|
268 |
init_image_final_weight = gr.Slider(
|
269 |
label="Final Weight of the Init Image",
|
270 |
+
info="How much the init image should influence the end of the video",
|
271 |
minimum=0,
|
272 |
maximum=0.25,
|
273 |
step=0.025,
|
|
|
280 |
gen_init = gr.Button(value="Generate 3 new init images")
|
281 |
|
282 |
prompt = gr.Textbox(label="Prompt",
|
283 |
+
info="Try something like 'man/woman walking on the beach'",
|
284 |
+
placeholder="woman playing guitar on a boat, ocean waves")
|
285 |
|
286 |
image_embed_scale = gr.Slider(
|
287 |
label="Image Embedding Scale",
|
288 |
+
info="The scale of the ID-Animator image embedding (influencing coarse facial features and poses)",
|
289 |
minimum=0,
|
290 |
maximum=2,
|
291 |
step=0.1,
|
|
|
293 |
)
|
294 |
attn_scale = gr.Slider(
|
295 |
label="Attention Processor Scale",
|
296 |
+
info="The scale of the ID embeddings on the attention (the higher, the more focus on the face, less on the background)" ,
|
297 |
minimum=0,
|
298 |
maximum=2,
|
299 |
step=0.1,
|
|
|
301 |
)
|
302 |
adaface_id_cfg_scale = gr.Slider(
|
303 |
label="AdaFace Embedding ID CFG Scale",
|
304 |
+
info="The scale of the AdaFace ID embeddings (influencing fine facial features and details)",
|
305 |
minimum=0.5,
|
306 |
maximum=6,
|
307 |
step=0.25,
|
|
|
313 |
with gr.Accordion(open=False, label="Advanced Options"):
|
314 |
video_length = gr.Slider(
|
315 |
label="video_length",
|
316 |
+
info="Do not change, otherwise the video will be messy",
|
317 |
minimum=16,
|
318 |
maximum=21,
|
319 |
step=1,
|
320 |
value=16,
|
321 |
)
|
322 |
+
is_adaface_enabled = gr.Checkbox(label="Enable AdaFace",
|
323 |
+
info="Enable AdaFace for better face details. If unchecked, it falls back to ID-Animator (https://huggingface.co/spaces/ID-Animator/ID-Animator).",
|
324 |
+
value=True)
|
325 |
adaface_ckpt_path = gr.Textbox(
|
326 |
label="AdaFace ckpt Path",
|
327 |
placeholder=args.adaface_ckpt_path,
|
|
|
330 |
|
331 |
adaface_power_scale = gr.Slider(
|
332 |
label="AdaFace Embedding Power Scale",
|
333 |
+
info="Increase this scale slightly only if the face is defocused or the face details are not clear",
|
334 |
minimum=0.7,
|
335 |
maximum=1.3,
|
336 |
step=0.1,
|
requirements.txt
CHANGED
@@ -14,3 +14,5 @@ diffusers
|
|
14 |
onnx>=1.16.0
|
15 |
onnxruntime
|
16 |
safetensors
|
|
|
|
|
|
14 |
onnx>=1.16.0
|
15 |
onnxruntime
|
16 |
safetensors
|
17 |
+
spaces
|
18 |
+
protobuf==3.20
|