Spaces:
Runtime error
Runtime error
add blocks interface
Browse files
app.py
CHANGED
|
@@ -221,8 +221,50 @@ def inference(text, init_image, skip_timesteps, clip_guidance_scale, tv_scale, r
|
|
| 221 |
writer.close()
|
| 222 |
return img, 'video.mp4'
|
| 223 |
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 221 |
writer.close()
|
| 222 |
return img, 'video.mp4'
|
| 223 |
|
| 224 |
# Gradio Blocks UI for the CLIP Guided Diffusion faces model.
# Lays out the prompt/controls, wires the Run button to `inference`
# (defined above in this file), and launches the app with queueing.
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # CLIP Guided Diffusion Faces Model
        ### by [Alex Spirin](https://linktr.ee/devdef)
        Gradio Blocks demo for CLIP Guided Diffusion. To use it, simply add your text, or click one of the examples to load them.
        Based on the original [Space](https://huggingface.co/spaces/EleutherAI/clip-guided-diffusion) by akhaliq.
        """)

    # Text prompt row.
    with gr.Row():
        text = gr.Textbox(placeholder="Enter a description of a face", label='Text prompt')

    # Guidance-strength controls (left) and sampling controls (right).
    with gr.Row():
        with gr.Column():
            clip_guidance_scale = gr.Slider(minimum=0, maximum=3000, step=1, value=600, label="Prompt strength")
            tv_scale = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Smoothness")
            range_scale = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Compress color range")

        with gr.Column():
            timestep_respacing = gr.Slider(minimum=25, maximum=100, step=1, value=50, label="timestep respacing")
            cutn = gr.Slider(minimum=4, maximum=32, step=1, value=16, label="cutn")
            seed = gr.Number(value=0, label="Seed")

    # Optional image inputs: an init image and an image prompt, each with
    # a slider controlling how strongly the output should follow it.
    with gr.Row():
        with gr.Column():
            init_image = gr.Image(source="upload", label='initial image (optional)')
            init_scale = gr.Slider(minimum=0, maximum=45, step=1, value=10, label="Look like the image above")

        with gr.Column():
            image_prompts = gr.Image(source="upload", label='image prompt (optional)')
            skip_timesteps = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Look like the image above")

    with gr.Row():
        run_button = gr.Button("Run!")

    # Results section: final frame plus the progress video produced by `inference`.
    with gr.Row():
        gr.Markdown(
            """
            # Results
            """)
    with gr.Row():
        output_image = gr.Image(label='Output image', type='numpy')
        output_video = gr.Video(label='Output video')

    result_components = [output_image, output_video]

    # NOTE(review): input order here must match the `inference` signature — confirm
    # against the definition above (text, init_image, skip_timesteps, ...).
    run_button.click(
        inference,
        inputs=[text, init_image, skip_timesteps, clip_guidance_scale, tv_scale,
                range_scale, init_scale, seed, image_prompts, timestep_respacing, cutn],
        outputs=result_components,
    )

demo.launch(enable_queue=True)