import gradio as gr
import numpy as np
import spaces
import torch
import random
from PIL import Image
from diffusers import FluxKontextPipeline
from diffusers.utils import load_image
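
# Largest 32-bit integer, used as the upper bound for seeds.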
MAX_SEED = np.iinfo(np.int32).max
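
# Load the FLUX.1 Kontext editing pipeline once at startup and keep it resident on the GPU.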
pipe = FluxKontextPipeline.from_pretrained(
"black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
).to("cuda")
@spaces.GPU
def infer(
    input_image,
    prompt,
    seed=42,
    randomize_seed=False,
    guidance_scale=2.5,
    steps=28,
    progress=gr.Progress(track_tqdm=True),
):
"""
Edit an image using AI based on text instructions.
Args:
input_image (optional): Path to the image file to edit (if None, generates from text only)
prompt (required): Text describing what to change (e.g. "remove glasses", "add a hat", "change background to beach")
seed (optional): Random seed for reproducibility (default: 42)
randomize_seed (optional): Use random seed instead of fixed seed (default: False)
guidance_scale (optional): How closely to follow the prompt, 1.0-10.0 (default: 2.5)
steps (optional): Number of generation steps, 1-30 (default: 28)
progress (optional): Gradio progress tracker (automatically provided)
Returns:
tuple: (edited_image, seed_used, gradio_update)
Example:
infer(input_image="/path/to/photo.jpg", prompt="Add sunglasses")
"""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    if input_image is not None:
        # Image-to-image editing: condition the pipeline on the uploaded picture.
        input_image = input_image.convert("RGB")
        image = pipe(
            image=input_image,
            prompt=prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=steps,
            generator=torch.Generator().manual_seed(seed),
        ).images[0]
    else:
        # No image supplied: fall back to plain text-to-image generation.
        image = pipe(
            prompt=prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=steps,
            generator=torch.Generator().manual_seed(seed),
        ).images[0]

    return image, seed, gr.update(visible=True)
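
# Constrain the app layout to a centered column.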
css = """
#col-container {
margin: 0 auto;
max-width: 960px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            f"""# FLUX.1 Kontext [dev]
Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro], [[blog]](https://bfl.ai/announcements/flux-1-kontext-dev) [[model]](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev)
        """
        )

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Upload the image for editing", type="pil")
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
                        container=False,
                    )
                    run_button = gr.Button("Run", scale=0)

                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=10,
                        step=0.1,
                        value=2.5,
                    )
                    steps = gr.Slider(
                        label="Steps", minimum=1, maximum=30, value=28, step=1
                    )

            with gr.Column():
                result = gr.Image(label="Result", show_label=False, interactive=False)
                reuse_button = gr.Button("Reuse this image", visible=False)
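    # Run inference on the button click or when the prompt is submitted with Enter.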
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[input_image, prompt, seed, randomize_seed, guidance_scale, steps],
        outputs=[result, seed, reuse_button],
    )
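    # Copy the generated result back into the input slot for iterative editing.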
    reuse_button.click(fn=lambda image: image, inputs=[result], outputs=[input_image])

demo.launch(mcp_server=True)