victor (HF Staff) and Claude committed
Commit f6a0cc8 · 1 Parent(s): 12a14a2

Refactor app.py: improve code formatting and simplify docstring


- Add proper line breaks and formatting throughout the file
- Simplify the infer() function docstring for better clarity
- Improve code style consistency (spacing, line breaks)
- No functional changes, only formatting improvements

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>

Files changed (1)
  1. app.py +48 -64
app.py CHANGED
@@ -10,56 +10,46 @@ from diffusers.utils import load_image
 
 MAX_SEED = np.iinfo(np.int32).max
 
-pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
+pipe = FluxKontextPipeline.from_pretrained(
+    "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
+).to("cuda")
+
 
 @spaces.GPU
-def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer(
+    input_image,
+    prompt,
+    seed=42,
+    randomize_seed=False,
+    guidance_scale=2.5,
+    steps=28,
+    progress=gr.Progress(track_tqdm=True),
+):
     """
-    Perform image editing using the FLUX.1 Kontext pipeline.
-
-    This function takes an input image and a text prompt to generate a modified version
-    of the image based on the provided instructions. It uses the FLUX.1 Kontext model
-    for contextual image editing tasks.
-
+    Edit an image using AI based on text instructions.
+
     Args:
-        input_image (PIL.Image.Image): The input image to be edited. Will be converted
-            to RGB format if not already in that format.
-        prompt (str): Text description of the desired edit to apply to the image.
-            Examples: "Remove glasses", "Add a hat", "Change background to beach".
-        seed (int, optional): Random seed for reproducible generation. Defaults to 42.
-            Must be between 0 and MAX_SEED (2^31 - 1).
-        randomize_seed (bool, optional): If True, generates a random seed instead of
-            using the provided seed value. Defaults to False.
-        guidance_scale (float, optional): Controls how closely the model follows the
-            prompt. Higher values mean stronger adherence to the prompt but may reduce
-            image quality. Range: 1.0-10.0. Defaults to 2.5.
-        steps (int, optional): Controls how many steps to run the diffusion model for.
-            Range: 1-30. Defaults to 28.
-        progress (gr.Progress, optional): Gradio progress tracker for monitoring
-            generation progress. Defaults to gr.Progress(track_tqdm=True).
-
+        input_image (optional): Path to the image file to edit (if None, generates from text only)
+        prompt (required): Text describing what to change (e.g. "remove glasses", "add a hat", "change background to beach")
+        seed (optional): Random seed for reproducibility (default: 42)
+        randomize_seed (optional): Use random seed instead of fixed seed (default: False)
+        guidance_scale (optional): How closely to follow the prompt, 1.0-10.0 (default: 2.5)
+        steps (optional): Number of generation steps, 1-30 (default: 28)
+        progress (optional): Gradio progress tracker (automatically provided)
+
     Returns:
-        tuple: A 3-tuple containing:
-            - PIL.Image.Image: The generated/edited image
-            - int: The seed value used for generation (useful when randomize_seed=True)
-            - gr.update: Gradio update object to make the reuse button visible
-
+        tuple: (edited_image, seed_used, gradio_update)
+
     Example:
-        >>> edited_image, used_seed, button_update = infer(
-        ...     input_image=my_image,
-        ...     prompt="Add sunglasses",
-        ...     seed=123,
-        ...     randomize_seed=False,
-        ...     guidance_scale=2.5
-        ... )
+        infer(input_image="/path/to/photo.jpg", prompt="Add sunglasses")
     """
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
+
     if input_image:
         input_image = input_image.convert("RGB")
     image = pipe(
-        image=input_image, 
+        image=input_image,
         prompt=prompt,
        guidance_scale=guidance_scale,
         num_inference_steps=steps,
@@ -74,7 +64,8 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
     ).images[0]
     return image, seed, gr.update(visible=True)
 
-css="""
+
+css = """
 #col-container {
     margin: 0 auto;
     max-width: 960px;
@@ -82,11 +73,13 @@ css="""
 """
 
 with gr.Blocks(css=css) as demo:
-    
+
     with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""# FLUX.1 Kontext [dev]
+        gr.Markdown(
+            f"""# FLUX.1 Kontext [dev]
 Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro], [[blog]](https://bfl.ai/announcements/flux-1-kontext-dev) [[model]](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev)
-        """)
+        """
+        )
         with gr.Row():
             with gr.Column():
                 input_image = gr.Image(label="Upload the image for editing", type="pil")
@@ -100,7 +93,7 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
                     )
                    run_button = gr.Button("Run", scale=0)
                 with gr.Accordion("Advanced Settings", open=False):
-                    
+
                     seed = gr.Slider(
                         label="Seed",
                         minimum=0,
@@ -108,40 +101,31 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
                         step=1,
                         value=0,
                     )
-                    
+
                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                    
+
                     guidance_scale = gr.Slider(
                         label="Guidance Scale",
                         minimum=1,
                         maximum=10,
                         step=0.1,
                         value=2.5,
-                    )
-                    
+                    )
+
                     steps = gr.Slider(
-                        label="Steps",
-                        minimum=1,
-                        maximum=30,
-                        value=28,
-                        step=1
+                        label="Steps", minimum=1, maximum=30, value=28, step=1
                     )
-                    
+
             with gr.Column():
                 result = gr.Image(label="Result", show_label=False, interactive=False)
                 reuse_button = gr.Button("Reuse this image", visible=False)
-        
-        
+
     gr.on(
         triggers=[run_button.click, prompt.submit],
-        fn = infer,
-        inputs = [input_image, prompt, seed, randomize_seed, guidance_scale, steps],
-        outputs = [result, seed, reuse_button]
-    )
-    reuse_button.click(
-        fn = lambda image: image,
-        inputs = [result],
-        outputs = [input_image]
+        fn=infer,
+        inputs=[input_image, prompt, seed, randomize_seed, guidance_scale, steps],
+        outputs=[result, seed, reuse_button],
     )
+    reuse_button.click(fn=lambda image: image, inputs=[result], outputs=[input_image])
 
-demo.launch(mcp_server=True)
+demo.launch(mcp_server=True)
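
For context on what the reformatted code does end to end, here is a minimal local sketch of the same pipeline call outside Gradio and Spaces. It assumes a CUDA GPU, access to the gated FLUX.1-Kontext-dev weights, and a hypothetical input file my_photo.png; the explicit generator mirrors the app's seed handling, which sits in the unchanged lines the diff elides.

    # Minimal local sketch (assumptions: CUDA GPU, gated model access,
    # hypothetical file "my_photo.png"); not the Space's exact code path.
    import torch
    from diffusers import FluxKontextPipeline
    from diffusers.utils import load_image

    pipe = FluxKontextPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
    ).to("cuda")

    image = pipe(
        image=load_image("my_photo.png").convert("RGB"),  # same RGB coercion as infer()
        prompt="Add sunglasses",
        guidance_scale=2.5,
        num_inference_steps=28,
        generator=torch.Generator(device="cuda").manual_seed(42),  # fixed seed
    ).images[0]
    image.save("edited.png")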
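
The docstring simplification is likely not only cosmetic: because the app launches with demo.launch(mcp_server=True), Gradio also exposes infer as an MCP tool and derives the tool description from this docstring, so a flat Args list reads better to tool clients than the long Google-style original. A sketch of calling the deployed endpoint with gradio_client; the Space ID, file name, and api_name below are assumptions, not taken from the diff.

    # Hedged sketch of a programmatic call via gradio_client.
    from gradio_client import Client, handle_file

    client = Client("black-forest-labs/FLUX.1-Kontext-Dev")  # hypothetical Space ID
    result_path, used_seed, _ = client.predict(
        handle_file("my_photo.png"),  # input_image (hypothetical local file)
        "Add sunglasses",             # prompt
        0,                            # seed (ignored when randomize_seed=True)
        True,                         # randomize_seed
        2.5,                          # guidance_scale
        28,                           # steps
        api_name="/infer",            # default endpoint name for fn=infer (assumed)
    )
    print(result_path, used_seed)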