Gemini899 committed
Commit 123ae10 · verified
Parent: f88cfbb

Update app.py

Convert non-RGB inputs (e.g. WebP with alpha or palette modes) to RGB before inference, remove dead code and stray blank lines, normalize spacing, and fix a doubled gr.HTML() call around the footer.

Files changed (1): app.py (+40 -61)
app.py CHANGED
@@ -13,18 +13,16 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 pipe = FluxImg2ImgPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(device)
 
-
-
 def sanitize_prompt(prompt):
-  # Allow only alphanumeric characters, spaces, and basic punctuation
-  allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
-  sanitized_prompt = allowed_chars.sub("", prompt)
-  return sanitized_prompt
+    # Allow only alphanumeric characters, spaces, and basic punctuation
+    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
+    sanitized_prompt = allowed_chars.sub("", prompt)
+    return sanitized_prompt
 
-def convert_to_fit_size(original_width_and_height, maximum_size = 2048):
-  width, height =original_width_and_height
+def convert_to_fit_size(original_width_and_height, maximum_size=2048):
+    width, height = original_width_and_height
     if width <= maximum_size and height <= maximum_size:
-    return width,height
+        return width, height
 
     if width > height:
         scaling_factor = maximum_size / width
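The two helpers above can be exercised on their own. A minimal sketch follows; the lower half of convert_to_fit_size (the else branch and int() rounding) is not visible in the hunk and is assumed here, and the sample inputs are illustrative:

import re

def sanitize_prompt(prompt):
    # Keep only letters, digits, whitespace, and . , ! ? -
    return re.sub(r"[^a-zA-Z0-9\s.,!?-]", "", prompt)

def convert_to_fit_size(original_width_and_height, maximum_size=2048):
    # Shrink the longer edge to maximum_size, preserving aspect ratio
    width, height = original_width_and_height
    if width <= maximum_size and height <= maximum_size:
        return width, height
    if width > height:
        scaling_factor = maximum_size / width
    else:  # assumed branch, mirrors the visible one
        scaling_factor = maximum_size / height
    return int(width * scaling_factor), int(height * scaling_factor)

print(sanitize_prompt("a girl <photo>!"))  # -> "a girl photo!"
print(convert_to_fit_size((4000, 3000)))   # -> (2048, 1536)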
@@ -40,26 +38,26 @@ def adjust_to_multiple_of_32(width: int, height: int):
     height = height - (height % 32)
     return width, height
 
-
-
-
 @spaces.GPU(duration=120)
-def process_images(image,prompt="a girl",strength=0.75,seed=0,inference_step=4,progress=gr.Progress(track_tqdm=True)):
-    #print("start process_images")
+def process_images(image, prompt="a girl", strength=0.75, seed=0, inference_step=4, progress=gr.Progress(track_tqdm=True)):
     progress(0, desc="Starting")
 
-
     def process_img2img(image, prompt="a person", strength=0.75, seed=0, num_inference_steps=4):
         if image is None:
            print("empty input image returned")
            return None
+
+        # Ensure image is in RGB mode (this helps with WebP and other formats)
+        if image.mode != "RGB":
+            image = image.convert("RGB")
+
         generator = torch.Generator(device).manual_seed(seed)
         fit_width, fit_height = convert_to_fit_size(image.size)
         width, height = adjust_to_multiple_of_32(fit_width, fit_height)
         image = image.resize((width, height), Image.LANCZOS)
 
         output = pipe(prompt=prompt, image=image, generator=generator, strength=strength, width=width, height=height,
-            guidance_scale=0, num_inference_steps=num_inference_steps, max_sequence_length=256)
+                      guidance_scale=0, num_inference_steps=num_inference_steps, max_sequence_length=256)
 
         pil_image = output.images[0]
         new_width, new_height = pil_image.size
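The substantive change in this hunk is the RGB guard: PIL can open WebP and PNG files in modes like "RGBA" or "P", and the commit's own comment notes that converting them up front helps with such formats. A minimal sketch of the preprocessing path; the file name is hypothetical, and adjust_to_multiple_of_32 is reproduced from the context lines:

from PIL import Image
import torch

def adjust_to_multiple_of_32(width: int, height: int):
    # Round each dimension down to the nearest multiple of 32
    width = width - (width % 32)
    height = height - (height % 32)
    return width, height

image = Image.open("input.webp")   # hypothetical input file
if image.mode != "RGB":            # the guard added in this commit
    image = image.convert("RGB")   # e.g. RGBA/P -> 3-channel RGB

width, height = adjust_to_multiple_of_32(1536, 1023)  # -> (1536, 992)
image = image.resize((width, height), Image.LANCZOS)

# A fixed seed makes the sampling reproducible across runs
generator = torch.Generator("cpu").manual_seed(42)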
@@ -72,16 +70,12 @@ def process_images(image,prompt="a girl",strength=0.75,seed=0,inference_step=4,progress=gr.Progress(track_tqdm=True)):
     output = process_img2img(image, prompt, strength, seed, inference_step)
     return output
 
-
-
 def read_file(path: str) -> str:
     with open(path, 'r', encoding='utf-8') as f:
         content = f.read()
-
     return content
 
-
-css="""
+css = """
 #col-left {
     margin: 0 auto;
     max-width: 640px;
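Outside the UI, the whole inference path reduces to a short standalone script. A minimal sketch, assuming a CUDA device, the FLUX.1-schnell weights, and a hypothetical input.jpg; the call mirrors the pipe(...) invocation above:

import torch
from PIL import Image
from diffusers import FluxImg2ImgPipeline

pipe = FluxImg2ImgPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to("cuda")

image = Image.open("input.jpg").convert("RGB").resize((1024, 1024), Image.LANCZOS)
output = pipe(
    prompt="a person",
    image=image,
    generator=torch.Generator("cuda").manual_seed(0),
    strength=0.75,
    width=1024,
    height=1024,
    guidance_scale=0,        # schnell is a distilled model; it runs guidance-free
    num_inference_steps=4,
    max_sequence_length=256,
)
output.images[0].save("output.jpg")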
@@ -96,17 +90,14 @@ css="""
     justify-content: center;
     gap:10px
 }
-
 .image {
     width: 128px;
     height: 128px;
     object-fit: cover;
 }
-
 .text {
     font-size: 16px;
 }
-
 """
 
 with gr.Blocks(css=css, elem_id="demo-container") as demo:
@@ -114,49 +105,37 @@ with gr.Blocks(css=css, elem_id="demo-container") as demo:
     gr.HTML(read_file("demo_header.html"))
     gr.HTML(read_file("demo_tools.html"))
     with gr.Row():
-        with gr.Column():
-            image = gr.Image(height=800,sources=['upload','clipboard'],image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
-            with gr.Row(elem_id="prompt-container", equal_height=False):
-                with gr.Row():
-                    prompt = gr.Textbox(label="Prompt",value="a women",placeholder="Your prompt (what you want in place of what is erased)", elem_id="prompt")
-
-            btn = gr.Button("Img2Img", elem_id="run_button",variant="primary")
-
-            with gr.Accordion(label="Advanced Settings", open=False):
-                with gr.Row( equal_height=True):
-                    strength = gr.Number(value=0.75, minimum=0, maximum=0.75, step=0.01, label="strength")
-                    seed = gr.Number(value=100, minimum=0, step=1, label="seed")
-                    inference_step = gr.Number(value=4, minimum=1, step=4, label="inference_step")
-                    id_input=gr.Text(label="Name", visible=False)
-
-        with gr.Column():
-            image_out = gr.Image(height=800,sources=[],label="Output", elem_id="output-img",format="jpg")
-
-
-
+        with gr.Column():
+            image = gr.Image(height=800, sources=['upload','clipboard'], image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
+            with gr.Row(elem_id="prompt-container", equal_height=False):
+                with gr.Row():
+                    prompt = gr.Textbox(label="Prompt", value="a women", placeholder="Your prompt (what you want in place of what is erased)", elem_id="prompt")
+            btn = gr.Button("Img2Img", elem_id="run_button", variant="primary")
+            with gr.Accordion(label="Advanced Settings", open=False):
+                with gr.Row(equal_height=True):
+                    strength = gr.Number(value=0.75, minimum=0, maximum=0.75, step=0.01, label="strength")
+                    seed = gr.Number(value=100, minimum=0, step=1, label="seed")
+                    inference_step = gr.Number(value=4, minimum=1, step=4, label="inference_step")
+                    id_input = gr.Text(label="Name", visible=False)
+        with gr.Column():
+            image_out = gr.Image(height=800, sources=[], label="Output", elem_id="output-img", format="jpg")
 
-
     gr.Examples(
-        examples=[
-            ["examples/draw_input.jpg", "examples/draw_output.jpg","a women ,eyes closed,mouth opened"],
-            ["examples/draw-gimp_input.jpg", "examples/draw-gimp_output.jpg","a women ,eyes closed,mouth opened"],
-            ["examples/gimp_input.jpg", "examples/gimp_output.jpg","a women ,hand on neck"],
-            ["examples/inpaint_input.jpg", "examples/inpaint_output.jpg","a women ,hand on neck"]
-        ]
-        ,
-        inputs=[image,image_out,prompt],
-    )
-    gr.HTML(
-        gr.HTML(read_file("demo_footer.html"))
+        examples=[
+            ["examples/draw_input.jpg", "examples/draw_output.jpg", "a women ,eyes closed,mouth opened"],
+            ["examples/draw-gimp_input.jpg", "examples/draw-gimp_output.jpg", "a women ,eyes closed,mouth opened"],
+            ["examples/gimp_input.jpg", "examples/gimp_output.jpg", "a women ,hand on neck"],
+            ["examples/inpaint_input.jpg", "examples/inpaint_output.jpg", "a women ,hand on neck"]
+        ],
+        inputs=[image, image_out, prompt],
     )
+    gr.HTML(read_file("demo_footer.html"))
     gr.on(
         triggers=[btn.click, prompt.submit],
-        fn = process_images,
-        inputs = [image,prompt,strength,seed,inference_step],
-        outputs = [image_out]
+        fn=process_images,
+        inputs=[image, prompt, strength, seed, inference_step],
+        outputs=[image_out]
     )
 
 if __name__ == "__main__":
     demo.launch(share=True, show_error=True)
-
-
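The gr.on(...) block at the end of the diff is what lets both the button click and the Enter key in the prompt box drive the same handler. A self-contained sketch of that pattern with a stand-in function (the names and labels here are illustrative, not the app's):

import gradio as gr

def run(image, prompt):
    # Stand-in for process_images: just echo the input image back
    return image

with gr.Blocks() as demo:
    image = gr.Image(type="pil", label="Upload")
    prompt = gr.Textbox(label="Prompt")
    btn = gr.Button("Run")
    image_out = gr.Image(label="Output")
    # One handler bound to two triggers
    gr.on(
        triggers=[btn.click, prompt.submit],
        fn=run,
        inputs=[image, prompt],
        outputs=[image_out],
    )

if __name__ == "__main__":
    demo.launch()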