OmPrakashSingh1704 committed
Commit c5dcd31
1 Parent(s): fbb1cf8
app.py CHANGED
@@ -56,17 +56,82 @@ with gr.Blocks() as demo:
         )
 
     with gr.TabItem("Edit your Banner"):
-        input_image_editor_component = gr.ImageEditor(
-            label='Image',
-            type='pil',
-            sources=["upload", "webcam"],
-            image_mode='RGB',
-            layers=False,
-            brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"))
-        prompt = gr.Textbox(label="Enter the text to get a good start")
-        out_img = gr.Image()
-        btn = gr.Button()
-        btn.click(Banner.Image2Image, [prompt, input_image_editor_component], out_img)
+        with gr.Row():
+            with gr.Column():
+                input_image_editor_component = gr.ImageEditor(
+                    label='Image',
+                    type='pil',
+                    sources=["upload", "webcam"],
+                    image_mode='RGB',
+                    layers=False,
+                    brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"))
+
+                with gr.Row():
+                    input_text_component = gr.Text(
+                        label="Prompt",
+                        show_label=False,
+                        max_lines=1,
+                        placeholder="Enter your prompt",
+                        container=False,
+                    )
+                    submit_button_component = gr.Button(
+                        value='Submit', variant='primary', scale=0)
+
+                with gr.Accordion("Advanced Settings", open=False):
+                    seed_slicer_component = gr.Slider(
+                        label="Seed",
+                        minimum=0,
+                        maximum=MAX_SEED,
+                        step=1,
+                        value=42,
+                    )
+
+                    randomize_seed_checkbox_component = gr.Checkbox(
+                        label="Randomize seed", value=True)
+
+                    with gr.Row():
+                        strength_slider_component = gr.Slider(
+                            label="Strength",
+                            info="Indicates extent to transform the reference `image`. "
+                                 "Must be between 0 and 1. `image` is used as a starting "
+                                 "point and more noise is added the higher the `strength`.",
+                            minimum=0,
+                            maximum=1,
+                            step=0.01,
+                            value=0.85,
+                        )
+
+                        num_inference_steps_slider_component = gr.Slider(
+                            label="Number of inference steps",
+                            info="The number of denoising steps. More denoising steps "
+                                 "usually lead to a higher quality image at the expense "
+                                 "of slower inference.",
+                            minimum=1,
+                            maximum=50,
+                            step=1,
+                            value=20,
+                        )
+            with gr.Column():
+                output_image_component = gr.Image(
+                    type='pil', image_mode='RGB', label='Generated image', format="png")
+                with gr.Accordion("Debug", open=False):
+                    output_mask_component = gr.Image(
+                        type='pil', image_mode='RGB', label='Input mask', format="png")
+        with gr.Row():
+            submit_button_component.click(
+                fn=Banner.Image2Image,
+                inputs=[
+                    input_image_editor_component,
+                    input_text_component,
+                    seed_slicer_component,
+                    randomize_seed_checkbox_component,
+                    strength_slider_component,
+                    num_inference_steps_slider_component
+                ],
+                outputs=[
+                    output_image_component,
+                    output_mask_component
+                ]
+            )
 
     with gr.TabItem("Upgrade your Banner"):
         img = gr.Image()
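Note: the click handler now forwards the raw `gr.ImageEditor` value as the first input. With `type='pil'`, that value is a dict of PIL images; `I2I` later reads the uploaded photo from `'background'` and the painted mask from `'layers'[0]`. A minimal sketch of the payload shape (values here are illustrative, not part of the commit; only the dict keys are the real contract):

```python
from PIL import Image

# Illustrative value delivered by gr.ImageEditor(type='pil') after the user
# uploads an image and paints with the fixed white brush.
editor_value = {
    "background": Image.new("RGB", (1024, 576)),               # uploaded banner
    "layers": [Image.new("RGBA", (1024, 576), (0, 0, 0, 0))],  # brush strokes (mask source)
    "composite": Image.new("RGB", (1024, 576)),                # background + layers flattened
}
```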
options/Banner.py CHANGED
@@ -8,8 +8,17 @@ def TextImage(prompt, width=1024, height=1024, guidance_scale=3.5,
     img = T2I(prompt, width, height, guidance_scale, num_inference_steps)
     return img
 
-def Image2Image(prompt, image):
-    return I2I(image, prompt)
+# def Image2Image(prompt, image):
+#     return I2I(image, prompt)
+
+def Image2Image(
+    input_image_editor: dict,
+    input_text: str,
+    seed_slicer: int,
+    randomize_seed_checkbox: bool,
+    strength_slider: float,
+    num_inference_steps_slider: int,
+):
+    return I2I(input_image_editor, input_text, seed_slicer,
+               randomize_seed_checkbox, strength_slider,
+               num_inference_steps_slider)
 
 def Image2Image_2(prompt, image, size, num_inference_steps=30):
     return I2I_2(image, prompt, size, num_inference_steps)
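Note: the rewritten wrapper forwards its six arguments to `I2I` positionally, so their order must stay in sync with the `inputs=[...]` list in `app.py`. A hypothetical direct call (assuming an `editor_value` dict shaped as sketched above; not part of the commit):

```python
# Returns (generated_image, input_mask), matching the two outputs in app.py.
result_image, input_mask = Image2Image(
    input_image_editor=editor_value,   # dict from gr.ImageEditor
    input_text="summer sale banner, bold typography",
    seed_slicer=42,
    randomize_seed_checkbox=False,     # keep the run reproducible
    strength_slider=0.85,
    num_inference_steps_slider=20,
)
```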
options/Banner_Model/Image2Image.py CHANGED
@@ -1,38 +1,23 @@
-import imageio
+from typing import Tuple
+
+import requests
+import random, os
 import numpy as np
+import gradio as gr
+import spaces
+import torch
 from PIL import Image
-import torch, random
-# from .controlnet_flux import FluxControlNetModel
-# from .transformer_flux import FluxTransformer2DModel
-# from .pipeline_flux_controlnet_inpaint import FluxControlNetInpaintingPipeline
-from typing import Tuple
 from diffusers import FluxInpaintPipeline
-import gradio as gr
-
-DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
-print(f"Using device for I2I: {DEVICE}")
+from huggingface_hub import login
+login(token=os.getenv("TOKEN"))
 
-# # Load the inpainting pipeline
-
-# def resize_image(image, height, width):
-#     """Resize image tensor to the desired height and width."""
-#     return torch.nn.functional.interpolate(image, size=(height, width), mode='nearest')
-
-
-# def dummy(img):
-#     """Save the composite image and generate a mask from the alpha channel."""
-#     imageio.imwrite("output_image.png", img["composite"])
-
-#     # Extract alpha channel from the first layer to create the mask
-#     alpha_channel = img["layers"][0][:, :, 3]
-#     mask = np.where(alpha_channel == 0, 0, 255).astype(np.uint8)
-
-#     return img["background"], mask
 MAX_SEED = np.iinfo(np.int32).max
+IMAGE_SIZE = 1024
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
 def resize_image_dimensions(
     original_resolution_wh: Tuple[int, int],
-    maximum_dimension: int = 1024
+    maximum_dimension: int = IMAGE_SIZE
 ) -> Tuple[int, int]:
     width, height = original_resolution_wh
 
@@ -55,20 +40,19 @@ def resize_image_dimensions(
     return new_width, new_height
 
 
-# @spaces.GPU(duration=100)
+@spaces.GPU(duration=100)
 def I2I(
-    input_image_editor,
+    input_image_editor: dict,
     input_text: str,
-    seed_slicer: int = 42,
-    randomize_seed_checkbox: bool = True,
-    strength_slider: float = 0.85,
-    num_inference_steps_slider: int = 20,
-    progress=gr.Progress(track_tqdm=True)):
-    pipe = FluxInpaintPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
+    seed_slicer: int,
+    randomize_seed_checkbox: bool,
+    strength_slider: float,
+    num_inference_steps_slider: int,
+    progress=gr.Progress(track_tqdm=True)
+):
     if not input_text:
         gr.Info("Please enter a text prompt.")
         return None, None
-    print(type(input_image_editor), input_image_editor)
 
     image = input_image_editor['background']
    mask = input_image_editor['layers'][0]
@@ -80,6 +64,9 @@ def I2I(
     if not mask:
         gr.Info("Please draw a mask on the image.")
         return None, None
+
+    pipe = FluxInpaintPipeline.from_pretrained(
+        "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to(DEVICE)
 
     width, height = resize_image_dimensions(original_resolution_wh=image.size)
     resized_image = image.resize((width, height), Image.LANCZOS)
@@ -99,63 +86,4 @@ def I2I(
         num_inference_steps=num_inference_steps_slider
     ).images[0]
     print('INFERENCE DONE')
-    imageio.imwrite("output_image.png", result)
-    return result
-
-def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
-    image = image.convert("RGBA")
-    data = image.getdata()
-    new_data = []
-    for item in data:
-        avg = sum(item[:3]) / 3
-        if avg < threshold:
-            new_data.append((0, 0, 0, 0))
-        else:
-            new_data.append(item)
-
-    image.putdata(new_data)
-    return image
-
-
-# def I2I(prompt, image, width=1024, height=1024, guidance_scale=8.0, num_inference_steps=20, strength=0.99):
-
-#     controlnet = FluxControlNetModel.from_pretrained("alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Alpha", torch_dtype=torch.bfloat16)
-#     transformer = FluxTransformer2DModel.from_pretrained(
-#         "black-forest-labs/FLUX.1-dev", subfolder='transformer', torch_dtype=torch.bfloat16
-#     )
-#     pipe = FluxControlNetInpaintingPipeline.from_pretrained(
-#         "black-forest-labs/FLUX.1-dev",
-#         controlnet=controlnet,
-#         transformer=transformer,
-#         torch_dtype=torch.bfloat16
-#     ).to(device)
-#     pipe.transformer.to(torch.bfloat16)
-#     pipe.controlnet.to(torch.bfloat16)
-#     pipe.set_attn_processor(FluxAttnProcessor2_0())
-
-#     img_url, mask = dummy(image)
-
-#     # Resize image and mask to the target dimensions (height x width)
-#     img_url = Image.fromarray(img_url, mode="RGB").resize((width, height))
-#     mask_url = Image.fromarray(mask, mode="L").resize((width, height))
-
-#     # Make sure both image and mask are converted into correct tensors
-#     generator = torch.Generator(device=device).manual_seed(0)
-
-#     # Generate the inpainted image
-#     result = pipe(
-#         prompt=prompt,
-#         height=size[1],
-#         width=size[0],
-#         control_image=image,
-#         control_mask=mask,
-#         num_inference_steps=28,
-#         generator=generator,
-#         controlnet_conditioning_scale=0.9,
-#         guidance_scale=3.5,
-#         negative_prompt="",
-#         true_guidance_scale=3.5
-#     ).images[0]
-
-#     return result
+    return result, resized_mask
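Note: the stretch of `I2I` between the last two hunks is not shown, but the visible pieces pin it down: the function must build `resized_mask` (it is returned at the end), honor the randomize-seed checkbox, and open the `pipe(...)` call whose tail (`num_inference_steps=...`, `.images[0]`) appears as context. A sketch of what that elided section presumably looks like, following the usual diffusers inpainting pattern (an assumption, not the committed code):

```python
# Assumed body between the mask check and the visible tail of the pipe() call.
# Relies on names already in scope inside I2I: mask, width, height,
# randomize_seed_checkbox, seed_slicer, pipe, input_text, resized_image.
resized_mask = mask.resize((width, height), Image.LANCZOS)

if randomize_seed_checkbox:
    seed_slicer = random.randint(0, MAX_SEED)
generator = torch.Generator().manual_seed(seed_slicer)

result = pipe(
    prompt=input_text,
    image=resized_image,
    mask_image=resized_mask,
    width=width,
    height=height,
    strength=strength_slider,
    generator=generator,
    num_inference_steps=num_inference_steps_slider,  # visible context line
).images[0]
```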
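Note: the body of `resize_image_dimensions` between the first two hunks is likewise elided. An aspect-preserving implementation consistent with the visible signature, the `IMAGE_SIZE` cap, and the returned `new_width, new_height` would be (a sketch under those assumptions, not the committed code):

```python
from typing import Tuple

IMAGE_SIZE = 1024  # mirrors the module constant added in this commit

def resize_image_dimensions(
    original_resolution_wh: Tuple[int, int],
    maximum_dimension: int = IMAGE_SIZE
) -> Tuple[int, int]:
    width, height = original_resolution_wh

    # Scale the longer side down to maximum_dimension, keeping aspect ratio.
    if width > height:
        scaling_factor = maximum_dimension / width
    else:
        scaling_factor = maximum_dimension / height
    new_width = int(width * scaling_factor)
    new_height = int(height * scaling_factor)

    # Round down to multiples of 32, the granularity FLUX's VAE expects.
    new_width -= new_width % 32
    new_height -= new_height % 32
    return new_width, new_height
```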
options/Banner_Model/__pycache__/Image2Image.cpython-310.pyc CHANGED
Binary files a/options/Banner_Model/__pycache__/Image2Image.cpython-310.pyc and b/options/Banner_Model/__pycache__/Image2Image.cpython-310.pyc differ
 
options/__pycache__/Banner.cpython-310.pyc CHANGED
Binary files a/options/__pycache__/Banner.cpython-310.pyc and b/options/__pycache__/Banner.cpython-310.pyc differ