Gemini899 committed on
Commit
f53e43a
·
verified ·
1 Parent(s): 5b880ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -28
app.py CHANGED
@@ -49,41 +49,29 @@ def process_images(image,prompt="a girl",strength=0.75,seed=0,inference_step=4,p
49
  progress(0, desc="Starting")
50
 
51
 
52
def process_img2img(image, prompt="a person", strength=0.75, seed=0, num_inference_steps=4):
    """Run the module-level Flux img2img pipeline on *image*.

    The input is resized to a pipeline-friendly working resolution
    (a multiple of 32), denoised, then resized back to the pre-rounding
    "fit" size when the rounding changed the dimensions.

    Args:
        image: Input PIL image, or None (returns None immediately).
        prompt: Text prompt passed to the diffusion pipeline.
        strength: Img2img denoising strength.
        seed: Seed for the torch generator, for reproducible output.
        num_inference_steps: Number of diffusion steps.

    Returns:
        The generated PIL image, or None when no input image was given.
    """
    # Guard: the UI may invoke this with no image selected.
    if image is None:
        print("empty input image returned")
        return None

    # Deterministic generation for a given seed.
    generator = torch.Generator(device).manual_seed(seed)

    # Fit the image into the supported size range, then round both
    # dimensions to a multiple of 32 as the pipeline requires.
    fit_width, fit_height = convert_to_fit_size(image.size)
    width, height = adjust_to_multiple_of_32(fit_width, fit_height)
    image = image.resize((width, height), Image.LANCZOS)

    # More parameters: see
    # https://huggingface.co/docs/diffusers/api/pipelines/flux
    output = pipe(
        prompt=prompt,
        image=image,
        generator=generator,
        strength=strength,
        width=width,
        height=height,
        guidance_scale=0,
        num_inference_steps=num_inference_steps,
        max_sequence_length=256,
    )

    pil_image = output.images[0]
    new_width, new_height = pil_image.size

    # Undo the multiple-of-32 rounding so the result matches the fit size.
    if (new_width != fit_width) or (new_height != fit_height):
        return pil_image.resize((fit_width, fit_height), Image.LANCZOS)
    return pil_image
82
-
83
- output = process_img2img(image,prompt,strength,seed,inference_step)
84
-
85
- #print("end process_images")
86
  return output
 
87
 
88
 
89
  def read_file(path: str) -> str:
 
49
  progress(0, desc="Starting")
50
 
51
 
52
def process_img2img(image, prompt="a person", strength=0.75, seed=0, num_inference_steps=4):
    """Img2img generation via the module-level Flux pipeline.

    Resizes the input to a multiple-of-32 working resolution, runs the
    pipeline, and restores the pre-rounding fit size before returning.
    Returns None when no input image is supplied.
    """
    if image is None:
        print("empty input image returned")
        return None

    # Seeded generator so the same inputs reproduce the same output.
    rng = torch.Generator(device).manual_seed(seed)

    # "fit" honours the supported size limits; the working size rounds
    # each dimension up/down to a multiple of 32 for the pipeline.
    fit_w, fit_h = convert_to_fit_size(image.size)
    work_w, work_h = adjust_to_multiple_of_32(fit_w, fit_h)

    result = pipe(
        prompt=prompt,
        image=image.resize((work_w, work_h), Image.LANCZOS),
        generator=rng,
        strength=strength,
        width=work_w,
        height=work_h,
        guidance_scale=0,
        num_inference_steps=num_inference_steps,
        max_sequence_length=256,
    ).images[0]

    # Snap back to the fit size when the rounding changed the dimensions.
    if result.size != (fit_w, fit_h):
        result = result.resize((fit_w, fit_h), Image.LANCZOS)
    return result
71
+
72
+ output = process_img2img(image, prompt, strength, seed, inference_step)
 
 
73
  return output
74
+
75
 
76
 
77
  def read_file(path: str) -> str: