alvarobartt (HF staff) committed
Commit fda85af (1 parent: a714dfd)

Update app.py

Files changed (1): app.py (+22, -12)
app.py CHANGED
@@ -5,6 +5,7 @@ import numpy as np
 import spaces
 import torch
 from diffusers import DiffusionPipeline
+from PIL import Image
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 repo_id = "black-forest-labs/FLUX.1-dev"
@@ -33,9 +34,14 @@ def inference(
 ):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
+    generator = torch.Generator(device=device).manual_seed(seed)
 
-    generator = torch.Generator().manual_seed(seed)
+    progress(0, "Starting image generation...")
 
+    for i in range(1, steps + 1):
+        if i % (steps // 10) == 0:
+            progress(i / steps * 100, f"Processing step {i} of {steps}...")
+
     image = pipeline(
         prompt=prompt,
         guidance_scale=guidance_scale,
@@ -43,9 +49,11 @@ def inference(
         width=width,
         height=height,
         generator=generator,
-        lora_scale=lora_scale,
+        joint_attention_kwargs={"scale": lora_scale},
     ).images[0]
 
+    progress(100, "Completed!")
+
     return image, seed
 
 
@@ -118,14 +126,6 @@ with gr.Blocks(css=css) as demo:
            value=3.5,
        )
 
-       lora_scale = gr.Slider(
-           label="LoRA scale",
-           minimum=0.0,
-           maximum=1.0,
-           step=0.1,
-           value=1.0,
-       )
-
        num_inference_steps = gr.Slider(
            label="Number of inference steps",
            minimum=1,
@@ -134,7 +134,15 @@ with gr.Blocks(css=css) as demo:
            value=30,
        )
 
-    gr.Examples(examples=examples, inputs=[prompt])
+       lora_scale = gr.Slider(
+           label="LoRA scale",
+           minimum=0.0,
+           maximum=1.0,
+           step=0.1,
+           value=1.0,
+       )
+
+    gr.Examples(examples=examples, inputs=[prompt], outputs=[Image.open("./example.png")])
 
     gr.on(
         triggers=[run_button.click, prompt.submit],
@@ -147,8 +155,10 @@ with gr.Blocks(css=css) as demo:
            height,
            guidance_scale,
            num_inference_steps,
+           lora_scale,
        ],
        outputs=[result, seed],
    )
 
-demo.queue().launch()
+demo.queue()
+demo.launch()
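
A note on the LoRA scale change: the old code passed lora_scale= directly to the pipeline call, which is not a keyword the FLUX pipeline call in diffusers accepts; the commit instead forwards it as joint_attention_kwargs={"scale": lora_scale}. For that scale to have any effect, LoRA weights must already be loaded onto the pipeline, which this diff does not show. A minimal sketch under that assumption (the LoRA repository id and the sample prompt/values below are placeholders, not values from this commit):

# Sketch only: pipeline setup implied by the diff; the LoRA repository id and
# the prompt/scale values are placeholders, not taken from this commit.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to(device)
pipeline.load_lora_weights("some-user/some-flux-lora")  # placeholder LoRA repo

image = pipeline(
    prompt="an example prompt",
    guidance_scale=3.5,
    num_inference_steps=30,
    width=1024,
    height=1024,
    generator=torch.Generator(device=device).manual_seed(42),
    # diffusers forwards this dict to the attention processors; for FLUX the
    # "scale" entry is read as the LoRA scale.
    joint_attention_kwargs={"scale": 0.8},
).images[0]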
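
The added progress(...) calls and the steps variable come from parts of inference() that these hunks do not show. In Gradio, such a tracker is usually declared as a gr.Progress parameter with a default value, which Gradio fills in when the event fires. A hedged sketch of what the surrounding signature presumably looks like; every name not visible in the hunks above is an assumption:

import gradio as gr

# Sketch only: a plausible signature for inference() given the progress(...)
# calls added in this commit; parameter names outside the shown hunks are
# assumptions, not taken from the diff.
def inference(
    prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    lora_scale,
    progress=gr.Progress(),  # injected by Gradio when the event runs
):
    # gr.Progress instances are called with a progress value plus a
    # description, matching progress(0, "Starting image generation...") above.
    ...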