Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -28,19 +28,19 @@ examples = json.loads(open("examples.json").read())
 # (1664, 928), (1472, 1140), (1328, 1328)
 def get_image_size(aspect_ratio):
     if aspect_ratio == "1:1":
-        return
+        return 1328, 1328
     elif aspect_ratio == "16:9":
-        return
+        return 1664, 928
     elif aspect_ratio == "9:16":
-        return
+        return 928, 1664
     elif aspect_ratio == "4:3":
-        return
+        return 1472, 1140
     elif aspect_ratio == "3:4":
-        return
+        return 1140, 1472
     else:
-        return
+        return 1328, 1328
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=60)
 
 def infer(
     prompt,
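Read together, the + lines above make get_image_size return a fixed (width, height) pair per aspect ratio, with the square size as fallback, and the @spaces.GPU(duration=60) decorator requests 60 seconds of ZeroGPU time per call. A minimal sketch of the resulting behaviour; the dict form below is only an equivalent illustration, not what the commit adds:

# Equivalent sketch of the updated helper: aspect ratio -> (width, height) in pixels.
# Ratios outside the listed choices fall back to 1328x1328, matching the final else branch.
ASPECT_SIZES = {
    "1:1": (1328, 1328),
    "16:9": (1664, 928),
    "9:16": (928, 1664),
    "4:3": (1472, 1140),
    "3:4": (1140, 1472),
}

def get_image_size(aspect_ratio):
    return ASPECT_SIZES.get(aspect_ratio, (1328, 1328))

assert get_image_size("16:9") == (1664, 928)
assert get_image_size("unknown") == (1328, 1328)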
@@ -52,21 +52,22 @@ def infer(
     num_inference_steps=50,
     progress=gr.Progress(track_tqdm=True),
 ):
-
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
+
     width, height = get_image_size(aspect_ratio)
 
     print("Generating for prompt:", prompt)
+
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
         width=width,
         height=height,
-        num_inference_steps=
-        true_cfg_scale=
-        generator=torch.Generator(device="cuda").manual_seed(
+        num_inference_steps=num_inference_steps,
+        true_cfg_scale=guidance_scale,
+        generator=torch.Generator(device="cuda").manual_seed(seed)
     ).images[0]
 
     #image.save("example.png")
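Consolidated from the + lines, the generation path after this hunk reads roughly as below. pipe, MAX_SEED, and the full parameter defaults are defined elsewhere in app.py, so the stand-in value and the trimmed signature here are assumptions for illustration only (the progress argument is omitted):

import random

import torch

# Assumed stand-ins for objects defined elsewhere in app.py:
# `pipe` is the preloaded Qwen-Image diffusers pipeline; MAX_SEED is the seed ceiling.
MAX_SEED = 2**31 - 1  # placeholder value; the real constant is set near the top of app.py

def infer(prompt, negative_prompt, seed, randomize_seed, aspect_ratio,
          guidance_scale, num_inference_steps=50):
    # Re-roll the seed when "Randomize seed" is ticked in the UI.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Resolve the selected ratio to concrete pixel dimensions.
    width, height = get_image_size(aspect_ratio)

    # The step count is forwarded as num_inference_steps, guidance_scale as
    # true_cfg_scale, and the seed drives a CUDA random generator.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        true_cfg_scale=guidance_scale,
        generator=torch.Generator(device="cuda").manual_seed(seed),
    ).images[0]
    return image, seed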
@@ -74,8 +75,6 @@ def infer(
     return image, seed
 
 
-
-
 css = """
 #col-container {
     margin: 0 auto;
@@ -83,7 +82,6 @@ css = """
 }
 """
 
-
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         # gr.Markdown('<div style="text-align: center;"><a href="https://huggingface.co/Qwen/Qwen-Image"><img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_logo.png" width="400"/></a></div>')
@@ -102,26 +100,30 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=True,
-            )
 
-
-
-
-
-
-
-
+            with gr.Row():
+                negative_prompt = gr.Text(
+                    label="Negative prompt",
+                    max_lines=1,
+                    placeholder="Enter a negative prompt",
+                    visible=True,
+                )
+
+            with gr.Row():
+
+                seed = gr.Slider(
+                    label="Seed",
+                    minimum=0,
+                    maximum=MAX_SEED,
+                    step=1,
+                    value=0,
+                )
 
-
+            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
            with gr.Row():
                 aspect_ratio = gr.Radio(
-                    label="Image size (ratio
+                    label="Image size (ratio)",
                     choices=["1:1", "16:9", "9:16", "4:3", "3:4"],
                     value="16:9",
                 )
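The diff stops at the layout; the click wiring that sends these controls into infer is unchanged and not shown in this commit. As a rough, hypothetical sketch of how such controls are typically connected in a Gradio Blocks app (run_button, prompt, guidance_scale, and num_inference_steps component names are assumptions, not taken from the diff):

# Hypothetical wiring (not part of this commit): typical Gradio pattern for
# feeding the Advanced Settings values into infer() and displaying the result.
run_button.click(
    fn=infer,
    inputs=[prompt, negative_prompt, seed, randomize_seed,
            aspect_ratio, guidance_scale, num_inference_steps],
    outputs=[result, seed],
)

Returning the seed component as a second output matches infer's return value of (image, seed), so the UI can display the seed that was actually used when randomization is on.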