Toy
committed on
Commit
·
ba2ae53
1
Parent(s):
c8b6b22
Downgrade versions
Browse files- README.md +2 -1
- app.py +99 -18
- packages.txt +0 -9
- requirements.txt +6 -0
README.md
CHANGED
@@ -4,7 +4,8 @@ emoji: π
|
|
4 |
colorFrom: gray
|
5 |
colorTo: indigo
|
6 |
sdk: gradio
|
7 |
-
sdk_version:
|
|
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: apache-2.0
|
|
|
4 |
colorFrom: gray
|
5 |
colorTo: indigo
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 4.44.1
|
8 |
+
python_version: 3.9.13
|
9 |
app_file: app.py
|
10 |
pinned: false
|
11 |
license: apache-2.0
|
app.py
CHANGED
@@ -1,20 +1,101 @@
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
|
|
2 |
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
#
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import random
|
3 |
+
import torch
|
4 |
import gradio as gr
|
5 |
+
from diffusers import StableDiffusionPipeline
|
6 |
|
7 |
+
MODEL_ID = os.getenv("MODEL_ID", "runwayml/stable-diffusion-v1-5")
|
8 |
+
|
9 |
+
# Load pipeline with a dtype that fits the device
|
10 |
+
use_cuda = torch.cuda.is_available()
|
11 |
+
dtype = torch.float16 if use_cuda else torch.float32
|
12 |
+
pipe = StableDiffusionPipeline.from_pretrained(
|
13 |
+
MODEL_ID,
|
14 |
+
torch_dtype=dtype,
|
15 |
+
safety_checker=None, # set to None to disable built-in safety checker; remove this to enable it
|
16 |
+
)
|
17 |
+
if use_cuda:
|
18 |
+
pipe.to("cuda")
|
19 |
+
try:
|
20 |
+
pipe.enable_xformers_memory_efficient_attention()
|
21 |
+
except Exception:
|
22 |
+
pipe.enable_attention_slicing()
|
23 |
+
else:
|
24 |
+
pipe.enable_attention_slicing()
|
25 |
+
|
def generate(prompt, negative_prompt, steps, guidance, width, height, seed):
    """Run one text-to-image generation through the module-level pipeline.

    Args:
        prompt: Text prompt; must be non-empty/non-blank.
        negative_prompt: Optional negative prompt; "" is passed as None.
        steps: Number of denoising steps (coerced to int).
        guidance: Classifier-free guidance scale (coerced to float).
        width, height: Requested size; clamped to [256, 1024] and rounded
            down to a multiple of 8 (diffusers expects multiples of 8).
        seed: RNG seed; None or a negative value selects a random seed.

    Returns:
        Tuple of (PIL image, int seed actually used).

    Raises:
        gr.Error: if the prompt is empty or whitespace-only.
    """
    if not prompt or prompt.strip() == "":
        raise gr.Error("Please enter a prompt.")

    # force valid dims (diffusers expects multiples of 8);
    # int(...) also normalizes float values coming from Gradio sliders
    width = int(max(256, min(1024, (int(width) // 8) * 8)))
    height = int(max(256, min(1024, (int(height) // 8) * 8)))

    if seed is None or seed < 0:
        seed = random.randint(0, 2**32 - 1)
    # Gradio Number widgets may deliver floats; manual_seed requires an int,
    # and returning an int keeps the "Used seed" display clean.
    seed = int(seed)

    generator = torch.Generator(device="cuda" if use_cuda else "cpu").manual_seed(seed)

    result = pipe(
        prompt=prompt,
        negative_prompt=(negative_prompt or None),
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        width=width,
        height=height,
        generator=generator,
    )
    image = result.images[0]
    return image, seed
|
50 |
+
|
51 |
+
with gr.Blocks(css="footer {visibility: hidden}") as demo:
|
52 |
+
gr.Markdown(
|
53 |
+
"# πΈ Flowerfy β Text β Image\n"
|
54 |
+
"Type a prompt, tweak settings, and generate!"
|
55 |
+
)
|
56 |
+
|
57 |
+
with gr.Row():
|
58 |
+
with gr.Column(scale=1):
|
59 |
+
prompt = gr.Textbox(
|
60 |
+
label="Prompt",
|
61 |
+
value="a tasteful home flower arrangement in a ceramic vase, soft morning light, minimalist",
|
62 |
+
lines=2,
|
63 |
+
)
|
64 |
+
negative_prompt = gr.Textbox(
|
65 |
+
label="Negative prompt (optional)",
|
66 |
+
value="low quality, blurry, deformed, text, watermark",
|
67 |
+
lines=2,
|
68 |
+
)
|
69 |
+
steps = gr.Slider(1, 50, value=25, step=1, label="Inference steps")
|
70 |
+
guidance = gr.Slider(0.0, 15.0, value=7.5, step=0.1, label="Guidance scale")
|
71 |
+
with gr.Row():
|
72 |
+
width = gr.Slider(256, 1024, value=768, step=8, label="Width")
|
73 |
+
height = gr.Slider(256, 1024, value=768, step=8, label="Height")
|
74 |
+
seed = gr.Number(value=-1, precision=0, label="Seed (-1 = random)")
|
75 |
+
run = gr.Button("Generate", variant="primary")
|
76 |
+
|
77 |
+
with gr.Column(scale=1):
|
78 |
+
out_image = gr.Image(label="Result", type="pil")
|
79 |
+
out_seed = gr.Number(label="Used seed", interactive=False, precision=0)
|
80 |
+
|
81 |
+
run.click(
|
82 |
+
fn=generate,
|
83 |
+
inputs=[prompt, negative_prompt, steps, guidance, width, height, seed],
|
84 |
+
outputs=[out_image, out_seed],
|
85 |
+
api_name="generate"
|
86 |
+
)
|
87 |
+
|
88 |
+
gr.Examples(
|
89 |
+
examples=[
|
90 |
+
["a modern ikebana-style flower arrangement with orchids and branches, natural light, elegant, clean background",
|
91 |
+
"low quality, extra fingers, text", 25, 7.5, 768, 768, -1],
|
92 |
+
["a cozy bouquet on a kitchen table, soft bokeh background, film photography, muted colors",
|
93 |
+
"blurry, noisy, text", 24, 6.5, 768, 512, -1],
|
94 |
+
["close-up macro shot of dew on rose petals, dramatic lighting, high detail",
|
95 |
+
"cartoon, lowres", 30, 8.0, 768, 768, -1],
|
96 |
+
],
|
97 |
+
inputs=[prompt, negative_prompt, steps, guidance, width, height, seed],
|
98 |
+
label="Try these"
|
99 |
+
)
|
100 |
+
|
101 |
+
demo.queue(max_size=32).launch()
|
packages.txt
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
git
|
2 |
-
git-lfs
|
3 |
-
ffmpeg
|
4 |
-
libsm6
|
5 |
-
libxext6
|
6 |
-
cmake
|
7 |
-
rsync
|
8 |
-
libgl1-mesa-dri
|
9 |
-
mesa-utils
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio>=4.44.0
|
2 |
+
diffusers>=0.30.0
|
3 |
+
transformers>=4.44.0
|
4 |
+
accelerate>=0.33.0
|
5 |
+
safetensors>=0.4.3
|
6 |
+
torch
|