import os
import random

import torch
import gradio as gr
from diffusers import StableDiffusionPipeline

MODEL_ID = os.getenv("MODEL_ID", "runwayml/stable-diffusion-v1-5")

# Use half precision on GPU, full precision on CPU.
use_cuda = torch.cuda.is_available()
dtype = torch.float16 if use_cuda else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=dtype,
    safety_checker=None,
)
if use_cuda:
    pipe.to("cuda")
    # Prefer xformers memory-efficient attention when it is installed;
    # otherwise fall back to attention slicing.
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pipe.enable_attention_slicing()
else:
    pipe.enable_attention_slicing()
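# Note: on GPUs with limited VRAM, diffusers also offers
# `pipe.enable_model_cpu_offload()` (requires `accelerate`) as an alternative
# memory saver; it is not enabled here and would replace the explicit
# `pipe.to("cuda")` call above.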
def generate(prompt, negative_prompt, steps, guidance, width, height, seed):
    if not prompt or prompt.strip() == "":
        raise gr.Error("Please enter a prompt.")

    # Clamp dimensions to the supported range and snap them to multiples of 8,
    # as required by the Stable Diffusion UNet/VAE.
    width = int(max(256, min(1024, (width // 8) * 8)))
    height = int(max(256, min(1024, (height // 8) * 8)))

    # A missing or negative seed means "pick one at random"; the seed that was
    # actually used is returned so the result can be reproduced.
    if seed is None or seed < 0:
        seed = random.randint(0, 2**32 - 1)
    seed = int(seed)

    generator = torch.Generator(device="cuda" if use_cuda else "cpu").manual_seed(seed)

    result = pipe(
        prompt=prompt,
        negative_prompt=(negative_prompt or None),
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        width=width,
        height=height,
        generator=generator,
    )
    image = result.images[0]
    return image, seed
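# Quick sanity check without the UI (illustrative values; the output path is a
# placeholder):
#   image, used_seed = generate("a vase of peonies", "", 20, 7.0, 512, 512, -1)
#   image.save("output.png")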
with gr.Blocks(css="footer {visibility: hidden}") as demo:
    gr.Markdown(
        "# 🌸 Flowerfy – Text → Image\n"
        "Type a prompt, tweak settings, and generate!"
    )
    with gr.Row():
        with gr.Column(scale=1):
            prompt = gr.Textbox(
                label="Prompt",
                value="a tasteful home flower arrangement in a ceramic vase, soft morning light, minimalist",
                lines=2,
            )
            negative_prompt = gr.Textbox(
                label="Negative prompt (optional)",
                value="low quality, blurry, deformed, text, watermark",
                lines=2,
            )
            steps = gr.Slider(1, 50, value=25, step=1, label="Inference steps")
            guidance = gr.Slider(0.0, 15.0, value=7.5, step=0.1, label="Guidance scale")
            with gr.Row():
                width = gr.Slider(256, 1024, value=768, step=8, label="Width")
                height = gr.Slider(256, 1024, value=768, step=8, label="Height")
            seed = gr.Number(value=-1, precision=0, label="Seed (-1 = random)")
            run = gr.Button("Generate", variant="primary")

        with gr.Column(scale=1):
            out_image = gr.Image(label="Result", type="pil")
            out_seed = gr.Number(label="Used seed", interactive=False, precision=0)

    run.click(
        fn=generate,
        inputs=[prompt, negative_prompt, steps, guidance, width, height, seed],
        outputs=[out_image, out_seed],
        api_name="generate",
    )

    gr.Examples(
        examples=[
            ["a modern ikebana-style flower arrangement with orchids and branches, natural light, elegant, clean background",
             "low quality, extra fingers, text", 25, 7.5, 768, 768, -1],
            ["a cozy bouquet on a kitchen table, soft bokeh background, film photography, muted colors",
             "blurry, noisy, text", 24, 6.5, 768, 512, -1],
            ["close-up macro shot of dew on rose petals, dramatic lighting, high detail",
             "cartoon, lowres", 30, 8.0, 768, 768, -1],
        ],
        inputs=[prompt, negative_prompt, steps, guidance, width, height, seed],
        label="Try these",
    )

demo.queue(max_size=32).launch()
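# Because run.click() is registered with api_name="generate", the endpoint can
# also be called programmatically. A minimal client sketch (assumes the app is
# reachable at the default local URL and that the `gradio_client` package is
# installed):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   image_path, used_seed = client.predict(
#       "a bouquet of wildflowers", "blurry, text", 25, 7.5, 768, 768, -1,
#       api_name="/generate",
#   )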