Toy
committed on
Commit
·
c8b6b22
1
Parent(s):
fc9ec7a
Please work
Browse files
app.py
CHANGED
@@ -1,92 +1,20 @@
|
|
1 |
-
import
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
return _PIPE_CACHE[model_id]
|
22 |
-
pipe = AutoPipelineForText2Image.from_pretrained(
|
23 |
-
model_id, torch_dtype=DTYPE
|
24 |
-
).to(DEVICE)
|
25 |
-
# SDXL-Turbo prefers guidance ~0; SDXL-Base likes ~5-8. We'll expose a slider.
|
26 |
-
_PIPE_CACHE[model_id] = pipe
|
27 |
-
return pipe
|
28 |
-
|
29 |
-
def generate(model_choice, prompt, steps, guidance, width, height, seed):
    """Run the selected SDXL pipeline on *prompt*.

    Returns a ``(PIL image, summary string)`` pair, or ``(None, message)``
    when the prompt is empty.
    """
    # Refuse to load/run a model for an empty prompt.
    if not prompt or not prompt.strip():
        return None, "Please enter a prompt."

    id_by_choice = {
        "SDXL-Turbo (fast)": "stabilityai/sdxl-turbo",
        "SDXL-Base 1.0 (quality)": "stabilityai/stable-diffusion-xl-base-1.0",
    }
    pipeline = get_pipe(id_by_choice[model_choice])

    # A non-negative seed makes the run reproducible; otherwise the
    # pipeline draws its own randomness.
    generator = None
    if seed is not None and seed >= 0:
        # Seed on the device the pipeline actually executes on, when exposed.
        exec_device = getattr(pipeline, "_execution_device", DEVICE)
        generator = torch.Generator(device=exec_device).manual_seed(int(seed))

    result = pipeline(
        prompt=prompt,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        width=int(width),
        height=int(height),
        generator=generator,
    )
    summary = (
        f"Model: {model_choice} | Steps: {steps} | Guidance: {guidance}"
        f" | Size: {width}x{height} | Seed: {seed}"
    )
    return result.images[0], summary
|
53 |
-
|
54 |
-
# --- UI definition -----------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🌸 Flowerfy — Text → Flower Arrangement")
    gr.Markdown("Type a description and generate a floral arrangement image. Toggle **Turbo** for speed or **Base** for quality.")

    with gr.Row():
        with gr.Column(scale=1):
            model_choice = gr.Radio(
                ["SDXL-Turbo (fast)", "SDXL-Base 1.0 (quality)"],
                value="SDXL-Turbo (fast)", label="Model"
            )
            prompt = gr.Textbox(
                label="Prompt",
                value="airy garden-style flower arrangement, pastel roses and eucalyptus, soft natural light"
            )
            with gr.Row():
                steps = gr.Slider(1, 40, step=1, value=4, label="Inference steps (Turbo: 1–4, Base: ~30)")
                guidance = gr.Slider(0.0, 10.0, step=0.1, value=0.0, label="Guidance (Turbo≈0, Base≈6)")
            with gr.Row():
                width = gr.Slider(512, 1024, step=64, value=1024, label="Width")
                height = gr.Slider(512, 1024, step=64, value=1024, label="Height")
            seed = gr.Number(value=42, label="Seed (≥0 makes it repeatable)")
            go = gr.Button("Generate 🌼", variant="primary")

        with gr.Column(scale=1):
            out_img = gr.Image(label="Result", type="pil")
            meta = gr.Textbox(label="Run details", interactive=False)

    def tweak_defaults(choice):
        """Return sensible (steps, guidance) defaults when the model is toggled."""
        if choice.startswith("SDXL-Turbo"):
            return 4, 0.0
        # SDXL-Base
        return 30, 6.0

    model_choice.change(fn=tweak_defaults, inputs=model_choice, outputs=[steps, guidance])
    go.click(generate, inputs=[model_choice, prompt, steps, guidance, width, height, seed],
             outputs=[out_img, meta], api_name="generate")

# BUG FIX: Gradio 4.x removed the `concurrency_count` argument from
# Blocks.queue(); passing it raises a TypeError at startup and the Space
# never launches. `max_size` is still supported. (Per-event concurrency is
# now configured via `default_concurrency_limit` / `concurrency_limit`.)
demo.queue(max_size=12).launch()
|
|
|
1 |
+
import gradio as gr
|
2 |
+
|
3 |
+
def greet(name):
    """Return a greeting for *name*.

    Args:
        name: The name to greet; interpolated into the message.

    Returns:
        The string ``"Hello <name>!"``.
    """
    # f-string interpolation is the idiomatic form (and avoids a TypeError
    # that plain `+` concatenation would raise for non-str inputs).
    return f"Hello {name}!"
|
8 |
+
|
9 |
+
# Wire `greet` into a minimal UI: one text box in, one text box out.
demo = gr.Interface(greet, "text", "text")

# Hugging Face Spaces auto-launches an app that defines a gr.Interface,
# so launch() is only needed for local runs — hence the __main__ guard.
if __name__ == "__main__":
    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|