Toy committed on
Commit
ba2ae53
Β·
1 Parent(s): c8b6b22

Downgrade versions

Browse files
Files changed (4) hide show
  1. README.md +2 -1
  2. app.py +99 -18
  3. packages.txt +0 -9
  4. requirements.txt +6 -0
README.md CHANGED
@@ -4,7 +4,8 @@ emoji: πŸƒ
4
  colorFrom: gray
5
  colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 5.42.0
 
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
 
4
  colorFrom: gray
5
  colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 4.44.1
8
+ python_version: 3.9.13
9
  app_file: app.py
10
  pinned: false
11
  license: apache-2.0
app.py CHANGED
@@ -1,20 +1,101 @@
 
 
 
1
  import gradio as gr
 
2
 
3
- def greet(name):
4
- """
5
- A simple function that takes a name as input and returns a greeting.
6
- """
7
- return "Hello " + name + "!"
8
-
9
- # Create a Gradio Interface
10
- # - fn: The Python function to wrap with a UI
11
- # - inputs: The type of input component (e.g., "text" for a text box)
12
- # - outputs: The type of output component (e.g., "text" for a text box)
13
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
14
-
15
- # Launch the Gradio application
16
- # When deploying to Hugging Face Spaces, demo.launch() is often not explicitly called in app.py
17
- # as Spaces handles the launching automatically based on the presence of gr.Interface.
18
- # However, for local testing, demo.launch() is necessary.
19
- if __name__ == "__main__":
20
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+ import torch
4
  import gradio as gr
5
+ from diffusers import StableDiffusionPipeline
6
 
# ---- Model setup (module level: runs once at import; downloads weights on first run) ----
# The model repo is overridable via the MODEL_ID environment variable.
# NOTE(review): the default "runwayml/stable-diffusion-v1-5" repo has been removed
# from the Hugging Face Hub; the maintained mirror is
# "stable-diffusion-v1-5/stable-diffusion-v1-5" — confirm before deploying.
MODEL_ID = os.getenv("MODEL_ID", "runwayml/stable-diffusion-v1-5")

# Load pipeline with a dtype that fits the device
# (fp16 halves VRAM on GPU; CPU inference requires fp32).
use_cuda = torch.cuda.is_available()
dtype = torch.float16 if use_cuda else torch.float32
pipe = StableDiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=dtype,
    safety_checker=None,  # set to None to disable built-in safety checker; remove this to enable it
)
if use_cuda:
    pipe.to("cuda")
    # Prefer xformers memory-efficient attention when the package is present;
    # any failure (not installed / unsupported build) falls back to attention
    # slicing, which trades a little speed for a lower peak-VRAM footprint.
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pipe.enable_attention_slicing()
else:
    # CPU path: attention slicing keeps memory use manageable.
    pipe.enable_attention_slicing()
def generate(prompt, negative_prompt, steps, guidance, width, height, seed):
    """Run one text-to-image generation with the module-level pipeline.

    Parameters mirror the Gradio inputs: ``prompt``/``negative_prompt`` (str),
    ``steps`` (int-like), ``guidance`` (float-like), ``width``/``height``
    (pixels; clamped to 256-1024 and snapped to multiples of 8), ``seed``
    (int-like; ``None`` or a negative value requests a random seed).

    Returns:
        tuple: (PIL.Image generated image, int seed actually used).

    Raises:
        gr.Error: if the prompt is empty or whitespace-only.
    """
    if not prompt or prompt.strip() == "":
        raise gr.Error("Please enter a prompt.")

    # force valid dims (diffusers expects multiples of 8); int() first because
    # gr.Slider can deliver floats over the API.
    width = int(max(256, min(1024, (int(width) // 8) * 8)))
    height = int(max(256, min(1024, (int(height) // 8) * 8)))

    # -1 (or None) means "pick a random seed"; the chosen seed is returned so
    # the user can reproduce the result.
    if seed is None or seed < 0:
        seed = random.randint(0, 2**32 - 1)
    # BUGFIX: manual_seed() requires an int; gr.Number / API clients may send
    # a float, which would raise TypeError.
    seed = int(seed)

    generator = torch.Generator(device="cuda" if use_cuda else "cpu").manual_seed(seed)

    result = pipe(
        prompt=prompt,
        negative_prompt=(negative_prompt or None),
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        width=width,
        height=height,
        generator=generator,
    )
    image = result.images[0]
    return image, seed
# ---- UI: two-column Blocks layout (controls left, result right). ----
# The CSS rule hides Gradio's default footer.
with gr.Blocks(css="footer {visibility: hidden}") as demo:
    # Title banner.
    gr.Markdown(
        "# 🌸 Flowerfy — Text → Image\n"
        "Type a prompt, tweak settings, and generate!"
    )

    with gr.Row():
        # Left column: all generation controls.
        with gr.Column(scale=1):
            prompt = gr.Textbox(
                label="Prompt",
                value="a tasteful home flower arrangement in a ceramic vase, soft morning light, minimalist",
                lines=2,
            )
            negative_prompt = gr.Textbox(
                label="Negative prompt (optional)",
                value="low quality, blurry, deformed, text, watermark",
                lines=2,
            )
            steps = gr.Slider(1, 50, value=25, step=1, label="Inference steps")
            guidance = gr.Slider(0.0, 15.0, value=7.5, step=0.1, label="Guidance scale")
            # step=8 keeps slider values on the multiples-of-8 grid that
            # generate() enforces anyway.
            with gr.Row():
                width = gr.Slider(256, 1024, value=768, step=8, label="Width")
                height = gr.Slider(256, 1024, value=768, step=8, label="Height")
            seed = gr.Number(value=-1, precision=0, label="Seed (-1 = random)")
            run = gr.Button("Generate", variant="primary")

        # Right column: outputs (image plus the seed that was actually used).
        with gr.Column(scale=1):
            out_image = gr.Image(label="Result", type="pil")
            out_seed = gr.Number(label="Used seed", interactive=False, precision=0)

    # Wire the button to generate(); api_name exposes this as a named API route.
    run.click(
        fn=generate,
        inputs=[prompt, negative_prompt, steps, guidance, width, height, seed],
        outputs=[out_image, out_seed],
        api_name="generate"
    )

    # Clickable examples that pre-fill the input components above.
    gr.Examples(
        examples=[
            ["a modern ikebana-style flower arrangement with orchids and branches, natural light, elegant, clean background",
             "low quality, extra fingers, text", 25, 7.5, 768, 768, -1],
            ["a cozy bouquet on a kitchen table, soft bokeh background, film photography, muted colors",
             "blurry, noisy, text", 24, 6.5, 768, 512, -1],
            ["close-up macro shot of dew on rose petals, dramatic lighting, high detail",
             "cartoon, lowres", 30, 8.0, 768, 768, -1],
        ],
        inputs=[prompt, negative_prompt, steps, guidance, width, height, seed],
        label="Try these"
    )

# Queue bounds pending jobs so a busy Space degrades gracefully instead of OOMing.
demo.queue(max_size=32).launch()
packages.txt DELETED
@@ -1,9 +0,0 @@
1
- git
2
- git-lfs
3
- ffmpeg
4
- libsm6
5
- libxext6
6
- cmake
7
- rsync
8
- libgl1-mesa-dri
9
- mesa-utils
 
 
 
 
 
 
 
 
 
 
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio>=4.44.0,<5
2
+ diffusers>=0.30.0
3
+ transformers>=4.44.0
4
+ accelerate>=0.33.0
5
+ safetensors>=0.4.3
6
+ torch