Commit 3b3e9b2 · Parent: 3408f7c
Update app.py

app.py CHANGED
@@ -20,7 +20,9 @@ class Model:
 
 models = [
   Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style"),
+  Model("Dreamlike Diffusion 1.0", "dreamlike-art/dreamlike-diffusion-1.0", "dreamlikeart "),
   Model("Archer", "nitrosocke/archer-diffusion", "archer style"),
+  Model("Anything V3", "Linaqruf/anything-v3.0", ""),
   Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style"),
   Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style"),
   Model("Modern Disney", "nitrosocke/modern-disney-diffusion", "modern disney style"),
@@ -46,6 +48,8 @@ models = [
   Model("Avatar", "riccardogiorato/avatar-diffusion", "avatartwow style "),
   Model("Poolsuite", "prompthero/poolsuite", "poolsuite style "),
   Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "),
+  Model("Wavyfusion", "wavymulder/wavyfusion", "wa-vy style "),
+  Model("Analog Diffusion", "wavymulder/Analog-Diffusion", "analog style "),
   Model("Redshift renderer (Cinema4D)", "nitrosocke/redshift-diffusion", "redshift style "),
   Model("Midjourney v4 style", "prompthero/midjourney-v4-diffusion", "mdjrny-v4 style "),
   Model("TrinArt v2", "naclbit/trinart_stable_diffusion_v2"),
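Note: the entries above are plain value objects consumed by the Gradio UI; the two hunks only append new checkpoints to the list. A minimal sketch of the assumed shape, based on the "class Model:" context in the hunk header; the attribute names and the default empty prefix (needed for entries such as TrinArt v2, which passes no prefix) are assumptions, not the exact code from app.py:

# Sketch only: assumed structure of the Model container used by the list above.
class Model:
    def __init__(self, name, path, prefix=""):
        self.name = name      # label shown in the model dropdown
        self.path = path      # Hugging Face Hub repo id passed to from_pretrained()
        self.prefix = prefix  # trigger token(s) prepended to every prompt

models = [
    Model("Dreamlike Diffusion 1.0", "dreamlike-art/dreamlike-diffusion-1.0", "dreamlikeart "),
    Model("Anything V3", "Linaqruf/anything-v3.0", ""),           # no trigger token required
    Model("TrinArt v2", "naclbit/trinart_stable_diffusion_v2"),   # relies on the default prefix
]

# Later in app.py the prefix is applied exactly as the diff shows:
# prompt = current_model.prefix + prompt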
@@ -78,6 +82,7 @@ else:
 
 if torch.cuda.is_available():
   pipe = pipe.to("cuda")
+  pipe.enable_xformers_memory_efficient_attention()
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
@@ -144,6 +149,7 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
 
     if torch.cuda.is_available():
       pipe = pipe.to("cuda")
+      pipe.enable_xformers_memory_efficient_attention()
     last_mode = "txt2img"
 
   prompt = current_model.prefix + prompt
@@ -187,6 +193,7 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
 
     if torch.cuda.is_available():
      pipe = pipe.to("cuda")
+      pipe.enable_xformers_memory_efficient_attention()
     last_mode = "img2img"
 
   prompt = current_model.prefix + prompt
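Note: the three hunks above apply the same one-line change, enabling xformers memory-efficient attention right after the pipeline is moved to CUDA (once at module level, once in txt_to_img, once in img_to_img). A standalone sketch of the pattern follows; the try/except guard is an addition of this sketch, since the commit calls the method unconditionally and xformers may not be installed on every host:

import torch
from diffusers import StableDiffusionPipeline

# Load any of the checkpoints from the models list above.
pipe = StableDiffusionPipeline.from_pretrained(
    "nitrosocke/Arcane-Diffusion", torch_dtype=torch.float16
)

if torch.cuda.is_available():
    pipe = pipe.to("cuda")
    try:
        # Swap the default attention for xformers' memory-efficient kernels,
        # reducing VRAM use and usually speeding up sampling.
        pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pass  # xformers missing or unsupported GPU: keep default attention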
@@ -196,7 +203,7 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
       prompt,
       negative_prompt = neg_prompt,
       num_images_per_prompt=n_images,
-
+      image = img,
       num_inference_steps = int(steps),
       strength = strength,
       guidance_scale = guidance,
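Note: the last hunk passes the init image to the img-to-img call through the image keyword (the content of the removed line is not captured in this diff extract, so it is left blank above). A standalone sketch of the resulting call, assuming a StableDiffusionImg2ImgPipeline; the prompt, file names, and parameter values are illustrative, not taken from the commit:

import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained("nitrosocke/Arcane-Diffusion")
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

init = Image.open("input.png").convert("RGB").resize((512, 512))  # placeholder init image

result = pipe(
    "arcane style portrait of a wizard",
    negative_prompt="blurry, low quality",
    num_images_per_prompt=1,
    image=init,                    # init image now passed via the `image` keyword
    num_inference_steps=25,
    strength=0.75,                 # 0..1, how far sampling may drift from the init image
    guidance_scale=7.5,
)
result.images[0].save("out.png")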