Upload 2 files
app.py CHANGED
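This commit touches three functions in app.py: it collapses the stacked per-variable global statements in change_base_model, generate_image, and generate_image_to_image into single-line declarations, and it adds a @torch.inference_mode() decorator beneath @spaces.GPU(duration=70) on the two generation entry points.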
@@ -51,15 +51,7 @@ MAX_SEED = 2**32-1
 # https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux
 #@spaces.GPU()
 def change_base_model(repo_id: str, cn_on: bool, disable_model_cache: bool, progress=gr.Progress(track_tqdm=True)):
-    global pipe
-    global pipe_i2i
-    global taef1
-    global good_vae
-    global controlnet_union
-    global controlnet
-    global last_model
-    global last_cn_on
-    global dtype
+    global pipe, pipe_i2i, taef1, good_vae, controlnet_union, controlnet, last_model, last_cn_on, dtype
     try:
         if not disable_model_cache and (repo_id == last_model and cn_on is last_cn_on) or not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(visible=True)
         pipe.to("cpu")
@@ -138,12 +130,9 @@ def update_selection(evt: gr.SelectData, width, height):
     )
 
 @spaces.GPU(duration=70)
+@torch.inference_mode()
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress=gr.Progress(track_tqdm=True)):
-    global pipe
-    global taef1
-    global good_vae
-    global controlnet
-    global controlnet_union
+    global pipe, taef1, good_vae, controlnet, controlnet_union
     try:
         good_vae.to("cuda")
         taef1.to("cuda")
@@ -194,11 +183,9 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
         raise gr.Error(f"Inference Error: {e}") from e
 
 @spaces.GPU(duration=70)
+@torch.inference_mode()
 def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed, cn_on, progress=gr.Progress(track_tqdm=True)):
-    global pipe_i2i
-    global good_vae
-    global controlnet
-    global controlnet_union
+    global pipe_i2i, good_vae, controlnet, controlnet_union
     try:
         good_vae.to("cuda")
         generator = torch.Generator(device="cuda").manual_seed(int(float(seed)))
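Why @torch.inference_mode() helps on the generation paths: it disables autograd tracking outright, which saves memory and a little time during sampling. A minimal sketch of its behavior (standard PyTorch API; the scale function below is illustrative, not part of app.py):

import torch

# inference_mode() behaves like no_grad() but also skips autograd's
# version-counter bookkeeping, making it the stricter, slightly faster
# choice for code that will never backpropagate.
@torch.inference_mode()
def scale(x: torch.Tensor) -> torch.Tensor:
    return x * 2

x = torch.ones(3, requires_grad=True)
y = scale(x)
print(y.requires_grad)  # False: tensors created in inference mode carry no grad history

Decorator order matters here: since decorators apply bottom-up, @spaces.GPU(duration=70) remains the outermost wrapper, so the ZeroGPU allocation happens first and inference mode is entered only once the function body actually runs.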
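One detail worth noting in generate_image_to_image: the generator is seeded with int(float(seed)), which tolerates seed values that arrive from the Gradio UI as floats or numeric strings. A hedged sketch of the reproducibility this buys (CPU device so it runs anywhere; make_generator is an illustrative helper, not from app.py):

import torch

# The same seed produces the same noise, hence the same image for a
# fixed prompt and settings. int(float(seed)) mirrors the diff's
# coercion of a possibly-float value into the int manual_seed() expects.
def make_generator(seed, device="cpu"):
    return torch.Generator(device=device).manual_seed(int(float(seed)))

g1 = make_generator(42)
g2 = make_generator("42.0")
print(torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2)))  # True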