zerhero committed
Commit e90d194
1 Parent(s): b428e5f

refactor gui.py

Files changed (4)
  1. app.py +9 -402
  2. gui.py +432 -0
  3. service/gemini_service.py +11 -0
  4. utils/model_utils.py +1 -1
app.py CHANGED
@@ -184,6 +184,7 @@ import diffusers
 
 diffusers.utils.logging.set_verbosity(40)
 import warnings
+from gui import GuiSD
 
 warnings.filterwarnings(
     action="ignore",
@@ -204,407 +205,12 @@ warnings.filterwarnings(
 logger.setLevel(logging.DEBUG)
 
 
-class GuiSD:
-    def __init__(self, stream=True):
-        self.model = None
-
-        print("Loading model...")
-        self.model = Model_Diffusers(
-            base_model_id="cagliostrolab/animagine-xl-3.1",
-            task_name="txt2img",
-            vae_model=None,
-            type_model_precision=torch.float16,
-            retain_task_model_in_cache=False,
-        )
-
-    def load_new_model(
-            self,
-            model_name,
-            vae_model,
-            task,
-            progress=gr.Progress(track_tqdm=True)):
-        """
-        :param model_name:
-        :param vae_model:
-        :param task:
-        :param progress:
-        """
-        yield f"Loading model: {model_name}"
-
-        vae_model = vae_model if vae_model != "None" else None
-
-        if model_name in model_list:
-            model_is_xl = "xl" in model_name.lower()
-            sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
-            model_type = "SDXL" if model_is_xl else "SD 1.5"
-            incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
-
-            if incompatible_vae:
-                vae_model = None
-
-        self.model.load_pipe(
-            model_name,
-            task_name=task_stablepy[task],
-            vae_model=vae_model if vae_model != "None" else None,
-            type_model_precision=torch.float16,
-            retain_task_model_in_cache=False,
-        )
-        yield f"Model loaded: {model_name}"
-
-    @spaces.GPU
-    def generate_pipeline(
-            self,
-            prompt,
-            neg_prompt,
-            num_images,
-            steps,
-            cfg,
-            clip_skip,
-            seed,
-            lora1,
-            lora_scale1,
-            lora2,
-            lora_scale2,
-            lora3,
-            lora_scale3,
-            lora4,
-            lora_scale4,
-            lora5,
-            lora_scale5,
-            sampler,
-            img_height,
-            img_width,
-            model_name,
-            vae_model,
-            task,
-            image_control,
-            preprocessor_name,
-            preprocess_resolution,
-            image_resolution,
-            style_prompt,  # list []
-            style_json_file,
-            image_mask,
-            strength,
-            low_threshold,
-            high_threshold,
-            value_threshold,
-            distance_threshold,
-            controlnet_output_scaling_in_unet,
-            controlnet_start_threshold,
-            controlnet_stop_threshold,
-            textual_inversion,
-            syntax_weights,
-            upscaler_model_path,
-            upscaler_increases_size,
-            esrgan_tile,
-            esrgan_tile_overlap,
-            hires_steps,
-            hires_denoising_strength,
-            hires_sampler,
-            hires_prompt,
-            hires_negative_prompt,
-            hires_before_adetailer,
-            hires_after_adetailer,
-            loop_generation,
-            leave_progress_bar,
-            disable_progress_bar,
-            image_previews,
-            display_images,
-            save_generated_images,
-            image_storage_location,
-            retain_compel_previous_load,
-            retain_detailfix_model_previous_load,
-            retain_hires_model_previous_load,
-            t2i_adapter_preprocessor,
-            t2i_adapter_conditioning_scale,
-            t2i_adapter_conditioning_factor,
-            xformers_memory_efficient_attention,
-            freeu,
-            generator_in_cpu,
-            adetailer_inpaint_only,
-            adetailer_verbose,
-            adetailer_sampler,
-            adetailer_active_a,
-            prompt_ad_a,
-            negative_prompt_ad_a,
-            strength_ad_a,
-            face_detector_ad_a,
-            person_detector_ad_a,
-            hand_detector_ad_a,
-            mask_dilation_a,
-            mask_blur_a,
-            mask_padding_a,
-            adetailer_active_b,
-            prompt_ad_b,
-            negative_prompt_ad_b,
-            strength_ad_b,
-            face_detector_ad_b,
-            person_detector_ad_b,
-            hand_detector_ad_b,
-            mask_dilation_b,
-            mask_blur_b,
-            mask_padding_b,
-            retain_task_cache_gui,
-            image_ip1,
-            mask_ip1,
-            model_ip1,
-            mode_ip1,
-            scale_ip1,
-            image_ip2,
-            mask_ip2,
-            model_ip2,
-            mode_ip2,
-            scale_ip2):
-        vae_model = vae_model if vae_model != "None" else None
-        loras_list: list = [lora1, lora2, lora3, lora4, lora5]
-        vae_msg: str = f"VAE: {vae_model}" if vae_model else ""
-        msg_lora: list = []
-
-        if model_name in model_list:
-            model_is_xl = "xl" in model_name.lower()
-            sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
-            model_type = "SDXL" if model_is_xl else "SD 1.5"
-            incompatible_vae = ((model_is_xl and
-                                 vae_model and
-                                 not sdxl_in_vae) or
-                                (not model_is_xl and
-                                 sdxl_in_vae))
-
-            if incompatible_vae:
-                msg_inc_vae = (
-                    f"The selected VAE is for a {'SD 1.5' if model_is_xl else 'SDXL'} model, but you"
-                    f" are using a {model_type} model. The default VAE "
-                    "will be used."
-                )
-                gr.Info(msg_inc_vae)
-                vae_msg = msg_inc_vae
-                vae_model = None
-
-        for la in loras_list:
-            if la is None or la == "None" or la not in lora_model_list:
-                continue
-
-            print(la)
-            lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
-            if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
-                msg_inc_lora = f"The LoRA {la} is for {'SD 1.5' if model_is_xl else 'SDXL'}, but you are using {model_type}."
-                gr.Info(msg_inc_lora)
-                msg_lora.append(msg_inc_lora)
-
-        task = task_stablepy[task]
-
-        params_ip_img: list = []
-        params_ip_msk: list = []
-        params_ip_model: list = []
-        params_ip_mode: list = []
-        params_ip_scale: list = []
-
-        all_adapters = [
-            (image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
-            (image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
-        ]
-
-        for (imgip,
-             mskip,
-             modelip,
-             modeip,
-             scaleip) in all_adapters:
-            if imgip:
-                params_ip_img.append(imgip)
-                if mskip:
-                    params_ip_msk.append(mskip)
-                params_ip_model.append(modelip)
-                params_ip_mode.append(modeip)
-                params_ip_scale.append(scaleip)
-
-        # First load
-        model_precision = torch.float16
-        if not self.model:
-            from modelstream import Model_Diffusers2
-
-            print("Loading model...")
-            self.model = Model_Diffusers2(
-                base_model_id=model_name,
-                task_name=task,
-                vae_model=vae_model if vae_model != "None" else None,
-                type_model_precision=model_precision,
-                retain_task_model_in_cache=retain_task_cache_gui,
-            )
-
-        if task != "txt2img" and not image_control:
-            raise ValueError(
-                "No control image found: To use this function, "
-                "you have to upload an image in 'Image ControlNet/Inpaint/Img2img'"
-            )
-
-        if task == "inpaint" and not image_mask:
-            raise ValueError("No mask image found: Specify one in 'Image Mask'")
-
-        if upscaler_model_path in [None, "Lanczos", "Nearest"]:
-            upscaler_model = upscaler_model_path
-        else:
-            directory_upscalers = 'upscalers'
-            os.makedirs(directory_upscalers, exist_ok=True)
-
-            url_upscaler = upscaler_dict_gui[upscaler_model_path]
-
-            if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
-                download_things(
-                    directory_upscalers,
-                    url_upscaler,
-                    hf_token
-                )
-
-            upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"
-
-        logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
-
-        print("Config model:", model_name, vae_model, loras_list)
-
-        self.model.load_pipe(
-            model_name,
-            task_name=task,
-            vae_model=vae_model if vae_model != "None" else None,
-            type_model_precision=model_precision,
-            retain_task_model_in_cache=retain_task_cache_gui,
-        )
-
-        if textual_inversion and self.model.class_name == "StableDiffusionXLPipeline":
-            print("No Textual inversion for SDXL")
-
-        adetailer_params_A = {
-            "face_detector_ad": face_detector_ad_a,
-            "person_detector_ad": person_detector_ad_a,
-            "hand_detector_ad": hand_detector_ad_a,
-            "prompt": prompt_ad_a,
-            "negative_prompt": negative_prompt_ad_a,
-            "strength": strength_ad_a,
-            # "image_list_task" : None,
-            "mask_dilation": mask_dilation_a,
-            "mask_blur": mask_blur_a,
-            "mask_padding": mask_padding_a,
-            "inpaint_only": adetailer_inpaint_only,
-            "sampler": adetailer_sampler,
-        }
-
-        adetailer_params_B = {
-            "face_detector_ad": face_detector_ad_b,
-            "person_detector_ad": person_detector_ad_b,
-            "hand_detector_ad": hand_detector_ad_b,
-            "prompt": prompt_ad_b,
-            "negative_prompt": negative_prompt_ad_b,
-            "strength": strength_ad_b,
-            # "image_list_task" : None,
-            "mask_dilation": mask_dilation_b,
-            "mask_blur": mask_blur_b,
-            "mask_padding": mask_padding_b,
-        }
-        pipe_params = {
-            "prompt": prompt,
-            "negative_prompt": neg_prompt,
-            "img_height": img_height,
-            "img_width": img_width,
-            "num_images": num_images,
-            "num_steps": steps,
-            "guidance_scale": cfg,
-            "clip_skip": clip_skip,
-            "seed": seed,
-            "image": image_control,
-            "preprocessor_name": preprocessor_name,
-            "preprocess_resolution": preprocess_resolution,
-            "image_resolution": image_resolution,
-            "style_prompt": style_prompt if style_prompt else "",
-            "style_json_file": "",
-            "image_mask": image_mask,  # only for Inpaint
-            "strength": strength,  # only for Inpaint or ...
-            "low_threshold": low_threshold,
-            "high_threshold": high_threshold,
-            "value_threshold": value_threshold,
-            "distance_threshold": distance_threshold,
-            "lora_A": lora1 if lora1 != "None" else None,
-            "lora_scale_A": lora_scale1,
-            "lora_B": lora2 if lora2 != "None" else None,
-            "lora_scale_B": lora_scale2,
-            "lora_C": lora3 if lora3 != "None" else None,
-            "lora_scale_C": lora_scale3,
-            "lora_D": lora4 if lora4 != "None" else None,
-            "lora_scale_D": lora_scale4,
-            "lora_E": lora5 if lora5 != "None" else None,
-            "lora_scale_E": lora_scale5,
-            "textual_inversion": embed_list if textual_inversion and self.model.class_name != "StableDiffusionXLPipeline" else [],
-            "syntax_weights": syntax_weights,  # "Classic"
-            "sampler": sampler,
-            "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
-            "gui_active": True,
-            "loop_generation": loop_generation,
-            "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
-            "control_guidance_start": float(controlnet_start_threshold),
-            "control_guidance_end": float(controlnet_stop_threshold),
-            "generator_in_cpu": generator_in_cpu,
-            "FreeU": freeu,
-            "adetailer_A": adetailer_active_a,
-            "adetailer_A_params": adetailer_params_A,
-            "adetailer_B": adetailer_active_b,
-            "adetailer_B_params": adetailer_params_B,
-            "leave_progress_bar": leave_progress_bar,
-            "disable_progress_bar": disable_progress_bar,
-            "image_previews": image_previews,
-            "display_images": display_images,
-            "save_generated_images": save_generated_images,
-            "image_storage_location": image_storage_location,
-            "retain_compel_previous_load": retain_compel_previous_load,
-            "retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
-            "retain_hires_model_previous_load": retain_hires_model_previous_load,
-            "t2i_adapter_preprocessor": t2i_adapter_preprocessor,
-            "t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
-            "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
-            "upscaler_model_path": upscaler_model,
-            "upscaler_increases_size": upscaler_increases_size,
-            "esrgan_tile": esrgan_tile,
-            "esrgan_tile_overlap": esrgan_tile_overlap,
-            "hires_steps": hires_steps,
-            "hires_denoising_strength": hires_denoising_strength,
-            "hires_prompt": hires_prompt,
-            "hires_negative_prompt": hires_negative_prompt,
-            "hires_sampler": hires_sampler,
-            "hires_before_adetailer": hires_before_adetailer,
-            "hires_after_adetailer": hires_after_adetailer,
-            "ip_adapter_image": params_ip_img,
-            "ip_adapter_mask": params_ip_msk,
-            "ip_adapter_model": params_ip_model,
-            "ip_adapter_mode": params_ip_mode,
-            "ip_adapter_scale": params_ip_scale,
-        }
-
-        # print(pipe_params)
-
-        random_number = random.randint(1, 100)
-        if random_number < 25 and num_images < 3:
-            if (not upscaler_model and
-                    steps < 45 and
-                    task in ["txt2img", "img2img"] and
-                    not adetailer_active_a and
-                    not adetailer_active_b):
-                num_images *= 2
-                pipe_params["num_images"] = num_images
-                gr.Info("Num images x 2 🎉")
-
-        # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!'
-        self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
-
-        info_state = f"PROCESSING"
-        for img, seed, data in self.model(**pipe_params):
-            info_state += "."
-            if data:
-                info_state = f"COMPLETED. Seeds: {str(seed)}"
-                if vae_msg:
-                    info_state = info_state + "<br>" + vae_msg
-                if msg_lora:
-                    info_state = info_state + "<br>" + "<br>".join(msg_lora)
-            yield img, info_state
-
-
-sd_gen = GuiSD()
+# init GuiSD
+sd_gen = GuiSD(
+    model_list=model_list,
+    task_stablepy=task_stablepy,
+    lora_model_list=lora_model_list
+)
 
 with open("app.css", "r") as f:
     CSS: str = f.read()
@@ -649,7 +255,7 @@ with gr.Blocks(css=CSS) as app:
 model_name_gui = gr.Dropdown(
     label="Model",
     choices=model_list,
-    value=model_list[-6] or model_list[0],
+    value="models/animaPencilXL_v500.safetensors" or model_list[0],
    allow_custom_value=True
 )
 prompt_gui = gr.Textbox(
@@ -1104,6 +710,7 @@ with gr.Blocks(css=CSS) as app:
     value=choices_task[0]
 )
 
+
 task_gui.change(
     change_preprocessor_choices,
     [task_gui],
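
The net effect in app.py: `GuiSD` now arrives via `from gui import GuiSD` and receives `model_list`, `task_stablepy`, and `lora_model_list` through its constructor instead of reading them as app.py module globals. A minimal sketch of how the refactored class is driven, assuming the three lists are already built in app.py and that `"txt2img"` is a valid key of `task_stablepy` (that key is an assumption, not shown in this diff):

```python
# Sketch of the post-refactor wiring; names besides GuiSD and the three
# injected lists come from app.py as-is.
from gui import GuiSD

sd_gen = GuiSD(
    model_list=model_list,
    task_stablepy=task_stablepy,
    lora_model_list=lora_model_list,
)

# load_new_model is a generator, so a UI can stream its status text:
for status in sd_gen.load_new_model(
        "cagliostrolab/animagine-xl-3.1",  # model_name
        "None",                            # vae_model ("None" means default)
        "txt2img",                         # task key, assumed
):
    print(status)  # "Loading model: ..." then "Model loaded: ..."
```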
gui.py ADDED
@@ -0,0 +1,432 @@
+import spaces
+import os
+from stablepy import Model_Diffusers
+from stablepy.diffusers_vanilla.model import scheduler_names
+from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
+import torch
+import re
+import shutil
+import random
+import spaces
+import gradio as gr
+from PIL import Image
+import IPython.display
+import time, json
+from IPython.utils import capture
+import logging
+from utils.string_utils import extract_parameters
+from stablepy import logger
+
+logging.getLogger("diffusers").setLevel(logging.ERROR)
+import diffusers
+
+diffusers.utils.logging.set_verbosity(40)
+import warnings
+
+
+class GuiSD:
+    def __init__(self,
+                 model_list,
+                 task_stablepy,
+                 lora_model_list,
+                 stream=True):
+        self.model = None
+
+        print("Loading model...")
+        self.model = Model_Diffusers(
+            base_model_id="cagliostrolab/animagine-xl-3.1",
+            task_name="txt2img",
+            vae_model=None,
+            type_model_precision=torch.float16,
+            retain_task_model_in_cache=False,
+        )
+        self.model_list = model_list
+        self.task_stablepy = task_stablepy
+        self.lora_model_list = lora_model_list
+        self.stream = stream
+
+    def load_new_model(
+            self,
+            model_name,
+            vae_model,
+            task,
+            progress=gr.Progress(track_tqdm=True)):
+        """
+        :param model_name:
+        :param vae_model:
+        :param task:
+        :param progress:
+        """
+        yield f"Loading model: {model_name}"
+
+        vae_model = vae_model if vae_model != "None" else None
+
+        if model_name in self.model_list:
+            model_is_xl = "xl" in model_name.lower()
+            sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
+            model_type = "SDXL" if model_is_xl else "SD 1.5"
+            incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
+
+            if incompatible_vae:
+                vae_model = None
+
+        self.model.load_pipe(
+            model_name,
+            task_name=self.task_stablepy[task],
+            vae_model=vae_model if vae_model != "None" else None,
+            type_model_precision=torch.float16,
+            retain_task_model_in_cache=False,
+        )
+        yield f"Model loaded: {model_name}"
+
+    @spaces.GPU
+    def generate_pipeline(
+            self,
+            prompt,
+            neg_prompt,
+            num_images,
+            steps,
+            cfg,
+            clip_skip,
+            seed,
+            lora1,
+            lora_scale1,
+            lora2,
+            lora_scale2,
+            lora3,
+            lora_scale3,
+            lora4,
+            lora_scale4,
+            lora5,
+            lora_scale5,
+            sampler,
+            img_height,
+            img_width,
+            model_name,
+            vae_model,
+            task,
+            image_control,
+            preprocessor_name,
+            preprocess_resolution,
+            image_resolution,
+            style_prompt,  # list []
+            style_json_file,
+            image_mask,
+            strength,
+            low_threshold,
+            high_threshold,
+            value_threshold,
+            distance_threshold,
+            controlnet_output_scaling_in_unet,
+            controlnet_start_threshold,
+            controlnet_stop_threshold,
+            textual_inversion,
+            syntax_weights,
+            upscaler_model_path,
+            upscaler_increases_size,
+            esrgan_tile,
+            esrgan_tile_overlap,
+            hires_steps,
+            hires_denoising_strength,
+            hires_sampler,
+            hires_prompt,
+            hires_negative_prompt,
+            hires_before_adetailer,
+            hires_after_adetailer,
+            loop_generation,
+            leave_progress_bar,
+            disable_progress_bar,
+            image_previews,
+            display_images,
+            save_generated_images,
+            image_storage_location,
+            retain_compel_previous_load,
+            retain_detailfix_model_previous_load,
+            retain_hires_model_previous_load,
+            t2i_adapter_preprocessor,
+            t2i_adapter_conditioning_scale,
+            t2i_adapter_conditioning_factor,
+            xformers_memory_efficient_attention,
+            freeu,
+            generator_in_cpu,
+            adetailer_inpaint_only,
+            adetailer_verbose,
+            adetailer_sampler,
+            adetailer_active_a,
+            prompt_ad_a,
+            negative_prompt_ad_a,
+            strength_ad_a,
+            face_detector_ad_a,
+            person_detector_ad_a,
+            hand_detector_ad_a,
+            mask_dilation_a,
+            mask_blur_a,
+            mask_padding_a,
+            adetailer_active_b,
+            prompt_ad_b,
+            negative_prompt_ad_b,
+            strength_ad_b,
+            face_detector_ad_b,
+            person_detector_ad_b,
+            hand_detector_ad_b,
+            mask_dilation_b,
+            mask_blur_b,
+            mask_padding_b,
+            retain_task_cache_gui,
+            image_ip1,
+            mask_ip1,
+            model_ip1,
+            mode_ip1,
+            scale_ip1,
+            image_ip2,
+            mask_ip2,
+            model_ip2,
+            mode_ip2,
+            scale_ip2):
+        vae_model = vae_model if vae_model != "None" else None
+        loras_list: list = [lora1, lora2, lora3, lora4, lora5]
+        vae_msg: str = f"VAE: {vae_model}" if vae_model else ""
+        msg_lora: list = []
+
+        if model_name in self.model_list:
+            model_is_xl = "xl" in model_name.lower()
+            sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
+            model_type = "SDXL" if model_is_xl else "SD 1.5"
+            incompatible_vae = ((model_is_xl and
+                                 vae_model and
+                                 not sdxl_in_vae) or
+                                (not model_is_xl and
+                                 sdxl_in_vae))
+
+            if incompatible_vae:
+                msg_inc_vae = (
+                    f"The selected VAE is for a {'SD 1.5' if model_is_xl else 'SDXL'} model, but you"
+                    f" are using a {model_type} model. The default VAE "
+                    "will be used."
+                )
+                gr.Info(msg_inc_vae)
+                vae_msg = msg_inc_vae
+                vae_model = None
+
+        for la in loras_list:
+            if la is None or la == "None" or la not in self.lora_model_list:
+                continue
+
+            print(la)
+            lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
+            if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
+                msg_inc_lora = f"The LoRA {la} is for {'SD 1.5' if model_is_xl else 'SDXL'}, but you are using {model_type}."
+                gr.Info(msg_inc_lora)
+                msg_lora.append(msg_inc_lora)
+
+        task = self.task_stablepy[task]
+
+        params_ip_img: list = []
+        params_ip_msk: list = []
+        params_ip_model: list = []
+        params_ip_mode: list = []
+        params_ip_scale: list = []
+
+        all_adapters = [
+            (image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
+            (image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
+        ]
+
+        for (imgip,
+             mskip,
+             modelip,
+             modeip,
+             scaleip) in all_adapters:
+            if imgip:
+                params_ip_img.append(imgip)
+                if mskip:
+                    params_ip_msk.append(mskip)
+                params_ip_model.append(modelip)
+                params_ip_mode.append(modeip)
+                params_ip_scale.append(scaleip)
+
+        # First load
+        model_precision = torch.float16
+        if not self.model:
+            from modelstream import Model_Diffusers2
+
+            print("Loading model...")
+            self.model = Model_Diffusers2(
+                base_model_id=model_name,
+                task_name=task,
+                vae_model=vae_model if vae_model != "None" else None,
+                type_model_precision=model_precision,
+                retain_task_model_in_cache=retain_task_cache_gui,
+            )
+
+        if task != "txt2img" and not image_control:
+            raise ValueError(
+                "No control image found: To use this function, "
+                "you have to upload an image in 'Image ControlNet/Inpaint/Img2img'"
+            )
+
+        if task == "inpaint" and not image_mask:
+            raise ValueError("No mask image found: Specify one in 'Image Mask'")
+
+        if upscaler_model_path in [None, "Lanczos", "Nearest"]:
+            upscaler_model = upscaler_model_path
+        else:
+            directory_upscalers = 'upscalers'
+            os.makedirs(directory_upscalers, exist_ok=True)
+
+            url_upscaler = upscaler_dict_gui[upscaler_model_path]
+
+            if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
+                download_things(
+                    directory_upscalers,
+                    url_upscaler,
+                    hf_token
+                )
+
+            upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"
+
+        logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
+
+        print("Config model:", model_name, vae_model, loras_list)
+
+        self.model.load_pipe(
+            model_name,
+            task_name=task,
+            vae_model=vae_model if vae_model != "None" else None,
+            type_model_precision=model_precision,
+            retain_task_model_in_cache=retain_task_cache_gui,
+        )
+
+        if textual_inversion and self.model.class_name == "StableDiffusionXLPipeline":
+            print("No Textual inversion for SDXL")
+
+        adetailer_params_A: dict = {
+            "face_detector_ad": face_detector_ad_a,
+            "person_detector_ad": person_detector_ad_a,
+            "hand_detector_ad": hand_detector_ad_a,
+            "prompt": prompt_ad_a,
+            "negative_prompt": negative_prompt_ad_a,
+            "strength": strength_ad_a,
+            # "image_list_task" : None,
+            "mask_dilation": mask_dilation_a,
+            "mask_blur": mask_blur_a,
+            "mask_padding": mask_padding_a,
+            "inpaint_only": adetailer_inpaint_only,
+            "sampler": adetailer_sampler,
+        }
+
+        adetailer_params_B: dict = {
+            "face_detector_ad": face_detector_ad_b,
+            "person_detector_ad": person_detector_ad_b,
+            "hand_detector_ad": hand_detector_ad_b,
+            "prompt": prompt_ad_b,
+            "negative_prompt": negative_prompt_ad_b,
+            "strength": strength_ad_b,
+            # "image_list_task" : None,
+            "mask_dilation": mask_dilation_b,
+            "mask_blur": mask_blur_b,
+            "mask_padding": mask_padding_b,
+        }
+        pipe_params: dict = {
+            "prompt": prompt,
+            "negative_prompt": neg_prompt,
+            "img_height": img_height,
+            "img_width": img_width,
+            "num_images": num_images,
+            "num_steps": steps,
+            "guidance_scale": cfg,
+            "clip_skip": clip_skip,
+            "seed": seed,
+            "image": image_control,
+            "preprocessor_name": preprocessor_name,
+            "preprocess_resolution": preprocess_resolution,
+            "image_resolution": image_resolution,
+            "style_prompt": style_prompt if style_prompt else "",
+            "style_json_file": "",
+            "image_mask": image_mask,  # only for Inpaint
+            "strength": strength,  # only for Inpaint or ...
+            "low_threshold": low_threshold,
+            "high_threshold": high_threshold,
+            "value_threshold": value_threshold,
+            "distance_threshold": distance_threshold,
+            "lora_A": lora1 if lora1 != "None" else None,
+            "lora_scale_A": lora_scale1,
+            "lora_B": lora2 if lora2 != "None" else None,
+            "lora_scale_B": lora_scale2,
+            "lora_C": lora3 if lora3 != "None" else None,
+            "lora_scale_C": lora_scale3,
+            "lora_D": lora4 if lora4 != "None" else None,
+            "lora_scale_D": lora_scale4,
+            "lora_E": lora5 if lora5 != "None" else None,
+            "lora_scale_E": lora_scale5,
+            "textual_inversion": embed_list if textual_inversion and self.model.class_name != "StableDiffusionXLPipeline" else [],
+            "syntax_weights": syntax_weights,  # "Classic"
+            "sampler": sampler,
+            "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
+            "gui_active": True,
+            "loop_generation": loop_generation,
+            "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
+            "control_guidance_start": float(controlnet_start_threshold),
+            "control_guidance_end": float(controlnet_stop_threshold),
+            "generator_in_cpu": generator_in_cpu,
+            "FreeU": freeu,
+            "adetailer_A": adetailer_active_a,
+            "adetailer_A_params": adetailer_params_A,
+            "adetailer_B": adetailer_active_b,
+            "adetailer_B_params": adetailer_params_B,
+            "leave_progress_bar": leave_progress_bar,
+            "disable_progress_bar": disable_progress_bar,
+            "image_previews": image_previews,
+            "display_images": display_images,
+            "save_generated_images": save_generated_images,
+            "image_storage_location": image_storage_location,
+            "retain_compel_previous_load": retain_compel_previous_load,
+            "retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
+            "retain_hires_model_previous_load": retain_hires_model_previous_load,
+            "t2i_adapter_preprocessor": t2i_adapter_preprocessor,
+            "t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
+            "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
+            "upscaler_model_path": upscaler_model,
+            "upscaler_increases_size": upscaler_increases_size,
+            "esrgan_tile": esrgan_tile,
+            "esrgan_tile_overlap": esrgan_tile_overlap,
+            "hires_steps": hires_steps,
+            "hires_denoising_strength": hires_denoising_strength,
+            "hires_prompt": hires_prompt,
+            "hires_negative_prompt": hires_negative_prompt,
+            "hires_sampler": hires_sampler,
+            "hires_before_adetailer": hires_before_adetailer,
+            "hires_after_adetailer": hires_after_adetailer,
+            "ip_adapter_image": params_ip_img,
+            "ip_adapter_mask": params_ip_msk,
+            "ip_adapter_model": params_ip_model,
+            "ip_adapter_mode": params_ip_mode,
+            "ip_adapter_scale": params_ip_scale,
+        }
+
+        # print(pipe_params)
+
+        random_number = random.randint(1, 100)
+        if random_number < 25 and num_images < 3:
+            if (not upscaler_model and
+                    steps < 45 and
+                    task in ["txt2img", "img2img"] and
+                    not adetailer_active_a and
+                    not adetailer_active_b):
+                num_images *= 2
+                pipe_params["num_images"] = num_images
+                gr.Info("Num images x 2 🎉")
+
+        # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!'
+        self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
+
+        info_state = f"PROCESSING"
+        for img, seed, data in self.model(**pipe_params):
+            info_state += "."
+            if data:
+                info_state = f"COMPLETED. Seeds: {str(seed)}"
+                if vae_msg:
+                    info_state = info_state + "<br>" + vae_msg
+                if msg_lora:
+                    info_state = info_state + "<br>" + "<br>".join(msg_lora)
+            yield img, info_state
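
Note that even after the move, `generate_pipeline` still refers to `upscaler_dict_gui`, `download_things`, `hf_token`, and `embed_list`, none of which are defined or imported in gui.py, so the upscaler-download and textual-inversion paths would raise `NameError` if exercised. A hedged sketch of how this commit's injection pattern could be extended to cover them (parameter names and defaults are assumptions; the `Model_Diffusers` preload is omitted for brevity):

```python
# Hypothetical follow-up, not part of this commit: inject the remaining
# app.py globals the same way the three lists were injected.
class GuiSD:
    def __init__(self,
                 model_list,
                 task_stablepy,
                 lora_model_list,
                 upscaler_dict_gui=None,  # upscaler name -> download URL
                 embed_list=None,         # textual-inversion embeddings
                 hf_token=None,           # forwarded to download_things(...)
                 stream=True):
        self.model_list = model_list
        self.task_stablepy = task_stablepy
        self.lora_model_list = lora_model_list
        self.upscaler_dict_gui = upscaler_dict_gui or {}
        self.embed_list = embed_list or []
        self.hf_token = hf_token
        self.stream = stream
```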
service/gemini_service.py ADDED
@@ -0,0 +1,11 @@
+def prompt_gemini(prompt: str):
+    """
+    :param prompt:
+    :return:
+    """
+    import os
+    gemini_api_key: str = os.environ.get("GEMINI_API_KEY")
+    if not gemini_api_key:
+        print("\033[91mYou need an API key to download Gemini models.\033[0m")
+
+    return prompt
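
As committed, `prompt_gemini` only checks that `GEMINI_API_KEY` is set and then returns the prompt unchanged, so it is a placeholder rather than a working service. One possible completion, assuming the `google-generativeai` package were added as a dependency (the model name and fallback behavior are assumptions):

```python
import os

import google.generativeai as genai  # assumed dependency, not in this commit


def prompt_gemini(prompt: str) -> str:
    """Ask Gemini to refine a prompt; fall back to the original if no key is set."""
    gemini_api_key = os.environ.get("GEMINI_API_KEY")
    if not gemini_api_key:
        print("\033[91mYou need a GEMINI_API_KEY to call the Gemini API.\033[0m")
        return prompt

    genai.configure(api_key=gemini_api_key)
    model = genai.GenerativeModel("gemini-1.5-flash")  # model name is an assumption
    response = model.generate_content(prompt)
    return response.text
```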
utils/model_utils.py CHANGED
@@ -20,4 +20,4 @@ def get_model_list(directory_path):
     # model_list.append((name_without_extension, file_path))
     model_list.append(file_path)
     print('\033[34mFILE: ' + file_path + '\033[0m')
-    return model_list
+    return model_list
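
The `-`/`+` lines of this hunk are textually identical, so the change is whitespace-only; given the loop context above it, the most plausible reading is that `return model_list` was de-indented out of the scan loop so the function returns the full list instead of stopping after the first file. A reconstruction of the helper under that assumption (the directory-walk details are not shown in the hunk and are assumed):

```python
import os


def get_model_list(directory_path):
    """Collect model file paths under directory_path."""
    model_list = []
    for filename in os.listdir(directory_path):  # walk details assumed
        file_path = os.path.join(directory_path, filename)
        if os.path.isfile(file_path):
            # model_list.append((name_without_extension, file_path))
            model_list.append(file_path)
            print('\033[34mFILE: ' + file_path + '\033[0m')
    # Outside the loop: every scanned file is returned, not just the first.
    return model_list
```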