fffiloni committed (verified)
Commit 074ecb8
1 Parent(s): 459fa69

Update run_gradio.py

Files changed (1):
run_gradio.py  +47 -1
run_gradio.py CHANGED
@@ -27,6 +27,52 @@ from src.models.refunet_2d_condition import RefUNet2DConditionModel
 from src.point_network import PointNet
 from src.annotator.lineart import BatchLineartDetector
 val_configs = OmegaConf.load('./configs/inference.yaml')
+# download the checkpoints
+from huggingface_hub import snapshot_download, hf_hub_download
+
+os.makedirs("checkpoints", exist_ok=True)
+
+# List of subdirectories to create inside "checkpoints"
+subfolders = [
+    "StableDiffusion",
+    "models",
+    "MangaNinja"
+]
+# Create each subdirectory
+for subfolder in subfolders:
+    os.makedirs(os.path.join("checkpoints", subfolder), exist_ok=True)
+
+# List of subdirectories to create inside "models"
+models_subfolders = [
+    "clip-vit-large-patch14",
+    "control_v11p_sd15_lineart",
+    "Annotators"
+]
+# Create each subdirectory
+for subfolder in models_subfolders:
+    os.makedirs(os.path.join("checkpoints/models", subfolder), exist_ok=True)
+
+snapshot_download(
+    repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5",
+    local_dir = "./checkpoints/StableDiffusion"
+)
+snapshot_download(
+    repo_id = "openai/clip-vit-large-patch14",
+    local_dir = "./checkpoints/models/clip-vit-large-patch14"
+)
+snapshot_download(
+    repo_id = "lllyasviel/control_v11p_sd15_lineart",
+    local_dir = "./checkpoints/models/control_v11p_sd15_lineart"
+)
+hf_hub_download(
+    repo_id = "lllyasviel/Annotators",
+    filename = "sk_model.pth",
+    local_dir = "./checkpoints/models/Annotators"
+)
+snapshot_download(
+    repo_id = "Johanan0528/MangaNinjia",
+    local_dir = "./checkpoints/MangaNinja"
+)
 # === load the checkpoint ===
 pretrained_model_name_or_path = val_configs.model_path.pretrained_model_name_or_path
 refnet_clip_vision_encoder_path = val_configs.model_path.clip_vision_encoder_path
@@ -378,4 +424,4 @@ with gr.Blocks() as demo:
         outputs=[baseline_gallery]
     )
 
-demo.launch(server_name="0.0.0.0")
+demo.launch(show_api=False, show_error=True)
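
For reference, the added block boils down to one pattern: create a local folder, then mirror a Hub repo into it with snapshot_download (plus one single-file hf_hub_download for the lineart annotator weights). Below is a minimal stand-alone sketch of that pattern using the same repo IDs and target folders as the diff; the helper name and dict layout are illustrative and not part of the commit.

# Stand-alone sketch (not part of the commit): same downloads as the diff,
# gathered into one helper. Repo IDs and target directories are copied from
# the change above; the function name and structure are hypothetical.
import os
from huggingface_hub import snapshot_download, hf_hub_download

CHECKPOINT_ROOT = "checkpoints"  # same root the updated run_gradio.py uses

def fetch_checkpoints(root: str = CHECKPOINT_ROOT) -> None:
    """Download every model repo the demo expects into a local folder tree."""
    snapshots = {
        "StableDiffusion": "stable-diffusion-v1-5/stable-diffusion-v1-5",
        "models/clip-vit-large-patch14": "openai/clip-vit-large-patch14",
        "models/control_v11p_sd15_lineart": "lllyasviel/control_v11p_sd15_lineart",
        "MangaNinja": "Johanan0528/MangaNinjia",
    }
    for subdir, repo_id in snapshots.items():
        local_dir = os.path.join(root, subdir)
        os.makedirs(local_dir, exist_ok=True)
        snapshot_download(repo_id=repo_id, local_dir=local_dir)

    # Single-file download: only the annotator checkpoint is needed.
    hf_hub_download(
        repo_id="lllyasviel/Annotators",
        filename="sk_model.pth",
        local_dir=os.path.join(root, "models", "Annotators"),
    )

if __name__ == "__main__":
    fetch_checkpoints()

In recent huggingface_hub versions, snapshot_download skips files that are already present in local_dir and up to date, so re-running the script (for example on a Space restart) only fetches what is missing.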