halffried committed
Commit e2a179c
1 Parent(s): ecd6e26

Upload 5 files

Files changed (5):
  1. README.md +9 -3
  2. best_lsm_hawp.safetensors +3 -0
  3. best_lsm_hawp.yaml +36 -0
  4. last.safetensors +3 -0
  5. last.yaml +104 -0
README.md CHANGED
@@ -1,3 +1,9 @@
- ---
- license: apache-2.0
- ---
+ # ZITS-PlusPlus models for Gyre
+
+ Models from https://github.com/ewrfcas/ZITS-PlusPlus
+
+ Distributed under the Apache-2.0 license
+
+ Changes:
+ - Converted to safetensors
+ - lsm_hawp config converted to yaml
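
The change note above says the upstream ZITS-PlusPlus .pth checkpoints were converted to safetensors. A minimal sketch of how such a conversion is usually done, assuming the upstream files are plain PyTorch checkpoints; the input file name and the "state_dict"/"model" unwrapping are assumptions, not something recorded in this commit:

```python
# Hedged sketch: convert a PyTorch .pth checkpoint to safetensors.
# "best_lsm_hawp.pth" is an assumed input name; adjust to the upstream file.
import torch
from safetensors.torch import save_file

ckpt = torch.load("best_lsm_hawp.pth", map_location="cpu")

# Some checkpoints wrap the weights under a key such as "state_dict" or "model";
# unwrap if present (assumption about the upstream layout).
for key in ("state_dict", "model"):
    if isinstance(ckpt, dict) and isinstance(ckpt.get(key), dict):
        ckpt = ckpt[key]
        break

# safetensors stores a flat name -> tensor mapping of contiguous tensors.
tensors = {k: v.contiguous() for k, v in ckpt.items() if isinstance(v, torch.Tensor)}
save_file(tensors, "best_lsm_hawp.safetensors")
```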
best_lsm_hawp.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68d8f0020eb979a4951678a4336f351d9ec6f73d6560e8245e886fa69fcef169
+ size 41525248
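
The two .safetensors entries in this commit are Git LFS pointer files: the repository itself stores only the sha256 oid and byte size, and the weight blobs are fetched by LFS. A small sketch for checking that a downloaded best_lsm_hawp.safetensors matches the pointer above (the local path is an assumption):

```python
# Verify a downloaded LFS object against the pointer's sha256 oid and size.
import hashlib
import os

path = "best_lsm_hawp.safetensors"
expected_oid = "68d8f0020eb979a4951678a4336f351d9ec6f73d6560e8245e886fa69fcef169"
expected_size = 41525248

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("LFS object matches pointer")
```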
best_lsm_hawp.yaml ADDED
@@ -0,0 +1,36 @@
+ MODEL:
+   DEVICE: cuda
+   HEAD_SIZE:
+   - - 3
+   - - 1
+   - - 1
+   - - 2
+   - - 2
+   HGNETS:
+     DEPTH: 4
+     INPLANES: 64
+     NUM_BLOCKS: 1
+     NUM_FEATS: 128
+     NUM_STACKS: 2
+   LOSS_WEIGHTS: {}
+   NAME: Hourglass
+   OUT_FEATURE_CHANNELS: 256
+   PARSING_HEAD:
+     DIM_FC: 1024
+     DIM_LOI: 128
+     MATCHING_STRATEGY: junction
+     MAX_DISTANCE: 5.0
+     N_DYN_JUNC: 300
+     N_DYN_NEGL: 300
+     N_DYN_OTHR: 0
+     N_DYN_OTHR2: 300
+     N_DYN_POSL: 300
+     N_OUT_JUNC: 250
+     N_OUT_LINE: 2500
+     N_PTS0: 32
+     N_PTS1: 8
+     N_STC_NEGL: 40
+     N_STC_POSL: 300
+     USE_RESIDUAL: true
+   SCALE: 1.0
+   WEIGHTS: ''
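
best_lsm_hawp.yaml is the configuration for the LSM-HAWP wireframe/line detector used by ZITS-PlusPlus, converted to YAML per the README change note. A hedged sketch of reading it back together with the safetensors weights; `build_lsm_hawp` is a hypothetical constructor standing in for whatever Gyre or ZITS-PlusPlus actually uses:

```python
# Hedged sketch: load the converted YAML config and the safetensors weights.
# `build_lsm_hawp` is a hypothetical factory; the real construction lives in
# the ZITS-PlusPlus / Gyre code that consumes these files.
import yaml
from safetensors.torch import load_file

with open("best_lsm_hawp.yaml") as f:
    cfg = yaml.safe_load(f)        # plain nested dict, e.g. cfg["MODEL"]["DEVICE"] == "cuda"

state_dict = load_file("best_lsm_hawp.safetensors", device="cpu")

# model = build_lsm_hawp(cfg["MODEL"])   # hypothetical constructor
# model.load_state_dict(state_dict)
```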
last.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd42c1d27c29114d8bb231b8af275e1eaae4d2eacdd755fecfdf39d42dd16b4b
+ size 785269879
last.yaml ADDED
@@ -0,0 +1,104 @@
+ train_flist: '/home/wmlce/places365_standard/places2_all/train_list.txt'
+ val_flist: '/home/wmlce/places365_standard/places2_all/test_sub_list.txt'
+ test_path: '/home/wmlce/places365_standard/val_512img_for_eval'
+
+ train_mask_flist: [ '/home/wmlce/irregular_mask/irregular_lama_mask_list.txt',
+                     '/home/wmlce/coco_mask/coco_mask_list.txt' ]
+ test_mask_flist: '/home/wmlce/Image-Transformer-Inpainting/data/indoor/test_mask'
+
+ batch_size: 12 # input batch size for training
+ num_workers: 12
+ sample_size: 12
+ fp16: false
+
+ # Dataset settings
+ data_class: 'base.dataset.DynamicDataset_gradient_line'
+ dataset:
+   rect_mask_rate: 0.0
+   train_line_path: "places2_train_wireframes"
+   eval_line_path: "places2_val_wireframes"
+   round: 64
+   str_size: 256
+   input_size: 512 # size for eval
+
+ # model settings
+ structure_upsample_class: 'networks.upsample.StructureUpsampling4'
+ edgeline_tsr_class: 'networks.tsr.EdgeLineGPT256RelBCE_edge_pred_infer'
+ grad_tsr_class: 'networks.tsr.GradientGPT256RelBCE'
+ PLTrainer: 'trainers.pl_trainers.FinetunePLTrainer_nms_threshold'
+
+ g_class: 'networks.generators.FTRModel'
+ g_args:
+   use_gradient: False
+   use_GFBlock: False
+   activation: 'swish'
+   use_VAN_between_FFC: False
+   van_kernel_size: 21
+   van_dilation: 3
+   prior_ch: 3
+   rezero_for_mpe: True
+   rel_pos_num: 128
+
+ d_class: 'networks.discriminators.NLayerDiscriminator'
+ d_args:
+   input_nc: 3
+
+
+ # pretrained ckpt settings
+ # resume_structure_upsample: '/mnt/storage/dongqiaole/dql_inpainting/CNN_final/ckpt/StructureUpsampling_V5_last.pth'
+ # resume_edgeline_tsr: '/mnt/storage/dongqiaole/dql_inpainting/Transformer_final_places2/ckpt/places2_line_cats_edge_pred_infer/best.pth'
+ # resume_grad_tsr: '/mnt/storage/dongqiaole/dql_inpainting/Transformer_final_places2/ckpt/places2_gradient/best.pth'
+ # resume_ftr: '/mnt/storage/dongqiaole/dql_inpainting/TPAMI2022-final/ckpts/Places2_lightning_converted_weights/converted_from_pl_800k_3sfe.pth'
+
+ resume_structure_upsample: '/home/wmlce/dql_inpainting/CNN_final/ckpt/StructureUpsampling_V5_last.pth'
+ resume_edgeline_tsr: '/home/wmlce/dql_inpainting/Transformer_final_places2/ckpt/places2_line_cats_edge_pred_infer/best.pth'
+ resume_grad_tsr: '/home/wmlce/dql_inpainting/Transformer_final_places2/ckpt/places2_gradient/best.pth'
+ resume_ftr: '/home/wmlce/dongqiaole/dql_inpainting/TPAMI2022-final/ckpts/Places2_lightning_converted_weights/converted_from_pl_800k_3sfe.pth'
+
+
+ # Trainer settings
+ trainer:
+   fix_256: False
+   Turning_Point: 10000
+   total_step: 150000
+   sample_period: 1000
+   eval_period: 2000
+   save_period: 1000
+   logging_every: 50
+   ema_beta: 0.995
+   sample_with_center_mask: false
+   # loss
+   l1:
+     use_l1: true
+     weight_missing: 0
+     weight_known: 10.0
+   adversarial:
+     weight: 10.0
+     gp_coef: 0.001
+     mask_as_fake_target: true
+     allow_scale_mask: true
+     extra_mask_weight_for_gen: 0.0
+     use_unmasked_for_gen: true
+     use_unmasked_for_discr: true
+     mask_scale_mode: 'maxpool'
+   perceptual:
+     weight: 0
+   resnet_pl:
+     weight: 30.0
+     weights_path: '/home/wmlce/dql_inpainting'
+   feature_matching:
+     weight: 100.0
+
+ # opt settings
+ optimizer:
+   warmup_steps: 0
+   decay_steps: [50000, 100000]
+   decay_rate: 0.5
+   g_opt:
+     lr: 3.0e-4
+     beta1: 0
+     beta2: 0.99
+   d_opt:
+     lr: 1.0e-4
+     beta1: 0
+     beta2: 0.99
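
last.yaml is the ZITS-PlusPlus training/finetuning configuration (dataset lists, generator/discriminator classes, loss weights, optimizer schedule). As one illustration of how the `optimizer` block above is typically consumed, here is a hedged sketch that builds the two Adam optimizers and a step-decay factor from it; the Linear placeholders merely stand in for the real FTRModel and NLayerDiscriminator networks:

```python
# Hedged sketch: build generator/discriminator Adam optimizers and a
# step-decay factor from the `optimizer` block of last.yaml.
import torch
import yaml

with open("last.yaml") as f:
    cfg = yaml.safe_load(f)
opt_cfg = cfg["optimizer"]

generator = torch.nn.Linear(4, 4)        # placeholder for FTRModel
discriminator = torch.nn.Linear(4, 4)    # placeholder for NLayerDiscriminator

g_opt = torch.optim.Adam(
    generator.parameters(),
    lr=opt_cfg["g_opt"]["lr"],                                      # 3.0e-4
    betas=(opt_cfg["g_opt"]["beta1"], opt_cfg["g_opt"]["beta2"]),   # (0, 0.99)
)
d_opt = torch.optim.Adam(
    discriminator.parameters(),
    lr=opt_cfg["d_opt"]["lr"],                                      # 1.0e-4
    betas=(opt_cfg["d_opt"]["beta1"], opt_cfg["d_opt"]["beta2"]),
)

def lr_scale(step: int) -> float:
    """Multiply the base lr by decay_rate (0.5) at each milestone in decay_steps."""
    scale = 1.0
    for milestone in opt_cfg["decay_steps"]:                        # [50000, 100000]
        if step >= milestone:
            scale *= opt_cfg["decay_rate"]
    return scale
```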