Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +6 -0
- vanishing_point_extraction/neurvps/TMM17/checkpoint_latest.pth.tar +3 -0
- vanishing_point_extraction/neurvps/TMM17/config.yaml +39 -0
- vanishing_point_extraction/neurvps/neurvps/__init__.py +4 -0
- vanishing_point_extraction/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/__pycache__/box.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/__pycache__/config.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/__pycache__/utils.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/box.py +1110 -0
- vanishing_point_extraction/neurvps/neurvps/config.py +9 -0
- vanishing_point_extraction/neurvps/neurvps/datasets.py +184 -0
- vanishing_point_extraction/neurvps/neurvps/models/__init__.py +2 -0
- vanishing_point_extraction/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/models/__pycache__/conic.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/models/__pycache__/deformable.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/models/__pycache__/hourglass_pose.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/models/__pycache__/vanishing_net.cpython-38.pyc +0 -0
- vanishing_point_extraction/neurvps/neurvps/models/conic.py +50 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/.ninja_deps +0 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/.ninja_log +7 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/DCN.so +3 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/build.ninja +30 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o +3 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o +3 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv.cpp +75 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cpu.h +39 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.cu +271 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.h +38 -0
- vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_im2col_cuda.cuh +388 -0
- vanishing_point_extraction/neurvps/neurvps/models/deformable.py +193 -0
- vanishing_point_extraction/neurvps/neurvps/models/hourglass_pose.py +192 -0
- vanishing_point_extraction/neurvps/neurvps/models/vanishing_net.py +181 -0
- vanishing_point_extraction/neurvps/neurvps/trainer.py +304 -0
- vanishing_point_extraction/neurvps/neurvps/utils.py +96 -0
- vanishing_point_extraction/neurvps/vp_estim.py +180 -0
- vanishing_point_extraction/vanishing_point/neurvps/TMM17/checkpoint_latest.pth.tar +3 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/__init__.py +4 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc +0 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/box.cpython-38.pyc +0 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/config.cpython-38.pyc +0 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc +0 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc +0 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/utils.cpython-38.pyc +0 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/box.py +1110 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/config.py +9 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/datasets.py +184 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__init__.py +2 -0
- vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc +0 -0
.gitattributes
CHANGED
|
@@ -57,3 +57,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/DCN.so filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o filter=lfs diff=lfs merge=lfs -text
|
| 63 |
+
vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/DCN.so filter=lfs diff=lfs merge=lfs -text
|
| 64 |
+
vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o filter=lfs diff=lfs merge=lfs -text
|
| 65 |
+
vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o filter=lfs diff=lfs merge=lfs -text
|
vanishing_point_extraction/neurvps/TMM17/checkpoint_latest.pth.tar
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:951f12bfe2a3afdef5b95d6a1cb9bbe51e73913c70212c8e628b696bd39a74e7
|
| 3 |
+
size 358844104
|
vanishing_point_extraction/neurvps/TMM17/config.yaml
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
io:
|
| 2 |
+
augmentation_level: 2
|
| 3 |
+
datadir: data/tmm17/
|
| 4 |
+
dataset: TMM17
|
| 5 |
+
focal_length: 1.0
|
| 6 |
+
logdir: logs/
|
| 7 |
+
num_vpts: 1
|
| 8 |
+
num_workers: 4
|
| 9 |
+
resume_from: logs/200107-013044-14545f5-tmm17-bugfix-lr1e-4-long
|
| 10 |
+
tensorboard_port: 0
|
| 11 |
+
validation_debug: 0
|
| 12 |
+
validation_interval: 8000
|
| 13 |
+
model:
|
| 14 |
+
backbone: stacked_hourglass
|
| 15 |
+
batch_size: 8
|
| 16 |
+
conic_6x: false
|
| 17 |
+
depth: 4
|
| 18 |
+
fc_channel: 1024
|
| 19 |
+
im2col_step: 11
|
| 20 |
+
multires:
|
| 21 |
+
- 0.0051941870036646
|
| 22 |
+
- 0.02004838034795
|
| 23 |
+
- 0.0774278195486317
|
| 24 |
+
- 0.299564810864565
|
| 25 |
+
num_blocks: 1
|
| 26 |
+
num_stacks: 1
|
| 27 |
+
output_stride: 4
|
| 28 |
+
smp_multiplier: 2
|
| 29 |
+
smp_neg: 1
|
| 30 |
+
smp_pos: 1
|
| 31 |
+
smp_rnd: 3
|
| 32 |
+
upsample_scale: 1
|
| 33 |
+
optim:
|
| 34 |
+
amsgrad: true
|
| 35 |
+
lr: 0.0001
|
| 36 |
+
lr_decay_epoch: 60
|
| 37 |
+
max_epoch: 100
|
| 38 |
+
name: Adam
|
| 39 |
+
weight_decay: 0.0006
|
vanishing_point_extraction/neurvps/neurvps/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import neurvps.models
|
| 2 |
+
import neurvps.trainer
|
| 3 |
+
import neurvps.datasets
|
| 4 |
+
import neurvps.config
|
vanishing_point_extraction/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (267 Bytes). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/__pycache__/box.cpython-38.pyc
ADDED
|
Binary file (33.4 kB). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/__pycache__/config.cpython-38.pyc
ADDED
|
Binary file (231 Bytes). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc
ADDED
|
Binary file (6.56 kB). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc
ADDED
|
Binary file (8.97 kB). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/__pycache__/utils.cpython-38.pyc
ADDED
|
Binary file (3.72 kB). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/box.py
ADDED
|
@@ -0,0 +1,1110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# -*- coding: UTF-8 -*-
|
| 3 |
+
#
|
| 4 |
+
# Copyright (c) 2017-2019 - Chris Griffith - MIT License
|
| 5 |
+
"""
|
| 6 |
+
Improved dictionary access through dot notation with additional tools.
|
| 7 |
+
"""
|
| 8 |
+
import string
|
| 9 |
+
import sys
|
| 10 |
+
import json
|
| 11 |
+
import re
|
| 12 |
+
import copy
|
| 13 |
+
from keyword import kwlist
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
try:
|
| 17 |
+
from collections.abc import Iterable, Mapping, Callable
|
| 18 |
+
except ImportError:
|
| 19 |
+
from collections import Iterable, Mapping, Callable
|
| 20 |
+
|
| 21 |
+
yaml_support = True
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
import yaml
|
| 25 |
+
except ImportError:
|
| 26 |
+
try:
|
| 27 |
+
import ruamel.yaml as yaml
|
| 28 |
+
except ImportError:
|
| 29 |
+
yaml = None
|
| 30 |
+
yaml_support = False
|
| 31 |
+
|
| 32 |
+
if sys.version_info >= (3, 0):
|
| 33 |
+
basestring = str
|
| 34 |
+
else:
|
| 35 |
+
from io import open
|
| 36 |
+
|
| 37 |
+
__all__ = ['Box', 'ConfigBox', 'BoxList', 'SBox',
|
| 38 |
+
'BoxError', 'BoxKeyError']
|
| 39 |
+
__author__ = 'Chris Griffith'
|
| 40 |
+
__version__ = '3.2.4'
|
| 41 |
+
|
| 42 |
+
BOX_PARAMETERS = ('default_box', 'default_box_attr', 'conversion_box',
|
| 43 |
+
'frozen_box', 'camel_killer_box', 'box_it_up',
|
| 44 |
+
'box_safe_prefix', 'box_duplicates', 'ordered_box')
|
| 45 |
+
|
| 46 |
+
_first_cap_re = re.compile('(.)([A-Z][a-z]+)')
|
| 47 |
+
_all_cap_re = re.compile('([a-z0-9])([A-Z])')
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class BoxError(Exception):
|
| 51 |
+
"""Non standard dictionary exceptions"""
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class BoxKeyError(BoxError, KeyError, AttributeError):
|
| 55 |
+
"""Key does not exist"""
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# Abstract converter functions for use in any Box class
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _to_json(obj, filename=None,
|
| 62 |
+
encoding="utf-8", errors="strict", **json_kwargs):
|
| 63 |
+
json_dump = json.dumps(obj,
|
| 64 |
+
ensure_ascii=False, **json_kwargs)
|
| 65 |
+
if filename:
|
| 66 |
+
with open(filename, 'w', encoding=encoding, errors=errors) as f:
|
| 67 |
+
f.write(json_dump if sys.version_info >= (3, 0) else
|
| 68 |
+
json_dump.decode("utf-8"))
|
| 69 |
+
else:
|
| 70 |
+
return json_dump
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _from_json(json_string=None, filename=None,
|
| 74 |
+
encoding="utf-8", errors="strict", multiline=False, **kwargs):
|
| 75 |
+
if filename:
|
| 76 |
+
with open(filename, 'r', encoding=encoding, errors=errors) as f:
|
| 77 |
+
if multiline:
|
| 78 |
+
data = [json.loads(line.strip(), **kwargs) for line in f
|
| 79 |
+
if line.strip() and not line.strip().startswith("#")]
|
| 80 |
+
else:
|
| 81 |
+
data = json.load(f, **kwargs)
|
| 82 |
+
elif json_string:
|
| 83 |
+
data = json.loads(json_string, **kwargs)
|
| 84 |
+
else:
|
| 85 |
+
raise BoxError('from_json requires a string or filename')
|
| 86 |
+
return data
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def _to_yaml(obj, filename=None, default_flow_style=False,
|
| 90 |
+
encoding="utf-8", errors="strict",
|
| 91 |
+
**yaml_kwargs):
|
| 92 |
+
if filename:
|
| 93 |
+
with open(filename, 'w',
|
| 94 |
+
encoding=encoding, errors=errors) as f:
|
| 95 |
+
yaml.dump(obj, stream=f,
|
| 96 |
+
default_flow_style=default_flow_style,
|
| 97 |
+
**yaml_kwargs)
|
| 98 |
+
else:
|
| 99 |
+
return yaml.dump(obj,
|
| 100 |
+
default_flow_style=default_flow_style,
|
| 101 |
+
**yaml_kwargs)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _from_yaml(yaml_string=None, filename=None,
|
| 105 |
+
encoding="utf-8", errors="strict",
|
| 106 |
+
**kwargs):
|
| 107 |
+
if filename:
|
| 108 |
+
with open(filename, 'r',
|
| 109 |
+
encoding=encoding, errors=errors) as f:
|
| 110 |
+
data = yaml.load(f, **kwargs)
|
| 111 |
+
elif yaml_string:
|
| 112 |
+
data = yaml.load(yaml_string, **kwargs)
|
| 113 |
+
else:
|
| 114 |
+
raise BoxError('from_yaml requires a string or filename')
|
| 115 |
+
return data
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
# Helper functions
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def _safe_key(key):
|
| 122 |
+
try:
|
| 123 |
+
return str(key)
|
| 124 |
+
except UnicodeEncodeError:
|
| 125 |
+
return key.encode("utf-8", "ignore")
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def _safe_attr(attr, camel_killer=False, replacement_char='x'):
|
| 129 |
+
"""Convert a key into something that is accessible as an attribute"""
|
| 130 |
+
allowed = string.ascii_letters + string.digits + '_'
|
| 131 |
+
|
| 132 |
+
attr = _safe_key(attr)
|
| 133 |
+
|
| 134 |
+
if camel_killer:
|
| 135 |
+
attr = _camel_killer(attr)
|
| 136 |
+
|
| 137 |
+
attr = attr.replace(' ', '_')
|
| 138 |
+
|
| 139 |
+
out = ''
|
| 140 |
+
for character in attr:
|
| 141 |
+
out += character if character in allowed else "_"
|
| 142 |
+
out = out.strip("_")
|
| 143 |
+
|
| 144 |
+
try:
|
| 145 |
+
int(out[0])
|
| 146 |
+
except (ValueError, IndexError):
|
| 147 |
+
pass
|
| 148 |
+
else:
|
| 149 |
+
out = '{0}{1}'.format(replacement_char, out)
|
| 150 |
+
|
| 151 |
+
if out in kwlist:
|
| 152 |
+
out = '{0}{1}'.format(replacement_char, out)
|
| 153 |
+
|
| 154 |
+
return re.sub('_+', '_', out)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def _camel_killer(attr):
|
| 158 |
+
"""
|
| 159 |
+
CamelKiller, qu'est-ce que c'est?
|
| 160 |
+
|
| 161 |
+
Taken from http://stackoverflow.com/a/1176023/3244542
|
| 162 |
+
"""
|
| 163 |
+
try:
|
| 164 |
+
attr = str(attr)
|
| 165 |
+
except UnicodeEncodeError:
|
| 166 |
+
attr = attr.encode("utf-8", "ignore")
|
| 167 |
+
|
| 168 |
+
s1 = _first_cap_re.sub(r'\1_\2', attr)
|
| 169 |
+
s2 = _all_cap_re.sub(r'\1_\2', s1)
|
| 170 |
+
return re.sub('_+', '_', s2.casefold() if hasattr(s2, 'casefold') else
|
| 171 |
+
s2.lower())
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def _recursive_tuples(iterable, box_class, recreate_tuples=False, **kwargs):
|
| 175 |
+
out_list = []
|
| 176 |
+
for i in iterable:
|
| 177 |
+
if isinstance(i, dict):
|
| 178 |
+
out_list.append(box_class(i, **kwargs))
|
| 179 |
+
elif isinstance(i, list) or (recreate_tuples and isinstance(i, tuple)):
|
| 180 |
+
out_list.append(_recursive_tuples(i, box_class,
|
| 181 |
+
recreate_tuples, **kwargs))
|
| 182 |
+
else:
|
| 183 |
+
out_list.append(i)
|
| 184 |
+
return tuple(out_list)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def _conversion_checks(item, keys, box_config, check_only=False,
|
| 188 |
+
pre_check=False):
|
| 189 |
+
"""
|
| 190 |
+
Internal use for checking if a duplicate safe attribute already exists
|
| 191 |
+
|
| 192 |
+
:param item: Item to see if a dup exists
|
| 193 |
+
:param keys: Keys to check against
|
| 194 |
+
:param box_config: Easier to pass in than ask for specfic items
|
| 195 |
+
:param check_only: Don't bother doing the conversion work
|
| 196 |
+
:param pre_check: Need to add the item to the list of keys to check
|
| 197 |
+
:return: the original unmodified key, if exists and not check_only
|
| 198 |
+
"""
|
| 199 |
+
if box_config['box_duplicates'] != 'ignore':
|
| 200 |
+
if pre_check:
|
| 201 |
+
keys = list(keys) + [item]
|
| 202 |
+
|
| 203 |
+
key_list = [(k,
|
| 204 |
+
_safe_attr(k, camel_killer=box_config['camel_killer_box'],
|
| 205 |
+
replacement_char=box_config['box_safe_prefix']
|
| 206 |
+
)) for k in keys]
|
| 207 |
+
if len(key_list) > len(set(x[1] for x in key_list)):
|
| 208 |
+
seen = set()
|
| 209 |
+
dups = set()
|
| 210 |
+
for x in key_list:
|
| 211 |
+
if x[1] in seen:
|
| 212 |
+
dups.add("{0}({1})".format(x[0], x[1]))
|
| 213 |
+
seen.add(x[1])
|
| 214 |
+
if box_config['box_duplicates'].startswith("warn"):
|
| 215 |
+
warnings.warn('Duplicate conversion attributes exist: '
|
| 216 |
+
'{0}'.format(dups))
|
| 217 |
+
else:
|
| 218 |
+
raise BoxError('Duplicate conversion attributes exist: '
|
| 219 |
+
'{0}'.format(dups))
|
| 220 |
+
if check_only:
|
| 221 |
+
return
|
| 222 |
+
# This way will be slower for warnings, as it will have double work
|
| 223 |
+
# But faster for the default 'ignore'
|
| 224 |
+
for k in keys:
|
| 225 |
+
if item == _safe_attr(k, camel_killer=box_config['camel_killer_box'],
|
| 226 |
+
replacement_char=box_config['box_safe_prefix']):
|
| 227 |
+
return k
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def _get_box_config(cls, kwargs):
|
| 231 |
+
return {
|
| 232 |
+
# Internal use only
|
| 233 |
+
'__converted': set(),
|
| 234 |
+
'__box_heritage': kwargs.pop('__box_heritage', None),
|
| 235 |
+
'__created': False,
|
| 236 |
+
'__ordered_box_values': [],
|
| 237 |
+
# Can be changed by user after box creation
|
| 238 |
+
'default_box': kwargs.pop('default_box', False),
|
| 239 |
+
'default_box_attr': kwargs.pop('default_box_attr', cls),
|
| 240 |
+
'conversion_box': kwargs.pop('conversion_box', True),
|
| 241 |
+
'box_safe_prefix': kwargs.pop('box_safe_prefix', 'x'),
|
| 242 |
+
'frozen_box': kwargs.pop('frozen_box', False),
|
| 243 |
+
'camel_killer_box': kwargs.pop('camel_killer_box', False),
|
| 244 |
+
'modify_tuples_box': kwargs.pop('modify_tuples_box', False),
|
| 245 |
+
'box_duplicates': kwargs.pop('box_duplicates', 'ignore'),
|
| 246 |
+
'ordered_box': kwargs.pop('ordered_box', False)
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
class Box(dict):
|
| 251 |
+
"""
|
| 252 |
+
Improved dictionary access through dot notation with additional tools.
|
| 253 |
+
|
| 254 |
+
:param default_box: Similar to defaultdict, return a default value
|
| 255 |
+
:param default_box_attr: Specify the default replacement.
|
| 256 |
+
WARNING: If this is not the default 'Box', it will not be recursive
|
| 257 |
+
:param frozen_box: After creation, the box cannot be modified
|
| 258 |
+
:param camel_killer_box: Convert CamelCase to snake_case
|
| 259 |
+
:param conversion_box: Check for near matching keys as attributes
|
| 260 |
+
:param modify_tuples_box: Recreate incoming tuples with dicts into Boxes
|
| 261 |
+
:param box_it_up: Recursively create all Boxes from the start
|
| 262 |
+
:param box_safe_prefix: Conversion box prefix for unsafe attributes
|
| 263 |
+
:param box_duplicates: "ignore", "error" or "warn" when duplicates exists
|
| 264 |
+
in a conversion_box
|
| 265 |
+
:param ordered_box: Preserve the order of keys entered into the box
|
| 266 |
+
"""
|
| 267 |
+
|
| 268 |
+
_protected_keys = dir({}) + ['to_dict', 'tree_view', 'to_json', 'to_yaml',
|
| 269 |
+
'from_yaml', 'from_json']
|
| 270 |
+
|
| 271 |
+
def __new__(cls, *args, **kwargs):
|
| 272 |
+
"""
|
| 273 |
+
Due to the way pickling works in python 3, we need to make sure
|
| 274 |
+
the box config is created as early as possible.
|
| 275 |
+
"""
|
| 276 |
+
obj = super(Box, cls).__new__(cls, *args, **kwargs)
|
| 277 |
+
obj._box_config = _get_box_config(cls, kwargs)
|
| 278 |
+
return obj
|
| 279 |
+
|
| 280 |
+
def __init__(self, *args, **kwargs):
|
| 281 |
+
self._box_config = _get_box_config(self.__class__, kwargs)
|
| 282 |
+
if self._box_config['ordered_box']:
|
| 283 |
+
self._box_config['__ordered_box_values'] = []
|
| 284 |
+
if (not self._box_config['conversion_box'] and
|
| 285 |
+
self._box_config['box_duplicates'] != "ignore"):
|
| 286 |
+
raise BoxError('box_duplicates are only for conversion_boxes')
|
| 287 |
+
if len(args) == 1:
|
| 288 |
+
if isinstance(args[0], basestring):
|
| 289 |
+
raise ValueError('Cannot extrapolate Box from string')
|
| 290 |
+
if isinstance(args[0], Mapping):
|
| 291 |
+
for k, v in args[0].items():
|
| 292 |
+
if v is args[0]:
|
| 293 |
+
v = self
|
| 294 |
+
self[k] = v
|
| 295 |
+
self.__add_ordered(k)
|
| 296 |
+
elif isinstance(args[0], Iterable):
|
| 297 |
+
for k, v in args[0]:
|
| 298 |
+
self[k] = v
|
| 299 |
+
self.__add_ordered(k)
|
| 300 |
+
|
| 301 |
+
else:
|
| 302 |
+
raise ValueError('First argument must be mapping or iterable')
|
| 303 |
+
elif args:
|
| 304 |
+
raise TypeError('Box expected at most 1 argument, '
|
| 305 |
+
'got {0}'.format(len(args)))
|
| 306 |
+
|
| 307 |
+
box_it = kwargs.pop('box_it_up', False)
|
| 308 |
+
for k, v in kwargs.items():
|
| 309 |
+
if args and isinstance(args[0], Mapping) and v is args[0]:
|
| 310 |
+
v = self
|
| 311 |
+
self[k] = v
|
| 312 |
+
self.__add_ordered(k)
|
| 313 |
+
|
| 314 |
+
if (self._box_config['frozen_box'] or box_it or
|
| 315 |
+
self._box_config['box_duplicates'] != 'ignore'):
|
| 316 |
+
self.box_it_up()
|
| 317 |
+
|
| 318 |
+
self._box_config['__created'] = True
|
| 319 |
+
|
| 320 |
+
def __add_ordered(self, key):
|
| 321 |
+
if (self._box_config['ordered_box'] and
|
| 322 |
+
key not in self._box_config['__ordered_box_values']):
|
| 323 |
+
self._box_config['__ordered_box_values'].append(key)
|
| 324 |
+
|
| 325 |
+
def box_it_up(self):
|
| 326 |
+
"""
|
| 327 |
+
Perform value lookup for all items in current dictionary,
|
| 328 |
+
generating all sub Box objects, while also running `box_it_up` on
|
| 329 |
+
any of those sub box objects.
|
| 330 |
+
"""
|
| 331 |
+
for k in self:
|
| 332 |
+
_conversion_checks(k, self.keys(), self._box_config,
|
| 333 |
+
check_only=True)
|
| 334 |
+
if self[k] is not self and hasattr(self[k], 'box_it_up'):
|
| 335 |
+
self[k].box_it_up()
|
| 336 |
+
|
| 337 |
+
def __hash__(self):
|
| 338 |
+
if self._box_config['frozen_box']:
|
| 339 |
+
hashing = 54321
|
| 340 |
+
for item in self.items():
|
| 341 |
+
hashing ^= hash(item)
|
| 342 |
+
return hashing
|
| 343 |
+
raise TypeError("unhashable type: 'Box'")
|
| 344 |
+
|
| 345 |
+
def __dir__(self):
|
| 346 |
+
allowed = string.ascii_letters + string.digits + '_'
|
| 347 |
+
kill_camel = self._box_config['camel_killer_box']
|
| 348 |
+
items = set(dir(dict) + ['to_dict', 'to_json',
|
| 349 |
+
'from_json', 'box_it_up'])
|
| 350 |
+
# Only show items accessible by dot notation
|
| 351 |
+
for key in self.keys():
|
| 352 |
+
key = _safe_key(key)
|
| 353 |
+
if (' ' not in key and key[0] not in string.digits and
|
| 354 |
+
key not in kwlist):
|
| 355 |
+
for letter in key:
|
| 356 |
+
if letter not in allowed:
|
| 357 |
+
break
|
| 358 |
+
else:
|
| 359 |
+
items.add(key)
|
| 360 |
+
|
| 361 |
+
for key in self.keys():
|
| 362 |
+
key = _safe_key(key)
|
| 363 |
+
if key not in items:
|
| 364 |
+
if self._box_config['conversion_box']:
|
| 365 |
+
key = _safe_attr(key, camel_killer=kill_camel,
|
| 366 |
+
replacement_char=self._box_config[
|
| 367 |
+
'box_safe_prefix'])
|
| 368 |
+
if key:
|
| 369 |
+
items.add(key)
|
| 370 |
+
if kill_camel:
|
| 371 |
+
snake_key = _camel_killer(key)
|
| 372 |
+
if snake_key:
|
| 373 |
+
items.remove(key)
|
| 374 |
+
items.add(snake_key)
|
| 375 |
+
|
| 376 |
+
if yaml_support:
|
| 377 |
+
items.add('to_yaml')
|
| 378 |
+
items.add('from_yaml')
|
| 379 |
+
|
| 380 |
+
return list(items)
|
| 381 |
+
|
| 382 |
+
def get(self, key, default=None):
|
| 383 |
+
try:
|
| 384 |
+
return self[key]
|
| 385 |
+
except KeyError:
|
| 386 |
+
if isinstance(default, dict) and not isinstance(default, Box):
|
| 387 |
+
return Box(default)
|
| 388 |
+
if isinstance(default, list) and not isinstance(default, BoxList):
|
| 389 |
+
return BoxList(default)
|
| 390 |
+
return default
|
| 391 |
+
|
| 392 |
+
def copy(self):
|
| 393 |
+
return self.__class__(super(self.__class__, self).copy())
|
| 394 |
+
|
| 395 |
+
def __copy__(self):
|
| 396 |
+
return self.__class__(super(self.__class__, self).copy())
|
| 397 |
+
|
| 398 |
+
def __deepcopy__(self, memodict=None):
|
| 399 |
+
out = self.__class__()
|
| 400 |
+
memodict = memodict or {}
|
| 401 |
+
memodict[id(self)] = out
|
| 402 |
+
for k, v in self.items():
|
| 403 |
+
out[copy.deepcopy(k, memodict)] = copy.deepcopy(v, memodict)
|
| 404 |
+
return out
|
| 405 |
+
|
| 406 |
+
def __setstate__(self, state):
|
| 407 |
+
self._box_config = state['_box_config']
|
| 408 |
+
self.__dict__.update(state)
|
| 409 |
+
|
| 410 |
+
def __getitem__(self, item, _ignore_default=False):
|
| 411 |
+
try:
|
| 412 |
+
value = super(Box, self).__getitem__(item)
|
| 413 |
+
except KeyError as err:
|
| 414 |
+
if item == '_box_config':
|
| 415 |
+
raise BoxKeyError('_box_config should only exist as an '
|
| 416 |
+
'attribute and is never defaulted')
|
| 417 |
+
if self._box_config['default_box'] and not _ignore_default:
|
| 418 |
+
return self.__get_default(item)
|
| 419 |
+
raise BoxKeyError(str(err))
|
| 420 |
+
else:
|
| 421 |
+
return self.__convert_and_store(item, value)
|
| 422 |
+
|
| 423 |
+
def keys(self):
|
| 424 |
+
if self._box_config['ordered_box']:
|
| 425 |
+
return self._box_config['__ordered_box_values']
|
| 426 |
+
return super(Box, self).keys()
|
| 427 |
+
|
| 428 |
+
def values(self):
|
| 429 |
+
return [self[x] for x in self.keys()]
|
| 430 |
+
|
| 431 |
+
def items(self):
|
| 432 |
+
return [(x, self[x]) for x in self.keys()]
|
| 433 |
+
|
| 434 |
+
def __get_default(self, item):
|
| 435 |
+
default_value = self._box_config['default_box_attr']
|
| 436 |
+
if default_value is self.__class__:
|
| 437 |
+
return self.__class__(__box_heritage=(self, item),
|
| 438 |
+
**self.__box_config())
|
| 439 |
+
elif isinstance(default_value, Callable):
|
| 440 |
+
return default_value()
|
| 441 |
+
elif hasattr(default_value, 'copy'):
|
| 442 |
+
return default_value.copy()
|
| 443 |
+
return default_value
|
| 444 |
+
|
| 445 |
+
def __box_config(self):
    """Return a copy of the user-facing config, dropping '__' internals."""
    return {opt: val for opt, val in self._box_config.copy().items()
            if not opt.startswith("__")}
|
| 451 |
+
|
| 452 |
+
def __convert_and_store(self, item, value):
    """Convert a raw dict/list/tuple value to Box/BoxList on first access.

    The converted object is stored back under the same key, and the key
    is remembered in '__converted' so conversion happens only once.
    """
    if item in self._box_config['__converted']:
        return value
    if isinstance(value, dict) and not isinstance(value, Box):
        value = self.__class__(value, __box_heritage=(self, item),
                               **self.__box_config())
        self[item] = value
    elif isinstance(value, list) and not isinstance(value, BoxList):
        if self._box_config['frozen_box']:
            # Frozen boxes store immutable tuples instead of BoxLists.
            value = _recursive_tuples(value, self.__class__,
                                      recreate_tuples=self._box_config[
                                          'modify_tuples_box'],
                                      __box_heritage=(self, item),
                                      **self.__box_config())
        else:
            value = BoxList(value, __box_heritage=(self, item),
                            box_class=self.__class__,
                            **self.__box_config())
        self[item] = value
    elif (self._box_config['modify_tuples_box'] and
            isinstance(value, tuple)):
        # Optionally rebuild tuples so nested dicts inside them box up.
        value = _recursive_tuples(value, self.__class__,
                                  recreate_tuples=True,
                                  __box_heritage=(self, item),
                                  **self.__box_config())
        self[item] = value
    self._box_config['__converted'].add(item)
    return value
|
| 480 |
+
|
| 481 |
+
def __create_lineage(self):
    # When this Box was handed out as a default for a missing key of a
    # parent box, write it back into the parent on first mutation so
    # chained assignment like `box.a.b = 1` actually persists.
    if (self._box_config['__box_heritage'] and
            self._box_config['__created']):
        past, item = self._box_config['__box_heritage']
        if not past[item]:
            past[item] = self
        self._box_config['__box_heritage'] = None
|
| 488 |
+
|
| 489 |
+
def __getattr__(self, item):
    """Attribute access falls back to item lookup with safe-name and
    CamelCase matching, then default-box generation."""
    try:
        try:
            # Probe for a real key first; don't create defaults yet.
            value = self.__getitem__(item, _ignore_default=True)
        except KeyError:
            value = object.__getattribute__(self, item)
    except AttributeError as err:
        if item == "__getstate__":
            # Must raise AttributeError (not Box errors) so pickle
            # falls back to its default protocol.
            raise AttributeError(item)
        if item == '_box_config':
            raise BoxError('_box_config key must exist')
        kill_camel = self._box_config['camel_killer_box']
        if self._box_config['conversion_box'] and item:
            # e.g. attribute 'my_key' may map to stored key 'my key'.
            k = _conversion_checks(item, self.keys(), self._box_config)
            if k:
                return self.__getitem__(k)
        if kill_camel:
            # Match snake_case attribute against CamelCase keys.
            for k in self.keys():
                if item == _camel_killer(k):
                    return self.__getitem__(k)
        if self._box_config['default_box']:
            return self.__get_default(item)
        raise BoxKeyError(str(err))
    else:
        if item == '_box_config':
            return value
        return self.__convert_and_store(item, value)
|
| 516 |
+
|
| 517 |
+
def __setitem__(self, key, value):
    # Frozen boxes only reject writes after construction has finished
    # ('__created'), so __init__ itself can still populate the box.
    if (key != '_box_config' and self._box_config['__created'] and
            self._box_config['frozen_box']):
        raise BoxError('Box is frozen')
    if self._box_config['conversion_box']:
        # Warn/raise about keys that cannot become attribute names.
        _conversion_checks(key, self.keys(), self._box_config,
                           check_only=True, pre_check=True)
    super(Box, self).__setitem__(key, value)
    self.__add_ordered(key)
    self.__create_lineage()
|
| 527 |
+
|
| 528 |
+
def __setattr__(self, key, value):
    """Attribute assignment writes through to the dict unless the name
    is a real (protected or pre-existing) object attribute."""
    if (key != '_box_config' and self._box_config['frozen_box'] and
            self._box_config['__created']):
        raise BoxError('Box is frozen')
    if key in self._protected_keys:
        # Never shadow Box/dict API methods with data.
        raise AttributeError("Key name '{0}' is protected".format(key))
    if key == '_box_config':
        return object.__setattr__(self, key, value)
    try:
        # Existing true attributes stay attributes (see `else` below).
        object.__getattribute__(self, key)
    except (AttributeError, UnicodeEncodeError):
        if (key not in self.keys() and
                (self._box_config['conversion_box'] or
                 self._box_config['camel_killer_box'])):
            if self._box_config['conversion_box']:
                # Map a safe attribute name back to the original key.
                k = _conversion_checks(key, self.keys(),
                                       self._box_config)
                self[key if not k else k] = value
            elif self._box_config['camel_killer_box']:
                for each_key in self:
                    if key == _camel_killer(each_key):
                        self[each_key] = value
                        break
        else:
            self[key] = value
    else:
        object.__setattr__(self, key, value)
    self.__add_ordered(key)
    self.__create_lineage()
|
| 557 |
+
|
| 558 |
+
def __delitem__(self, key):
    if self._box_config['frozen_box']:
        raise BoxError('Box is frozen')
    super(Box, self).__delitem__(key)
    # Keep the insertion-order side list in sync.
    if (self._box_config['ordered_box'] and
            key in self._box_config['__ordered_box_values']):
        self._box_config['__ordered_box_values'].remove(key)
|
| 565 |
+
|
| 566 |
+
def __delattr__(self, item):
    if self._box_config['frozen_box']:
        raise BoxError('Box is frozen')
    if item == '_box_config':
        raise BoxError('"_box_config" is protected')
    if item in self._protected_keys:
        raise AttributeError("Key name '{0}' is protected".format(item))
    try:
        # True object attributes are deleted as attributes; anything
        # else is assumed to be a dict key.
        object.__getattribute__(self, item)
    except AttributeError:
        del self[item]
    else:
        object.__delattr__(self, item)
    # Keep the insertion-order side list in sync.
    if (self._box_config['ordered_box'] and
            item in self._box_config['__ordered_box_values']):
        self._box_config['__ordered_box_values'].remove(item)
|
| 582 |
+
|
| 583 |
+
def pop(self, key, *args):
    """dict.pop with Box error types; accepts at most one default."""
    if len(args) > 1:
        raise BoxError('pop() takes only one optional'
                       ' argument "default"')
    try:
        value = self[key]
    except KeyError:
        if args:
            return args[0]
        raise BoxKeyError('{0}'.format(key))
    del self[key]
    return value
|
| 602 |
+
|
| 603 |
+
def clear(self):
    """Remove all items and reset the insertion-order bookkeeping."""
    super(Box, self).clear()
    self._box_config['__ordered_box_values'] = []
|
| 606 |
+
|
| 607 |
+
def popitem(self):
    """Remove and return the first (key, value) pair in key order."""
    for first_key in self:
        return first_key, self.pop(first_key)
    raise BoxKeyError('Empty box')
|
| 613 |
+
|
| 614 |
+
def __repr__(self):
    """Debug representation: '<Box: {...}>' built from to_dict()."""
    return '<Box: {0}>'.format(self.to_dict())
|
| 616 |
+
|
| 617 |
+
def __str__(self):
    """String form is the plain-dict string form."""
    return '{0}'.format(self.to_dict())
|
| 619 |
+
|
| 620 |
+
def __iter__(self):
    """Iterate over keys (honors ordered_box ordering via keys())."""
    return iter(self.keys())
|
| 623 |
+
|
| 624 |
+
def __reversed__(self):
    """Iterate over keys in reverse order."""
    return reversed(list(self.keys()))
|
| 627 |
+
|
| 628 |
+
def to_dict(self):
    """
    Turn the Box and sub Boxes back into a native
    python dictionary.

    :return: python dictionary of this Box
    """
    out_dict = dict(self)
    for k, v in out_dict.items():
        if v is self:
            # Self-reference: point at the new dict, not the Box.
            out_dict[k] = out_dict
        elif hasattr(v, 'to_dict'):
            out_dict[k] = v.to_dict()
        elif hasattr(v, 'to_list'):
            out_dict[k] = v.to_list()
    return out_dict
|
| 644 |
+
|
| 645 |
+
def update(self, item=None, **kwargs):
    """dict.update that converts incoming values to Box/BoxList and
    recursively merges nested dicts instead of replacing them."""
    if not item:
        item = kwargs
    iter_over = item.items() if hasattr(item, 'items') else item
    for k, v in iter_over:
        if isinstance(v, dict):
            # Box objects must be created in case they are already
            # in the `converted` box_config set
            v = self.__class__(v)
            if k in self and isinstance(self[k], dict):
                # Recursive merge for dict-on-dict.
                self[k].update(v)
                continue
        if isinstance(v, list):
            v = BoxList(v)
        try:
            # Prefer attribute assignment so name conversion applies.
            self.__setattr__(k, v)
        except (AttributeError, TypeError):
            self.__setitem__(k, v)
|
| 663 |
+
|
| 664 |
+
def setdefault(self, item, default=None):
    """dict.setdefault with Box/BoxList conversion of the default."""
    if item in self:
        return self[item]

    if isinstance(default, list):
        default = BoxList(default)
    elif isinstance(default, dict):
        default = self.__class__(default)
    self[item] = default
    return default
|
| 674 |
+
|
| 675 |
+
def to_json(self, filename=None,
            encoding="utf-8", errors="strict", **json_kwargs):
    """
    Transform the Box object into a JSON string.

    :param filename: If provided will save to file
    :param encoding: File encoding
    :param errors: How to handle encoding errors
    :param json_kwargs: additional arguments to pass to json.dump(s)
    :return: string of JSON or return of `json.dump`
    """
    # Serialize the plain-dict view so nested Boxes round-trip cleanly.
    return _to_json(self.to_dict(), filename=filename,
                    encoding=encoding, errors=errors, **json_kwargs)
|
| 688 |
+
|
| 689 |
+
@classmethod
def from_json(cls, json_string=None, filename=None,
              encoding="utf-8", errors="strict", **kwargs):
    """
    Transform a json object string into a Box object. If the incoming
    json is a list, you must use BoxList.from_json.

    :param json_string: string to pass to `json.loads`
    :param filename: filename to open and pass to `json.load`
    :param encoding: File encoding
    :param errors: How to handle encoding errors
    :param kwargs: parameters to pass to `Box()` or `json.loads`
    :return: Box object from json data
    """
    # Split kwargs: Box() options go to the constructor, everything
    # else is forwarded to json.load(s).
    bx_args = {}
    for arg in kwargs.copy():
        if arg in BOX_PARAMETERS:
            bx_args[arg] = kwargs.pop(arg)

    data = _from_json(json_string, filename=filename,
                      encoding=encoding, errors=errors, **kwargs)

    if not isinstance(data, dict):
        raise BoxError('json data not returned as a dictionary, '
                       'but rather a {0}'.format(type(data).__name__))
    return cls(data, **bx_args)
|
| 715 |
+
|
| 716 |
+
# YAML serialization is only available when PyYAML imported successfully.
if yaml_support:
    def to_yaml(self, filename=None, default_flow_style=False,
                encoding="utf-8", errors="strict",
                **yaml_kwargs):
        """
        Transform the Box object into a YAML string.

        :param filename: If provided will save to file
        :param default_flow_style: False will recursively dump dicts
        :param encoding: File encoding
        :param errors: How to handle encoding errors
        :param yaml_kwargs: additional arguments to pass to yaml.dump
        :return: string of YAML or return of `yaml.dump`
        """
        return _to_yaml(self.to_dict(), filename=filename,
                        default_flow_style=default_flow_style,
                        encoding=encoding, errors=errors, **yaml_kwargs)

    @classmethod
    def from_yaml(cls, yaml_string=None, filename=None,
                  encoding="utf-8", errors="strict",
                  loader=yaml.SafeLoader, **kwargs):
        """
        Transform a yaml object string into a Box object.

        :param yaml_string: string to pass to `yaml.load`
        :param filename: filename to open and pass to `yaml.load`
        :param encoding: File encoding
        :param errors: How to handle encoding errors
        :param loader: YAML Loader, defaults to SafeLoader
        :param kwargs: parameters to pass to `Box()` or `yaml.load`
        :return: Box object from yaml data
        """
        # Split kwargs: Box() options vs yaml.load options.
        bx_args = {}
        for arg in kwargs.copy():
            if arg in BOX_PARAMETERS:
                bx_args[arg] = kwargs.pop(arg)

        # SafeLoader by default so untrusted YAML cannot execute code.
        data = _from_yaml(yaml_string=yaml_string, filename=filename,
                          encoding=encoding, errors=errors,
                          Loader=loader, **kwargs)
        if not isinstance(data, dict):
            raise BoxError('yaml data not returned as a dictionary'
                           'but rather a {0}'.format(type(data).__name__))
        return cls(data, **bx_args)
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
class BoxList(list):
    """
    Drop in replacement of list, that converts added objects to Box or BoxList
    objects as necessary.
    """

    def __init__(self, iterable=None, box_class=Box, **box_options):
        self.box_class = box_class
        self.box_options = box_options
        # Remember the id() of the source iterable so self-referencing
        # lists are detected in append()/insert() instead of recursing.
        # (Fixed: the assignment was accidentally duplicated.)
        self.box_org_ref = id(iterable) if iterable else 0
        if iterable:
            for x in iterable:
                self.append(x)
        if box_options.get('frozen_box'):
            def frozen(*args, **kwargs):
                raise BoxError('BoxList is frozen')

            # Shadow every mutating list method on this instance.
            for method in ['append', 'extend', 'insert', 'pop',
                           'remove', 'reverse', 'sort']:
                self.__setattr__(method, frozen)

    def __delitem__(self, key):
        if self.box_options.get('frozen_box'):
            raise BoxError('BoxList is frozen')
        super(BoxList, self).__delitem__(key)

    def __setitem__(self, key, value):
        if self.box_options.get('frozen_box'):
            raise BoxError('BoxList is frozen')
        super(BoxList, self).__setitem__(key, value)

    def append(self, p_object):
        """list.append that boxes dicts and box-lists nested lists."""
        if isinstance(p_object, dict):
            try:
                p_object = self.box_class(p_object, **self.box_options)
            except AttributeError as err:
                # During unpickling box_class may not exist yet; only
                # re-raise when the attribute genuinely should be there.
                if 'box_class' in self.__dict__:
                    raise err
        elif isinstance(p_object, list):
            try:
                # A reference to the original source list means "self".
                p_object = (self if id(p_object) == self.box_org_ref else
                            BoxList(p_object))
            except AttributeError as err:
                if 'box_org_ref' in self.__dict__:
                    raise err
        super(BoxList, self).append(p_object)

    def extend(self, iterable):
        # Route through append() so each element is converted.
        for item in iterable:
            self.append(item)

    def insert(self, index, p_object):
        if isinstance(p_object, dict):
            p_object = self.box_class(p_object, **self.box_options)
        elif isinstance(p_object, list):
            p_object = (self if id(p_object) == self.box_org_ref else
                        BoxList(p_object))
        super(BoxList, self).insert(index, p_object)

    def __repr__(self):
        return "<BoxList: {0}>".format(self.to_list())

    def __str__(self):
        return str(self.to_list())

    def __copy__(self):
        return BoxList((x for x in self),
                       self.box_class,
                       **self.box_options)

    def __deepcopy__(self, memodict=None):
        out = self.__class__()
        memodict = memodict or {}
        memodict[id(self)] = out
        for k in self:
            # Fixed: pass the memo dict so shared/cyclic references are
            # preserved, mirroring Box.__deepcopy__.
            out.append(copy.deepcopy(k, memodict))
        return out

    def __hash__(self):
        # Only frozen BoxLists are hashable, like tuples vs lists.
        if self.box_options.get('frozen_box'):
            hashing = 98765
            hashing ^= hash(tuple(self))
            return hashing
        raise TypeError("unhashable type: 'BoxList'")

    def to_list(self):
        """Recursively convert back into a plain python list."""
        new_list = []
        for x in self:
            if x is self:
                # Self-reference: point at the new list, not the BoxList.
                new_list.append(new_list)
            elif isinstance(x, Box):
                new_list.append(x.to_dict())
            elif isinstance(x, BoxList):
                new_list.append(x.to_list())
            else:
                new_list.append(x)
        return new_list

    def to_json(self, filename=None,
                encoding="utf-8", errors="strict",
                multiline=False, **json_kwargs):
        """
        Transform the BoxList object into a JSON string.

        :param filename: If provided will save to file
        :param encoding: File encoding
        :param errors: How to handle encoding errors
        :param multiline: Put each item in list onto it's own line
        :param json_kwargs: additional arguments to pass to json.dump(s)
        :return: string of JSON or return of `json.dump`
        """
        if filename and multiline:
            # JSON-lines output: one serialized item per line.
            lines = [_to_json(item, filename=False, encoding=encoding,
                              errors=errors, **json_kwargs) for item in self]
            with open(filename, 'w', encoding=encoding, errors=errors) as f:
                f.write("\n".join(lines).decode('utf-8') if
                        sys.version_info < (3, 0) else "\n".join(lines))
        else:
            return _to_json(self.to_list(), filename=filename,
                            encoding=encoding, errors=errors, **json_kwargs)

    @classmethod
    def from_json(cls, json_string=None, filename=None, encoding="utf-8",
                  errors="strict", multiline=False, **kwargs):
        """
        Transform a json object string into a BoxList object. If the incoming
        json is a dict, you must use Box.from_json.

        :param json_string: string to pass to `json.loads`
        :param filename: filename to open and pass to `json.load`
        :param encoding: File encoding
        :param errors: How to handle encoding errors
        :param multiline: One object per line
        :param kwargs: parameters to pass to `Box()` or `json.loads`
        :return: BoxList object from json data
        """
        # Split kwargs: Box() options vs json.load(s) options.
        bx_args = {}
        for arg in kwargs.copy():
            if arg in BOX_PARAMETERS:
                bx_args[arg] = kwargs.pop(arg)

        data = _from_json(json_string, filename=filename, encoding=encoding,
                          errors=errors, multiline=multiline, **kwargs)

        if not isinstance(data, list):
            raise BoxError('json data not returned as a list, '
                           'but rather a {0}'.format(type(data).__name__))
        return cls(data, **bx_args)

    if yaml_support:
        def to_yaml(self, filename=None, default_flow_style=False,
                    encoding="utf-8", errors="strict",
                    **yaml_kwargs):
            """
            Transform the BoxList object into a YAML string.

            :param filename: If provided will save to file
            :param default_flow_style: False will recursively dump dicts
            :param encoding: File encoding
            :param errors: How to handle encoding errors
            :param yaml_kwargs: additional arguments to pass to yaml.dump
            :return: string of YAML or return of `yaml.dump`
            """
            return _to_yaml(self.to_list(), filename=filename,
                            default_flow_style=default_flow_style,
                            encoding=encoding, errors=errors, **yaml_kwargs)

        @classmethod
        def from_yaml(cls, yaml_string=None, filename=None,
                      encoding="utf-8", errors="strict",
                      loader=yaml.SafeLoader,
                      **kwargs):
            """
            Transform a yaml object string into a BoxList object.

            :param yaml_string: string to pass to `yaml.load`
            :param filename: filename to open and pass to `yaml.load`
            :param encoding: File encoding
            :param errors: How to handle encoding errors
            :param loader: YAML Loader, defaults to SafeLoader
            :param kwargs: parameters to pass to `BoxList()` or `yaml.load`
            :return: BoxList object from yaml data
            """
            bx_args = {}
            for arg in kwargs.copy():
                if arg in BOX_PARAMETERS:
                    bx_args[arg] = kwargs.pop(arg)

            data = _from_yaml(yaml_string=yaml_string, filename=filename,
                              encoding=encoding, errors=errors,
                              Loader=loader, **kwargs)
            if not isinstance(data, list):
                raise BoxError('yaml data not returned as a list'
                               'but rather a {0}'.format(type(data).__name__))
            return cls(data, **bx_args)

    def box_it_up(self):
        # Force eager conversion of every contained Box/BoxList.
        for v in self:
            if hasattr(v, 'box_it_up') and v is not self:
                v.box_it_up()
|
| 963 |
+
|
| 964 |
+
|
| 965 |
+
class ConfigBox(Box):
    """
    Modified box object to add object transforms.

    Allows for build in transforms like:

    cns = ConfigBox(my_bool='yes', my_int='5', my_list='5,4,3,3,2')

    cns.bool('my_bool')  # True
    cns.int('my_int')  # 5
    cns.list('my_list', mod=lambda x: int(x))  # [5, 4, 3, 3, 2]
    """

    # Keep the transform/compat method names from being shadowed by data.
    _protected_keys = dir({}) + ['to_dict', 'bool', 'int', 'float',
                                 'list', 'getboolean', 'to_json', 'to_yaml',
                                 'getfloat', 'getint',
                                 'from_json', 'from_yaml']

    def __getattr__(self, item):
        """Config file keys are stored in lower case, be a little more
        loosey goosey"""
        try:
            return super(ConfigBox, self).__getattr__(item)
        except AttributeError:
            # Retry with the lower-cased name before giving up.
            return super(ConfigBox, self).__getattr__(item.lower())

    def __dir__(self):
        return super(ConfigBox, self).__dir__() + ['bool', 'int', 'float',
                                                   'list', 'getboolean',
                                                   'getfloat', 'getint']

    def bool(self, item, default=None):
        """ Return value of key as a boolean

        :param item: key of value to transform
        :param default: value to return if item does not exist
        :return: approximated bool of value
        """
        try:
            item = self.__getattr__(item)
        except AttributeError as err:
            if default is not None:
                return default
            raise err

        if isinstance(item, (bool, int)):
            return bool(item)

        # Common "falsy" config strings map to False; everything else
        # falls through to normal truthiness.
        if (isinstance(item, str) and
                item.lower() in ('n', 'no', 'false', 'f', '0')):
            return False

        return True if item else False

    def int(self, item, default=None):
        """ Return value of key as an int

        :param item: key of value to transform
        :param default: value to return if item does not exist
        :return: int of value
        """
        try:
            item = self.__getattr__(item)
        except AttributeError as err:
            if default is not None:
                return default
            raise err
        return int(item)

    def float(self, item, default=None):
        """ Return value of key as a float

        :param item: key of value to transform
        :param default: value to return if item does not exist
        :return: float of value
        """
        try:
            item = self.__getattr__(item)
        except AttributeError as err:
            if default is not None:
                return default
            raise err
        return float(item)

    def list(self, item, default=None, spliter=",", strip=True, mod=None):
        """ Return value of key as a list

        :param item: key of value to transform
        :param mod: function to map against list
        :param default: value to return if item does not exist
        :param spliter: character to split str on
        :param strip: clean the list with the `strip`
        :return: list of items
        """
        # NOTE: `spliter` is a typo for "splitter" but is part of the
        # public signature, so it cannot be renamed.
        try:
            item = self.__getattr__(item)
        except AttributeError as err:
            if default is not None:
                return default
            raise err
        if strip:
            # Drop optional surrounding brackets, e.g. "[1,2,3]".
            item = item.lstrip('[').rstrip(']')
        out = [x.strip() if strip else x for x in item.split(spliter)]
        if mod:
            return list(map(mod, out))
        return out

    # loose configparser compatibility

    def getboolean(self, item, default=None):
        return self.bool(item, default)

    def getint(self, item, default=None):
        return self.int(item, default)

    def getfloat(self, item, default=None):
        return self.float(item, default)

    def __repr__(self):
        return '<ConfigBox: {0}>'.format(str(self.to_dict()))
|
| 1085 |
+
|
| 1086 |
+
|
| 1087 |
+
class SBox(Box):
    """
    ShorthandBox (SBox) allows for
    property access of `dict` `json` and `yaml`
    """
    # Protect the shorthand property names from being shadowed by data.
    _protected_keys = dir({}) + ['to_dict', 'tree_view', 'to_json', 'to_yaml',
                                 'json', 'yaml', 'from_yaml', 'from_json',
                                 'dict']

    @property
    def dict(self):
        # Shorthand for to_dict().
        return self.to_dict()

    @property
    def json(self):
        # Shorthand for to_json().
        return self.to_json()

    if yaml_support:
        @property
        def yaml(self):
            # Shorthand for to_yaml(); only defined when PyYAML exists.
            return self.to_yaml()

    def __repr__(self):
        return '<ShorthandBox: {0}>'.format(str(self.to_dict()))
|
vanishing_point_extraction/neurvps/neurvps/config.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from neurvps.box import Box
|
| 4 |
+
|
| 5 |
+
# C is a Box (attribute-accessible dict) storing all the configuration;
# presumably populated elsewhere at startup -- TODO confirm against callers.
C = Box()

# shortcut for C.model
M = Box()
|
vanishing_point_extraction/neurvps/neurvps/datasets.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import math
|
| 4 |
+
import random
|
| 5 |
+
import os.path as osp
|
| 6 |
+
from glob import glob
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
import skimage.io
|
| 11 |
+
import numpy.linalg as LA
|
| 12 |
+
import matplotlib.pyplot as plt
|
| 13 |
+
import skimage.transform
|
| 14 |
+
from torch.utils.data import Dataset
|
| 15 |
+
from torch.utils.data.dataloader import default_collate
|
| 16 |
+
|
| 17 |
+
from neurvps.config import C
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class WireframeDataset(Dataset):
    """Wireframe vanishing-point dataset.

    Reads PNG images matching `<rootdir>/*/*.png` with vanishing points in
    sibling `*_label.npz` files (key "vpts").
    """

    def __init__(self, rootdir, split):
        self.rootdir = rootdir
        filelist = sorted(glob(f"{rootdir}/*/*.png"))

        self.split = split
        if split == "train":
            # First 500 files are reserved for validation.
            self.filelist = filelist[500:]
            # Each image is visited `augmentation_level` times per epoch.
            self.size = len(self.filelist) * C.io.augmentation_level
        elif split == "valid":
            self.filelist = [f for f in filelist[:500] if "a1" not in f]
            self.size = len(self.filelist)
        else:
            # Fail fast: previously an unknown split left `filelist` and
            # `size` undefined and crashed later with AttributeError.
            raise ValueError(f"unknown split: {split!r}")
        print(f"n{split}:", self.size)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        # Wrap around so augmentation epochs reuse the same files.
        iname = self.filelist[idx % len(self.filelist)]
        image = skimage.io.imread(iname).astype(float)[:, :, :3]
        image = np.rollaxis(image, 2).copy()  # HWC -> CHW
        with np.load(iname.replace(".png", "_label.npz")) as npz:
            vpts = npz["vpts"]
        return (torch.tensor(image).float(), {"vpts": torch.tensor(vpts).float()})
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class ScanNetDataset(Dataset):
    """ScanNet dataset.

    Reads RGB frames listed by `scannetv2_<split>.txt`, with three vanishing
    directions per image stored in a sibling `vanish.npz` (keys x/y/z).
    """

    def __init__(self, rootdir, split):
        self.rootdir = rootdir
        self.split = split

        dirs = np.genfromtxt(f"{rootdir}/scannetv2_{split}.txt", dtype=str)
        self.filelist = sum([glob(f"{rootdir}/{d}/*.png") for d in dirs], [])
        if split == "train":
            # Each image is visited `augmentation_level` times per epoch.
            self.size = len(self.filelist) * C.io.augmentation_level
        elif split == "valid":
            # Fixed seed so the 500-image validation subset is stable
            # across runs.
            random.seed(0)
            random.shuffle(self.filelist)
            self.filelist = self.filelist[:500]
            self.size = len(self.filelist)
        else:
            # Fail fast: previously an unknown split left `size` undefined.
            raise ValueError(f"unknown split: {split!r}")
        print(f"n{split}:", self.size)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        iname = self.filelist[idx % len(self.filelist)]
        image = skimage.io.imread(iname)[:, :, :3]
        with np.load(iname.replace("color.png", "vanish.npz")) as npz:
            vpts = np.array([npz[d] for d in ["x", "y", "z"]])
        # Negate y to match the image coordinate convention.
        vpts[:, 1] *= -1
        # `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `float` is the documented replacement.
        image = np.rollaxis(image.astype(float), 2).copy()  # HWC -> CHW
        return (torch.tensor(image).float(), {"vpts": torch.tensor(vpts).float()})
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class Tmm17Dataset(Dataset):
    """TMM17 dataset: one labelled vanishing point per image, derived from
    two annotated line segments stored in a sidecar .txt file."""

    def __init__(self, rootdir, split):
        self.rootdir = rootdir
        self.split = split

        filelist = np.genfromtxt(f"{rootdir}/{split}.txt", dtype=str)
        self.filelist = [osp.join(rootdir, f) for f in filelist]
        if split == "train":
            # Each image is visited `augmentation_level` times per epoch.
            self.size = len(self.filelist) * C.io.augmentation_level
        elif split == "valid":
            self.size = len(self.filelist)
        print(f"n{split}:", self.size)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        iname = self.filelist[idx % len(self.filelist)]
        image = skimage.io.imread(iname)
        tname = iname.replace(".jpg", ".txt")
        # Two line segments, each (x0, y0, x1, y1), after a 1-line header.
        axy, bxy = np.genfromtxt(tname, skip_header=1)

        a0, a1 = np.array(axy[:2]), np.array(axy[2:])
        b0, b1 = np.array(bxy[:2]), np.array(bxy[2:])
        # The vanishing point is the intersection of the two segments,
        # rescaled into 512x512 coordinates.
        xy = intersect(a0, a1, b0, b1) - 0.5
        xy[0] *= 512 / image.shape[1]
        xy[1] *= 512 / image.shape[0]
        image = skimage.transform.resize(image, (512, 512))
        if image.ndim == 2:
            # Grayscale -> 3-channel by repetition.
            image = image[:, :, None].repeat(3, 2)
        if self.split == "train":
            # `crop` is defined elsewhere in this module (not shown here).
            i, j, h, w = crop(image.shape)
        else:
            i, j, h, w = 0, 0, image.shape[0], image.shape[1]
        image = skimage.transform.resize(image[j : j + h, i : i + w], (512, 512))
        # Re-map the vanishing point into the cropped/resized frame.
        xy[1] = (xy[1] - j) / h * 512
        xy[0] = (xy[0] - i) / w * 512
        # plt.imshow(image)
        # plt.scatter(xy[0], xy[1])
        # plt.show()
        # Image coords -> normalized direction with the configured focal
        # length (y axis flipped to camera convention).
        vpts = np.array([[xy[0] / 256 - 1, 1 - xy[1] / 256, C.io.focal_length]])
        vpts[0] /= LA.norm(vpts[0])

        # Deterministic flip augmentation chosen by the epoch replica.
        image, vpts = augment(image, vpts, idx // len(self.filelist))
        image = np.rollaxis(image, 2)
        return (torch.tensor(image * 255).float(), {"vpts": torch.tensor(vpts).float()})
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def augment(image, vpts, division):
|
| 137 |
+
if division == 1: # left-right flip
|
| 138 |
+
return image[:, ::-1].copy(), (vpts * [-1, 1, 1]).copy()
|
| 139 |
+
elif division == 2: # up-down flip
|
| 140 |
+
return image[::-1, :].copy(), (vpts * [1, -1, 1]).copy()
|
| 141 |
+
elif division == 3: # all flip
|
| 142 |
+
return image[::-1, ::-1].copy(), (vpts * [-1, -1, 1]).copy()
|
| 143 |
+
return image, vpts
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def intersect(a0, a1, b0, b1):
|
| 147 |
+
c0 = ccw(a0, a1, b0)
|
| 148 |
+
c1 = ccw(a0, a1, b1)
|
| 149 |
+
d0 = ccw(b0, b1, a0)
|
| 150 |
+
d1 = ccw(b0, b1, a1)
|
| 151 |
+
if abs(d1 - d0) > abs(c1 - c0):
|
| 152 |
+
return (a0 * d1 - a1 * d0) / (d1 - d0)
|
| 153 |
+
else:
|
| 154 |
+
return (b0 * c1 - b1 * c0) / (c1 - c0)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def ccw(c, a, b):
|
| 158 |
+
a0 = a - c
|
| 159 |
+
b0 = b - c
|
| 160 |
+
return a0[0] * b0[1] - b0[0] * a0[1]
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def crop(shape, scale=(0.35, 1.0), ratio=(9 / 16, 16 / 9)):
|
| 164 |
+
for attempt in range(20):
|
| 165 |
+
area = shape[0] * shape[1]
|
| 166 |
+
target_area = random.uniform(*scale) * area
|
| 167 |
+
aspect_ratio = random.uniform(*ratio)
|
| 168 |
+
|
| 169 |
+
w = int(round(math.sqrt(target_area * aspect_ratio)))
|
| 170 |
+
h = int(round(math.sqrt(target_area / aspect_ratio)))
|
| 171 |
+
|
| 172 |
+
if random.random() < 0.5:
|
| 173 |
+
w, h = h, w
|
| 174 |
+
|
| 175 |
+
if h <= shape[0] and w <= shape[1]:
|
| 176 |
+
j = random.randint(0, shape[0] - h)
|
| 177 |
+
i = random.randint(0, shape[1] - w)
|
| 178 |
+
return i, j, h, w
|
| 179 |
+
|
| 180 |
+
# Fallback
|
| 181 |
+
w = min(shape[0], shape[1])
|
| 182 |
+
i = (shape[1] - w) // 2
|
| 183 |
+
j = (shape[0] - w) // 2
|
| 184 |
+
return i, j, w, w
|
vanishing_point_extraction/neurvps/neurvps/models/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .hourglass_pose import hg
|
| 2 |
+
from .vanishing_net import VanishingNet
|
vanishing_point_extraction/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (249 Bytes). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/models/__pycache__/conic.cpython-38.pyc
ADDED
|
Binary file (1.73 kB). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/models/__pycache__/deformable.cpython-38.pyc
ADDED
|
Binary file (4.03 kB). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/models/__pycache__/hourglass_pose.cpython-38.pyc
ADDED
|
Binary file (5.72 kB). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/models/__pycache__/vanishing_net.cpython-38.pyc
ADDED
|
Binary file (5.74 kB). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/models/conic.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch import nn
|
| 3 |
+
from torch.nn.modules.utils import _pair
|
| 4 |
+
|
| 5 |
+
from neurvps.config import M
|
| 6 |
+
from neurvps.models.deformable import DeformConv
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ConicConv(nn.Module):
|
| 10 |
+
def __init__(self, c_in, c_out, kernel_size=3, bias=False):
|
| 11 |
+
super().__init__()
|
| 12 |
+
self.deform_conv = DeformConv(
|
| 13 |
+
c_in,
|
| 14 |
+
c_out,
|
| 15 |
+
kernel_size=kernel_size,
|
| 16 |
+
stride=1,
|
| 17 |
+
padding=1,
|
| 18 |
+
im2col_step=M.im2col_step,
|
| 19 |
+
bias=bias,
|
| 20 |
+
)
|
| 21 |
+
self.kernel_size = _pair(kernel_size)
|
| 22 |
+
|
| 23 |
+
def forward(self, input, vpts):
|
| 24 |
+
N, C, H, W = input.shape
|
| 25 |
+
Kh, Kw = self.kernel_size
|
| 26 |
+
|
| 27 |
+
with torch.no_grad():
|
| 28 |
+
ys, xs = torch.meshgrid(
|
| 29 |
+
torch.arange(0, H).float().to(input.device),
|
| 30 |
+
torch.arange(0, W).float().to(input.device),
|
| 31 |
+
)
|
| 32 |
+
# d: [N, H, W, 2]
|
| 33 |
+
d = torch.cat(
|
| 34 |
+
[
|
| 35 |
+
(vpts[:, 0, None, None] - ys)[..., None],
|
| 36 |
+
(vpts[:, 1, None, None] - xs)[..., None],
|
| 37 |
+
],
|
| 38 |
+
dim=-1,
|
| 39 |
+
)
|
| 40 |
+
d /= torch.norm(d, dim=-1, keepdim=True).clamp(min=1e-5)
|
| 41 |
+
n = torch.cat([-d[..., 1:2], d[..., 0:1]], dim=-1)
|
| 42 |
+
|
| 43 |
+
offset = torch.zeros((N, H, W, Kh, Kw, 2)).to(input.device)
|
| 44 |
+
for i in range(Kh):
|
| 45 |
+
for j in range(Kw):
|
| 46 |
+
offset[..., i, j, :] = d * (1 - i) + n * (1 - j)
|
| 47 |
+
offset[..., i, j, 0] += 1 - i
|
| 48 |
+
offset[..., i, j, 1] += 1 - j
|
| 49 |
+
offset = offset.permute(0, 3, 4, 5, 1, 2).reshape((N, -1, H, W))
|
| 50 |
+
return self.deform_conv(input, offset)
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/.ninja_deps
ADDED
|
Binary file (644 Bytes). View file
|
|
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/.ninja_log
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ninja log v5
|
| 2 |
+
0 16103 1705843691094256220 deform_conv_cuda.cuda.o faf06c0154fdd95
|
| 3 |
+
0 17978 1705843692978288598 deform_conv.o 9bdf84a104d95de9
|
| 4 |
+
17978 18346 1705843693342294852 DCN.so d5002c9f854b5479
|
| 5 |
+
1 14024 1720225807965090925 deform_conv_cuda.cuda.o 12c1d8fa6984d93
|
| 6 |
+
1 16540 1720225810493145171 deform_conv.o 84f97a3edd60cf1e
|
| 7 |
+
16540 16855 1720225810805151862 DCN.so d5002c9f854b5479
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/DCN.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1d858d752cacb6eedb4f05258437d2dfdf45a2a4e8fbbba467b8e7f8553b0140
|
| 3 |
+
size 580640
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/build.ninja
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ninja_required_version = 1.3
|
| 2 |
+
cxx = c++
|
| 3 |
+
nvcc = /usr/local/cuda/bin/nvcc
|
| 4 |
+
|
| 5 |
+
cflags = -DTORCH_EXTENSION_NAME=DCN -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/TH -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /opt/conda/envs/neurvps/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++14 -O3
|
| 6 |
+
post_cflags =
|
| 7 |
+
cuda_cflags = -DTORCH_EXTENSION_NAME=DCN -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/TH -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /opt/conda/envs/neurvps/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_86,code=sm_86 --compiler-options '-fPIC' -std=c++14
|
| 8 |
+
cuda_post_cflags =
|
| 9 |
+
ldflags = -shared -L/opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/lib -lc10 -lc10_cuda -ltorch_cpu -ltorch_cuda_cu -ltorch_cuda_cpp -ltorch -ltorch_python -L/usr/local/cuda/lib64 -lcudart
|
| 10 |
+
|
| 11 |
+
rule compile
|
| 12 |
+
command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags
|
| 13 |
+
depfile = $out.d
|
| 14 |
+
deps = gcc
|
| 15 |
+
|
| 16 |
+
rule cuda_compile
|
| 17 |
+
depfile = $out.d
|
| 18 |
+
deps = gcc
|
| 19 |
+
command = $nvcc $cuda_cflags -c $in -o $out $cuda_post_cflags
|
| 20 |
+
|
| 21 |
+
rule link
|
| 22 |
+
command = $cxx $in $ldflags -o $out
|
| 23 |
+
|
| 24 |
+
build deform_conv_cuda.cuda.o: cuda_compile /root/dev/junhee/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.cu
|
| 25 |
+
build deform_conv.o: compile /root/dev/junhee/vanishing_point/neurvps/neurvps/models/cpp/deform_conv.cpp
|
| 26 |
+
|
| 27 |
+
build DCN.so: link deform_conv_cuda.cuda.o deform_conv.o
|
| 28 |
+
|
| 29 |
+
default DCN.so
|
| 30 |
+
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fe3c7f68e8eefb0ce25c505d4e1c74ebdc200d2bf2dbdb335750788635a1e114
|
| 3 |
+
size 234296
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:67b0f98276530eb69dd8ad586e105adb457b4f506c4acbfe8418d192f49dcf7e
|
| 3 |
+
size 603176
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv.cpp
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "deform_conv_cpu.h"
|
| 2 |
+
#include "deform_conv_cuda.h"
|
| 3 |
+
|
| 4 |
+
at::Tensor
|
| 5 |
+
deform_conv_forward(const at::Tensor &input,
|
| 6 |
+
const at::Tensor &weight,
|
| 7 |
+
const at::Tensor &bias,
|
| 8 |
+
const at::Tensor &offset,
|
| 9 |
+
const int kernel_h,
|
| 10 |
+
const int kernel_w,
|
| 11 |
+
const int stride_h,
|
| 12 |
+
const int stride_w,
|
| 13 |
+
const int pad_h,
|
| 14 |
+
const int pad_w,
|
| 15 |
+
const int dilation_h,
|
| 16 |
+
const int dilation_w,
|
| 17 |
+
const int group,
|
| 18 |
+
const int deformable_group,
|
| 19 |
+
const int im2col_step)
|
| 20 |
+
{
|
| 21 |
+
if (input.type().is_cuda())
|
| 22 |
+
{
|
| 23 |
+
return deform_conv_cuda_forward(input, weight, bias, offset,
|
| 24 |
+
kernel_h, kernel_w,
|
| 25 |
+
stride_h, stride_w,
|
| 26 |
+
pad_h, pad_w,
|
| 27 |
+
dilation_h, dilation_w,
|
| 28 |
+
group,
|
| 29 |
+
deformable_group,
|
| 30 |
+
im2col_step);
|
| 31 |
+
}
|
| 32 |
+
AT_ERROR("Not implemented on the CPU");
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
std::vector<at::Tensor>
|
| 36 |
+
deform_conv_backward(const at::Tensor &input,
|
| 37 |
+
const at::Tensor &weight,
|
| 38 |
+
const at::Tensor &bias,
|
| 39 |
+
const at::Tensor &offset,
|
| 40 |
+
const at::Tensor &grad_output,
|
| 41 |
+
const int kernel_h,
|
| 42 |
+
const int kernel_w,
|
| 43 |
+
const int stride_h,
|
| 44 |
+
const int stride_w,
|
| 45 |
+
const int pad_h,
|
| 46 |
+
const int pad_w,
|
| 47 |
+
const int dilation_h,
|
| 48 |
+
const int dilation_w,
|
| 49 |
+
const int group,
|
| 50 |
+
const int deformable_group,
|
| 51 |
+
const int im2col_step)
|
| 52 |
+
{
|
| 53 |
+
if (input.type().is_cuda())
|
| 54 |
+
{
|
| 55 |
+
return deform_conv_cuda_backward(input,
|
| 56 |
+
weight,
|
| 57 |
+
bias,
|
| 58 |
+
offset,
|
| 59 |
+
grad_output,
|
| 60 |
+
kernel_h, kernel_w,
|
| 61 |
+
stride_h, stride_w,
|
| 62 |
+
pad_h, pad_w,
|
| 63 |
+
dilation_h, dilation_w,
|
| 64 |
+
group,
|
| 65 |
+
deformable_group,
|
| 66 |
+
im2col_step);
|
| 67 |
+
}
|
| 68 |
+
AT_ERROR("Not implemented on the CPU");
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
| 73 |
+
m.def("deform_conv_forward", &deform_conv_forward, "Backward pass of deformable convolution");
|
| 74 |
+
m.def("deform_conv_backward", &deform_conv_backward, "Backward pass of deformable convolution");
|
| 75 |
+
}
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cpu.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/extension.h>
|
| 3 |
+
|
| 4 |
+
at::Tensor
|
| 5 |
+
deform_conv_cpu_forward(const at::Tensor &input,
|
| 6 |
+
const at::Tensor &weight,
|
| 7 |
+
const at::Tensor &bias,
|
| 8 |
+
const at::Tensor &offset,
|
| 9 |
+
const int kernel_h,
|
| 10 |
+
const int kernel_w,
|
| 11 |
+
const int stride_h,
|
| 12 |
+
const int stride_w,
|
| 13 |
+
const int pad_h,
|
| 14 |
+
const int pad_w,
|
| 15 |
+
const int dilation_h,
|
| 16 |
+
const int dilation_w,
|
| 17 |
+
const int group,
|
| 18 |
+
const int deformable_group,
|
| 19 |
+
const int im2col_step);
|
| 20 |
+
|
| 21 |
+
std::vector<at::Tensor>
|
| 22 |
+
deform_conv_cpu_backward(const at::Tensor &input,
|
| 23 |
+
const at::Tensor &weight,
|
| 24 |
+
const at::Tensor &bias,
|
| 25 |
+
const at::Tensor &offset,
|
| 26 |
+
const at::Tensor &grad_output,
|
| 27 |
+
const int kernel_h,
|
| 28 |
+
const int kernel_w,
|
| 29 |
+
const int stride_h,
|
| 30 |
+
const int stride_w,
|
| 31 |
+
const int pad_h,
|
| 32 |
+
const int pad_w,
|
| 33 |
+
const int dilation_h,
|
| 34 |
+
const int dilation_w,
|
| 35 |
+
const int group,
|
| 36 |
+
const int deformable_group,
|
| 37 |
+
const int im2col_step);
|
| 38 |
+
|
| 39 |
+
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.cu
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <vector>
|
| 2 |
+
#include "deform_im2col_cuda.cuh"
|
| 3 |
+
|
| 4 |
+
#include <ATen/ATen.h>
|
| 5 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 6 |
+
#include <cuda.h>
|
| 7 |
+
#include <cuda_runtime.h>
|
| 8 |
+
|
| 9 |
+
// #include <THC/THC.h>
|
| 10 |
+
// #include <THC/THCAtomics.cuh>
|
| 11 |
+
// #include <THC/THCDeviceUtils.cuh>
|
| 12 |
+
|
| 13 |
+
// extern THCState *state;
|
| 14 |
+
|
| 15 |
+
// author: Charles Shang
|
| 16 |
+
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
at::Tensor
|
| 20 |
+
deform_conv_cuda_forward(const at::Tensor &input,
|
| 21 |
+
const at::Tensor &weight,
|
| 22 |
+
const at::Tensor &bias,
|
| 23 |
+
const at::Tensor &offset,
|
| 24 |
+
const int kernel_h,
|
| 25 |
+
const int kernel_w,
|
| 26 |
+
const int stride_h,
|
| 27 |
+
const int stride_w,
|
| 28 |
+
const int pad_h,
|
| 29 |
+
const int pad_w,
|
| 30 |
+
const int dilation_h,
|
| 31 |
+
const int dilation_w,
|
| 32 |
+
const int group,
|
| 33 |
+
const int deformable_group,
|
| 34 |
+
const int im2col_step)
|
| 35 |
+
{
|
| 36 |
+
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
|
| 37 |
+
|
| 38 |
+
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
|
| 39 |
+
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
|
| 40 |
+
|
| 41 |
+
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
|
| 42 |
+
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
|
| 43 |
+
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
|
| 44 |
+
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
|
| 45 |
+
|
| 46 |
+
const int batch = input.size(0);
|
| 47 |
+
const int channels = input.size(1);
|
| 48 |
+
const int height = input.size(2);
|
| 49 |
+
const int width = input.size(3);
|
| 50 |
+
|
| 51 |
+
const int channels_out = weight.size(0);
|
| 52 |
+
const int channels_kernel = weight.size(1);
|
| 53 |
+
const int kernel_h_ = weight.size(2);
|
| 54 |
+
const int kernel_w_ = weight.size(3);
|
| 55 |
+
|
| 56 |
+
const int im2col_step_ = std::min(batch, im2col_step);
|
| 57 |
+
|
| 58 |
+
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
|
| 59 |
+
|
| 60 |
+
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
|
| 61 |
+
"channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group);
|
| 62 |
+
|
| 63 |
+
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
|
| 64 |
+
// printf("Channels: %d %d\n", channels, channels_kernel);
|
| 65 |
+
// printf("Channels: %d %d\n", channels_out, channels_kernel);
|
| 66 |
+
|
| 67 |
+
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
|
| 68 |
+
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
|
| 69 |
+
|
| 70 |
+
AT_ASSERTM(channels == (channels_kernel * group),
|
| 71 |
+
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
|
| 72 |
+
|
| 73 |
+
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
|
| 74 |
+
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
|
| 75 |
+
|
| 76 |
+
auto output = at::empty({batch * height_out * width_out, channels_out}, input.options());
|
| 77 |
+
|
| 78 |
+
// prepare group weight and bias
|
| 79 |
+
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
|
| 80 |
+
auto bias_g = bias.view({group, channels_out/group});
|
| 81 |
+
|
| 82 |
+
// define alias for easy use
|
| 83 |
+
const int batch_n = im2col_step_;
|
| 84 |
+
const int per_input_size = channels * height * width;
|
| 85 |
+
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
|
| 86 |
+
auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out});
|
| 87 |
+
for (int n = 0; n < batch/im2col_step_; ++n)
|
| 88 |
+
{
|
| 89 |
+
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
|
| 90 |
+
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] {
|
| 91 |
+
deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
|
| 92 |
+
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
|
| 93 |
+
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
|
| 94 |
+
batch_n, channels, height, width,
|
| 95 |
+
height_out, width_out, kernel_h, kernel_w,
|
| 96 |
+
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
|
| 97 |
+
deformable_group,
|
| 98 |
+
columns.data<scalar_t>());
|
| 99 |
+
|
| 100 |
+
}));
|
| 101 |
+
|
| 102 |
+
// auto columns_m = columns.t();
|
| 103 |
+
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t();
|
| 104 |
+
// output = at::addmm(bias, columns_m, weight_m);
|
| 105 |
+
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
|
| 106 |
+
auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group});
|
| 107 |
+
for (int g = 0; g < group; ++g)
|
| 108 |
+
{
|
| 109 |
+
auto columns_gm = columns_g.select(0, g).t();
|
| 110 |
+
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
|
| 111 |
+
auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
|
| 112 |
+
output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group});
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous();
|
| 118 |
+
|
| 119 |
+
return output;
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
std::vector<at::Tensor> deform_conv_cuda_backward(const at::Tensor &input,
|
| 123 |
+
const at::Tensor &weight,
|
| 124 |
+
const at::Tensor &bias,
|
| 125 |
+
const at::Tensor &offset,
|
| 126 |
+
const at::Tensor &grad_output,
|
| 127 |
+
const int kernel_h,
|
| 128 |
+
const int kernel_w,
|
| 129 |
+
const int stride_h,
|
| 130 |
+
const int stride_w,
|
| 131 |
+
const int pad_h,
|
| 132 |
+
const int pad_w,
|
| 133 |
+
const int dilation_h,
|
| 134 |
+
const int dilation_w,
|
| 135 |
+
const int group,
|
| 136 |
+
const int deformable_group,
|
| 137 |
+
const int im2col_step)
|
| 138 |
+
{
|
| 139 |
+
|
| 140 |
+
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
|
| 141 |
+
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
|
| 142 |
+
|
| 143 |
+
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
|
| 144 |
+
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
|
| 145 |
+
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
|
| 146 |
+
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
|
| 147 |
+
|
| 148 |
+
const int batch = input.size(0);
|
| 149 |
+
const int channels = input.size(1);
|
| 150 |
+
const int height = input.size(2);
|
| 151 |
+
const int width = input.size(3);
|
| 152 |
+
|
| 153 |
+
const int channels_out = weight.size(0);
|
| 154 |
+
const int channels_kernel = weight.size(1);
|
| 155 |
+
const int kernel_h_ = weight.size(2);
|
| 156 |
+
const int kernel_w_ = weight.size(3);
|
| 157 |
+
|
| 158 |
+
const int batch_ = grad_output.size(0);
|
| 159 |
+
const int channels_out_ = grad_output.size(1);
|
| 160 |
+
const int height_out_ = grad_output.size(2);
|
| 161 |
+
const int width_out_ = grad_output.size(3);
|
| 162 |
+
|
| 163 |
+
const int im2col_step_ = std::min(im2col_step, batch);
|
| 164 |
+
|
| 165 |
+
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
|
| 166 |
+
|
| 167 |
+
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
|
| 168 |
+
"channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group);
|
| 169 |
+
|
| 170 |
+
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
|
| 171 |
+
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
|
| 172 |
+
|
| 173 |
+
AT_ASSERTM(channels == (channels_kernel * group),
|
| 174 |
+
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
|
| 175 |
+
|
| 176 |
+
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
|
| 177 |
+
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
|
| 178 |
+
|
| 179 |
+
AT_ASSERTM(batch == batch_,
|
| 180 |
+
"Input shape and grad_out batch wont match: (%d vs %d).", batch, batch_);
|
| 181 |
+
|
| 182 |
+
AT_ASSERTM(channels_out == channels_out_,
|
| 183 |
+
"Input shape and grad_out channels_out wont match: (%d vs %d).", channels_out, channels_out_);
|
| 184 |
+
|
| 185 |
+
AT_ASSERTM(height_out == height_out_ && width_out == width_out_,
|
| 186 |
+
"Input shape and grad_out shape wont match: (%d x %d vs %d x %d).", height_out, height_out_, width_out, width_out_);
|
| 187 |
+
|
| 188 |
+
auto grad_input = at::zeros_like(input);
|
| 189 |
+
auto grad_offset = at::zeros_like(offset);
|
| 190 |
+
auto grad_weight = at::zeros_like(weight);
|
| 191 |
+
auto grad_bias = at::zeros_like(bias);
|
| 192 |
+
|
| 193 |
+
// auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out});
|
| 194 |
+
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t();
|
| 195 |
+
// columns = at::mm(weight_m, grad_output_m);
|
| 196 |
+
|
| 197 |
+
// prepare group weight and bias
|
| 198 |
+
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
|
| 199 |
+
auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
|
| 200 |
+
auto grad_bias_g = grad_bias.view({group, channels_out/group});
|
| 201 |
+
|
| 202 |
+
const int batch_n = im2col_step_;
|
| 203 |
+
const int per_input_size = channels * height * width;
|
| 204 |
+
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
|
| 205 |
+
auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out});
|
| 206 |
+
for (int n = 0; n < batch/im2col_step_; ++n)
|
| 207 |
+
{
|
| 208 |
+
auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out});
|
| 209 |
+
auto ones = at::ones({batch_n * height_out * width_out}, input.options());
|
| 210 |
+
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * 1 * height_out * width_out}, input.options());
|
| 211 |
+
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
|
| 212 |
+
for (int g = 0; g < group; ++g)
|
| 213 |
+
{
|
| 214 |
+
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
|
| 215 |
+
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
|
| 216 |
+
columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm);
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] {
|
| 220 |
+
deformable_col2im_coord_cuda(at::cuda::getCurrentCUDAStream(),
|
| 221 |
+
columns.data<scalar_t>(),
|
| 222 |
+
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
|
| 223 |
+
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
|
| 224 |
+
batch_n, channels, height, width,
|
| 225 |
+
height_out, width_out, kernel_h, kernel_w,
|
| 226 |
+
pad_h, pad_w, stride_h, stride_w,
|
| 227 |
+
dilation_h, dilation_w, deformable_group,
|
| 228 |
+
grad_offset.data<scalar_t>() + n * im2col_step_ * per_offset_size);
|
| 229 |
+
// gradient w.r.t. input data
|
| 230 |
+
deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
|
| 231 |
+
columns.data<scalar_t>(),
|
| 232 |
+
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
|
| 233 |
+
batch_n, channels, height, width,
|
| 234 |
+
height_out, width_out, kernel_h, kernel_w,
|
| 235 |
+
pad_h, pad_w, stride_h, stride_w,
|
| 236 |
+
dilation_h, dilation_w, deformable_group,
|
| 237 |
+
grad_input.data<scalar_t>() + n * im2col_step_ * per_input_size);
|
| 238 |
+
|
| 239 |
+
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
|
| 240 |
+
deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
|
| 241 |
+
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
|
| 242 |
+
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
|
| 243 |
+
batch_n, channels, height, width,
|
| 244 |
+
height_out, width_out, kernel_h, kernel_w,
|
| 245 |
+
pad_h, pad_w, stride_h, stride_w,
|
| 246 |
+
dilation_h, dilation_w, deformable_group,
|
| 247 |
+
columns.data<scalar_t>());
|
| 248 |
+
|
| 249 |
+
}));
|
| 250 |
+
|
| 251 |
+
// auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out});
|
| 252 |
+
// grad_weight = at::mm(grad_output_m, columns.t()).view_as(weight);
|
| 253 |
+
// grad_bias = at::mv(grad_output_m, ones);
|
| 254 |
+
// auto grad_output_g = grad_output.view({batch, group, channels_out/group, height_out, width_out});
|
| 255 |
+
// auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch * height_out * width_out});
|
| 256 |
+
for (int g = 0; g < group; ++g)
|
| 257 |
+
{
|
| 258 |
+
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
|
| 259 |
+
auto columns_gm = columns_g.select(0, g).t();
|
| 260 |
+
auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w});
|
| 261 |
+
auto grad_bias_gm = grad_bias_g.select(0, g);
|
| 262 |
+
grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g));
|
| 263 |
+
grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones);
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
return {
|
| 269 |
+
grad_input, grad_offset, grad_weight, grad_bias
|
| 270 |
+
};
|
| 271 |
+
}
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.h
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/extension.h>
|
| 3 |
+
|
| 4 |
+
at::Tensor
|
| 5 |
+
deform_conv_cuda_forward(const at::Tensor &input,
|
| 6 |
+
const at::Tensor &weight,
|
| 7 |
+
const at::Tensor &bias,
|
| 8 |
+
const at::Tensor &offset,
|
| 9 |
+
const int kernel_h,
|
| 10 |
+
const int kernel_w,
|
| 11 |
+
const int stride_h,
|
| 12 |
+
const int stride_w,
|
| 13 |
+
const int pad_h,
|
| 14 |
+
const int pad_w,
|
| 15 |
+
const int dilation_h,
|
| 16 |
+
const int dilation_w,
|
| 17 |
+
const int group,
|
| 18 |
+
const int deformable_group,
|
| 19 |
+
const int im2col_step);
|
| 20 |
+
|
| 21 |
+
std::vector<at::Tensor>
|
| 22 |
+
deform_conv_cuda_backward(const at::Tensor &input,
|
| 23 |
+
const at::Tensor &weight,
|
| 24 |
+
const at::Tensor &bias,
|
| 25 |
+
const at::Tensor &offset,
|
| 26 |
+
const at::Tensor &grad_output,
|
| 27 |
+
const int kernel_h,
|
| 28 |
+
const int kernel_w,
|
| 29 |
+
const int stride_h,
|
| 30 |
+
const int stride_w,
|
| 31 |
+
const int pad_h,
|
| 32 |
+
const int pad_w,
|
| 33 |
+
const int dilation_h,
|
| 34 |
+
const int dilation_w,
|
| 35 |
+
const int group,
|
| 36 |
+
const int deformable_group,
|
| 37 |
+
const int im2col_step);
|
| 38 |
+
|
vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_im2col_cuda.cuh
ADDED
|
@@ -0,0 +1,388 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <cstdio>
|
| 2 |
+
#include <algorithm>
|
| 3 |
+
#include <cstring>
|
| 4 |
+
|
| 5 |
+
#include <ATen/ATen.h>
|
| 6 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 7 |
+
|
| 8 |
+
// #include <THC/THC.h>
|
| 9 |
+
#include <THC/THCAtomics.cuh>
|
| 10 |
+
// #include <THC/THCDeviceUtils.cuh>
|
| 11 |
+
|
| 12 |
+
#define CUDA_KERNEL_LOOP(i, n) \
|
| 13 |
+
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
|
| 14 |
+
i < (n); \
|
| 15 |
+
i += blockDim.x * gridDim.x)
|
| 16 |
+
|
| 17 |
+
const int CUDA_NUM_THREADS = 1024;
|
| 18 |
+
inline int GET_BLOCKS(const int N)
|
| 19 |
+
{
|
| 20 |
+
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
template <typename scalar_t>
|
| 24 |
+
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
|
| 25 |
+
const int height, const int width, scalar_t h, scalar_t w)
|
| 26 |
+
{
|
| 27 |
+
int h_low = floor(h);
|
| 28 |
+
int w_low = floor(w);
|
| 29 |
+
int h_high = h_low + 1;
|
| 30 |
+
int w_high = w_low + 1;
|
| 31 |
+
|
| 32 |
+
scalar_t lh = h - h_low;
|
| 33 |
+
scalar_t lw = w - w_low;
|
| 34 |
+
scalar_t hh = 1 - lh, hw = 1 - lw;
|
| 35 |
+
|
| 36 |
+
scalar_t v1 = 0;
|
| 37 |
+
if (h_low >= 0 && w_low >= 0)
|
| 38 |
+
v1 = bottom_data[h_low * data_width + w_low];
|
| 39 |
+
scalar_t v2 = 0;
|
| 40 |
+
if (h_low >= 0 && w_high <= width - 1)
|
| 41 |
+
v2 = bottom_data[h_low * data_width + w_high];
|
| 42 |
+
scalar_t v3 = 0;
|
| 43 |
+
if (h_high <= height - 1 && w_low >= 0)
|
| 44 |
+
v3 = bottom_data[h_high * data_width + w_low];
|
| 45 |
+
scalar_t v4 = 0;
|
| 46 |
+
if (h_high <= height - 1 && w_high <= width - 1)
|
| 47 |
+
v4 = bottom_data[h_high * data_width + w_high];
|
| 48 |
+
|
| 49 |
+
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
|
| 50 |
+
|
| 51 |
+
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
|
| 52 |
+
return val;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
template <typename scalar_t>
|
| 56 |
+
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
|
| 57 |
+
const int h, const int w, const int height, const int width)
|
| 58 |
+
{
|
| 59 |
+
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
|
| 60 |
+
{
|
| 61 |
+
//empty
|
| 62 |
+
return 0;
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
int argmax_h_low = floor(argmax_h);
|
| 66 |
+
int argmax_w_low = floor(argmax_w);
|
| 67 |
+
int argmax_h_high = argmax_h_low + 1;
|
| 68 |
+
int argmax_w_high = argmax_w_low + 1;
|
| 69 |
+
|
| 70 |
+
scalar_t weight = 0;
|
| 71 |
+
if (h == argmax_h_low && w == argmax_w_low)
|
| 72 |
+
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
|
| 73 |
+
if (h == argmax_h_low && w == argmax_w_high)
|
| 74 |
+
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
|
| 75 |
+
if (h == argmax_h_high && w == argmax_w_low)
|
| 76 |
+
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
|
| 77 |
+
if (h == argmax_h_high && w == argmax_w_high)
|
| 78 |
+
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
|
| 79 |
+
return weight;
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
template <typename scalar_t>
|
| 83 |
+
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
|
| 84 |
+
const int height, const int width, const scalar_t *im_data,
|
| 85 |
+
const int data_width, const int bp_dir)
|
| 86 |
+
{
|
| 87 |
+
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
|
| 88 |
+
{
|
| 89 |
+
//empty
|
| 90 |
+
return 0;
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
int argmax_h_low = floor(argmax_h);
|
| 94 |
+
int argmax_w_low = floor(argmax_w);
|
| 95 |
+
int argmax_h_high = argmax_h_low + 1;
|
| 96 |
+
int argmax_w_high = argmax_w_low + 1;
|
| 97 |
+
|
| 98 |
+
scalar_t weight = 0;
|
| 99 |
+
|
| 100 |
+
if (bp_dir == 0)
|
| 101 |
+
{
|
| 102 |
+
if (argmax_h_low >= 0 && argmax_w_low >= 0)
|
| 103 |
+
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
|
| 104 |
+
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
|
| 105 |
+
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
|
| 106 |
+
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
|
| 107 |
+
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
|
| 108 |
+
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
|
| 109 |
+
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
|
| 110 |
+
}
|
| 111 |
+
else if (bp_dir == 1)
|
| 112 |
+
{
|
| 113 |
+
if (argmax_h_low >= 0 && argmax_w_low >= 0)
|
| 114 |
+
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
|
| 115 |
+
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
|
| 116 |
+
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
|
| 117 |
+
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
|
| 118 |
+
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
|
| 119 |
+
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
|
| 120 |
+
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
return weight;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
template <typename scalar_t>
|
| 127 |
+
__global__ void deformable_im2col_gpu_kernel(const int n,
|
| 128 |
+
const scalar_t *data_im, const scalar_t *data_offset,
|
| 129 |
+
const int height, const int width, const int kernel_h, const int kernel_w,
|
| 130 |
+
const int pad_h, const int pad_w,
|
| 131 |
+
const int stride_h, const int stride_w,
|
| 132 |
+
const int dilation_h, const int dilation_w,
|
| 133 |
+
const int channel_per_deformable_group,
|
| 134 |
+
const int batch_size, const int num_channels, const int deformable_group,
|
| 135 |
+
const int height_col, const int width_col,
|
| 136 |
+
scalar_t *data_col)
|
| 137 |
+
{
|
| 138 |
+
// launch channels * batch_size * height_col * width_col cores
|
| 139 |
+
CUDA_KERNEL_LOOP(index, n)
|
| 140 |
+
{
|
| 141 |
+
// NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow)
|
| 142 |
+
// here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis
|
| 143 |
+
// NOTE(Jiarui XU): different from CharlesShang's implementation, col_buffer is of shape (N, c*kw*kh, oh * ow)
|
| 144 |
+
// here columns is of shape (c*kw*kh, N, oh, ow), need to adapt axis
|
| 145 |
+
|
| 146 |
+
// index index of output matrix
|
| 147 |
+
const int w_col = index % width_col;
|
| 148 |
+
const int h_col = (index / width_col) % height_col;
|
| 149 |
+
const int b_col = (index / width_col / height_col) % batch_size;
|
| 150 |
+
const int c_im = (index / width_col / height_col) / batch_size;
|
| 151 |
+
const int c_col = c_im * kernel_h * kernel_w;
|
| 152 |
+
|
| 153 |
+
// compute deformable group index
|
| 154 |
+
const int deformable_group_index = c_im / channel_per_deformable_group;
|
| 155 |
+
|
| 156 |
+
const int h_in = h_col * stride_h - pad_h;
|
| 157 |
+
const int w_in = w_col * stride_w - pad_w;
|
| 158 |
+
|
| 159 |
+
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
|
| 160 |
+
// const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
|
| 161 |
+
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
|
| 162 |
+
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
|
| 163 |
+
|
| 164 |
+
for (int i = 0; i < kernel_h; ++i)
|
| 165 |
+
{
|
| 166 |
+
for (int j = 0; j < kernel_w; ++j)
|
| 167 |
+
{
|
| 168 |
+
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
|
| 169 |
+
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
|
| 170 |
+
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
|
| 171 |
+
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
|
| 172 |
+
scalar_t val = static_cast<scalar_t>(0);
|
| 173 |
+
const scalar_t h_im = h_in + i * dilation_h + offset_h;
|
| 174 |
+
const scalar_t w_im = w_in + j * dilation_w + offset_w;
|
| 175 |
+
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
|
| 176 |
+
{
|
| 177 |
+
//const scalar_t map_h = i * dilation_h + offset_h;
|
| 178 |
+
//const scalar_t map_w = j * dilation_w + offset_w;
|
| 179 |
+
//const int cur_height = height - h_in;
|
| 180 |
+
//const int cur_width = width - w_in;
|
| 181 |
+
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
|
| 182 |
+
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
|
| 183 |
+
}
|
| 184 |
+
*data_col_ptr = val;
|
| 185 |
+
data_col_ptr += batch_size * height_col * width_col;
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
}
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
template <typename scalar_t>
|
| 192 |
+
__global__ void deformable_col2im_gpu_kernel(const int n,
|
| 193 |
+
const scalar_t *data_col, const scalar_t *data_offset,
|
| 194 |
+
const int channels, const int height, const int width,
|
| 195 |
+
const int kernel_h, const int kernel_w,
|
| 196 |
+
const int pad_h, const int pad_w,
|
| 197 |
+
const int stride_h, const int stride_w,
|
| 198 |
+
const int dilation_h, const int dilation_w,
|
| 199 |
+
const int channel_per_deformable_group,
|
| 200 |
+
const int batch_size, const int deformable_group,
|
| 201 |
+
const int height_col, const int width_col,
|
| 202 |
+
scalar_t *grad_im)
|
| 203 |
+
{
|
| 204 |
+
CUDA_KERNEL_LOOP(index, n)
|
| 205 |
+
{
|
| 206 |
+
const int j = (index / width_col / height_col / batch_size) % kernel_w;
|
| 207 |
+
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
|
| 208 |
+
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
|
| 209 |
+
// compute the start and end of the output
|
| 210 |
+
|
| 211 |
+
const int deformable_group_index = c / channel_per_deformable_group;
|
| 212 |
+
|
| 213 |
+
int w_out = index % width_col;
|
| 214 |
+
int h_out = (index / width_col) % height_col;
|
| 215 |
+
int b = (index / width_col / height_col) % batch_size;
|
| 216 |
+
int w_in = w_out * stride_w - pad_w;
|
| 217 |
+
int h_in = h_out * stride_h - pad_h;
|
| 218 |
+
|
| 219 |
+
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
|
| 220 |
+
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
|
| 221 |
+
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
|
| 222 |
+
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
|
| 223 |
+
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
|
| 224 |
+
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
|
| 225 |
+
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
|
| 226 |
+
|
| 227 |
+
const scalar_t cur_top_grad = data_col[index];
|
| 228 |
+
const int cur_h = (int)cur_inv_h_data;
|
| 229 |
+
const int cur_w = (int)cur_inv_w_data;
|
| 230 |
+
for (int dy = -2; dy <= 2; dy++)
|
| 231 |
+
{
|
| 232 |
+
for (int dx = -2; dx <= 2; dx++)
|
| 233 |
+
{
|
| 234 |
+
if (cur_h + dy >= 0 && cur_h + dy < height &&
|
| 235 |
+
cur_w + dx >= 0 && cur_w + dx < width &&
|
| 236 |
+
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
|
| 237 |
+
abs(cur_inv_w_data - (cur_w + dx)) < 1)
|
| 238 |
+
{
|
| 239 |
+
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
|
| 240 |
+
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
|
| 241 |
+
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
|
| 242 |
+
}
|
| 243 |
+
}
|
| 244 |
+
}
|
| 245 |
+
}
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
template <typename scalar_t>
|
| 249 |
+
__global__ void deformable_col2im_coord_gpu_kernel(const int n,
|
| 250 |
+
const scalar_t *data_col, const scalar_t *data_im,
|
| 251 |
+
const scalar_t *data_offset,
|
| 252 |
+
const int channels, const int height, const int width,
|
| 253 |
+
const int kernel_h, const int kernel_w,
|
| 254 |
+
const int pad_h, const int pad_w,
|
| 255 |
+
const int stride_h, const int stride_w,
|
| 256 |
+
const int dilation_h, const int dilation_w,
|
| 257 |
+
const int channel_per_deformable_group,
|
| 258 |
+
const int batch_size, const int offset_channels, const int deformable_group,
|
| 259 |
+
const int height_col, const int width_col,
|
| 260 |
+
scalar_t *grad_offset)
|
| 261 |
+
{
|
| 262 |
+
CUDA_KERNEL_LOOP(index, n)
|
| 263 |
+
{
|
| 264 |
+
scalar_t val = 0;
|
| 265 |
+
int w = index % width_col;
|
| 266 |
+
int h = (index / width_col) % height_col;
|
| 267 |
+
int c = (index / width_col / height_col) % offset_channels;
|
| 268 |
+
int b = (index / width_col / height_col) / offset_channels;
|
| 269 |
+
// compute the start and end of the output
|
| 270 |
+
|
| 271 |
+
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
|
| 272 |
+
const int col_step = kernel_h * kernel_w;
|
| 273 |
+
int cnt = 0;
|
| 274 |
+
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
|
| 275 |
+
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
|
| 276 |
+
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
|
| 277 |
+
|
| 278 |
+
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
|
| 279 |
+
|
| 280 |
+
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
|
| 281 |
+
{
|
| 282 |
+
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
|
| 283 |
+
const int bp_dir = offset_c % 2;
|
| 284 |
+
|
| 285 |
+
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
|
| 286 |
+
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
|
| 287 |
+
int w_out = col_pos % width_col;
|
| 288 |
+
int h_out = (col_pos / width_col) % height_col;
|
| 289 |
+
int w_in = w_out * stride_w - pad_w;
|
| 290 |
+
int h_in = h_out * stride_h - pad_h;
|
| 291 |
+
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
|
| 292 |
+
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
|
| 293 |
+
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
|
| 294 |
+
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
|
| 295 |
+
scalar_t inv_h = h_in + i * dilation_h + offset_h;
|
| 296 |
+
scalar_t inv_w = w_in + j * dilation_w + offset_w;
|
| 297 |
+
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
|
| 298 |
+
{
|
| 299 |
+
inv_h = inv_w = -2;
|
| 300 |
+
}
|
| 301 |
+
const scalar_t weight = dmcn_get_coordinate_weight(
|
| 302 |
+
inv_h, inv_w,
|
| 303 |
+
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
|
| 304 |
+
val += weight * data_col_ptr[col_pos];
|
| 305 |
+
cnt += 1;
|
| 306 |
+
}
|
| 307 |
+
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
|
| 308 |
+
grad_offset[index] = val;
|
| 309 |
+
}
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
template <typename scalar_t>
|
| 313 |
+
void deformable_im2col_cuda(cudaStream_t stream,
|
| 314 |
+
const scalar_t* data_im, const scalar_t* data_offset,
|
| 315 |
+
const int batch_size, const int channels, const int height_im, const int width_im,
|
| 316 |
+
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
|
| 317 |
+
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
|
| 318 |
+
const int dilation_h, const int dilation_w,
|
| 319 |
+
const int deformable_group, scalar_t* data_col) {
|
| 320 |
+
// num_axes should be smaller than block size
|
| 321 |
+
const int channel_per_deformable_group = channels / deformable_group;
|
| 322 |
+
const int num_kernels = channels * batch_size * height_col * width_col;
|
| 323 |
+
deformable_im2col_gpu_kernel<scalar_t>
|
| 324 |
+
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
|
| 325 |
+
0, stream>>>(
|
| 326 |
+
num_kernels, data_im, data_offset, height_im, width_im, kernel_h, kernel_w,
|
| 327 |
+
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
|
| 328 |
+
batch_size, channels, deformable_group, height_col, width_col, data_col);
|
| 329 |
+
|
| 330 |
+
cudaError_t err = cudaGetLastError();
|
| 331 |
+
if (err != cudaSuccess)
|
| 332 |
+
{
|
| 333 |
+
printf("error in deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
template <typename scalar_t>
|
| 339 |
+
void deformable_col2im_cuda(cudaStream_t stream,
|
| 340 |
+
const scalar_t* data_col, const scalar_t* data_offset,
|
| 341 |
+
const int batch_size, const int channels, const int height_im, const int width_im,
|
| 342 |
+
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
|
| 343 |
+
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
|
| 344 |
+
const int dilation_h, const int dilation_w,
|
| 345 |
+
const int deformable_group, scalar_t* grad_im){
|
| 346 |
+
|
| 347 |
+
const int channel_per_deformable_group = channels / deformable_group;
|
| 348 |
+
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
|
| 349 |
+
deformable_col2im_gpu_kernel<scalar_t>
|
| 350 |
+
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
|
| 351 |
+
0, stream>>>(
|
| 352 |
+
num_kernels, data_col, data_offset, channels, height_im, width_im,
|
| 353 |
+
kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w,
|
| 354 |
+
dilation_h, dilation_w, channel_per_deformable_group,
|
| 355 |
+
batch_size, deformable_group, height_col, width_col, grad_im);
|
| 356 |
+
cudaError_t err = cudaGetLastError();
|
| 357 |
+
if (err != cudaSuccess)
|
| 358 |
+
{
|
| 359 |
+
printf("error in deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
template <typename scalar_t>
|
| 365 |
+
void deformable_col2im_coord_cuda(cudaStream_t stream,
|
| 366 |
+
const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset,
|
| 367 |
+
const int batch_size, const int channels, const int height_im, const int width_im,
|
| 368 |
+
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
|
| 369 |
+
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
|
| 370 |
+
const int dilation_h, const int dilation_w,
|
| 371 |
+
const int deformable_group,
|
| 372 |
+
scalar_t* grad_offset) {
|
| 373 |
+
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
|
| 374 |
+
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
|
| 375 |
+
deformable_col2im_coord_gpu_kernel<scalar_t>
|
| 376 |
+
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
|
| 377 |
+
0, stream>>>(
|
| 378 |
+
num_kernels, data_col, data_im, data_offset, channels, height_im, width_im,
|
| 379 |
+
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
|
| 380 |
+
dilation_h, dilation_w, channel_per_deformable_group,
|
| 381 |
+
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
|
| 382 |
+
grad_offset);
|
| 383 |
+
cudaError_t err = cudaGetLastError();
|
| 384 |
+
if (err != cudaSuccess)
|
| 385 |
+
{
|
| 386 |
+
printf("error in deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
|
| 387 |
+
}
|
| 388 |
+
}
|
vanishing_point_extraction/neurvps/neurvps/models/deformable.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import math
|
| 3 |
+
import warnings
|
| 4 |
+
from glob import glob
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
from torch.autograd import Function
|
| 9 |
+
from torch.nn.modules.utils import _pair
|
| 10 |
+
from torch.autograd.function import once_differentiable
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def load_cpp_ext(ext_name):
|
| 14 |
+
root_dir = os.path.join(os.path.split(__file__)[0])
|
| 15 |
+
src_dir = os.path.join(root_dir, "cpp")
|
| 16 |
+
tar_dir = os.path.join(src_dir, "build", ext_name)
|
| 17 |
+
os.makedirs(tar_dir, exist_ok=True)
|
| 18 |
+
srcs = glob(f"{src_dir}/*.cu") + glob(f"{src_dir}/*.cpp")
|
| 19 |
+
|
| 20 |
+
with warnings.catch_warnings():
|
| 21 |
+
warnings.simplefilter("ignore")
|
| 22 |
+
from torch.utils.cpp_extension import load
|
| 23 |
+
|
| 24 |
+
ext = load(
|
| 25 |
+
name=ext_name,
|
| 26 |
+
sources=srcs,
|
| 27 |
+
extra_cflags=["-O3"],
|
| 28 |
+
extra_cuda_cflags=[],
|
| 29 |
+
build_directory=tar_dir,
|
| 30 |
+
)
|
| 31 |
+
return ext
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# defer calling load_cpp_ext to make CUDA_VISIBLE_DEVICES happy
|
| 35 |
+
DCN = None
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class DeformConvFunction(Function):
|
| 39 |
+
@staticmethod
|
| 40 |
+
def forward(
|
| 41 |
+
ctx,
|
| 42 |
+
input,
|
| 43 |
+
offset,
|
| 44 |
+
weight,
|
| 45 |
+
bias,
|
| 46 |
+
stride,
|
| 47 |
+
padding,
|
| 48 |
+
dilation,
|
| 49 |
+
group,
|
| 50 |
+
deformable_groups,
|
| 51 |
+
im2col_step,
|
| 52 |
+
):
|
| 53 |
+
ctx.stride = _pair(stride)
|
| 54 |
+
ctx.padding = _pair(padding)
|
| 55 |
+
ctx.dilation = _pair(dilation)
|
| 56 |
+
ctx.kernel_size = _pair(weight.shape[2:4])
|
| 57 |
+
ctx.group = group
|
| 58 |
+
ctx.deformable_groups = deformable_groups
|
| 59 |
+
ctx.im2col_step = im2col_step
|
| 60 |
+
output = DCN.deform_conv_forward(
|
| 61 |
+
input,
|
| 62 |
+
weight,
|
| 63 |
+
bias,
|
| 64 |
+
offset,
|
| 65 |
+
ctx.kernel_size[0],
|
| 66 |
+
ctx.kernel_size[1],
|
| 67 |
+
ctx.stride[0],
|
| 68 |
+
ctx.stride[1],
|
| 69 |
+
ctx.padding[0],
|
| 70 |
+
ctx.padding[1],
|
| 71 |
+
ctx.dilation[0],
|
| 72 |
+
ctx.dilation[1],
|
| 73 |
+
ctx.group,
|
| 74 |
+
ctx.deformable_groups,
|
| 75 |
+
ctx.im2col_step,
|
| 76 |
+
)
|
| 77 |
+
ctx.save_for_backward(input, offset, weight, bias)
|
| 78 |
+
return output
|
| 79 |
+
|
| 80 |
+
@staticmethod
|
| 81 |
+
@once_differentiable
|
| 82 |
+
def backward(ctx, grad_output):
|
| 83 |
+
input, offset, weight, bias = ctx.saved_tensors
|
| 84 |
+
grad_input, grad_offset, grad_weight, grad_bias = DCN.deform_conv_backward(
|
| 85 |
+
input,
|
| 86 |
+
weight,
|
| 87 |
+
bias,
|
| 88 |
+
offset,
|
| 89 |
+
grad_output,
|
| 90 |
+
ctx.kernel_size[0],
|
| 91 |
+
ctx.kernel_size[1],
|
| 92 |
+
ctx.stride[0],
|
| 93 |
+
ctx.stride[1],
|
| 94 |
+
ctx.padding[0],
|
| 95 |
+
ctx.padding[1],
|
| 96 |
+
ctx.dilation[0],
|
| 97 |
+
ctx.dilation[1],
|
| 98 |
+
ctx.group,
|
| 99 |
+
ctx.deformable_groups,
|
| 100 |
+
ctx.im2col_step,
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
return (
|
| 104 |
+
grad_input,
|
| 105 |
+
grad_offset,
|
| 106 |
+
grad_weight,
|
| 107 |
+
grad_bias,
|
| 108 |
+
None,
|
| 109 |
+
None,
|
| 110 |
+
None,
|
| 111 |
+
None,
|
| 112 |
+
None,
|
| 113 |
+
None,
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class DeformConv(nn.Module):
|
| 118 |
+
def __init__(
|
| 119 |
+
self,
|
| 120 |
+
in_channels,
|
| 121 |
+
out_channels,
|
| 122 |
+
kernel_size,
|
| 123 |
+
stride,
|
| 124 |
+
padding,
|
| 125 |
+
dilation=1,
|
| 126 |
+
groups=1,
|
| 127 |
+
deformable_groups=1,
|
| 128 |
+
im2col_step=11,
|
| 129 |
+
bias=True,
|
| 130 |
+
):
|
| 131 |
+
global DCN
|
| 132 |
+
DCN = load_cpp_ext("DCN")
|
| 133 |
+
super(DeformConv, self).__init__()
|
| 134 |
+
|
| 135 |
+
if in_channels % groups != 0:
|
| 136 |
+
raise ValueError(
|
| 137 |
+
"in_channels {} must be divisible by groups {}".format(
|
| 138 |
+
in_channels, groups
|
| 139 |
+
)
|
| 140 |
+
)
|
| 141 |
+
if out_channels % groups != 0:
|
| 142 |
+
raise ValueError(
|
| 143 |
+
"out_channels {} must be divisible by groups {}".format(
|
| 144 |
+
out_channels, groups
|
| 145 |
+
)
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
self.in_channels = in_channels
|
| 149 |
+
self.out_channels = out_channels
|
| 150 |
+
self.kernel_size = _pair(kernel_size)
|
| 151 |
+
self.stride = _pair(stride)
|
| 152 |
+
self.padding = _pair(padding)
|
| 153 |
+
self.dilation = _pair(dilation)
|
| 154 |
+
self.groups = groups
|
| 155 |
+
self.deformable_groups = deformable_groups
|
| 156 |
+
self.im2col_step = im2col_step
|
| 157 |
+
self.use_bias = bias
|
| 158 |
+
|
| 159 |
+
self.weight = nn.Parameter(
|
| 160 |
+
torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
|
| 161 |
+
)
|
| 162 |
+
self.bias = nn.Parameter(torch.Tensor(out_channels))
|
| 163 |
+
self.reset_parameters()
|
| 164 |
+
if not self.use_bias:
|
| 165 |
+
self.bias.requires_grad = False
|
| 166 |
+
|
| 167 |
+
def reset_parameters(self):
|
| 168 |
+
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
|
| 169 |
+
if self.bias is not None:
|
| 170 |
+
if self.use_bias:
|
| 171 |
+
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
|
| 172 |
+
bound = 1 / math.sqrt(fan_in)
|
| 173 |
+
nn.init.uniform_(self.bias, -bound, bound)
|
| 174 |
+
else:
|
| 175 |
+
nn.init.zeros_(self.bias)
|
| 176 |
+
|
| 177 |
+
def forward(self, input, offset):
|
| 178 |
+
assert (
|
| 179 |
+
2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1]
|
| 180 |
+
== offset.shape[1]
|
| 181 |
+
)
|
| 182 |
+
return DeformConvFunction.apply(
|
| 183 |
+
input.contiguous(),
|
| 184 |
+
offset.contiguous(),
|
| 185 |
+
self.weight,
|
| 186 |
+
self.bias,
|
| 187 |
+
self.stride,
|
| 188 |
+
self.padding,
|
| 189 |
+
self.dilation,
|
| 190 |
+
self.groups,
|
| 191 |
+
self.deformable_groups,
|
| 192 |
+
self.im2col_step,
|
| 193 |
+
)
|
vanishing_point_extraction/neurvps/neurvps/models/hourglass_pose.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Hourglass network inserted in the pre-activated Resnet
|
| 3 |
+
Use lr=0.01 for current version
|
| 4 |
+
(c) Yichao Zhou (VanishingNet)
|
| 5 |
+
(c) Yichao Zhou (LCNN)
|
| 6 |
+
(c) YANG, Wei
|
| 7 |
+
"""
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
|
| 12 |
+
__all__ = ["HourglassNet", "hg"]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class Bottleneck2D(nn.Module):
    """Pre-activation bottleneck residual block: (BN -> ReLU -> Conv) x 3.

    Output channels are `planes * expansion`; an optional `resample` module
    adapts the identity path when shape or channel count changes.
    """

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, resample=None):
        super(Bottleneck2D, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * Bottleneck2D.expansion, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.resample = resample
        self.stride = stride

    def forward(self, x):
        # Identity path, adapted if a resample module was supplied.
        shortcut = x if self.resample is None else self.resample(x)
        # Three pre-activated conv stages: 1x1 reduce, 3x3, 1x1 expand.
        out = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(self.bn2(out)))
        out = self.conv3(self.relu(self.bn3(out)))
        return out + shortcut
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class Hourglass(nn.Module):
    """Recursive hourglass module: a residual skip path plus a pooled
    sub-hourglass whose output is upsampled and summed with the skip."""

    def __init__(self, block, num_blocks, planes, depth):
        super(Hourglass, self).__init__()
        self.depth = depth
        self.block = block
        self.hg = self._make_hour_glass(block, num_blocks, planes, depth)

    def _make_residual(self, block, num_blocks, planes):
        # Chain of residual blocks at constant channel count.
        return nn.Sequential(
            *[block(planes * block.expansion, planes) for _ in range(num_blocks)]
        )

    def _make_hour_glass(self, block, num_blocks, planes, depth):
        levels = []
        for level in range(depth):
            branches = [
                self._make_residual(block, num_blocks, planes) for _ in range(3)
            ]
            if level == 0:
                # The innermost level carries one extra residual chain that
                # stands in for the recursive call.
                branches.append(self._make_residual(block, num_blocks, planes))
            levels.append(nn.ModuleList(branches))
        return nn.ModuleList(levels)

    def _hour_glass_forward(self, n, x):
        # Branch 0: skip connection at the current resolution.
        skip = self.hg[n - 1][0](x)
        # Branch 1: downsampled path.
        down = self.hg[n - 1][1](F.max_pool2d(x, 2, stride=2))
        if n > 1:
            inner = self._hour_glass_forward(n - 1, down)
        else:
            # Branch 3 replaces the recursion at the bottom.
            inner = self.hg[n - 1][3](down)
        # Branch 2 post-processes the inner result before upsampling.
        up = F.interpolate(self.hg[n - 1][2](inner), scale_factor=2)
        return skip + up

    def forward(self, x):
        return self._hour_glass_forward(self.depth, x)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class HourglassNet(nn.Module):
    """Stacked hourglass network on a pre-activation ResNet stem.

    Args:
        planes: output channels produced by each stack's scoring head.
        block: residual block class (e.g. Bottleneck2D); must expose `expansion`.
        head: callable ``(in_channels, out_channels) -> nn.Module`` scoring head.
        depth: recursion depth of each hourglass.
        num_stacks: number of stacked hourglass modules.
        num_blocks: residual blocks per hourglass branch.

    ``forward`` returns one score map per stack, deepest stack first.
    """

    def __init__(self, planes, block, head, depth, num_stacks, num_blocks):
        super(HourglassNet, self).__init__()

        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv, residual layers, and a 2x2 max-pool
        # (overall stride 4 into the hourglass stacks).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, 1)
        self.layer3 = self._make_residual(block, self.num_feats, 1)
        self.maxpool = nn.MaxPool2d(2, stride=2)

        # build hourglass modules
        ch = self.num_feats * block.expansion

        hg, res, fc, score, fc_, score_ = [], [], [], [], [], []
        for i in range(num_stacks):
            hg.append(Hourglass(block, num_blocks, self.num_feats, depth))
            res.append(self._make_residual(block, self.num_feats, num_blocks))
            fc.append(self._make_fc(ch, ch))
            score.append(head(ch, planes))
            if i < num_stacks - 1:
                # Intermediate supervision: project features and scores back
                # into the trunk for the next stack.
                fc_.append(nn.Conv2d(ch, ch, kernel_size=1))
                score_.append(nn.Conv2d(planes, ch, kernel_size=1))

        self.hg = nn.ModuleList(hg)
        self.res = nn.ModuleList(res)
        self.fc = nn.ModuleList(fc)
        self.score = nn.ModuleList(score)
        self.fc_ = nn.ModuleList(fc_)
        self.score_ = nn.ModuleList(score_)

    def _make_residual(self, block, planes, blocks, stride=1):
        """Residual stage; adds a 1x1-conv shortcut when shape changes."""
        resample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            resample = nn.Conv2d(
                self.inplanes, planes * block.expansion, kernel_size=1, stride=stride
            )
        layers = [block(self.inplanes, planes, stride, resample)]
        self.inplanes = planes * block.expansion
        for i in range(blocks - 1):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        """1x1 conv + BN + ReLU feature remap.

        BUG FIX: the BatchNorm must normalize the conv OUTPUT channels.  The
        original used ``nn.BatchNorm2d(inplanes)``, which only worked because
        every caller happens to pass ``inplanes == outplanes``.
        """
        return nn.Sequential(
            nn.Conv2d(inplanes, outplanes, kernel_size=1),
            nn.BatchNorm2d(outplanes),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        out = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)

        for i in range(self.num_stacks):
            y = self.hg[i](x)
            y = self.res[i](y)
            y = self.fc[i](y)
            score = self.score[i](y)
            out.append(score)
            if i < self.num_stacks - 1:
                # Feed intermediate features and scores back into the trunk.
                fc_ = self.fc_[i](y)
                score_ = self.score_[i](score)
                x = x + fc_ + score_

        # Deepest (last) stack's prediction first.
        return out[::-1]
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def hg(**kwargs):
    """Build a HourglassNet from keyword configuration.

    Required keys: ``planes``, ``depth``, ``num_stacks``, ``num_blocks``.
    Optional ``head`` defaults to a plain 1x1 convolution factory.
    """
    head_factory = kwargs.get("head", lambda c_in, c_out: nn.Conv2d(c_in, c_out, 1))
    return HourglassNet(
        planes=kwargs["planes"],
        block=Bottleneck2D,
        head=head_factory,
        depth=kwargs["depth"],
        num_stacks=kwargs["num_stacks"],
        num_blocks=kwargs["num_blocks"],
    )
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def main():
    """Smoke test: build a small network to verify the module wiring.

    BUG FIX: ``hg`` requires a ``planes`` kwarg; the original call omitted it
    and raised ``KeyError: 'planes'`` before any model was constructed.
    """
    hg(planes=16, depth=2, num_stacks=1, num_blocks=1)


if __name__ == "__main__":
    main()
|
vanishing_point_extraction/neurvps/neurvps/models/vanishing_net.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import math
|
| 3 |
+
import random
|
| 4 |
+
import itertools
|
| 5 |
+
from collections import defaultdict
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
import numpy.linalg as LA
|
| 11 |
+
import matplotlib.pyplot as plt
|
| 12 |
+
import torch.nn.functional as F
|
| 13 |
+
|
| 14 |
+
from neurvps.utils import plot_image_grid
|
| 15 |
+
from neurvps.config import C, M
|
| 16 |
+
from neurvps.models.conic import ConicConv
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class VanishingNet(nn.Module):
    """Scores candidate vanishing-point directions against backbone features.

    Wraps a feature `backbone` and an ApolloniusNet classifier.  During
    training it samples positive/negative/random directions around the
    ground-truth vanishing points and returns per-resolution BCE losses;
    at test time it scores the directions supplied by the caller.
    """

    def __init__(self, backbone, output_stride=4, upsample_scale=1):
        super().__init__()
        self.backbone = backbone
        self.anet = ApolloniusNet(output_stride, upsample_scale)
        # reduction="none": per-sample losses are re-weighted by pos/neg masks below
        self.loss = nn.BCEWithLogitsLoss(reduction="none")

    def forward(self, input_dict):
        # First (finest) feature map from the backbone.
        x = self.backbone(input_dict["image"])[0]
        N, _, H, W = x.shape
        test = input_dict.get("test", False)
        if test:
            c = len(input_dict["vpts"])
        else:
            # candidates per image: random + per-vpt (pos + neg) per resolution
            c = M.smp_rnd + C.io.num_vpts * len(M.multires) * (M.smp_pos + M.smp_neg)
        # Replicate the feature map once per candidate direction.
        x = x[:, None].repeat(1, c, 1, 1, 1).reshape(N * c, _, H, W)

        if test:
            vpts = [to_pixel(v) for v in input_dict["vpts"]]
            vpts = torch.tensor(vpts, device=x.device)
            return self.anet(x, vpts).sigmoid()

        vpts_gt = input_dict["vpts"].cpu().numpy()
        vpts, y = [], []
        for n in range(N):

            def add_sample(p):
                # Record the pixel location and multiresolution label of p.
                vpts.append(to_pixel(p))
                y.append(to_label(p, vpts_gt[n]))

            for vgt in vpts_gt[n]:
                for st, ed in zip([0] + M.multires[:-1], M.multires):
                    # positive samples
                    for _ in range(M.smp_pos):
                        add_sample(sample_sphere(vgt, st, ed))
                    # negative samples
                    for _ in range(M.smp_neg):
                        add_sample(sample_sphere(vgt, ed, ed * M.smp_multiplier))
            # random samples
            for _ in range(M.smp_rnd):
                add_sample(sample_sphere(np.array([0, 0, 1]), 0, math.pi / 2))

        y = torch.tensor(y, device=x.device, dtype=torch.float)
        vpts = torch.tensor(vpts, device=x.device)

        x = self.anet(x, vpts)
        L = self.loss(x, y)
        maskn = (y == 0).float()
        maskp = (y == 1).float()
        losses = {}
        for i in range(len(M.multires)):
            # Every resolution band must contain at least one positive and
            # one negative sample, otherwise the normalization divides by 0.
            assert maskn[:, i].sum().item() != 0
            assert maskp[:, i].sum().item() != 0
            losses[f"lneg{i}"] = (L[:, i] * maskn[:, i]).sum() / maskn[:, i].sum()
            losses[f"lpos{i}"] = (L[:, i] * maskp[:, i]).sum() / maskp[:, i].sum()

        return {
            "losses": [losses],
            "preds": {"vpts": vpts, "scores": x.sigmoid(), "ys": y},
        }
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class ApolloniusNet(nn.Module):
    """Classifies whether a candidate vanishing point lies within each angular
    resolution, using ConicConv layers oriented toward the candidate pixel."""

    def __init__(self, output_stride, upsample_scale):
        super().__init__()
        self.fc0 = nn.Conv2d(64, 32, 1)
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(2, 2)

        if M.conic_6x:
            # Two extra conic stages at full feature resolution.
            self.bn00 = nn.BatchNorm2d(32)
            self.conv00 = ConicConv(32, 32)
            self.bn0 = nn.BatchNorm2d(32)
            self.conv0 = ConicConv(32, 32)

        self.bn1 = nn.BatchNorm2d(32)
        self.conv1 = ConicConv(32, 64)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv2 = ConicConv(64, 128)
        self.bn3 = nn.BatchNorm2d(128)
        self.conv3 = ConicConv(128, 256)
        self.bn4 = nn.BatchNorm2d(256)
        self.conv4 = ConicConv(256, 256)

        # 16384 = 256 channels x 8 x 8 spatial, i.e. the forward pass assumes
        # a 128x128 feature map that is pooled four times — TODO confirm.
        self.fc1 = nn.Linear(16384, M.fc_channel)
        self.fc2 = nn.Linear(M.fc_channel, M.fc_channel)
        self.fc3 = nn.Linear(M.fc_channel, len(M.multires))

        self.upsample_scale = upsample_scale
        # Stride from image pixels to feature-map cells after optional upsampling.
        self.stride = output_stride / upsample_scale

    def forward(self, input, vpts):
        # for now we did not do interpolation
        if self.upsample_scale != 1:
            input = F.interpolate(input, scale_factor=self.upsample_scale)
        x = self.fc0(input)

        if M.conic_6x:
            # vpts / stride - 0.5 converts pixel coords to feature-cell coords.
            x = self.bn00(x)
            x = self.relu(x)
            x = self.conv00(x, vpts / self.stride - 0.5)
            x = self.bn0(x)
            x = self.relu(x)
            x = self.conv0(x, vpts / self.stride - 0.5)

        # 128
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv1(x, vpts / self.stride - 0.5)
        x = self.pool(x)
        # 64
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv2(x, vpts / self.stride / 2 - 0.5)
        x = self.pool(x)
        # 32
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv3(x, vpts / self.stride / 4 - 0.5)
        x = self.pool(x)
        # 16
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv4(x, vpts / self.stride / 8 - 0.5)
        x = self.pool(x)
        # 8
        # Flatten and classify into one logit per angular resolution.
        x = x.view(x.shape[0], -1)
        x = self.relu(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)

        return x
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def orth(v):
    """Return a unit 3-vector orthogonal to `v`.

    Zeroes the axis whose component has the smaller magnitude (between x
    and y) to keep the construction numerically well-conditioned.
    """
    x, y, z = v
    candidate = np.array([0.0, -z, y]) if abs(x) < abs(y) else np.array([-z, 0.0, x])
    return candidate / LA.norm(candidate)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def sample_sphere(v, theta0, theta1):
    """Draw a random unit vector whose angle to `v` lies in [theta0, theta1].

    Sampling uniformly in cos(theta) yields a uniform distribution over the
    spherical band.  RNG call order (uniform, then random) is significant
    for reproducibility with seeded `random`.
    """
    cos_t = random.uniform(math.cos(theta1), math.cos(theta0))
    azimuth = random.random() * math.pi * 2
    basis1 = orth(v)
    basis2 = np.cross(v, basis1)
    ring_radius = math.sqrt(1 - cos_t ** 2)
    sample = v * cos_t + ring_radius * (
        basis1 * math.cos(azimuth) + basis2 * math.sin(azimuth)
    )
    return sample / LA.norm(sample)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def to_label(w, vpts):
    """Multiresolution label for direction `w`: one flag per resolution,
    set when the angle to the nearest ground-truth vp is within that band."""
    nearest_angle = np.min(np.arccos(np.abs(vpts @ w).clip(max=1)))
    return [int(nearest_angle < res + 1e-6) for res in M.multires]
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def to_pixel(w):
    """Project unit direction `w` to (row, col) pixel coordinates of a
    512x512 image using the configured focal length (y axis flipped)."""
    col = w[0] / w[2] * C.io.focal_length * 256 + 256
    row = -w[1] / w[2] * C.io.focal_length * 256 + 256
    return row, col
|
vanishing_point_extraction/neurvps/neurvps/trainer.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import atexit
|
| 3 |
+
import random
|
| 4 |
+
import shutil
|
| 5 |
+
import signal
|
| 6 |
+
import os.path as osp
|
| 7 |
+
import threading
|
| 8 |
+
import subprocess
|
| 9 |
+
from timeit import default_timer as timer
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
import torch
|
| 13 |
+
import matplotlib as mpl
|
| 14 |
+
import matplotlib.pyplot as plt
|
| 15 |
+
import torch.nn.functional as F
|
| 16 |
+
from skimage import io
|
| 17 |
+
from tensorboardX import SummaryWriter
|
| 18 |
+
|
| 19 |
+
import neurvps.utils as utils
|
| 20 |
+
from neurvps.config import C, M
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class Trainer(object):
    """Training/validation loop for NeurVPS models.

    Writes TensorBoard scalars and a CSV loss log under `out`, checkpoints
    the latest/best model, and dumps validation predictions as .npz files
    plus a handful of visualization plots.
    """

    def __init__(
        self, device, model, optimizer, train_loader, val_loader, batch_size, out
    ):
        self.device = device

        self.model = model
        self.optim = optimizer

        self.train_loader = train_loader
        self.val_loader = val_loader
        self.batch_size = batch_size

        # Output directory (created on demand) for logs and checkpoints.
        self.out = out
        if not osp.exists(self.out):
            os.makedirs(self.out)

        board_out = osp.join(self.out, "tensorboard")
        if not osp.exists(board_out):
            os.makedirs(board_out)
        self.writer = SummaryWriter(board_out)
        # self.run_tensorboard(board_out)
        # time.sleep(1)

        self.epoch = 0
        self.iteration = 0
        self.max_epoch = C.optim.max_epoch
        self.lr_decay_epoch = C.optim.lr_decay_epoch
        self.num_stacks = C.model.num_stacks
        # 1e1000 overflows to float("inf"): any real loss becomes the new best.
        self.mean_loss = self.best_mean_loss = 1e1000

        # Loss labels and metric buffers are lazily initialized on the
        # first call to _loss, once the loss dict keys are known.
        self.loss_labels = None
        self.avg_metrics = None
        self.metrics = np.zeros(0)

    def run_tensorboard(self, board_out):
        """Spawn a TensorBoard subprocess (GPU hidden) killed at exit."""
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        p = subprocess.Popen(
            ["tensorboard", f"--logdir={board_out}", f"--port={C.io.tensorboard_port}"]
        )

        def killme():
            os.kill(p.pid, signal.SIGTERM)

        atexit.register(killme)

    def _loss(self, result):
        """Accumulate per-stack metrics and return the summed scalar loss."""
        losses = result["losses"]
        # Don't move loss label to other place.
        # If I want to change the loss, I just need to change this function.
        if self.loss_labels is None:
            self.loss_labels = ["sum"] + list(losses[0].keys())
            self.metrics = np.zeros([self.num_stacks, len(self.loss_labels)])
            print()
            print(
                "| ".join(
                    ["progress "]
                    + list(map("{:7}".format, self.loss_labels))
                    + ["speed"]
                )
            )
            with open(f"{self.out}/loss.csv", "a") as fout:
                print(",".join(["progress"] + self.loss_labels), file=fout)

        total_loss = 0
        for i in range(self.num_stacks):
            for j, name in enumerate(self.loss_labels):
                if name == "sum":
                    continue
                if name not in losses[i]:
                    # Only deeper stacks may omit a loss term.
                    assert i != 0
                    continue
                loss = losses[i][name].mean()
                # Column 0 accumulates the per-stack sum of all terms.
                self.metrics[i, 0] += loss.item()
                self.metrics[i, j] += loss.item()
                total_loss += loss
        return total_loss

    def validate(self):
        """Run the model over the validation set, log metrics, save
        predictions/plots, and checkpoint (tracking the best mean loss)."""
        tprint("Running validation...", " " * 75)
        training = self.model.training
        self.model.eval()

        viz = osp.join(self.out, "viz", f"{self.iteration * self.batch_size:09d}")
        npz = osp.join(self.out, "npz", f"{self.iteration * self.batch_size:09d}")
        osp.exists(viz) or os.makedirs(viz)
        osp.exists(npz) or os.makedirs(npz)

        total_loss = 0
        self.metrics[...] = 0
        # Candidates per image; must match VanishingNet's sampling count.
        c = M.smp_rnd + C.io.num_vpts * len(M.multires) * (M.smp_pos + M.smp_neg)
        with torch.no_grad():
            for batch_idx, (image, target) in enumerate(self.val_loader):
                image = image.to(self.device)
                input_dict = {"image": image, "vpts": target["vpts"], "eval": True}
                result = self.model(input_dict)
                total_loss += self._loss(result)
                # permute output to be (batch x (nneg + npos) x 2)
                preds = result["preds"]
                vpts = preds["vpts"].reshape(-1, c, 2).cpu().numpy()
                scores = preds["scores"].reshape(-1, c, len(M.multires)).cpu().numpy()
                ys = preds["ys"].reshape(-1, c, len(M.multires)).cpu().numpy()
                for i in range(self.batch_size):
                    index = batch_idx * self.batch_size + i
                    np.savez(
                        f"{npz}/{index:06}.npz",
                        **{k: v[i].cpu().numpy() for k, v in preds.items()},
                    )
                    # Only the first 8 samples get visualization plots.
                    if index >= 8:
                        continue
                    self.plot(index, image[i], vpts[i], scores[i], ys[i], f"{viz}/{index:06}")

        self._write_metrics(len(self.val_loader), total_loss, "validation", True)
        self.mean_loss = total_loss / len(self.val_loader)

        torch.save(
            {
                "iteration": self.iteration,
                "arch": self.model.__class__.__name__,
                "optim_state_dict": self.optim.state_dict(),
                "model_state_dict": self.model.state_dict(),
                "best_mean_loss": self.best_mean_loss,
            },
            osp.join(self.out, "checkpoint_latest.pth.tar"),
        )
        shutil.copy(
            osp.join(self.out, "checkpoint_latest.pth.tar"),
            osp.join(npz, "checkpoint.pth.tar"),
        )
        if self.mean_loss < self.best_mean_loss:
            self.best_mean_loss = self.mean_loss
            shutil.copy(
                osp.join(self.out, "checkpoint_latest.pth.tar"),
                osp.join(self.out, "checkpoint_best.pth.tar"),
            )

        # Restore the training/eval mode the model arrived in.
        if training:
            self.model.train()

    def train_epoch(self):
        """One pass over the training loader with periodic progress output
        and validation every C.io.validation_interval images."""
        self.model.train()
        time = timer()
        for batch_idx, (image, target) in enumerate(self.train_loader):
            self.optim.zero_grad()
            self.metrics[...] = 0

            image = image.to(self.device)
            input_dict = {"image": image, "vpts": target["vpts"], "eval": False}
            result = self.model(input_dict)

            loss = self._loss(result)
            if np.isnan(loss.item()):
                raise ValueError("loss is nan while training")
            loss.backward()
            self.optim.step()

            # Exponential moving average of the metric table for display.
            if self.avg_metrics is None:
                self.avg_metrics = self.metrics
            else:
                self.avg_metrics = self.avg_metrics * 0.9 + self.metrics * 0.1
            self.iteration += 1
            self._write_metrics(1, loss.item(), "training", do_print=False)

            if self.iteration % 4 == 0:
                tprint(
                    f"{self.epoch:03}/{self.iteration * self.batch_size // 1000:04}k| "
                    + "| ".join(map("{:.5f}".format, self.avg_metrics[0]))
                    + f"| {4 * self.batch_size / (timer() - time):04.1f} "
                )
                time = timer()
            num_images = self.batch_size * self.iteration
            if (
                num_images % C.io.validation_interval == 0
                or num_images == C.io.validation_debug
            ):
                self.validate()
                time = timer()

    def _write_metrics(self, size, total_loss, prefix, do_print=False):
        """Log `self.metrics / size` per stack to TensorBoard; optionally
        append a CSV row and print the first stack's numbers."""
        for i, metrics in enumerate(self.metrics):
            for label, metric in zip(self.loss_labels, metrics):
                self.writer.add_scalar(
                    f"{prefix}/{i}/{label}", metric / size, self.iteration
                )
            if i == 0 and do_print:
                csv_str = (
                    f"{self.epoch:03}/{self.iteration * self.batch_size:07},"
                    + ",".join(map("{:.11f}".format, metrics / size))
                )
                prt_str = (
                    f"{self.epoch:03}/{self.iteration * self.batch_size // 1000:04}k| "
                    + "| ".join(map("{:.5f}".format, metrics / size))
                )
                with open(f"{self.out}/loss.csv", "a") as fout:
                    print(csv_str, file=fout)
                pprint(prt_str, " " * 7)
        self.writer.add_scalar(
            f"{prefix}/total_loss", total_loss / size, self.iteration
        )
        return total_loss

    def plot(self, index, image, vpts, scores, ys, prefix):
        """Save one annotated figure per candidate vp: the image, a fan of
        lines through the vp, and its predicted scores / labels."""
        for idx, (vp, score, y) in enumerate(zip(vpts, scores, ys)):
            plt.imshow(image[0].cpu().numpy())
            color = (random.random(), random.random(), random.random())
            # vp is stored as (row, col); matplotlib wants (x, y) = (col, row).
            plt.scatter(vp[1], vp[0])
            plt.text(
                vp[1] - 20,
                vp[0] - 10,
                " ".join(map("{:.3f}".format, score))
                + "\n"
                + " ".join(map("{:.3f}".format, y)),
                bbox=dict(facecolor=color),
                fontsize=12,
            )
            for xy in np.linspace(0, 512, 10):
                plt.plot(
                    [vp[1], xy, vp[1], xy, vp[1], 0, vp[1], 511],
                    [vp[0], 0, vp[0], 511, vp[0], xy, vp[0], xy],
                    color=color,
                )
            plt.savefig(f"{prefix}_vpts_{idx}.jpg"), plt.close()

    def train(self):
        """Main entry point: train until max_epoch, decaying LR once."""
        plt.rcParams["figure.figsize"] = (24, 24)
        epoch_size = len(self.train_loader)
        # Resume at the epoch implied by a restored iteration counter.
        start_epoch = self.iteration // epoch_size
        for self.epoch in range(start_epoch, self.max_epoch):
            if self.epoch == self.lr_decay_epoch:
                self.optim.param_groups[0]["lr"] /= 10
            self.train_epoch()

    def move(self, obj):
        """Move a tensor, or each tensor value of a dict, to self.device."""
        if isinstance(obj, torch.Tensor):
            return obj.to(self.device)
        if isinstance(obj, dict):
            for name in obj:
                if isinstance(obj[name], torch.Tensor):
                    obj[name] = obj[name].to(self.device)
            return obj
        assert False
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
# Shared colormap machinery for the visualization helpers below: "jet"
# normalized to [0.4, 1.0], exposed through a ScalarMappable for colorbars.
cmap = plt.get_cmap("jet")
norm = mpl.colors.Normalize(vmin=0.4, vmax=1.0)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def c(x):
    """Map a scalar (clipped to [0.4, 1.0] by `norm`) to an RGBA color."""
    return sm.to_rgba(x)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def imshow(im):
    """Display `im` with the shared colorbar, y-axis flipped so the origin
    sits at the top-left (image convention)."""
    plt.close()
    plt.tight_layout()
    plt.imshow(im)
    plt.colorbar(sm, fraction=0.046)
    side = im.shape[0]
    plt.xlim([0, side])
    plt.ylim([side, 0])
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def tprint(*args):
    """Temporarily prints things on the screen: no trailing newline, so the
    next carriage-return print overwrites this line."""
    print("\r", end="")
    print(*args, end="")
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def pprint(*args):
    """Permanently prints things on the screen: rewinds over any pending
    tprint output, then prints with a trailing newline."""
    print("\r", end="")
    print(*args)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def _launch_tensorboard(board_out, port, out):
    """Start a TensorBoard subprocess on `port` and kill it at interpreter exit.

    CUDA devices are hidden from the child so it does not grab GPU memory.
    NOTE(review): `out` is unused here — presumably kept for interface
    parity with callers; confirm before removing.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    p = subprocess.Popen(["tensorboard", f"--logdir={board_out}", f"--port={port}"])

    def kill():
        os.kill(p.pid, signal.SIGTERM)

    atexit.register(kill)
|
vanishing_point_extraction/neurvps/neurvps/utils.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import random
|
| 3 |
+
import os.path as osp
|
| 4 |
+
import multiprocessing
|
| 5 |
+
from timeit import default_timer as timer
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import numpy.linalg as LA
|
| 9 |
+
import matplotlib.pyplot as plt
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class benchmark(object):
    """Context manager that times its body and optionally prints the duration.

    When enabled, the elapsed seconds are stored on `self.time` after exit
    and printed using `fmt`.
    """

    def __init__(self, msg, enable=True, fmt="%0.3g"):
        self.msg = msg
        self.fmt = fmt
        self.enable = enable

    def __enter__(self):
        if self.enable:
            self.start = timer()
        return self

    def __exit__(self, *args):
        if not self.enable:
            return
        elapsed = timer() - self.start
        print(("%s : " + self.fmt + " seconds") % (self.msg, elapsed))
        self.time = elapsed
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def plot_image_grid(im, title):
    """Plot the first 16 slices of `im` on a 4x4 grid, each with its own
    colorbar, and title the figure's current axes."""
    plt.figure()
    for cell in range(16):
        plt.subplot(4, 4, cell + 1)
        plt.imshow(im[cell])
        plt.colorbar()
    plt.title(title)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def quiver(x, y, ax):
    """Draw the vector field (x, y) on `ax` in pixel coordinates with the
    origin at the top-left (y axis inverted)."""
    ax.set_xlim(0, x.shape[1])
    ax.set_ylim(x.shape[0], 0)
    arrow_style = dict(
        units="xy",
        angles="xy",
        scale_units="xy",
        scale=1,
        minlength=0.01,
        width=0.1,
        color="b",
    )
    ax.quiver(x, y, **arrow_style)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def np_softmax(x, axis=0):
    """Numerically stable softmax of `x` along `axis`.

    Subtracting the global max keeps every exponent non-positive; the
    per-axis normalization divides the shift back out.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum(axis=axis, keepdims=True)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def argsort2d(arr):
    """Return the (row, col) index pairs of `arr` sorted by ascending value."""
    flat_order = np.argsort(arr.ravel())
    rows, cols = np.unravel_index(flat_order, arr.shape)
    return np.dstack((rows, cols))[0]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def __parallel_handle(f, q_in, q_out):
    """Worker loop: apply `f` to queued (index, item) pairs until a
    (None, _) sentinel arrives, echoing (index, result) to `q_out`."""
    while True:
        idx, item = q_in.get()
        if idx is None:
            break
        q_out.put((idx, f(item)))
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def parmap(f, X, nprocs=multiprocessing.cpu_count(), progress_bar=lambda x: x):
    """Parallel map: apply `f` to every element of `X` with `nprocs` workers.

    Results are returned in input order.  `nprocs=0` means "use all cores";
    `progress_bar` wraps the result-collection range (e.g. tqdm).
    NOTE(review): the default `nprocs` is evaluated once at import time.
    """
    if nprocs == 0:
        nprocs = multiprocessing.cpu_count()
    # Capacity-1 input queue keeps producer and workers in lock-step.
    q_in = multiprocessing.Queue(1)
    q_out = multiprocessing.Queue()

    proc = [
        multiprocessing.Process(target=__parallel_handle, args=(f, q_in, q_out))
        for _ in range(nprocs)
    ]
    for p in proc:
        p.daemon = True
        p.start()

    try:
        sent = [q_in.put((i, x)) for i, x in enumerate(X)]
        # One (None, None) sentinel per worker shuts the pool down.
        [q_in.put((None, None)) for _ in range(nprocs)]
        res = [q_out.get() for _ in progress_bar(range(len(sent)))]
        [p.join() for p in proc]
    except KeyboardInterrupt:
        q_in.close()
        q_out.close()
        raise
    # Sort by the original index to restore input order.
    return [x for i, x in sorted(res)]
|
vanishing_point_extraction/neurvps/vp_estim.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
import argparse
import json
import math
import os
import random
import re

import numpy as np
import numpy.linalg as LA
import skimage.io
import skimage.transform
import torch
from tqdm import tqdm

import neurvps
from neurvps.config import C, M
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def AA(x, y, threshold):
    """Angular-accuracy style AUC of the curve (x, y) up to *threshold*.

    The curve is truncated at x == threshold (a closing sample is appended)
    and integrated with a left Riemann sum, normalized by the threshold.
    """
    cut = np.searchsorted(x, threshold)
    xs = np.append(x[:cut], threshold)
    ys = np.append(y[:cut], threshold)
    widths = np.diff(xs)
    return np.dot(widths, ys[:-1]) / threshold
|
| 25 |
+
|
| 26 |
+
def sample_sphere(v, alpha, num_pts):
    """Fibonacci-sample *num_pts* unit directions within a cone of
    half-angle *alpha* (radians) around the unit 3-vector *v*.

    Returns an array of shape (num_pts, 3).
    """
    u1 = orth(v)
    u2 = np.cross(v, u1)
    axis, u1, u2 = v[:, None], u1[:, None], u2[:, None]
    ks = np.linspace(1, num_pts, num_pts)
    # Polar angles chosen so samples are uniform in solid angle up to alpha.
    polar = np.arccos(1 + (math.cos(alpha) - 1) * ks / num_pts)
    azimuth = np.pi * (1 + 5 ** 0.5) * ks  # golden-angle spiral
    radial = np.sin(polar)
    directions = axis * np.cos(polar) + radial * (u1 * np.cos(azimuth) +
                                                  u2 * np.sin(azimuth))
    return directions.T
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def orth(v):
    """Return a unit 3-vector orthogonal to *v*.

    Picks whichever of two candidate constructions avoids the degenerate
    (zero-vector) case, then normalizes.
    """
    x, y, z = v
    candidate = [0.0, -z, y] if abs(x) < abs(y) else [-z, 0.0, x]
    w = np.asarray(candidate, dtype=float)
    return w / LA.norm(w)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def main():
    """Estimate vanishing points for every image of a driving dataset.

    Loads a NeurVPS checkpoint, then for each sequence of the chosen
    dataset runs a coarse-to-fine vanishing-point search per image and
    writes the detected points (in original-image pixel coordinates) to
    one JSON file per sequence under --save_path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--devices', type=str, help='cuda device')
    parser.add_argument('--config_file', type=str, help='configuration file path')
    parser.add_argument('--checkpoint', type=str, help='model checkpoint path')
    # BUG FIX: help said "KITTI360" but the code matches "KITTI-360".
    parser.add_argument('--dataset', type=str, help='dataset (e.g. SemanticKITTI | KITTI-360)')
    parser.add_argument('--root_path', type=str, help='dataset root path')
    parser.add_argument('--save_path', type=str, help='result path')

    args = parser.parse_args()
    config_file = args.config_file
    C.update(C.from_yaml(filename=config_file))
    C.model.im2col_step = 32  # override im2col_step for evaluation
    M.update(C.model)

    # Seed every RNG so candidate sampling is reproducible.
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)

    device_name = "cpu"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.devices
    if torch.cuda.is_available():
        device_name = "cuda"
        torch.backends.cudnn.deterministic = True
        torch.cuda.manual_seed(0)
        print("Let's use", torch.cuda.device_count(), "GPU(s)!")
    else:
        print("CUDA is not available")
    device = torch.device(device_name)

    if M.backbone == "stacked_hourglass":
        model = neurvps.models.hg(
            planes=64, depth=M.depth, num_stacks=M.num_stacks, num_blocks=M.num_blocks
        )
    else:
        raise NotImplementedError

    checkpoint = torch.load(args.checkpoint)
    model = neurvps.models.VanishingNet(
        model, C.model.output_stride, C.model.upsample_scale
    )
    model = model.to(device)
    model = torch.nn.DataParallel(
        model, device_ids=list(range(args.devices.count(",") + 1))
    )
    model.load_state_dict(checkpoint["model_state_dict"])
    model.eval()

    dataset = args.dataset
    root_path = args.root_path
    save_root_path = args.save_path

    if dataset == "SemanticKITTI":
        sequences = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
        folder_paths = [os.path.join(root_path, 'dataset/sequences', str(sequence), 'image_2')
                        for sequence in sequences]
        dataset_prefixes = ['SemanticKITTI/dataset/sequences/' + str(sequence) + '/image_2/'
                            for sequence in sequences]
        if not os.path.exists(save_root_path):
            os.makedirs(save_root_path)
        save_paths = [os.path.join(save_root_path, "seq_" + sequence + '.json')
                      for sequence in sequences]

    elif dataset == "KITTI-360":
        sequences = ['2013_05_28_drive_0000_sync', '2013_05_28_drive_0002_sync',
                     '2013_05_28_drive_0003_sync', '2013_05_28_drive_0004_sync',
                     '2013_05_28_drive_0005_sync', '2013_05_28_drive_0006_sync',
                     '2013_05_28_drive_0007_sync', '2013_05_28_drive_0009_sync',
                     '2013_05_28_drive_0010_sync']
        folder_paths = [os.path.join(root_path, 'data_2d_raw', str(sequence), 'image_00/data_rect')
                        for sequence in sequences]
        dataset_prefixes = ['KITTI-360/' + str(sequence) + '/image_00/' for sequence in sequences]
        if not os.path.exists(save_root_path):
            os.makedirs(save_root_path)
        save_sequences = ['00', '02', '03', '04', '05', '06', '07', '09', '10']
        save_paths = [os.path.join(save_root_path, "seq_" + sequence + '.json')
                      for sequence in save_sequences]

    else:
        # BUG FIX: an unknown dataset previously fell through and crashed
        # later with a NameError on `sequences`; fail fast instead.
        raise ValueError(
            "unknown dataset %r; expected 'SemanticKITTI' or 'KITTI-360'" % dataset)

    for seq in range(len(sequences)):
        print("sequence : ", seq)
        folder_path = folder_paths[seq]
        all_files = os.listdir(folder_path)
        # Sort numerically by the first number in the filename (frame id).
        all_files = sorted(all_files, key=lambda s: int(re.search(r'\d+', s).group()))

        image_extensions = ['.jpg', '.png', '.jpeg']

        VP = {}  # dataset-relative image path -> list of [x, y] vanishing points

        for file in tqdm(all_files):
            if any(file.endswith(ext) for ext in image_extensions):
                image_path = os.path.join(folder_path, file)

                image_origin = skimage.io.imread(image_path)

                original_height, original_width = image_origin.shape[:2]

                # The network expects 512x512 RGB input.
                image = skimage.transform.resize(image_origin, (512, 512))

                if image.ndim == 2:
                    # Grayscale: replicate the channel to fake RGB.
                    image = image[:, :, None].repeat(3, 2)

                image = np.rollaxis(image, 2)  # HWC -> CHW
                image_tensor = torch.tensor(image * 255).float().to(device).unsqueeze(0)

                input_dict = {"image": image_tensor, "test": True}
                # Coarse pass: score 64 directions sampled over the forward
                # hemisphere.
                vpts = sample_sphere(np.array([0, 0, 1]), np.pi / 2, 64)
                input_dict["vpts"] = vpts
                with torch.no_grad():
                    score = model(input_dict)[:, -1].cpu().numpy()
                index = np.argsort(-score)
                candidate = [index[0]]
                n = C.io.num_vpts
                # Non-maximum suppression: keep the n best directions that
                # are at least pi/n apart in angle.
                for i in index[1:]:
                    if len(candidate) == n:
                        break
                    dst = np.min(np.arccos(np.abs(vpts[candidate] @ vpts[i])))
                    if dst < np.pi / n:
                        continue
                    candidate.append(i)
                vpts_pd = vpts[candidate]

                # Refine each candidate at progressively finer angular scales.
                for res in range(1, len(M.multires)):
                    vpts = [sample_sphere(vpts_pd[vp], M.multires[-res], 64) for vp in range(n)]
                    input_dict["vpts"] = np.vstack(vpts)
                    with torch.no_grad():
                        score = model(input_dict)[:, -res - 1].cpu().numpy().reshape(n, -1)
                    for i, s in enumerate(score):
                        vpts_pd[i] = vpts[i][np.argmax(s)]

                Vanishing_point = []

                for vp in vpts_pd:
                    # Map normalized coordinates back to original-image
                    # pixels (the y axis is flipped between conventions).
                    x = vp[0] * original_width / 2 + original_width / 2
                    y = original_height / 2 - vp[1] * original_height / 2

                    Vanishing_point.append([x, y])

                VP[os.path.join(dataset_prefixes[seq], file)] = Vanishing_point

        with open(save_paths[seq], 'w') as f:
            json.dump(VP, f)
|
| 178 |
+
|
| 179 |
+
# Entry point: run vanishing-point extraction when executed as a script.
if __name__ == "__main__":
    main()
|
vanishing_point_extraction/vanishing_point/neurvps/TMM17/checkpoint_latest.pth.tar
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:951f12bfe2a3afdef5b95d6a1cb9bbe51e73913c70212c8e628b696bd39a74e7
|
| 3 |
+
size 358844104
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import neurvps.models
|
| 2 |
+
import neurvps.trainer
|
| 3 |
+
import neurvps.datasets
|
| 4 |
+
import neurvps.config
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (267 Bytes). View file
|
|
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/box.cpython-38.pyc
ADDED
|
Binary file (33.4 kB). View file
|
|
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/config.cpython-38.pyc
ADDED
|
Binary file (231 Bytes). View file
|
|
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc
ADDED
|
Binary file (6.56 kB). View file
|
|
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc
ADDED
|
Binary file (8.97 kB). View file
|
|
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/utils.cpython-38.pyc
ADDED
|
Binary file (3.72 kB). View file
|
|
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/box.py
ADDED
|
@@ -0,0 +1,1110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# -*- coding: UTF-8 -*-
|
| 3 |
+
#
|
| 4 |
+
# Copyright (c) 2017-2019 - Chris Griffith - MIT License
|
| 5 |
+
"""
|
| 6 |
+
Improved dictionary access through dot notation with additional tools.
|
| 7 |
+
"""
|
| 8 |
+
import string
|
| 9 |
+
import sys
|
| 10 |
+
import json
|
| 11 |
+
import re
|
| 12 |
+
import copy
|
| 13 |
+
from keyword import kwlist
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
try:
|
| 17 |
+
from collections.abc import Iterable, Mapping, Callable
|
| 18 |
+
except ImportError:
|
| 19 |
+
from collections import Iterable, Mapping, Callable
|
| 20 |
+
|
| 21 |
+
yaml_support = True
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
import yaml
|
| 25 |
+
except ImportError:
|
| 26 |
+
try:
|
| 27 |
+
import ruamel.yaml as yaml
|
| 28 |
+
except ImportError:
|
| 29 |
+
yaml = None
|
| 30 |
+
yaml_support = False
|
| 31 |
+
|
| 32 |
+
if sys.version_info >= (3, 0):
|
| 33 |
+
basestring = str
|
| 34 |
+
else:
|
| 35 |
+
from io import open
|
| 36 |
+
|
| 37 |
+
__all__ = ['Box', 'ConfigBox', 'BoxList', 'SBox',
|
| 38 |
+
'BoxError', 'BoxKeyError']
|
| 39 |
+
__author__ = 'Chris Griffith'
|
| 40 |
+
__version__ = '3.2.4'
|
| 41 |
+
|
| 42 |
+
BOX_PARAMETERS = ('default_box', 'default_box_attr', 'conversion_box',
|
| 43 |
+
'frozen_box', 'camel_killer_box', 'box_it_up',
|
| 44 |
+
'box_safe_prefix', 'box_duplicates', 'ordered_box')
|
| 45 |
+
|
| 46 |
+
_first_cap_re = re.compile('(.)([A-Z][a-z]+)')
|
| 47 |
+
_all_cap_re = re.compile('([a-z0-9])([A-Z])')
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class BoxError(Exception):
    """Base of the Box exception hierarchy (non-standard dictionary errors)."""
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class BoxKeyError(BoxError, KeyError, AttributeError):
    """Key does not exist.

    Inherits both KeyError and AttributeError so it can be raised from
    item access and attribute access paths alike.
    """
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# Abstract converter functions for use in any Box class
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _to_json(obj, filename=None,
|
| 62 |
+
encoding="utf-8", errors="strict", **json_kwargs):
|
| 63 |
+
json_dump = json.dumps(obj,
|
| 64 |
+
ensure_ascii=False, **json_kwargs)
|
| 65 |
+
if filename:
|
| 66 |
+
with open(filename, 'w', encoding=encoding, errors=errors) as f:
|
| 67 |
+
f.write(json_dump if sys.version_info >= (3, 0) else
|
| 68 |
+
json_dump.decode("utf-8"))
|
| 69 |
+
else:
|
| 70 |
+
return json_dump
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _from_json(json_string=None, filename=None,
|
| 74 |
+
encoding="utf-8", errors="strict", multiline=False, **kwargs):
|
| 75 |
+
if filename:
|
| 76 |
+
with open(filename, 'r', encoding=encoding, errors=errors) as f:
|
| 77 |
+
if multiline:
|
| 78 |
+
data = [json.loads(line.strip(), **kwargs) for line in f
|
| 79 |
+
if line.strip() and not line.strip().startswith("#")]
|
| 80 |
+
else:
|
| 81 |
+
data = json.load(f, **kwargs)
|
| 82 |
+
elif json_string:
|
| 83 |
+
data = json.loads(json_string, **kwargs)
|
| 84 |
+
else:
|
| 85 |
+
raise BoxError('from_json requires a string or filename')
|
| 86 |
+
return data
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def _to_yaml(obj, filename=None, default_flow_style=False,
             encoding="utf-8", errors="strict",
             **yaml_kwargs):
    """Dump *obj* as YAML to *filename* when given, otherwise return the
    YAML string."""
    if not filename:
        return yaml.dump(obj,
                         default_flow_style=default_flow_style,
                         **yaml_kwargs)
    with open(filename, 'w',
              encoding=encoding, errors=errors) as f:
        yaml.dump(obj, stream=f,
                  default_flow_style=default_flow_style,
                  **yaml_kwargs)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _from_yaml(yaml_string=None, filename=None,
               encoding="utf-8", errors="strict",
               **kwargs):
    """Parse YAML from *filename* or from *yaml_string*.

    NOTE(review): yaml.load without an explicit Loader is unsafe on
    untrusted input; callers can pass Loader=yaml.SafeLoader via kwargs.
    """
    if filename:
        with open(filename, 'r',
                  encoding=encoding, errors=errors) as f:
            return yaml.load(f, **kwargs)
    if yaml_string:
        return yaml.load(yaml_string, **kwargs)
    raise BoxError('from_yaml requires a string or filename')
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
# Helper functions
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def _safe_key(key):
|
| 122 |
+
try:
|
| 123 |
+
return str(key)
|
| 124 |
+
except UnicodeEncodeError:
|
| 125 |
+
return key.encode("utf-8", "ignore")
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def _safe_attr(attr, camel_killer=False, replacement_char='x'):
    """Convert a key into something that is accessible as an attribute."""
    legal = string.ascii_letters + string.digits + '_'

    name = _safe_key(attr)
    if camel_killer:
        name = _camel_killer(name)
    name = name.replace(' ', '_')

    # Replace every disallowed character with an underscore, then trim.
    cleaned = ''.join(c if c in legal else '_' for c in name).strip('_')

    # Names may not start with a digit: prefix with the replacement char.
    # (Only ASCII digits can survive the filter above, so isdigit is exact.)
    if cleaned and cleaned[0].isdigit():
        cleaned = '{0}{1}'.format(replacement_char, cleaned)

    # Python keywords also need the prefix to be usable as attributes.
    if cleaned in kwlist:
        cleaned = '{0}{1}'.format(replacement_char, cleaned)

    return re.sub('_+', '_', cleaned)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def _camel_killer(attr):
    """
    CamelKiller, qu'est-ce que c'est?

    Converts CamelCase to snake_case.
    Taken from http://stackoverflow.com/a/1176023/3244542
    """
    try:
        text = str(attr)
    except UnicodeEncodeError:
        text = attr.encode("utf-8", "ignore")

    partial = _first_cap_re.sub(r'\1_\2', text)
    snake = _all_cap_re.sub(r'\1_\2', partial)
    lowered = snake.casefold() if hasattr(snake, 'casefold') else snake.lower()
    return re.sub('_+', '_', lowered)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def _recursive_tuples(iterable, box_class, recreate_tuples=False, **kwargs):
|
| 175 |
+
out_list = []
|
| 176 |
+
for i in iterable:
|
| 177 |
+
if isinstance(i, dict):
|
| 178 |
+
out_list.append(box_class(i, **kwargs))
|
| 179 |
+
elif isinstance(i, list) or (recreate_tuples and isinstance(i, tuple)):
|
| 180 |
+
out_list.append(_recursive_tuples(i, box_class,
|
| 181 |
+
recreate_tuples, **kwargs))
|
| 182 |
+
else:
|
| 183 |
+
out_list.append(i)
|
| 184 |
+
return tuple(out_list)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def _conversion_checks(item, keys, box_config, check_only=False,
                       pre_check=False):
    """
    Internal use for checking if a duplicate safe attribute already exists

    :param item: Item to see if a dup exists
    :param keys: Keys to check against
    :param box_config: Easier to pass in than ask for specific items
    :param check_only: Don't bother doing the conversion work
    :param pre_check: Need to add the item to the list of keys to check
    :return: the original unmodified key, if exists and not check_only
    """
    if box_config['box_duplicates'] != 'ignore':
        if pre_check:
            keys = list(keys) + [item]

        # Pair each key with its attribute-safe form so collisions between
        # distinct keys that convert to the same attribute can be detected.
        key_list = [(k,
                     _safe_attr(k, camel_killer=box_config['camel_killer_box'],
                                replacement_char=box_config['box_safe_prefix']
                                )) for k in keys]
        if len(key_list) > len(set(x[1] for x in key_list)):
            seen = set()
            dups = set()
            for x in key_list:
                if x[1] in seen:
                    dups.add("{0}({1})".format(x[0], x[1]))
                seen.add(x[1])
            # box_duplicates selects the severity: "warn*" warns, anything
            # else (besides 'ignore') raises.
            if box_config['box_duplicates'].startswith("warn"):
                warnings.warn('Duplicate conversion attributes exist: '
                              '{0}'.format(dups))
            else:
                raise BoxError('Duplicate conversion attributes exist: '
                               '{0}'.format(dups))
    if check_only:
        return
    # This way will be slower for warnings, as it will have double work
    # But faster for the default 'ignore'
    for k in keys:
        if item == _safe_attr(k, camel_killer=box_config['camel_killer_box'],
                              replacement_char=box_config['box_safe_prefix']):
            return k
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def _get_box_config(cls, kwargs):
|
| 231 |
+
return {
|
| 232 |
+
# Internal use only
|
| 233 |
+
'__converted': set(),
|
| 234 |
+
'__box_heritage': kwargs.pop('__box_heritage', None),
|
| 235 |
+
'__created': False,
|
| 236 |
+
'__ordered_box_values': [],
|
| 237 |
+
# Can be changed by user after box creation
|
| 238 |
+
'default_box': kwargs.pop('default_box', False),
|
| 239 |
+
'default_box_attr': kwargs.pop('default_box_attr', cls),
|
| 240 |
+
'conversion_box': kwargs.pop('conversion_box', True),
|
| 241 |
+
'box_safe_prefix': kwargs.pop('box_safe_prefix', 'x'),
|
| 242 |
+
'frozen_box': kwargs.pop('frozen_box', False),
|
| 243 |
+
'camel_killer_box': kwargs.pop('camel_killer_box', False),
|
| 244 |
+
'modify_tuples_box': kwargs.pop('modify_tuples_box', False),
|
| 245 |
+
'box_duplicates': kwargs.pop('box_duplicates', 'ignore'),
|
| 246 |
+
'ordered_box': kwargs.pop('ordered_box', False)
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
class Box(dict):
|
| 251 |
+
"""
|
| 252 |
+
Improved dictionary access through dot notation with additional tools.
|
| 253 |
+
|
| 254 |
+
:param default_box: Similar to defaultdict, return a default value
|
| 255 |
+
:param default_box_attr: Specify the default replacement.
|
| 256 |
+
WARNING: If this is not the default 'Box', it will not be recursive
|
| 257 |
+
:param frozen_box: After creation, the box cannot be modified
|
| 258 |
+
:param camel_killer_box: Convert CamelCase to snake_case
|
| 259 |
+
:param conversion_box: Check for near matching keys as attributes
|
| 260 |
+
:param modify_tuples_box: Recreate incoming tuples with dicts into Boxes
|
| 261 |
+
:param box_it_up: Recursively create all Boxes from the start
|
| 262 |
+
:param box_safe_prefix: Conversion box prefix for unsafe attributes
|
| 263 |
+
:param box_duplicates: "ignore", "error" or "warn" when duplicates exists
|
| 264 |
+
in a conversion_box
|
| 265 |
+
:param ordered_box: Preserve the order of keys entered into the box
|
| 266 |
+
"""
|
| 267 |
+
|
| 268 |
+
_protected_keys = dir({}) + ['to_dict', 'tree_view', 'to_json', 'to_yaml',
|
| 269 |
+
'from_yaml', 'from_json']
|
| 270 |
+
|
| 271 |
+
    def __new__(cls, *args, **kwargs):
        """
        Due to the way pickling works in python 3, we need to make sure
        the box config is created as early as possible.
        """
        # Attach _box_config here so attribute access already works before
        # __init__ runs (e.g. during unpickling, which bypasses __init__).
        obj = super(Box, cls).__new__(cls, *args, **kwargs)
        obj._box_config = _get_box_config(cls, kwargs)
        return obj
|
| 279 |
+
|
| 280 |
+
    def __init__(self, *args, **kwargs):
        """Build the box from an optional mapping/iterable plus keyword items.

        Box options (default_box, frozen_box, ...) are consumed from kwargs
        by _get_box_config; all remaining kwargs are stored as data.
        """
        self._box_config = _get_box_config(self.__class__, kwargs)
        if self._box_config['ordered_box']:
            # Fresh per-instance list; the one created in __new__ must not
            # be shared through the class default.
            self._box_config['__ordered_box_values'] = []
        if (not self._box_config['conversion_box'] and
                self._box_config['box_duplicates'] != "ignore"):
            raise BoxError('box_duplicates are only for conversion_boxes')
        if len(args) == 1:
            if isinstance(args[0], basestring):
                raise ValueError('Cannot extrapolate Box from string')
            if isinstance(args[0], Mapping):
                for k, v in args[0].items():
                    # A mapping that contains itself maps back to this box.
                    if v is args[0]:
                        v = self
                    self[k] = v
                    self.__add_ordered(k)
            elif isinstance(args[0], Iterable):
                # Iterable of (key, value) pairs, like dict() accepts.
                for k, v in args[0]:
                    self[k] = v
                    self.__add_ordered(k)

            else:
                raise ValueError('First argument must be mapping or iterable')
        elif args:
            raise TypeError('Box expected at most 1 argument, '
                            'got {0}'.format(len(args)))

        box_it = kwargs.pop('box_it_up', False)
        for k, v in kwargs.items():
            if args and isinstance(args[0], Mapping) and v is args[0]:
                v = self
            self[k] = v
            self.__add_ordered(k)

        if (self._box_config['frozen_box'] or box_it or
                self._box_config['box_duplicates'] != 'ignore'):
            # Freezing and duplicate checking require all sub-boxes to be
            # materialized now rather than lazily.
            self.box_it_up()

        self._box_config['__created'] = True
|
| 319 |
+
|
| 320 |
+
def __add_ordered(self, key):
|
| 321 |
+
if (self._box_config['ordered_box'] and
|
| 322 |
+
key not in self._box_config['__ordered_box_values']):
|
| 323 |
+
self._box_config['__ordered_box_values'].append(key)
|
| 324 |
+
|
| 325 |
+
    def box_it_up(self):
        """
        Perform value lookup for all items in current dictionary,
        generating all sub Box objects, while also running `box_it_up` on
        any of those sub box objects.
        """
        for k in self:
            # Raise/warn now if two keys would collide once converted to
            # safe attribute names (honors the box_duplicates setting).
            _conversion_checks(k, self.keys(), self._box_config,
                               check_only=True)
            # Accessing self[k] triggers lazy conversion of dict/list values
            # into Box/BoxList; then recurse into anything box-able.
            if self[k] is not self and hasattr(self[k], 'box_it_up'):
                self[k].box_it_up()
|
| 336 |
+
|
| 337 |
+
def __hash__(self):
|
| 338 |
+
if self._box_config['frozen_box']:
|
| 339 |
+
hashing = 54321
|
| 340 |
+
for item in self.items():
|
| 341 |
+
hashing ^= hash(item)
|
| 342 |
+
return hashing
|
| 343 |
+
raise TypeError("unhashable type: 'Box'")
|
| 344 |
+
|
| 345 |
+
    def __dir__(self):
        """Attribute listing for dir()/tab-completion: dict methods, Box
        helpers, and every key reachable via dot notation."""
        allowed = string.ascii_letters + string.digits + '_'
        kill_camel = self._box_config['camel_killer_box']
        items = set(dir(dict) + ['to_dict', 'to_json',
                                 'from_json', 'box_it_up'])
        # Only show items accessible by dot notation
        for key in self.keys():
            key = _safe_key(key)
            if (' ' not in key and key[0] not in string.digits and
                    key not in kwlist):
                for letter in key:
                    if letter not in allowed:
                        break
                else:
                    # All characters were legal identifier characters.
                    items.add(key)

        for key in self.keys():
            key = _safe_key(key)
            if key not in items:
                if self._box_config['conversion_box']:
                    # Conversion boxes expose a sanitized alias for keys
                    # that are not directly usable as attributes.
                    key = _safe_attr(key, camel_killer=kill_camel,
                                     replacement_char=self._box_config[
                                         'box_safe_prefix'])
                    if key:
                        items.add(key)
            if kill_camel:
                # Replace the CamelCase spelling with its snake_case alias.
                snake_key = _camel_killer(key)
                if snake_key:
                    items.remove(key)
                    items.add(snake_key)

        if yaml_support:
            items.add('to_yaml')
            items.add('from_yaml')

        return list(items)
|
| 381 |
+
|
| 382 |
+
def get(self, key, default=None):
|
| 383 |
+
try:
|
| 384 |
+
return self[key]
|
| 385 |
+
except KeyError:
|
| 386 |
+
if isinstance(default, dict) and not isinstance(default, Box):
|
| 387 |
+
return Box(default)
|
| 388 |
+
if isinstance(default, list) and not isinstance(default, BoxList):
|
| 389 |
+
return BoxList(default)
|
| 390 |
+
return default
|
| 391 |
+
|
| 392 |
+
def copy(self):
|
| 393 |
+
return self.__class__(super(self.__class__, self).copy())
|
| 394 |
+
|
| 395 |
+
def __copy__(self):
|
| 396 |
+
return self.__class__(super(self.__class__, self).copy())
|
| 397 |
+
|
| 398 |
+
def __deepcopy__(self, memodict=None):
|
| 399 |
+
out = self.__class__()
|
| 400 |
+
memodict = memodict or {}
|
| 401 |
+
memodict[id(self)] = out
|
| 402 |
+
for k, v in self.items():
|
| 403 |
+
out[copy.deepcopy(k, memodict)] = copy.deepcopy(v, memodict)
|
| 404 |
+
return out
|
| 405 |
+
|
| 406 |
+
    def __setstate__(self, state):
        # Restore _box_config first so the attribute machinery is usable
        # while the rest of the pickled instance dict is applied.
        self._box_config = state['_box_config']
        self.__dict__.update(state)
|
| 409 |
+
|
| 410 |
+
    def __getitem__(self, item, _ignore_default=False):
        """Item access; a missing key may be defaulted when default_box is
        enabled (unless _ignore_default is set by internal callers)."""
        try:
            value = super(Box, self).__getitem__(item)
        except KeyError as err:
            if item == '_box_config':
                # The config must never be materialized as an item, even by
                # a default_box, or attribute handling breaks.
                raise BoxKeyError('_box_config should only exist as an '
                                  'attribute and is never defaulted')
            if self._box_config['default_box'] and not _ignore_default:
                return self.__get_default(item)
            raise BoxKeyError(str(err))
        else:
            # Found: lazily convert plain dict/list values to Box/BoxList.
            return self.__convert_and_store(item, value)
|
| 422 |
+
|
| 423 |
+
def keys(self):
    """Keys of the box; honors insertion-order tracking when
    ``ordered_box`` is enabled."""
    if not self._box_config['ordered_box']:
        return super(Box, self).keys()
    return self._box_config['__ordered_box_values']
|
| 427 |
+
|
| 428 |
+
def values(self):
    """List of values, each run through item-access conversion."""
    return list(map(self.__getitem__, self.keys()))
|
| 430 |
+
|
| 431 |
+
def items(self):
    """List of ``(key, value)`` pairs; values go through conversion."""
    return [(key, self[key]) for key in self.keys()]
|
| 433 |
+
|
| 434 |
+
def __get_default(self, item):
    """Produce the value for a missing key on a ``default_box``."""
    attr = self._box_config['default_box_attr']
    if attr is self.__class__:
        # Default is the box class itself: spawn an empty child box that
        # writes itself back into this parent on first assignment.
        return self.__class__(__box_heritage=(self, item),
                              **self.__box_config())
    if isinstance(attr, Callable):
        return attr()
    if hasattr(attr, 'copy'):
        # Mutable defaults are copied so callers cannot share state
        return attr.copy()
    return attr
|
| 444 |
+
|
| 445 |
+
def __box_config(self):
    """Copy of the public configuration entries (the dunder-prefixed
    bookkeeping keys are internal and never propagated to children)."""
    return {key: value
            for key, value in self._box_config.copy().items()
            if not key.startswith("__")}
|
| 451 |
+
|
| 452 |
+
def __convert_and_store(self, item, value):
    """Convert a stored dict/list/tuple value to its boxed form, once.

    Conversion results are written back into the dict and the key is
    recorded in ``_box_config['__converted']`` so repeated access does not
    rebuild the wrappers.
    """
    if item in self._box_config['__converted']:
        return value
    if isinstance(value, dict) and not isinstance(value, Box):
        value = self.__class__(value, __box_heritage=(self, item),
                               **self.__box_config())
        self[item] = value
    elif isinstance(value, list) and not isinstance(value, BoxList):
        if self._box_config['frozen_box']:
            # Frozen boxes store immutable tuples instead of BoxLists
            value = _recursive_tuples(value, self.__class__,
                                      recreate_tuples=self._box_config[
                                          'modify_tuples_box'],
                                      __box_heritage=(self, item),
                                      **self.__box_config())
        else:
            value = BoxList(value, __box_heritage=(self, item),
                            box_class=self.__class__,
                            **self.__box_config())
        self[item] = value
    elif (self._box_config['modify_tuples_box'] and
            isinstance(value, tuple)):
        value = _recursive_tuples(value, self.__class__,
                                  recreate_tuples=True,
                                  __box_heritage=(self, item),
                                  **self.__box_config())
        self[item] = value
    # Mark as converted even for plain values so the check above short-circuits
    self._box_config['__converted'].add(item)
    return value
|
| 480 |
+
|
| 481 |
+
def __create_lineage(self):
    """Write this (default_box-spawned) box back into its parent once it
    actually holds data, then drop the parent link."""
    heritage = self._box_config['__box_heritage']
    if heritage and self._box_config['__created']:
        parent, key = heritage
        if not parent[key]:
            parent[key] = self
        self._box_config['__box_heritage'] = None
|
| 488 |
+
|
| 489 |
+
def __getattr__(self, item):
    """Attribute-access fallback: treat missing attributes as dict keys.

    Resolution order: real key (without creating defaults), real instance
    attribute, conversion-box safe-name alias, camel-killer alias, then
    the default_box machinery; finally BoxKeyError.
    """
    try:
        try:
            # Probe for a real key first; do not create a default yet
            value = self.__getitem__(item, _ignore_default=True)
        except KeyError:
            value = object.__getattribute__(self, item)
    except AttributeError as err:
        if item == "__getstate__":
            # Let pickle fall back to its normal protocol
            raise AttributeError(item)
        if item == '_box_config':
            raise BoxError('_box_config key must exist')
        kill_camel = self._box_config['camel_killer_box']
        if self._box_config['conversion_box'] and item:
            # e.g. attribute 'my_key' may alias stored key 'my key'
            k = _conversion_checks(item, self.keys(), self._box_config)
            if k:
                return self.__getitem__(k)
        if kill_camel:
            # snake_case attribute may alias a CamelCase key
            for k in self.keys():
                if item == _camel_killer(k):
                    return self.__getitem__(k)
        if self._box_config['default_box']:
            return self.__get_default(item)
        raise BoxKeyError(str(err))
    else:
        if item == '_box_config':
            return value
        return self.__convert_and_store(item, value)
|
| 516 |
+
|
| 517 |
+
def __setitem__(self, key, value):
    """Item assignment with frozen-box guard and key-safety checks."""
    if (key != '_box_config' and self._box_config['__created'] and
            self._box_config['frozen_box']):
        raise BoxError('Box is frozen')
    if self._box_config['conversion_box']:
        # Warn/raise when the key cannot be safely used as an attribute
        _conversion_checks(key, self.keys(), self._box_config,
                           check_only=True, pre_check=True)
    super(Box, self).__setitem__(key, value)
    self.__add_ordered(key)
    # default_box children write themselves into their parent here
    self.__create_lineage()
|
| 527 |
+
|
| 528 |
+
def __setattr__(self, key, value):
    """Attribute assignment that routes data keys into the dict.

    Real instance attributes (found via ``object.__getattribute__``) stay
    attributes; everything else is stored as an item, honoring
    conversion_box / camel_killer_box key aliasing.
    """
    if (key != '_box_config' and self._box_config['frozen_box'] and
            self._box_config['__created']):
        raise BoxError('Box is frozen')
    if key in self._protected_keys:
        raise AttributeError("Key name '{0}' is protected".format(key))
    if key == '_box_config':
        # The config dict itself must bypass the item machinery entirely
        return object.__setattr__(self, key, value)
    try:
        object.__getattribute__(self, key)
    except (AttributeError, UnicodeEncodeError):
        # Not an existing attribute: store as a dict item
        if (key not in self.keys() and
                (self._box_config['conversion_box'] or
                 self._box_config['camel_killer_box'])):
            if self._box_config['conversion_box']:
                # Write through the original key when this is a safe-name alias
                k = _conversion_checks(key, self.keys(),
                                       self._box_config)
                self[key if not k else k] = value
            elif self._box_config['camel_killer_box']:
                for each_key in self:
                    if key == _camel_killer(each_key):
                        self[each_key] = value
                        break
                else:
                    # for-else: no CamelCase alias found, store verbatim
                    self[key] = value
        else:
            self[key] = value
    else:
        object.__setattr__(self, key, value)
    self.__add_ordered(key)
    self.__create_lineage()
|
| 557 |
+
|
| 558 |
+
def __delitem__(self, key):
    """Delete *key*, keeping the ordered-key bookkeeping list in sync."""
    if self._box_config['frozen_box']:
        raise BoxError('Box is frozen')
    super(Box, self).__delitem__(key)
    ordered_keys = self._box_config['__ordered_box_values']
    if self._box_config['ordered_box'] and key in ordered_keys:
        ordered_keys.remove(key)
|
| 565 |
+
|
| 566 |
+
def __delattr__(self, item):
    """Attribute deletion; real attributes and dict keys both supported."""
    if self._box_config['frozen_box']:
        raise BoxError('Box is frozen')
    if item == '_box_config':
        raise BoxError('"_box_config" is protected')
    if item in self._protected_keys:
        raise AttributeError("Key name '{0}' is protected".format(item))
    try:
        object.__getattribute__(self, item)
    except AttributeError:
        # Not a real attribute -> delete the dict key instead
        del self[item]
    else:
        object.__delattr__(self, item)
    if (self._box_config['ordered_box'] and
            item in self._box_config['__ordered_box_values']):
        self._box_config['__ordered_box_values'].remove(item)
|
| 582 |
+
|
| 583 |
+
def pop(self, key, *args):
    """``dict.pop`` with Box error semantics: at most one *default*
    positional argument; missing key without default raises BoxKeyError."""
    if args:
        if len(args) != 1:
            raise BoxError('pop() takes only one optional'
                           ' argument "default"')
        try:
            value = self[key]
        except KeyError:
            return args[0]
        del self[key]
        return value
    try:
        value = self[key]
    except KeyError:
        raise BoxKeyError('{0}'.format(key))
    del self[key]
    return value
|
| 602 |
+
|
| 603 |
+
def clear(self):
    """Remove every item, including the ordered-key bookkeeping list."""
    self._box_config['__ordered_box_values'] = []
    super(Box, self).clear()
|
| 606 |
+
|
| 607 |
+
def popitem(self):
    """Remove and return the first ``(key, value)`` pair; BoxKeyError on
    an empty box."""
    for key in self:
        return key, self.pop(key)
    raise BoxKeyError('Empty box')
|
| 613 |
+
|
| 614 |
+
def __repr__(self):
    """Debug representation built from the plain-dict form."""
    return '<Box: {0}>'.format(str(self.to_dict()))
|
| 616 |
+
|
| 617 |
+
def __str__(self):
    """Render exactly like the equivalent plain dictionary."""
    return str(self.to_dict())
|
| 619 |
+
|
| 620 |
+
def __iter__(self):
    """Iterate keys (respects ordered_box ordering via ``self.keys()``)."""
    return iter(self.keys())
|
| 623 |
+
|
| 624 |
+
def __reversed__(self):
    """Iterate keys in reverse order."""
    return reversed(list(self.keys()))
|
| 627 |
+
|
| 628 |
+
def to_dict(self):
    """
    Turn the Box and sub Boxes back into a native
    python dictionary.

    :return: python dictionary of this Box
    """
    plain = dict(self)
    for key, value in plain.items():
        if value is self:
            # Preserve self-references instead of recursing forever
            plain[key] = plain
        elif hasattr(value, 'to_dict'):
            plain[key] = value.to_dict()
        elif hasattr(value, 'to_list'):
            plain[key] = value.to_list()
    return plain
|
| 644 |
+
|
| 645 |
+
def update(self, item=None, **kwargs):
    """Recursive ``dict.update`` that re-boxes incoming dicts and lists;
    existing dict values are merged rather than replaced."""
    data = item or kwargs
    pairs = data.items() if hasattr(data, 'items') else data
    for key, value in pairs:
        if isinstance(value, dict):
            # Box objects must be created in case they are already
            # in the `converted` box_config set
            value = self.__class__(value)
            if key in self and isinstance(self[key], dict):
                self[key].update(value)
                continue
        if isinstance(value, list):
            value = BoxList(value)
        try:
            self.__setattr__(key, value)
        except (AttributeError, TypeError):
            self.__setitem__(key, value)
|
| 663 |
+
|
| 664 |
+
def setdefault(self, item, default=None):
    """``dict.setdefault`` that boxes dict/list defaults before storing."""
    if item in self:
        return self[item]

    if isinstance(default, dict):
        default = self.__class__(default)
    elif isinstance(default, list):
        default = BoxList(default)
    self[item] = default
    return default
|
| 674 |
+
|
| 675 |
+
def to_json(self, filename=None,
            encoding="utf-8", errors="strict", **json_kwargs):
    """
    Transform the Box object into a JSON string.

    :param filename: If provided will save to file
    :param encoding: File encoding
    :param errors: How to handle encoding errors
    :param json_kwargs: additional arguments to pass to json.dump(s)
    :return: string of JSON or return of `json.dump`
    """
    # Serialization is delegated to the module-level _to_json helper
    return _to_json(self.to_dict(), filename=filename,
                    encoding=encoding, errors=errors, **json_kwargs)
|
| 688 |
+
|
| 689 |
+
@classmethod
def from_json(cls, json_string=None, filename=None,
              encoding="utf-8", errors="strict", **kwargs):
    """
    Transform a json object string into a Box object. If the incoming
    json is a list, you must use BoxList.from_json.

    :param json_string: string to pass to `json.loads`
    :param filename: filename to open and pass to `json.load`
    :param encoding: File encoding
    :param errors: How to handle encoding errors
    :param kwargs: parameters to pass to `Box()` or `json.loads`
    :return: Box object from json data
    """
    # Split Box() constructor options out of the JSON parser kwargs
    bx_args = {}
    for arg in list(kwargs):
        if arg in BOX_PARAMETERS:
            bx_args[arg] = kwargs.pop(arg)

    data = _from_json(json_string, filename=filename,
                      encoding=encoding, errors=errors, **kwargs)

    if not isinstance(data, dict):
        raise BoxError('json data not returned as a dictionary, '
                       'but rather a {0}'.format(type(data).__name__))
    return cls(data, **bx_args)
|
| 715 |
+
|
| 716 |
+
if yaml_support:
    def to_yaml(self, filename=None, default_flow_style=False,
                encoding="utf-8", errors="strict",
                **yaml_kwargs):
        """
        Transform the Box object into a YAML string.

        :param filename: If provided will save to file
        :param default_flow_style: False will recursively dump dicts
        :param encoding: File encoding
        :param errors: How to handle encoding errors
        :param yaml_kwargs: additional arguments to pass to yaml.dump
        :return: string of YAML or return of `yaml.dump`
        """
        return _to_yaml(self.to_dict(), filename=filename,
                        default_flow_style=default_flow_style,
                        encoding=encoding, errors=errors, **yaml_kwargs)

    @classmethod
    def from_yaml(cls, yaml_string=None, filename=None,
                  encoding="utf-8", errors="strict",
                  loader=yaml.SafeLoader, **kwargs):
        """
        Transform a yaml object string into a Box object.

        :param yaml_string: string to pass to `yaml.load`
        :param filename: filename to open and pass to `yaml.load`
        :param encoding: File encoding
        :param errors: How to handle encoding errors
        :param loader: YAML Loader, defaults to SafeLoader
        :param kwargs: parameters to pass to `Box()` or `yaml.load`
        :return: Box object from yaml data
        """
        # Split Box() constructor options out of the YAML parser kwargs
        bx_args = {}
        for arg in kwargs.copy():
            if arg in BOX_PARAMETERS:
                bx_args[arg] = kwargs.pop(arg)

        data = _from_yaml(yaml_string=yaml_string, filename=filename,
                          encoding=encoding, errors=errors,
                          Loader=loader, **kwargs)
        if not isinstance(data, dict):
            # Fixed: the two string literals previously concatenated with no
            # separator, producing "...dictionarybut rather..."
            raise BoxError('yaml data not returned as a dictionary '
                           'but rather a {0}'.format(type(data).__name__))
        return cls(data, **bx_args)
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
class BoxList(list):
    """
    Drop in replacement of list, that converts added objects to Box or BoxList
    objects as necessary.
    """

    def __init__(self, iterable=None, box_class=Box, **box_options):
        self.box_class = box_class
        self.box_options = box_options
        # Remember the source list's id so a self-referencing source list is
        # re-linked to this BoxList instead of recursing.  (Fixed the
        # original's duplicated `self.box_org_ref = self.box_org_ref = ...`.)
        self.box_org_ref = id(iterable) if iterable else 0
        if iterable:
            for x in iterable:
                self.append(x)
        if box_options.get('frozen_box'):
            def frozen(*args, **kwargs):
                raise BoxError('BoxList is frozen')

            # Disable every mutating list method on a frozen BoxList
            for method in ['append', 'extend', 'insert', 'pop',
                           'remove', 'reverse', 'sort']:
                self.__setattr__(method, frozen)

    def __delitem__(self, key):
        if self.box_options.get('frozen_box'):
            raise BoxError('BoxList is frozen')
        super(BoxList, self).__delitem__(key)

    def __setitem__(self, key, value):
        if self.box_options.get('frozen_box'):
            raise BoxError('BoxList is frozen')
        super(BoxList, self).__setitem__(key, value)

    def append(self, p_object):
        """Append, converting dicts to box_class and lists to BoxList."""
        if isinstance(p_object, dict):
            try:
                p_object = self.box_class(p_object, **self.box_options)
            except AttributeError as err:
                # During unpickling box_class may not be set yet; only
                # re-raise when it genuinely exists and conversion failed.
                if 'box_class' in self.__dict__:
                    raise err
        elif isinstance(p_object, list):
            try:
                p_object = (self if id(p_object) == self.box_org_ref else
                            BoxList(p_object))
            except AttributeError as err:
                if 'box_org_ref' in self.__dict__:
                    raise err
        super(BoxList, self).append(p_object)

    def extend(self, iterable):
        for item in iterable:
            self.append(item)

    def insert(self, index, p_object):
        if isinstance(p_object, dict):
            p_object = self.box_class(p_object, **self.box_options)
        elif isinstance(p_object, list):
            p_object = (self if id(p_object) == self.box_org_ref else
                        BoxList(p_object))
        super(BoxList, self).insert(index, p_object)

    def __repr__(self):
        return "<BoxList: {0}>".format(self.to_list())

    def __str__(self):
        return str(self.to_list())

    def __copy__(self):
        return BoxList((x for x in self),
                       self.box_class,
                       **self.box_options)

    def __deepcopy__(self, memodict=None):
        out = self.__class__()
        memodict = memodict or {}
        memodict[id(self)] = out
        for k in self:
            # Fixed: pass the memo dict through so self-referencing lists
            # do not recurse forever (the original called deepcopy(k) bare).
            out.append(copy.deepcopy(k, memodict))
        return out

    def __hash__(self):
        # Frozen (immutable) BoxLists are hashable, like tuples
        if self.box_options.get('frozen_box'):
            hashing = 98765
            hashing ^= hash(tuple(self))
            return hashing
        raise TypeError("unhashable type: 'BoxList'")

    def to_list(self):
        """Convert back to a plain list, recursing into Box and BoxList
        members and preserving self-references."""
        new_list = []
        for x in self:
            if x is self:
                new_list.append(new_list)
            elif isinstance(x, Box):
                new_list.append(x.to_dict())
            elif isinstance(x, BoxList):
                new_list.append(x.to_list())
            else:
                new_list.append(x)
        return new_list

    def to_json(self, filename=None,
                encoding="utf-8", errors="strict",
                multiline=False, **json_kwargs):
        """
        Transform the BoxList object into a JSON string.

        :param filename: If provided will save to file
        :param encoding: File encoding
        :param errors: How to handle encoding errors
        :param multiline: Put each item in list onto it's own line
        :param json_kwargs: additional arguments to pass to json.dump(s)
        :return: string of JSON or return of `json.dump`
        """
        if filename and multiline:
            lines = [_to_json(item, filename=False, encoding=encoding,
                              errors=errors, **json_kwargs) for item in self]
            with open(filename, 'w', encoding=encoding, errors=errors) as f:
                # Python 2 returns bytes from _to_json; decode before joining
                f.write("\n".join(lines).decode('utf-8') if
                        sys.version_info < (3, 0) else "\n".join(lines))
        else:
            return _to_json(self.to_list(), filename=filename,
                            encoding=encoding, errors=errors, **json_kwargs)

    @classmethod
    def from_json(cls, json_string=None, filename=None, encoding="utf-8",
                  errors="strict", multiline=False, **kwargs):
        """
        Transform a json object string into a BoxList object. If the incoming
        json is a dict, you must use Box.from_json.

        :param json_string: string to pass to `json.loads`
        :param filename: filename to open and pass to `json.load`
        :param encoding: File encoding
        :param errors: How to handle encoding errors
        :param multiline: One object per line
        :param kwargs: parameters to pass to `Box()` or `json.loads`
        :return: BoxList object from json data
        """
        # Split Box() constructor options out of the JSON parser kwargs
        bx_args = {}
        for arg in kwargs.copy():
            if arg in BOX_PARAMETERS:
                bx_args[arg] = kwargs.pop(arg)

        data = _from_json(json_string, filename=filename, encoding=encoding,
                          errors=errors, multiline=multiline, **kwargs)

        if not isinstance(data, list):
            raise BoxError('json data not returned as a list, '
                           'but rather a {0}'.format(type(data).__name__))
        return cls(data, **bx_args)

    if yaml_support:
        def to_yaml(self, filename=None, default_flow_style=False,
                    encoding="utf-8", errors="strict",
                    **yaml_kwargs):
            """
            Transform the BoxList object into a YAML string.

            :param filename: If provided will save to file
            :param default_flow_style: False will recursively dump dicts
            :param encoding: File encoding
            :param errors: How to handle encoding errors
            :param yaml_kwargs: additional arguments to pass to yaml.dump
            :return: string of YAML or return of `yaml.dump`
            """
            return _to_yaml(self.to_list(), filename=filename,
                            default_flow_style=default_flow_style,
                            encoding=encoding, errors=errors, **yaml_kwargs)

        @classmethod
        def from_yaml(cls, yaml_string=None, filename=None,
                      encoding="utf-8", errors="strict",
                      loader=yaml.SafeLoader,
                      **kwargs):
            """
            Transform a yaml object string into a BoxList object.

            :param yaml_string: string to pass to `yaml.load`
            :param filename: filename to open and pass to `yaml.load`
            :param encoding: File encoding
            :param errors: How to handle encoding errors
            :param loader: YAML Loader, defaults to SafeLoader
            :param kwargs: parameters to pass to `BoxList()` or `yaml.load`
            :return: BoxList object from yaml data
            """
            bx_args = {}
            for arg in kwargs.copy():
                if arg in BOX_PARAMETERS:
                    bx_args[arg] = kwargs.pop(arg)

            data = _from_yaml(yaml_string=yaml_string, filename=filename,
                              encoding=encoding, errors=errors,
                              Loader=loader, **kwargs)
            if not isinstance(data, list):
                # Fixed: the two literals previously concatenated without a
                # separating space ("listbut rather ...")
                raise BoxError('yaml data not returned as a list '
                               'but rather a {0}'.format(type(data).__name__))
            return cls(data, **bx_args)

    def box_it_up(self):
        """Force conversion of every member (recursively) right now."""
        for v in self:
            if hasattr(v, 'box_it_up') and v is not self:
                v.box_it_up()
|
| 963 |
+
|
| 964 |
+
|
| 965 |
+
class ConfigBox(Box):
    """
    Modified box object to add object transforms.

    Allows for build in transforms like:

    cns = ConfigBox(my_bool='yes', my_int='5', my_list='5,4,3,3,2')

    cns.bool('my_bool')  # True
    cns.int('my_int')  # 5
    cns.list('my_list', mod=lambda x: int(x))  # [5, 4, 3, 3, 2]
    """

    _protected_keys = dir({}) + ['to_dict', 'bool', 'int', 'float',
                                 'list', 'getboolean', 'to_json', 'to_yaml',
                                 'getfloat', 'getint',
                                 'from_json', 'from_yaml']

    def __getattr__(self, item):
        """Config file keys are stored in lower case, be a little more
        loosey goosey"""
        try:
            return super(ConfigBox, self).__getattr__(item)
        except AttributeError:
            return super(ConfigBox, self).__getattr__(item.lower())

    def __dir__(self):
        return super(ConfigBox, self).__dir__() + ['bool', 'int', 'float',
                                                   'list', 'getboolean',
                                                   'getfloat', 'getint']

    def bool(self, item, default=None):
        """ Return value of key as a boolean

        :param item: key of value to transform
        :param default: value to return if item does not exist
        :return: approximated bool of value
        """
        try:
            value = self.__getattr__(item)
        except AttributeError:
            if default is None:
                raise
            return default

        if isinstance(value, (bool, int)):
            return bool(value)

        # Common "no" spellings from config files parse as False
        if (isinstance(value, str) and
                value.lower() in ('n', 'no', 'false', 'f', '0')):
            return False

        return bool(value)

    def int(self, item, default=None):
        """ Return value of key as an int

        :param item: key of value to transform
        :param default: value to return if item does not exist
        :return: int of value
        """
        try:
            value = self.__getattr__(item)
        except AttributeError:
            if default is None:
                raise
            return default
        return int(value)

    def float(self, item, default=None):
        """ Return value of key as a float

        :param item: key of value to transform
        :param default: value to return if item does not exist
        :return: float of value
        """
        try:
            value = self.__getattr__(item)
        except AttributeError:
            if default is None:
                raise
            return default
        return float(value)

    def list(self, item, default=None, spliter=",", strip=True, mod=None):
        """ Return value of key as a list

        :param item: key of value to transform
        :param mod: function to map against list
        :param default: value to return if item does not exist
        :param spliter: character to split str on
        :param strip: clean the list with the `strip`
        :return: list of items
        """
        try:
            raw = self.__getattr__(item)
        except AttributeError:
            if default is None:
                raise
            return default
        if strip:
            raw = raw.lstrip('[').rstrip(']')
        pieces = [piece.strip() if strip else piece
                  for piece in raw.split(spliter)]
        return list(map(mod, pieces)) if mod else pieces

    # loose configparser compatibility

    def getboolean(self, item, default=None):
        return self.bool(item, default)

    def getint(self, item, default=None):
        return self.int(item, default)

    def getfloat(self, item, default=None):
        return self.float(item, default)

    def __repr__(self):
        return '<ConfigBox: {0}>'.format(str(self.to_dict()))
|
| 1085 |
+
|
| 1086 |
+
|
| 1087 |
+
class SBox(Box):
    """
    ShorthandBox (SBox) allows for
    property access of `dict` `json` and `yaml`
    """
    # Property names must be protected so they are never shadowed by keys
    _protected_keys = dir({}) + ['to_dict', 'tree_view', 'to_json', 'to_yaml',
                                 'json', 'yaml', 'from_yaml', 'from_json',
                                 'dict']

    @property
    def dict(self):
        """Plain-dictionary view of this box."""
        return self.to_dict()

    @property
    def json(self):
        """JSON-string view of this box."""
        return self.to_json()

    if yaml_support:
        @property
        def yaml(self):
            """YAML-string view of this box (only when yaml is installed)."""
            return self.to_yaml()

    def __repr__(self):
        return '<ShorthandBox: {0}>'.format(str(self.to_dict()))
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/config.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np

from neurvps.box import Box

# C is a dict storing all the configuration
# NOTE(review): presumably populated elsewhere at startup (e.g. from a
# YAML experiment file) -- verify against the training entry point.
C = Box()

# shortcut for C.model
M = Box()
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/datasets.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import math
|
| 4 |
+
import random
|
| 5 |
+
import os.path as osp
|
| 6 |
+
from glob import glob
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
import skimage.io
|
| 11 |
+
import numpy.linalg as LA
|
| 12 |
+
import matplotlib.pyplot as plt
|
| 13 |
+
import skimage.transform
|
| 14 |
+
from torch.utils.data import Dataset
|
| 15 |
+
from torch.utils.data.dataloader import default_collate
|
| 16 |
+
|
| 17 |
+
from neurvps.config import C
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class WireframeDataset(Dataset):
    """Wireframe vanishing-point dataset.

    Yields (image tensor of shape (3, H, W), {"vpts": vanishing points})
    pairs; labels come from the per-image ``*_label.npz`` files.
    """

    def __init__(self, rootdir, split):
        self.rootdir = rootdir
        filelist = sorted(glob(f"{rootdir}/*/*.png"))

        self.split = split
        if split == "train":
            # Everything past the first 500 images is training data;
            # epoch length scales with the augmentation level.
            self.filelist = filelist[500:]
            self.size = len(self.filelist) * C.io.augmentation_level
        elif split == "valid":
            # First 500 images, excluding augmented ("a1") variants
            self.filelist = [f for f in filelist[:500] if "a1" not in f]
            self.size = len(self.filelist)
        print(f"n{split}:", self.size)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        path = self.filelist[idx % len(self.filelist)]
        # HWC uint8 -> CHW float; drop any alpha channel
        image = skimage.io.imread(path).astype(float)[:, :, :3]
        image = np.rollaxis(image, 2).copy()
        with np.load(path.replace(".png", "_label.npz")) as npz:
            vpts = npz["vpts"]
        return (torch.tensor(image).float(), {"vpts": torch.tensor(vpts).float()})
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class ScanNetDataset(Dataset):
    """ScanNet vanishing-point dataset.

    Yields (image tensor of shape (3, H, W), {"vpts": three direction
    vectors}) pairs; directions come from the per-image ``vanish.npz``.
    """

    def __init__(self, rootdir, split):
        """
        :param rootdir: dataset root containing scannetv2_{split}.txt lists
        :param split: "train" or "valid"
        """
        self.rootdir = rootdir
        self.split = split

        dirs = np.genfromtxt(f"{rootdir}/scannetv2_{split}.txt", dtype=str)
        self.filelist = sum([glob(f"{rootdir}/{d}/*.png") for d in dirs], [])
        if split == "train":
            # Epoch length scales with the augmentation level
            self.size = len(self.filelist) * C.io.augmentation_level
        elif split == "valid":
            # Fixed seed -> reproducible 500-image validation subset
            random.seed(0)
            random.shuffle(self.filelist)
            self.filelist = self.filelist[:500]
            self.size = len(self.filelist)
        print(f"n{split}:", self.size)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        iname = self.filelist[idx % len(self.filelist)]
        image = skimage.io.imread(iname)[:, :, :3]
        with np.load(iname.replace("color.png", "vanish.npz")) as npz:
            vpts = np.array([npz[d] for d in ["x", "y", "z"]])
        # Negate the y component (presumably converting between camera and
        # image-coordinate conventions -- verify against the projection code)
        vpts[:, 1] *= -1
        # Fixed: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin `float` is the documented replacement.
        image = np.rollaxis(image.astype(float), 2).copy()
        return (torch.tensor(image).float(), {"vpts": torch.tensor(vpts).float()})
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class Tmm17Dataset(Dataset):
    """TMM17 dataset: one vanishing point per image, derived from two
    annotated line segments listed next to each image."""

    def __init__(self, rootdir, split):
        self.rootdir = rootdir
        self.split = split

        filelist = np.genfromtxt(f"{rootdir}/{split}.txt", dtype=str)
        self.filelist = [osp.join(rootdir, f) for f in filelist]
        if split == "train":
            # Epoch length scales with the augmentation level
            self.size = len(self.filelist) * C.io.augmentation_level
        elif split == "valid":
            self.size = len(self.filelist)
        print(f"n{split}:", self.size)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        iname = self.filelist[idx % len(self.filelist)]
        image = skimage.io.imread(iname)
        tname = iname.replace(".jpg", ".txt")
        # Two annotated segments, one per row -- presumably x0 y0 x1 y1;
        # verify against the annotation format.
        axy, bxy = np.genfromtxt(tname, skip_header=1)

        a0, a1 = np.array(axy[:2]), np.array(axy[2:])
        b0, b1 = np.array(bxy[:2]), np.array(bxy[2:])
        # Vanishing point = intersection of the two annotated lines
        # (`intersect` is defined elsewhere in this module)
        xy = intersect(a0, a1, b0, b1) - 0.5
        # Rescale the point into 512x512 canvas coordinates
        xy[0] *= 512 / image.shape[1]
        xy[1] *= 512 / image.shape[0]
        image = skimage.transform.resize(image, (512, 512))
        if image.ndim == 2:
            # Grayscale -> replicate to 3 channels
            image = image[:, :, None].repeat(3, 2)
        if self.split == "train":
            # Random-crop augmentation (`crop` defined elsewhere)
            i, j, h, w = crop(image.shape)
        else:
            i, j, h, w = 0, 0, image.shape[0], image.shape[1]
        image = skimage.transform.resize(image[j : j + h, i : i + w], (512, 512))
        # Move the vanishing point into the cropped/resized frame
        xy[1] = (xy[1] - j) / h * 512
        xy[0] = (xy[0] - i) / w * 512
        # plt.imshow(image)
        # plt.scatter(xy[0], xy[1])
        # plt.show()
        # Unit direction in camera space; z uses the configured focal length
        vpts = np.array([[xy[0] / 256 - 1, 1 - xy[1] / 256, C.io.focal_length]])
        vpts[0] /= LA.norm(vpts[0])

        # `augment` (defined elsewhere) applies the idx-dependent variant
        image, vpts = augment(image, vpts, idx // len(self.filelist))
        image = np.rollaxis(image, 2)
        return (torch.tensor(image * 255).float(), {"vpts": torch.tensor(vpts).float()})
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def augment(image, vpts, division):
    """Return a flipped copy of ``(image, vpts)`` selected by ``division``.

    division 1: left-right flip; 2: up-down flip; 3: both; anything else
    is the identity.  Vanishing-point component signs are flipped to stay
    consistent with the image flip.
    """
    flips = {
        1: (np.s_[:, ::-1], [-1, 1, 1]),      # left-right
        2: (np.s_[::-1, :], [1, -1, 1]),      # up-down
        3: (np.s_[::-1, ::-1], [-1, -1, 1]),  # both axes
    }
    if division in flips:
        view, signs = flips[division]
        return image[view].copy(), (vpts * signs).copy()
    return image, vpts
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def intersect(a0, a1, b0, b1):
    """Intersection point of line (a0, a1) with line (b0, b1).

    Each argument is a 2-vector endpoint.  The point is interpolated along
    whichever line yields the larger (better-conditioned) denominator.
    """

    def turn(origin, p, q):
        # z-component of the cross product (p - origin) x (q - origin)
        u, v = p - origin, q - origin
        return u[0] * v[1] - v[0] * u[1]

    c0 = turn(a0, a1, b0)
    c1 = turn(a0, a1, b1)
    d0 = turn(b0, b1, a0)
    d1 = turn(b0, b1, a1)
    # Prefer the parametrization with the numerically larger denominator.
    if abs(d1 - d0) > abs(c1 - c0):
        return (a0 * d1 - a1 * d0) / (d1 - d0)
    return (b0 * c1 - b1 * c0) / (c1 - c0)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def ccw(c, a, b):
    """Twice the signed area of triangle (c, a, b).

    Positive when going c -> a -> b turns counter-clockwise.
    """
    u = a - c
    v = b - c
    return u[0] * v[1] - v[0] * u[1]
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def crop(shape, scale=(0.35, 1.0), ratio=(9 / 16, 16 / 9)):
    """Sample a random crop window inside an image of the given ``shape``.

    Tries up to 20 times to draw a window whose area fraction lies in
    ``scale`` and aspect ratio in ``ratio``; on failure falls back to a
    centered square over the short side.  Returns ``(i, j, h, w)`` where
    ``i``/``j`` are the left/top offsets and ``w``/``h`` the size.
    """
    height, width = shape[0], shape[1]
    for _ in range(20):
        # NOTE: random calls are made in the same order on every path so
        # seeded runs stay reproducible.
        target_area = random.uniform(*scale) * height * width
        aspect = random.uniform(*ratio)

        w = int(round(math.sqrt(target_area * aspect)))
        h = int(round(math.sqrt(target_area / aspect)))

        # Randomly transpose the window to cover both orientations.
        if random.random() < 0.5:
            w, h = h, w

        if h <= height and w <= width:
            j = random.randint(0, height - h)
            i = random.randint(0, width - w)
            return i, j, h, w

    # Fallback: centered square crop of the short side.
    side = min(height, width)
    return (width - side) // 2, (height - side) // 2, side, side
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .hourglass_pose import hg
|
| 2 |
+
from .vanishing_net import VanishingNet
|
vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (249 Bytes). View file
|
|
|