donquicode committed
Commit 4999c45 (verified) · Parent: c889058

Upload folder using huggingface_hub

Files changed (8):
  1. .gitignore +12 -0
  2. README.md +24 -3
  3. S23DR_writeup.pdf +0 -0
  4. hoho2025/__init__.py +0 -0
  5. hoho2025/color_mappings.py +209 -0
  6. requirements.txt +8 -0
  7. script.py +76 -0
  8. solution.py +492 -0
.gitignore ADDED
@@ -0,0 +1,12 @@
+ # Python virtual environment
+ .venv/
+ venv/
+
+ # Python cache files
+ __pycache__/
+ *.pyc
+
+ # IDE and OS files
+ .idea/
+ .vscode/
+ .DS_Store
README.md CHANGED
@@ -1,3 +1,24 @@
- ---
- license: apache-2.0
- ---
+ # S23DR 2025 Challenge: 4th Place Solution
+
+ This repository contains the 4th place solution for the S23DR 2025 Challenge. For more details, refer to the writeup included in this repository (`S23DR_writeup.pdf`).
+
+ ## Approach
+
+ The solution is a fully handcrafted pipeline based on classical computer vision and geometry. It builds on the official baseline with improvements in edge detection, depth assignment, and 3D merging. No learned models are used.
+
+ ## Final Score
+
+ - **HSS Mean:** 0.3309
+ - **Corner F1:** 0.4310
+ - **Edge IoU:** 0.2773
+
+ ## How to Run
+
+ The competition platform executes `script.py` to generate predictions. To run locally, install the dependencies (`pip install -r requirements.txt`), load a dataset entry, and call the `predict_wireframe` function from `solution.py` (see the sketch after this diff).
+
+ ## File Structure
+
+ - `solution.py`: The main logic of the wireframe prediction pipeline.
+ - `script.py`: The entry-point script for the Hugging Face competition platform.
+ - `requirements.txt`: Lists all Python dependencies.
+ - `hoho2025/`: A helper module containing color mappings.
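
Below is a minimal local-run sketch for the "How to Run" section above. It assumes the hoho25k test data and its loading script are available locally; the data path and split name here are placeholders, not part of this repository.

```python
# Minimal local-run sketch (assumed paths; adjust to your setup).
from datasets import load_dataset

from solution import predict_wireframe

# Placeholder path to the competition's dataset loading script.
dataset = load_dataset("/path/to/hoho25k_test_x.py", trust_remote_code=True)

entry = dataset["validation"][0]            # one building entry
vertices, edges = predict_wireframe(entry)  # (N, 3) vertices, list of index pairs
print(f"Predicted {len(vertices)} vertices and {len(edges)} edges")
```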
S23DR_writeup.pdf ADDED
Binary file (71.4 kB)
hoho2025/__init__.py ADDED
File without changes
hoho2025/color_mappings.py ADDED
@@ -0,0 +1,209 @@
+ gestalt_color_mapping = {
+     "unclassified": (215, 62, 138),
+     "apex": (235, 88, 48),
+     "eave_end_point": (248, 130, 228),
+     "flashing_end_point": (71, 11, 161),
+     "ridge": (214, 251, 248),
+     "rake": (13, 94, 47),
+     "eave": (54, 243, 63),
+     "post": (187, 123, 236),
+     "ground_line": (136, 206, 14),
+     "flashing": (162, 162, 32),
+     "step_flashing": (169, 255, 219),
+     "hip": (8, 89, 52),
+     "valley": (85, 27, 65),
+     "roof": (215, 232, 179),
+     "door": (110, 52, 23),
+     "garage": (50, 233, 171),
+     "window": (230, 249, 40),
+     "shutter": (122, 4, 233),
+     "fascia": (95, 230, 240),
+     "soffit": (2, 102, 197),
+     "horizontal_siding": (131, 88, 59),
+     "vertical_siding": (110, 187, 198),
+     "brick": (171, 252, 7),
+     "concrete": (32, 47, 246),
+     "other_wall": (112, 61, 240),
+     "trim": (151, 206, 58),
+     "unknown": (127, 127, 127),
+     "transition_line": (0, 0, 0),
+ }
+
+ ade20k_color_mapping = {
+     'wall': (120, 120, 120),
+     'building;edifice': (180, 120, 120),
+     'sky': (6, 230, 230),
+     'floor;flooring': (80, 50, 50),
+     'tree': (4, 200, 3),
+     'ceiling': (120, 120, 80),
+     'road;route': (140, 140, 140),
+     'bed': (204, 5, 255),
+     'windowpane;window': (230, 230, 230),
+     'grass': (4, 250, 7),
+     'cabinet': (224, 5, 255),
+     'sidewalk;pavement': (235, 255, 7),
+     'person;individual;someone;somebody;mortal;soul': (150, 5, 61),
+     'earth;ground': (120, 120, 70),
+     'door;double;door': (8, 255, 51),
+     'table': (255, 6, 82),
+     'mountain;mount': (143, 255, 140),
+     'plant;flora;plant;life': (204, 255, 4),
+     'curtain;drape;drapery;mantle;pall': (255, 51, 7),
+     'chair': (204, 70, 3),
+     'car;auto;automobile;machine;motorcar': (0, 102, 200),
+     'water': (61, 230, 250),
+     'painting;picture': (255, 6, 51),
+     'sofa;couch;lounge': (11, 102, 255),
+     'shelf': (255, 7, 71),
+     'house': (255, 9, 224),
+     'sea': (9, 7, 230),
+     'mirror': (220, 220, 220),
+     'rug;carpet;carpeting': (255, 9, 92),
+     'field': (112, 9, 255),
+     'armchair': (8, 255, 214),
+     'seat': (7, 255, 224),
+     'fence;fencing': (255, 184, 6),
+     'desk': (10, 255, 71),
+     'rock;stone': (255, 41, 10),
+     'wardrobe;closet;press': (7, 255, 255),
+     'lamp': (224, 255, 8),
+     'bathtub;bathing;tub;bath;tub': (102, 8, 255),
+     'railing;rail': (255, 61, 6),
+     'cushion': (255, 194, 7),
+     'base;pedestal;stand': (255, 122, 8),
+     'box': (0, 255, 20),
+     'column;pillar': (255, 8, 41),
+     'signboard;sign': (255, 5, 153),
+     'chest;of;drawers;chest;bureau;dresser': (6, 51, 255),
+     'counter': (235, 12, 255),
+     'sand': (160, 150, 20),
+     'sink': (0, 163, 255),
+     'skyscraper': (140, 140, 140),
+     'fireplace;hearth;open;fireplace': (250, 10, 15),
+     'refrigerator;icebox': (20, 255, 0),
+     'grandstand;covered;stand': (31, 255, 0),
+     'path': (255, 31, 0),
+     'stairs;steps': (255, 224, 0),
+     'runway': (153, 255, 0),
+     'case;display;case;showcase;vitrine': (0, 0, 255),
+     'pool;table;billiard;table;snooker;table': (255, 71, 0),
+     'pillow': (0, 235, 255),
+     'screen;door;screen': (0, 173, 255),
+     'stairway;staircase': (31, 0, 255),
+     'river': (11, 200, 200),
+     'bridge;span': (255, 82, 0),
+     'bookcase': (0, 255, 245),
+     'blind;screen': (0, 61, 255),
+     'coffee;table;cocktail;table': (0, 255, 112),
+     'toilet;can;commode;crapper;pot;potty;stool;throne': (0, 255, 133),
+     'flower': (255, 0, 0),
+     'book': (255, 163, 0),
+     'hill': (255, 102, 0),
+     'bench': (194, 255, 0),
+     'countertop': (0, 143, 255),
+     'stove;kitchen;stove;range;kitchen;range;cooking;stove': (51, 255, 0),
+     'palm;palm;tree': (0, 82, 255),
+     'kitchen;island': (0, 255, 41),
+     'computer;computing;machine;computing;device;data;processor;electronic;computer;information;processing;system': (0, 255, 173),
+     'swivel;chair': (10, 0, 255),
+     'boat': (173, 255, 0),
+     'bar': (0, 255, 153),
+     'arcade;machine': (255, 92, 0),
+     'hovel;hut;hutch;shack;shanty': (255, 0, 255),
+     'bus;autobus;coach;charabanc;double-decker;jitney;motorbus;motorcoach;omnibus;passenger;vehicle': (255, 0, 245),
+     'towel': (255, 0, 102),
+     'light;light;source': (255, 173, 0),
+     'truck;motortruck': (255, 0, 20),
+     'tower': (255, 184, 184),
+     'chandelier;pendant;pendent': (0, 31, 255),
+     'awning;sunshade;sunblind': (0, 255, 61),
+     'streetlight;street;lamp': (0, 71, 255),
+     'booth;cubicle;stall;kiosk': (255, 0, 204),
+     'television;television;receiver;television;set;tv;tv;set;idiot;box;boob;tube;telly;goggle;box': (0, 255, 194),
+     'airplane;aeroplane;plane': (0, 255, 82),
+     'dirt;track': (0, 10, 255),
+     'apparel;wearing;apparel;dress;clothes': (0, 112, 255),
+     'pole': (51, 0, 255),
+     'land;ground;soil': (0, 194, 255),
+     'bannister;banister;balustrade;balusters;handrail': (0, 122, 255),
+     'escalator;moving;staircase;moving;stairway': (0, 255, 163),
+     'ottoman;pouf;pouffe;puff;hassock': (255, 153, 0),
+     'bottle': (0, 255, 10),
+     'buffet;counter;sideboard': (255, 112, 0),
+     'poster;posting;placard;notice;bill;card': (143, 255, 0),
+     'stage': (82, 0, 255),
+     'van': (163, 255, 0),
+     'ship': (255, 235, 0),
+     'fountain': (8, 184, 170),
+     'conveyer;belt;conveyor;belt;conveyer;conveyor;transporter': (133, 0, 255),
+     'canopy': (0, 255, 92),
+     'washer;automatic;washer;washing;machine': (184, 0, 255),
+     'plaything;toy': (255, 0, 31),
+     'swimming;pool;swimming;bath;natatorium': (0, 184, 255),
+     'stool': (0, 214, 255),
+     'barrel;cask': (255, 0, 112),
+     'basket;handbasket': (92, 255, 0),
+     'waterfall;falls': (0, 224, 255),
+     'tent;collapsible;shelter': (112, 224, 255),
+     'bag': (70, 184, 160),
+     'minibike;motorbike': (163, 0, 255),
+     'cradle': (153, 0, 255),
+     'oven': (71, 255, 0),
+     'ball': (255, 0, 163),
+     'food;solid;food': (255, 204, 0),
+     'step;stair': (255, 0, 143),
+     'tank;storage;tank': (0, 255, 235),
+     'trade;name;brand;name;brand;marque': (133, 255, 0),
+     'microwave;microwave;oven': (255, 0, 235),
+     'pot;flowerpot': (245, 0, 255),
+     'animal;animate;being;beast;brute;creature;fauna': (255, 0, 122),
+     'bicycle;bike;wheel;cycle': (255, 245, 0),
+     'lake': (10, 190, 212),
+     'dishwasher;dish;washer;dishwashing;machine': (214, 255, 0),
+     'screen;silver;screen;projection;screen': (0, 204, 255),
+     'blanket;cover': (20, 0, 255),
+     'sculpture': (255, 255, 0),
+     'hood;exhaust;hood': (0, 153, 255),
+     'sconce': (0, 41, 255),
+     'vase': (0, 255, 204),
+     'traffic;light;traffic;signal;stoplight': (41, 0, 255),
+     'tray': (41, 255, 0),
+     'ashcan;trash;can;garbage;can;wastebin;ash;bin;ash-bin;ashbin;dustbin;trash;barrel;trash;bin': (173, 0, 255),
+     'fan': (0, 245, 255),
+     'pier;wharf;wharfage;dock': (71, 0, 255),
+     'crt;screen': (122, 0, 255),
+     'plate': (0, 255, 184),
+     'monitor;monitoring;device': (0, 92, 255),
+     'bulletin;board;notice;board': (184, 255, 0),
+     'shower': (0, 133, 255),
+     'radiator': (255, 214, 0),
+     'glass;drinking;glass': (25, 194, 194),
+     'clock': (102, 255, 0),
+     'flag': (92, 0, 255),
+ }
+
+
+ EDGE_CLASSES = {'cornice_return': 0,
+                 'cornice_strip': 1,
+                 'eave': 2,
+                 'flashing': 3,
+                 'hip': 4,
+                 'rake': 5,
+                 'ridge': 6,
+                 'step_flashing': 7,
+                 'transition_line': 8,
+                 'valley': 9}
+ EDGE_CLASSES_BY_ID = {v: k for k, v in EDGE_CLASSES.items()}
+
+ edge_color_mapping = {
+     'cornice_return': (215, 62, 138),
+     'cornice_strip': (235, 88, 48),
+     'eave': (54, 243, 63),
+     'flashing': (162, 162, 32),
+     'hip': (8, 89, 52),
+     'rake': (13, 94, 47),
+     'ridge': (214, 251, 248),
+     'step_flashing': (169, 255, 219),
+     'transition_line': (200, 0, 50),
+     'valley': (85, 27, 65),
+ }
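
For reference, a minimal sketch of how these color mappings are consumed downstream; this mirrors the masking logic in `solution.py`, with `gest_seg_np` as a stand-in for a real segmentation image:

```python
# Sketch: build a binary mask for one gestalt class from an RGB segmentation.
import cv2
import numpy as np

from hoho2025.color_mappings import gestalt_color_mapping

gest_seg_np = np.zeros((64, 64, 3), dtype=np.uint8)  # stand-in (H, W, 3) image
color = np.array(gestalt_color_mapping["apex"])
# Exact color match with a +/-0.5 tolerance, as in solution.py.
mask = cv2.inRange(gest_seg_np, color - 0.5, color + 0.5)
print(mask.shape, mask.dtype)  # (64, 64) uint8; 255 where the class is present
```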
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ numpy
+ opencv-python
+ scikit-learn
+ scipy
+ pycolmap
+ Pillow
+ pyarrow
+ pandas
script.py ADDED
@@ -0,0 +1,76 @@
+ import gc
+ from pathlib import Path
+
+ import numpy as np
+ import pandas as pd
+ from datasets import load_dataset
+ from tqdm import tqdm
+ from solution import predict_wireframe
+
+
+ def empty_solution():
+     """Return a minimal valid solution in case of an error."""
+     return np.zeros((2, 3)), []
+
+
+ def main():
+     """
+     Main script for the S23DR 2025 Challenge.
+     This script loads the test dataset using the competition's specific
+     method, runs the prediction pipeline, and saves the results.
+     """
+     print("------------ Setting up data paths ------------")
+     # This is the path where data is stored in the submission environment.
+     data_path = Path('/tmp/data')
+
+     print("------------ Loading dataset ------------")
+     # This data loading logic is preserved from the original script to ensure
+     # compatibility with the submission environment.
+     data_files = {
+         "validation": [str(p) for p in data_path.rglob('*public*/**/*.tar')],
+         "test": [str(p) for p in data_path.rglob('*private*/**/*.tar')],
+     }
+     print(f"Found data files: {data_files}")
+
+     dataset = load_dataset(
+         str(data_path / 'hoho25k_test_x.py'),
+         data_files=data_files,
+         trust_remote_code=True,
+         writer_batch_size=100,
+     )
+     print(f"Dataset loaded successfully: {dataset}")
+
+     print('------------ Starting prediction loop ---------------')
+     solution = []
+     for subset_name in dataset.keys():
+         print(f"Predicting for subset: {subset_name}")
+         for i, entry in enumerate(tqdm(dataset[subset_name], desc=f"Processing {subset_name}")):
+             try:
+                 # Run the prediction pipeline.
+                 pred_vertices, pred_edges = predict_wireframe(entry)
+             except Exception as e:
+                 # If the pipeline fails, provide an empty solution and log the error.
+                 print(f"Error processing sample {entry.get('order_id', 'UNKNOWN')}: {e}")
+                 pred_vertices, pred_edges = empty_solution()
+
+             # Append the result in the required format.
+             solution.append(
+                 {
+                     'order_id': entry['order_id'],
+                     'wf_vertices': pred_vertices.tolist(),
+                     'wf_edges': pred_edges,
+                 }
+             )
+
+             # Periodically run garbage collection to manage memory.
+             if (i + 1) % 50 == 0:
+                 gc.collect()
+
+     print('------------ Saving results ---------------')
+     sub = pd.DataFrame(solution, columns=["order_id", "wf_vertices", "wf_edges"])
+     sub.to_parquet("submission.parquet", index=False)
+     print("------------ Done ------------")
+
+
+ if __name__ == "__main__":
+     main()
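
The saved parquet can be inspected locally with pandas; a quick sanity-check sketch (assuming a `submission.parquet` produced by the script above):

```python
# Sanity-check sketch for the generated submission file.
import pandas as pd

sub = pd.read_parquet("submission.parquet")
print(sub.columns.tolist())  # ['order_id', 'wf_vertices', 'wf_edges']
row = sub.iloc[0]
print(row["order_id"], len(row["wf_vertices"]), len(row["wf_edges"]))
```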
solution.py ADDED
@@ -0,0 +1,492 @@
+ import io
+ import tempfile
+ import zipfile
+ from collections import defaultdict
+ from typing import Tuple, List, Dict, Any
+
+ import cv2
+ import numpy as np
+ import pycolmap
+ from PIL import Image as PImage
+ from scipy.spatial.distance import cdist
+ from sklearn.cluster import DBSCAN
+
+ from hoho2025.color_mappings import ade20k_color_mapping, gestalt_color_mapping
+
+
+ class Config:
+     """Configuration for the wireframe extraction pipeline."""
+     EDGE_THRESHOLD = 15.5
+     MERGE_THRESHOLD = 0.65
+     PRUNE_DISTANCE = 3.0
+     SEARCH_RADIUS = 14
+     MIN_EDGE_LENGTH = 0.15
+     MAX_EDGE_LENGTH = 27.0
+     MORPHOLOGY_KERNEL = 3
+     REFINEMENT_CLUSTER_EPS = 0.45
+     REFINEMENT_MIN_SAMPLES = 1
+
+
+ def empty_solution() -> Tuple[np.ndarray, List[Tuple[int, int]]]:
+     """Returns an empty wireframe solution."""
+     return np.zeros((2, 3)), [(0, 1)]
+
+
+ def get_vertices_and_edges_from_segmentation(
+     gest_seg_np: np.ndarray, edge_th: float = Config.EDGE_THRESHOLD
+ ) -> Tuple[List[Dict[str, Any]], List[Tuple[int, int]]]:
+     """
+     Detects roof vertices and edges from a Gestalt segmentation mask.
+
+     Args:
+         gest_seg_np: Segmentation mask as a numpy array.
+         edge_th: Distance threshold for associating edges with vertices.
+
+     Returns:
+         vertices: List of detected vertices with coordinates and type.
+         connections: List of edge connections (vertex index pairs).
+     """
+     vertices, connections = [], []
+     if not isinstance(gest_seg_np, np.ndarray):
+         gest_seg_np = np.array(gest_seg_np)
+
+     # Vertex detection
+     for v_type, color_name in [("apex", "apex"), ("eave_end_point", "eave_end_point")]:
+         color = np.array(gestalt_color_mapping[color_name])
+         mask = cv2.inRange(gest_seg_np, color - 0.5, color + 0.5)
+         if mask.sum() == 0:
+             continue
+
+         output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S)
+         numLabels, labels, stats, centroids = output
+         for i in range(1, numLabels):
+             mask_i = (labels == i).astype(np.uint8)
+             M = cv2.moments(mask_i)
+             if M["m00"] > 0:
+                 cx, cy = M["m10"] / M["m00"], M["m01"] / M["m00"]
+             else:
+                 ys, xs = np.where(mask_i)
+                 if len(xs) > 0:
+                     cx, cy = np.mean(xs), np.mean(ys)
+                 else:
+                     continue
+             vertices.append({"xy": np.array([cx, cy]), "type": v_type})
+
+     if not vertices:
+         return [], []
+
+     apex_pts = np.array([v['xy'] for v in vertices])
+
+     # Edge detection
+     edge_classes = ['eave', 'ridge', 'rake', 'valley']
+     for edge_class in edge_classes:
+         if edge_class not in gestalt_color_mapping:
+             continue
+         edge_color = np.array(gestalt_color_mapping[edge_class])
+         mask_raw = cv2.inRange(gest_seg_np, edge_color - 0.5, edge_color + 0.5)
+
+         # Improved morphology: close, then open
+         kernel = np.ones((Config.MORPHOLOGY_KERNEL, Config.MORPHOLOGY_KERNEL), np.uint8)
+         mask = cv2.morphologyEx(mask_raw, cv2.MORPH_CLOSE, kernel)
+         mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((2, 2), np.uint8))
+
+         if mask.sum() == 0:
+             continue
+
+         output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S)
+         numLabels, labels, stats, centroids = output
+         for lbl in range(1, numLabels):
+             ys, xs = np.where(labels == lbl)
+             if len(xs) < 2:
+                 continue
+             pts_for_fit = np.column_stack([xs, ys]).astype(np.float32)
+             line_params = cv2.fitLine(
+                 pts_for_fit, distType=cv2.DIST_L2, param=0, reps=0.01, aeps=0.01
+             )
+             vx, vy, x0, y0 = line_params.ravel()
+             proj = ((xs - x0) * vx + (ys - y0) * vy)
+             p1 = np.array([x0 + proj.min() * vx, y0 + proj.min() * vy])
+             p2 = np.array([x0 + proj.max() * vx, y0 + proj.max() * vy])
+             if len(apex_pts) < 2:
+                 continue
+             dists = np.array([point_to_segment_dist(apex_pts[i], p1, p2) for i in range(len(apex_pts))])
+             near_indices = np.where(dists <= edge_th)[0]
+             if len(near_indices) < 2:
+                 continue
+             for i in range(len(near_indices)):
+                 for j in range(i + 1, len(near_indices)):
+                     conn = tuple(sorted((near_indices[i], near_indices[j])))
+                     if conn not in connections:
+                         connections.append(conn)
+     return vertices, connections
+
+
+ def get_uv_depth(
+     vertices: List[Dict[str, Any]],
+     depth_fitted: np.ndarray,
+     sparse_depth: np.ndarray,
+     search_radius: int = Config.SEARCH_RADIUS,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """
+     Assigns depth to each vertex using a weighted search in the sparse depth map.
+
+     Args:
+         vertices: List of detected vertices.
+         depth_fitted: Dense depth map.
+         sparse_depth: Sparse depth map.
+         search_radius: Search radius for depth assignment.
+
+     Returns:
+         uv: 2D coordinates of vertices.
+         vertex_depth: Depth values for each vertex.
+     """
+     uv = np.array([vert['xy'] for vert in vertices], dtype=np.float32)
+     uv_int = np.round(uv).astype(np.int32)
+     H, W = depth_fitted.shape[:2]
+     uv_int[:, 0] = np.clip(uv_int[:, 0], 0, W - 1)
+     uv_int[:, 1] = np.clip(uv_int[:, 1], 0, H - 1)
+     vertex_depth = np.zeros(len(vertices), dtype=np.float32)
+
+     for i, (x_i, y_i) in enumerate(uv_int):
+         x0, x1 = max(0, x_i - search_radius), min(W, x_i + search_radius + 1)
+         y0, y1 = max(0, y_i - search_radius), min(H, y_i + search_radius + 1)
+         region = sparse_depth[y0:y1, x0:x1]
+         valid_y, valid_x = np.where(region > 0)
+
+         if valid_y.size > 0:
+             global_x, global_y = x0 + valid_x, y0 + valid_y
+             dist_sq = (global_x - x_i) ** 2 + (global_y - y_i) ** 2
+             weights = np.exp(-dist_sq / (2 * (search_radius / 3) ** 2))  # Gaussian falloff
+             vertex_depth[i] = np.sum(weights * region[valid_y, valid_x]) / np.sum(weights)
+         else:
+             vertex_depth[i] = depth_fitted[y_i, x_i]  # fall back to the fitted dense depth
+
+     return uv, vertex_depth
+
+
+ def read_colmap_rec(colmap_data: bytes) -> pycolmap.Reconstruction:
+     """Reads a COLMAP reconstruction from a zipped binary."""
+     with tempfile.TemporaryDirectory() as tmpdir:
+         with zipfile.ZipFile(io.BytesIO(colmap_data), "r") as zf:
+             zf.extractall(tmpdir)
+         rec = pycolmap.Reconstruction(tmpdir)
+         return rec
+
+
+ def convert_entry_to_human_readable(entry: Dict[str, Any]) -> Dict[str, Any]:
+     """Converts a raw entry to a human-readable format."""
+     out = {}
+     for k, v in entry.items():
+         if k == 'colmap_binary':
+             out[k] = read_colmap_rec(v)
+         elif k in ['K', 'R', 't']:
+             try:
+                 out[k] = np.array(v)
+             except ValueError:
+                 out[k] = v
+         else:
+             out[k] = v
+     out['__key__'] = entry.get('order_id', 'unknown_id')
+     return out
+
+
+ def get_house_mask(ade20k_seg: np.ndarray) -> np.ndarray:
+     """Returns a mask of house/building regions from an ADE20K segmentation."""
+     house_classes_ade20k = [
+         'wall', 'house', 'building;edifice', 'door;double;door', 'windowpane;window'
+     ]
+     np_seg = np.array(ade20k_seg)
+     full_mask = np.zeros(np_seg.shape[:2], dtype=np.uint8)
+     for c in house_classes_ade20k:
+         if c in ade20k_color_mapping:
+             color = np.array(ade20k_color_mapping[c])
+             mask = cv2.inRange(np_seg, color - 0.5, color + 0.5)
+             full_mask = np.logical_or(full_mask, mask)
+     return full_mask
+
+
+ def point_to_segment_dist(pt: np.ndarray, seg_p1: np.ndarray, seg_p2: np.ndarray) -> float:
+     """Computes the distance from a point to a line segment."""
+     if np.allclose(seg_p1, seg_p2):
+         return np.linalg.norm(pt - seg_p1)
+     seg_vec = seg_p2 - seg_p1
+     pt_vec = pt - seg_p1
+     seg_len2 = seg_vec.dot(seg_vec)
+     t = max(0, min(1, pt_vec.dot(seg_vec) / seg_len2))  # clamp projection to the segment
+     proj = seg_p1 + t * seg_vec
+     return np.linalg.norm(pt - proj)
+
+
+ def get_sparse_depth(
+     colmap_rec: pycolmap.Reconstruction, img_id_substring: str, depth: np.ndarray
+ ) -> Tuple[np.ndarray, bool, Any]:
+     """Projects COLMAP 3D points into the image to create a sparse depth map."""
+     H, W = depth.shape
+     found_img = None
+     for img_id_c, col_img in colmap_rec.images.items():
+         if img_id_substring in col_img.name:
+             found_img = col_img
+             break
+     if found_img is None:
+         return np.zeros((H, W), dtype=np.float32), False, None
+     points_xyz = [
+         p3D.xyz for pid, p3D in colmap_rec.points3D.items() if found_img.has_point3D(pid)
+     ]
+     if not points_xyz:
+         return np.zeros((H, W), dtype=np.float32), False, found_img
+     points_xyz = np.array(points_xyz)
+     uv, z_vals = [], []
+     for xyz in points_xyz:
+         proj = found_img.project_point(xyz)
+         if proj is not None:
+             u_i, v_i = int(round(proj[0])), int(round(proj[1]))
+             if 0 <= u_i < W and 0 <= v_i < H:
+                 uv.append((u_i, v_i))
+                 mat4x4 = np.eye(4)
+                 mat4x4[:3, :4] = found_img.cam_from_world.matrix()
+                 p_cam = mat4x4 @ np.array([xyz[0], xyz[1], xyz[2], 1.0])  # camera-frame point
+                 z_vals.append(p_cam[2] / p_cam[3])
+     uv, z_vals = np.array(uv, dtype=int), np.array(z_vals)
+     depth_out = np.zeros((H, W), dtype=np.float32)
+     if len(uv) > 0:
+         depth_out[uv[:, 1], uv[:, 0]] = z_vals
+     return depth_out, True, found_img
+
+
+ def fit_scale_robust_median(
+     depth: np.ndarray, sparse_depth: np.ndarray, validity_mask: np.ndarray = None
+ ) -> Tuple[float, np.ndarray]:
+     """Fits a scale factor between dense and sparse depth using the median ratio."""
+     mask = (sparse_depth > 0.1) & (depth > 0.1) & (sparse_depth < 50) & (depth < 50)
+     if validity_mask is not None:
+         mask &= validity_mask
+     X, Y = depth[mask], sparse_depth[mask]
+     if len(X) < 5:
+         return 1.0, depth
+     ratios = Y / X
+     alpha = np.median(ratios)
+     return alpha, alpha * depth
+
+
+ def get_fitted_dense_depth(
+     depth: np.ndarray, colmap_rec: pycolmap.Reconstruction, img_id: str, ade20k_seg: np.ndarray
+ ) -> Tuple[np.ndarray, np.ndarray, bool, Any]:
+     """Fits the dense depth map to the sparse COLMAP depth."""
+     depth_np = np.array(depth) / 1000.0  # convert depth (assumed millimetres) to metres
+     depth_sparse, found_sparse, col_img = get_sparse_depth(colmap_rec, img_id, depth_np)
+     if not found_sparse:
+         return depth_np, np.zeros_like(depth_np), False, None
+     house_mask = get_house_mask(ade20k_seg)
+     k, depth_fitted = fit_scale_robust_median(depth_np, depth_sparse, validity_mask=house_mask)
+     return depth_fitted, depth_sparse, True, col_img
+
+
+ def project_vertices_to_3d(
+     uv: np.ndarray, depth_vert: np.ndarray, col_img: Any
+ ) -> np.ndarray:
+     """Projects 2D vertices to 3D using camera intrinsics and depth."""
+     xy_local = np.ones((len(uv), 3))
+     K = col_img.camera.calibration_matrix()
+     xy_local[:, 0] = (uv[:, 0] - K[0, 2]) / K[0, 0]
+     xy_local[:, 1] = (uv[:, 1] - K[1, 2]) / K[1, 1]
+     vertices_3d_local = xy_local * depth_vert[..., None]
+     world_to_cam = np.eye(4)
+     world_to_cam[:3] = col_img.cam_from_world.matrix()
+     cam_to_world = np.linalg.inv(world_to_cam)
+     vertices_3d_homogeneous = cv2.convertPointsToHomogeneous(vertices_3d_local)
+     vertices_3d = cv2.transform(vertices_3d_homogeneous, cam_to_world)
+     return cv2.convertPointsFromHomogeneous(vertices_3d).reshape(-1, 3)
+
+
+ def merge_vertices_3d(
+     vert_edge_per_image: Dict[int, Tuple[List[Dict[str, Any]], List[Tuple[int, int]], np.ndarray]],
+     th: float = Config.MERGE_THRESHOLD,
+ ) -> Tuple[np.ndarray, List[Tuple[int, int]]]:
+     """
+     Merges 3D vertices and edges across multiple views.
+
+     Args:
+         vert_edge_per_image: Dictionary of per-image vertices, edges, and 3D vertices.
+         th: Distance threshold for merging.
+
+     Returns:
+         new_vertices: Merged 3D vertices.
+         new_connections: Merged edge connections.
+     """
+     all_3d_vertices, connections_3d, types = [], [], []
+     cur_start = 0
+     for cimg_idx, (vertices, connections, vertices_3d) in vert_edge_per_image.items():
+         if len(vertices) == 0 or len(vertices_3d) == 0:
+             continue
+         types += [int(v['type'] == 'apex') for v in vertices]
+         all_3d_vertices.append(vertices_3d)
+         connections_3d += [(x + cur_start, y + cur_start) for (x, y) in connections]
+         cur_start += len(vertices_3d)
+     if len(all_3d_vertices) == 0:
+         return np.array([]), []
+     all_3d_vertices = np.concatenate(all_3d_vertices, axis=0)
+     if len(all_3d_vertices) == 0:
+         return np.array([]), []
+     distmat = cdist(all_3d_vertices, all_3d_vertices)
+     types = np.array(types).reshape(-1, 1)
+     same_types = cdist(types, types)
+     mask_to_merge = (distmat <= th) & (same_types == 0)  # close together and same type
+     new_vertices, new_connections = [], []
+     to_merge = sorted(list(set([tuple(a.nonzero()[0].tolist()) for a in mask_to_merge])))
+     to_merge_final = defaultdict(list)
+     for i in range(len(all_3d_vertices)):
+         for j in to_merge:
+             if i in j:
+                 to_merge_final[i] += j
+     for k, v in to_merge_final.items():
+         to_merge_final[k] = list(set(v))
+     already_there, merged = set(), []
+     for k, v in to_merge_final.items():
+         if k in already_there:
+             continue
+         merged.append(v)
+         for vv in v:
+             already_there.add(vv)
+     old_idx_to_new = {}
+     count = 0
+     for idxs in merged:
+         if len(idxs) > 0:
+             new_vertices.append(all_3d_vertices[idxs].mean(axis=0))  # cluster mean
+             for idx in idxs:
+                 old_idx_to_new[idx] = count
+             count += 1
+     if len(new_vertices) == 0:
+         return np.array([]), []
+     new_vertices = np.array(new_vertices)
+     for conn in connections_3d:
+         if conn[0] in old_idx_to_new and conn[1] in old_idx_to_new:
+             new_con = sorted((old_idx_to_new[conn[0]], old_idx_to_new[conn[1]]))
+             if new_con[0] != new_con[1] and new_con not in new_connections:
+                 new_connections.append(new_con)
+     return new_vertices, new_connections
+
+
+ def prune_too_far(
+     all_3d_vertices: np.ndarray, connections_3d: List[Tuple[int, int]], colmap_rec: pycolmap.Reconstruction, th: float = Config.PRUNE_DISTANCE
+ ) -> Tuple[np.ndarray, List[Tuple[int, int]]]:
+     """Prunes 3D vertices and edges that are too far from COLMAP points."""
+     if len(all_3d_vertices) == 0:
+         return all_3d_vertices, connections_3d
+     xyz_sfm = np.array([v.xyz for v in colmap_rec.points3D.values()])
+     if len(xyz_sfm) == 0:
+         return all_3d_vertices, connections_3d
+     distmat = cdist(all_3d_vertices, xyz_sfm)
+     mask = distmat.min(axis=1) <= th
+     if not np.any(mask):
+         return np.empty((0, 3)), []
+     old_to_new_idx = {old: new for new, old in enumerate(np.where(mask)[0])}
+     new_vertices = all_3d_vertices[mask]
+     new_connections = []
+     for u, v in connections_3d:
+         if u in old_to_new_idx and v in old_to_new_idx:
+             new_connections.append((old_to_new_idx[u], old_to_new_idx[v]))
+     return new_vertices, new_connections
+
+
+ def metric_aware_refine(
+     vertices: np.ndarray, edges: List[Tuple[int, int]]
+ ) -> Tuple[np.ndarray, List[Tuple[int, int]]]:
+     """
+     Refines vertices and edges using clustering and edge length constraints.
+
+     Args:
+         vertices: 3D vertex coordinates.
+         edges: List of edge connections.
+
+     Returns:
+         final_vertices: Refined 3D vertices.
+         final_edges: Refined edge connections.
+     """
+     if len(vertices) < 2:
+         return vertices, edges
+     clustering = DBSCAN(
+         eps=Config.REFINEMENT_CLUSTER_EPS, min_samples=Config.REFINEMENT_MIN_SAMPLES
+     ).fit(vertices)
+     labels = clustering.labels_
+     refined_vertices = vertices.copy()
+     refined_centers = {}
+     for label in set(labels):
+         if label == -1:
+             continue
+         refined_centers[label] = np.mean(vertices[labels == label], axis=0)
+     for i, label in enumerate(labels):
+         if label in refined_centers:
+             refined_vertices[i] = refined_centers[label]
+     final_edges_set = set()
+     for u, v in edges:
+         if u >= len(refined_vertices) or v >= len(refined_vertices):
+             continue
+         p1 = refined_vertices[u]
+         p2 = refined_vertices[v]
+         dist = np.linalg.norm(p1 - p2)
+         if Config.MIN_EDGE_LENGTH <= dist <= Config.MAX_EDGE_LENGTH:
+             if not np.allclose(p1, p2, atol=1e-3):
+                 final_edges_set.add(tuple(sorted((u, v))))
+     if not final_edges_set:
+         return np.empty((0, 3)), []
+     used_idxs = set(u for u, v in final_edges_set) | set(v for u, v in final_edges_set)
+     sorted_used_idxs = sorted(list(used_idxs))
+     final_map = {old_id: new_id for new_id, old_id in enumerate(sorted_used_idxs)}
+     final_vertices = np.array([refined_vertices[old_id] for old_id in sorted_used_idxs])
+     final_edges = [(final_map[u], final_map[v]) for u, v in final_edges_set]
+     return final_vertices, final_edges
+
+
+ def predict_wireframe(entry: Dict[str, Any]) -> Tuple[np.ndarray, List[Tuple[int, int]]]:
+     """
+     Main prediction function for the S23DR wireframe challenge.
+
+     Args:
+         entry: Input data entry.
+
+     Returns:
+         final_vertices: 3D vertices of the wireframe.
+         final_edges: Edge connections.
+     """
+     try:
+         good_entry = convert_entry_to_human_readable(entry)
+         colmap_rec = good_entry['colmap_binary']
+         vert_edge_per_image = {}
+         for i, (gest_img, img_id, ade_seg, depth_img) in enumerate(
+             zip(
+                 good_entry['gestalt'],
+                 good_entry['image_ids'],
+                 good_entry['ade'],
+                 good_entry['depth'],
+             )
+         ):
+             depth_size = (gest_img.width, gest_img.height)
+             gest_seg_np = np.array(gest_img.resize(depth_size)).astype(np.uint8)
+             vertices, connections = get_vertices_and_edges_from_segmentation(gest_seg_np)
+             if not vertices:
+                 vert_edge_per_image[i] = ([], [], [])
+             else:
+                 depth_fitted, depth_sparse, found_sparse, col_img = get_fitted_dense_depth(
+                     depth_img, colmap_rec, img_id, ade_seg
+                 )
+                 if found_sparse and col_img is not None:
+                     uv, depth_vert = get_uv_depth(vertices, depth_fitted, depth_sparse)
+                     vertices_3d = project_vertices_to_3d(uv, depth_vert, col_img)
+                     vert_edge_per_image[i] = (vertices, connections, vertices_3d)
+                 else:
+                     vert_edge_per_image[i] = (vertices, connections, [])
+         all_3d_vertices, connections_3d = merge_vertices_3d(vert_edge_per_image)
+         all_3d_vertices, connections_3d = prune_too_far(
+             all_3d_vertices, connections_3d, colmap_rec
+         )
+         final_vertices, final_edges = metric_aware_refine(
+             all_3d_vertices, connections_3d
+         )
+         if len(final_vertices) < 2 or len(final_edges) < 1:
+             return empty_solution()
+         return final_vertices, final_edges
+     except Exception as e:
+         print(f"An error occurred in the main prediction pipeline: {e}")
+         import traceback
+         traceback.print_exc()
+         return empty_solution()
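
The geometric helpers in `solution.py` can be sanity-checked in isolation; a small sketch with synthetic inputs (the values below are illustrative, not from the dataset):

```python
# Sanity-check sketch for two helpers, using synthetic data.
import numpy as np

from solution import fit_scale_robust_median, point_to_segment_dist

# The point (0, 1) projects onto the segment (0,0)-(2,0) at distance 1.
d = point_to_segment_dist(
    np.array([0.0, 1.0]), np.array([0.0, 0.0]), np.array([2.0, 0.0])
)
print(d)  # 1.0

# A dense depth map off by a constant factor of 2 should recover alpha ~= 2.
dense = np.full((10, 10), 1.0)
sparse = np.zeros((10, 10))
sparse[::2, ::2] = 2.0  # sparse observations at twice the dense depth
alpha, fitted = fit_scale_robust_median(dense, sparse)
print(alpha)  # 2.0
```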