this should be in a different repo
scripts/collage_comparison/vre.sh CHANGED

@@ -2,4 +2,4 @@
 set -ex
 video_file=$1
 shift
-vre_gpu_parallel $video_file --
+vre_gpu_parallel $video_file --config_path /scratch/sdc/datasets/dronescapes-2024/scripts/collage_comparison/cfg.yaml -o data_${video_file} --representations semantic_mask2former_coco_47429163_0 semantic_mask2former_mapillary_49189528_0 semantic_mask2former_mapillary_49189528_1 depth_marigold "normals_svd(depth_marigold)" semantic_mask2former_swin_mapillary_converted semantic_mask2former_r50_mapillary_converted semantic_mask2former_swin_coco_converted semantic_median_expert buildings "buildings(nearby)" containing rgb safe-landing-no-sseg safe-landing-semantics sky-and-water transportation vegetation -I /export/home/proiecte/aux/mihai_cristian.pirvu/code/neo-transformers/readers/semantic_mapper.py:get_new_semantic_mapped_tasks --output_dir_exists_mode skip_computed
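Note: with this change the config path and the representation list are hard-coded in the script, so only the video file is passed on the command line. A minimal sketch of batching it over several clips from Python (the clip names below are hypothetical, not part of the commit):

import subprocess

# Hypothetical clip list; vre.sh takes the video file as its first argument
# and writes its outputs under data_<video_file>.
videos = ["clip_0001.mp4", "clip_0002.mp4"]
for video in videos:
    subprocess.run(["bash", "scripts/collage_comparison/vre.sh", video], check=True)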
scripts/collage_comparison/wip.py CHANGED

@@ -76,14 +76,13 @@ def load_model_from_path(weights_path):
     logger.info(f"Excluded (fully masked) tasks: {cfg.train.algorithm.masking.parameters.excluded_tasks}")
     return model

-
 def colorize_dronescapes(item: np.ndarray) -> np.ndarray:
     # colorize_semantic_segmentation
-    assert len(item.shape) ==
+    assert len(item.shape) == 2, item.shape
     color_map = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
                  [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
     classes_8 = ["land", "forest", "residential", "road", "little-objects", "water", "sky", "hill"]
-    return colorize_semantic_segmentation(item[None]
+    return colorize_semantic_segmentation(item[None], color_map=color_map, classes=classes_8)[0]

 @tr.no_grad
 def inference(model: LME | str, batch: dict, n_ens: int | None = None) -> np.ndarray:
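Note: colorize_dronescapes now expects a 2D (H, W) class-index map and unbatches the result of colorize_semantic_segmentation with [0]. A minimal usage sketch under that assumption (the random input is illustrative only; colorize_dronescapes is the function defined above in wip.py):

import numpy as np

# Hypothetical (H, W) prediction with class indices 0..7 for the 8 dronescapes classes.
pred = np.random.randint(0, 8, size=(540, 960), dtype=np.uint8)
vis = colorize_dronescapes(pred)  # passes the new 2D assert; expected (H, W, 3) color image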
@@ -111,7 +110,7 @@ def inference(model: LME | str, batch: dict, n_ens: int | None = None) -> np.ndarray:
         else:
             acc_sema = (acc_sema * i + curr_sema) / (i + 1)
     item = acc_sema[0]
-    return
+    return item.permute(1, 2, 0).numpy().argmax(-1).astype(np.uint8)

 def get_args() -> Namespace:
     parser = ArgumentParser()
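Note: the running update acc_sema = (acc_sema * i + curr_sema) / (i + 1) keeps acc_sema equal to the mean of the first i + 1 ensemble predictions, and the new return line collapses the averaged (C, H, W) output into an (H, W) uint8 class map. A small self-contained check of the same pattern (shapes here are illustrative):

import torch as tr

preds = [tr.rand(1, 8, 4, 4) for _ in range(5)]  # illustrative (N, C, H, W) ensemble outputs
acc = preds[0]
for i, curr in enumerate(preds[1:], start=1):
    acc = (acc * i + curr) / (i + 1)             # running mean over the ensemble
assert tr.allclose(acc, tr.stack(preds).mean(0)) # identical to averaging all predictions
item = acc[0]                                    # (C, H, W)
class_map = item.permute(1, 2, 0).numpy().argmax(-1).astype("uint8")  # (H, W) class indices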
@@ -179,16 +178,30 @@ def main(args: Namespace):
     fix_plot_fns_(plot_fns, task_types, stats, cfg["data"]["parameters"]["normalization"])

     (out_dir := Path.cwd() / f"out_{video_path.name}").mkdir(exist_ok=True)
+    [(out_dir / x).mkdir(exist_ok=True) for x in ["ens", "m2f", "distil", "collage"]]
+
     for frame_ix in tqdm(frames):
-        if (out_file := out_dir/ f"{frame_ix}.jpg").exists():
+        if (out_file := out_dir / f"collage/{frame_ix}.jpg").exists():
             continue
         batch_m2f = reader2.collate_fn([reader2[frame_ix]])
         batch = reader.collate_fn([reader[frame_ix]])
-        m2f_img = inference("semantic_mask2former_r50_mapillary_converted", batch_m2f)
-        ens_img = inference(model_mae, batch, n_ens=30)
-        distil_img = inference(model_distil, batch)
         rgb = batch_m2f["data"]["rgb"][0].permute(1, 2, 0).numpy()

+        if not (pth := out_dir / f"m2f/{frame_ix}.npz").exists():
+            y = inference("semantic_mask2former_r50_mapillary_converted", batch_m2f)
+            np.savez_compressed(pth, y)
+        m2f_img = colorize_dronescapes(np.load(pth)["arr_0"])
+
+        if not (pth := out_dir / f"ens/{frame_ix}.npz").exists():
+            y = inference(model_mae, batch, n_ens=30)
+            np.savez_compressed(pth, y)
+        ens_img = colorize_dronescapes(np.load(pth)["arr_0"])
+
+        if not (pth := out_dir / f"distil/{frame_ix}.npz").exists():
+            y = inference(model_distil, batch)
+            np.savez_compressed(pth, y)
+        distil_img = colorize_dronescapes(np.load(pth)["arr_0"])
+
         titles = ["RGB", "Mask2Former (216M)", "Ensembles-30 (4M)", "Distillation (4M)"]
         collage = collage_fn([rgb, m2f_img, ens_img, distil_img], titles=titles, rows_cols=(2, 2), size_px=40)
         image_write(collage, out_file)
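Note: the three added blocks share one cache-then-colorize pattern: run inference only if <out_dir>/<name>/<frame_ix>.npz is missing, then always colorize from the saved array. A hypothetical helper that factors this out (the name and signature are not part of the commit):

from pathlib import Path
import numpy as np

def cached_colorized_inference(out_dir: Path, name: str, frame_ix: int, run_fn) -> np.ndarray:
    """Run run_fn() only when the cached .npz is missing, then colorize the stored class map."""
    pth = out_dir / name / f"{frame_ix}.npz"
    if not pth.exists():
        np.savez_compressed(pth, run_fn())
    return colorize_dronescapes(np.load(pth)["arr_0"])

# Usage inside the frame loop (mirrors the added code):
# m2f_img = cached_colorized_inference(out_dir, "m2f", frame_ix,
#     lambda: inference("semantic_mask2former_r50_mapillary_converted", batch_m2f))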