import os

import datasets
from PIL import Image

_KITTI2012_URL = "https://s3.eu-central-1.amazonaws.com/avg-kitti/data_stereo_flow.zip"


class KITTIStereo2012(datasets.GeneratorBasedBuilder):
    """KITTI Stereo 2012 dataset with stereo grayscale/color images and disparity ground truth."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=(
                "KITTI Stereo 2012 dataset: includes stereo grayscale and color image pairs captured at"
                " two timepoints, along with disparity and optical flow ground truth and calibration"
                " matrices. Designed for stereo vision and optical flow tasks."
            ),
            features=datasets.Features(
                {
                    "ImageGray_t0": datasets.Sequence(datasets.Image()),
                    "ImageGray_t1": datasets.Sequence(datasets.Image()),
                    "ImageColor_t0": datasets.Sequence(datasets.Image()),
                    "ImageColor_t1": datasets.Sequence(datasets.Image()),
                    "calib": {
                        "P0": datasets.Sequence(datasets.Value("float32")),
                        "P1": datasets.Sequence(datasets.Value("float32")),
                        "P2": datasets.Sequence(datasets.Value("float32")),
                        "P3": datasets.Sequence(datasets.Value("float32")),
                    },
                    "disp_noc": datasets.Image(),
                    "disp_occ": datasets.Image(),
                    "disp_refl_noc": datasets.Image(),
                    "disp_refl_occ": datasets.Image(),
                    "flow_noc": datasets.Image(),
                    "flow_occ": datasets.Image(),
                }
            ),
            supervised_keys=("ImageGray_t0", "disp_noc"),
            homepage="http://www.cvlibs.net/datasets/kitti/eval_stereo_flow.php?benchmark=stereo",
            license="CC BY-NC-SA 3.0",
            citation="""@inproceedings{Geiger2012CVPR,
  author = {Andreas Geiger and Philip Lenz and Raquel Urtasun},
  title = {Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite},
  booktitle = {Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2012}
}""",
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download_and_extract(_KITTI2012_URL)
        train_path = os.path.join(archive_path, "training")
        test_path = os.path.join(archive_path, "testing")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"base_path": train_path, "split": "training"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"base_path": test_path, "split": "testing"},
            ),
        ]

    def _generate_examples(self, base_path, split):
        # Left (index 0) and right (index 1) cameras; frames are "<id>_10.png" (t0) and "<id>_11.png" (t1).
        image_0_path = os.path.join(base_path, "image_0")
        image_1_path = os.path.join(base_path, "image_1")
        color_0_path = os.path.join(base_path, "colored_0")
        color_1_path = os.path.join(base_path, "colored_1")
        calib_path = os.path.join(base_path, "calib")

        # Disparity and flow ground truth is only shipped with the training split.
        is_train = split == "training"
        if is_train:
            disp_noc_path = os.path.join(base_path, "disp_noc")
            disp_occ_path = os.path.join(base_path, "disp_occ")
            disp_refl_noc_path = os.path.join(base_path, "disp_refl_noc")
            disp_refl_occ_path = os.path.join(base_path, "disp_refl_occ")
            flow_noc_path = os.path.join(base_path, "flow_noc")
            flow_occ_path = os.path.join(base_path, "flow_occ")

        files = sorted(os.listdir(image_0_path))
        ids = sorted(set(f.split("_")[0] for f in files))

        for id_ in ids:
            example = {
                "ImageGray_t0": [
                    Image.open(os.path.join(image_0_path, f"{id_}_10.png")),
                    Image.open(os.path.join(image_1_path, f"{id_}_10.png")),
                ],
                "ImageGray_t1": [
                    Image.open(os.path.join(image_0_path, f"{id_}_11.png")),
                    Image.open(os.path.join(image_1_path, f"{id_}_11.png")),
                ],
                "ImageColor_t0": [
                    Image.open(os.path.join(color_0_path, f"{id_}_10.png")),
                    Image.open(os.path.join(color_1_path, f"{id_}_10.png")),
                ],
                "ImageColor_t1": [
                    Image.open(os.path.join(color_0_path, f"{id_}_11.png")),
                    Image.open(os.path.join(color_1_path, f"{id_}_11.png")),
                ],
                "calib": {"P0": [], "P1": [], "P2": [], "P3": []},
                "disp_noc": Image.open(os.path.join(disp_noc_path, f"{id_}_10.png")) if is_train else None,
                "disp_occ": Image.open(os.path.join(disp_occ_path, f"{id_}_10.png")) if is_train else None,
                "disp_refl_noc": Image.open(os.path.join(disp_refl_noc_path, f"{id_}_10.png")) if is_train else None,
                "disp_refl_occ": Image.open(os.path.join(disp_refl_occ_path, f"{id_}_10.png")) if is_train else None,
                "flow_noc": Image.open(os.path.join(flow_noc_path, f"{id_}_10.png")) if is_train else None,
                "flow_occ": Image.open(os.path.join(flow_occ_path, f"{id_}_10.png")) if is_train else None,
            }

            # Parse the 3x4 projection matrices P0-P3 (12 floats each) from the calibration file.
            calib_file = os.path.join(calib_path, f"{id_}.txt")
            with open(calib_file, "r") as f:
                for line in f:
                    if ":" not in line:
                        continue
                    key, value = line.strip().split(":", 1)
                    if key in ("P0", "P1", "P2", "P3"):
                        example["calib"][key] = [float(x) for x in value.strip().split()]

            yield id_, example
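

# Minimal usage sketch, not part of the builder: loading this script locally with
# `datasets.load_dataset`. The filename "kitti_stereo_2012.py" is an assumed local path to this
# file, and script-based loading requires a `datasets` version that still supports loading scripts.
if __name__ == "__main__":
    dataset = datasets.load_dataset("kitti_stereo_2012.py")  # hypothetical path to this script
    sample = dataset["train"][0]
    # Each image field holds a [left, right] pair; ground-truth fields are None in the test split.
    print(sample["calib"]["P2"])  # 12 floats, i.e. one flattened 3x4 projection matrix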