# Causal3D.py — Hugging Face dataset loading script for the Causal3D benchmark.
# Source: Hugging Face Hub repo LLDDSS/Causal3D, commit c9e54b2
# (Hub page residue — file listing, "raw / history blame", size 9.28 kB — removed.)
import datasets
import pandas as pd
import os
from pathlib import Path
from tqdm import tqdm
# NOTE(review): module-level side effect left in place — prints on import so a
# user can confirm this custom loading script (not a cached copy) was loaded.
print("✅ Custom Causal3D loaded: outside Causal3D.py")
# BibTeX citation for the Causal3D paper (arXiv:2503.04852), surfaced via DatasetInfo.
_CITATION = """\
@article{liu2025causal3d,
title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},
author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},
journal={arXiv preprint arXiv:2503.04852},
year={2025}
}
"""
# Short human-readable summary used in the dataset card / DatasetInfo.
_DESCRIPTION = """\
Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes.
It includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.
"""
# Canonical dataset homepage on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/LLDDSS/Causal3D"
# Distribution license identifier.
_LICENSE = "CC-BY-4.0"
class Causal3D(datasets.GeneratorBasedBuilder):
    """Loader for the Causal3D benchmark (real and hypothetical causal scenes).

    Each config name encodes ``<category>_<scene>`` where ``<category>`` is
    either ``real_scenes`` or ``hypothetical_scenes``; data for a config is
    read from ``<category>/<scene>`` relative to the working directory.
    """

    DEFAULT_CONFIG_NAME = "real_scenes_Water_flow_scene_render"

    BUILDER_CONFIGS = [
        # hypothetical_scenes
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_nonlinear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v3_fully_connected_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v3_fully_connected_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_full_connected scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_v scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_nonlinear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_nonlinear_v scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear_full_connected scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_linear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_linear_128P scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_nonlinear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_nonlinear_128P scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h5_nonlinear", version=datasets.Version("1.0.0"), description="rendered_h5_nonlinear scene"),
        # real_scenes
        datasets.BuilderConfig(name="real_scenes_Real_Parabola", version=datasets.Version("1.0.0"), description="Real_Parabola scene"),
        datasets.BuilderConfig(name="real_scenes_Real_magnet_v3", version=datasets.Version("1.0.0"), description="Real_magnet_v3 scene"),
        datasets.BuilderConfig(name="real_scenes_Real_magnet_v3_5", version=datasets.Version("1.0.0"), description="Real_magnet_v3_5 scene"),
        datasets.BuilderConfig(name="real_scenes_Real_spring_v3_256P", version=datasets.Version("1.0.0"), description="Real_spring_v3_256P scene"),
        datasets.BuilderConfig(name="real_scenes_Water_flow_scene_render", version=datasets.Version("1.0.0"), description="Water_flow_scene_render scene"),
        datasets.BuilderConfig(name="real_scenes_convex_len_render_images", version=datasets.Version("1.0.0"), description="convex_len_render_images scene"),
        datasets.BuilderConfig(name="real_scenes_real_pendulum", version=datasets.Version("1.0.0"), description="real_pendulum scene"),
        datasets.BuilderConfig(name="real_scenes_rendered_magnetic_128", version=datasets.Version("1.0.0"), description="rendered_magnetic_128 scene"),
        datasets.BuilderConfig(name="real_scenes_rendered_reflection_128P", version=datasets.Version("1.0.0"), description="rendered_reflection_128P scene"),
        datasets.BuilderConfig(name="real_scenes_seesaw_scene_128P", version=datasets.Version("1.0.0"), description="seesaw_scene_128P scene"),
        datasets.BuilderConfig(name="real_scenes_spring_scene_128P", version=datasets.Version("1.0.0"), description="spring_scene_128P scene"),
    ]

    def _info(self):
        """Return the dataset metadata and feature schema.

        Every example carries the image, its file name, and the raw CSV row
        serialized as a JSON string (``metadata``).
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "file_name": datasets.Value("string"),
                "metadata": datasets.Value("string"),  # optionally replace with structured fields
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the on-disk directory for the selected config.

        Splits ``self.config.name`` into ``<category>_<scene>`` and emits a
        single TRAIN split rooted at ``<category>/<scene>``.

        Raises:
            ValueError: if the config name does not start with a known category.
        """
        parts = self.config.name.split("_", 2)
        category = parts[0] + "_" + parts[1]  # real_scenes or hypothetical_scenes
        if category not in ["real_scenes", "hypothetical_scenes"]:
            raise ValueError(f"Invalid category '{category}'. Must be one of ['real_scenes', 'hypothetical_scenes']")
        scene = parts[2]
        data_dir = os.path.join(category, scene)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir):
        """Yield ``(key, example)`` pairs for one scene directory.

        Images are discovered recursively under ``data_dir``. If a metadata
        CSV is present, each row is matched to an image file by name
        (optionally inside a ``part_XXX`` shard subdirectory); otherwise every
        discovered image is yielded with ``metadata=None``.
        """
        def color(text, code):
            # Wrap text in an ANSI escape sequence for colored terminal output.
            return f"\033[{code}m{text}\033[0m"

        # Index every image under data_dir by its path relative to data_dir.
        try:
            image_files = {}
            for ext in ("*.png", "*.jpg", "*.jpeg"):
                for img_path in Path(data_dir).rglob(ext):
                    relative_path = str(img_path.relative_to(data_dir))
                    image_files[relative_path] = str(img_path)
            # First path component of each image — detects a "part_000" shard layout.
            parts = [i.split('/')[0] for i in list(image_files.keys())]
            parts = set(parts)
            if "part_000" not in parts:
                # No shard subdirectories: match images directly at the scene root.
                parts = ['']
        except Exception as e:
            print(color(f"Error loading images: {e}", "31"))  # Red
            return

        # Find the metadata CSV, ignoring macOS resource-fork files ("._*").
        # (A previous unfiltered rglob was dead code and has been removed.)
        csv_files = [f for f in Path(data_dir).rglob("*.csv") if not f.name.startswith("._")]
        csv_path = csv_files[0] if csv_files else None
        df = pd.read_csv(csv_path) if csv_path else None

        image_col_exists = True
        if df is not None and "image" not in df.columns:
            image_col_exists = False
        images = df["image"].tolist() if image_col_exists and df is not None else []
        # Keep only entries with a known image extension, reduced to the bare
        # file stem (no directory, no extension).
        # NOTE(review): this filter can shift indices relative to the CSV rows,
        # so `images[idx]` below assumes the column holds only image paths —
        # TODO confirm against the published data.
        images = [i.split('/')[-1].split('.')[0] for i in images if i.endswith(('.png', '.jpg', '.jpeg'))]

        try:
            if df is None:
                # No CSV: yield every discovered image with empty metadata.
                for i, j in tqdm(image_files.items(), desc="Processing images", unit="image"):
                    yield i, {
                        "image": j,
                        "file_name": i,
                        "metadata": None,
                    }
            else:
                # Match each CSV row to an image file and attach the row as JSON.
                for idx, row in tqdm(df.iterrows(), total=len(df), desc="Processing rows", unit="row"):
                    fname = row["ID"]
                    raw_record_img_path = images[idx] if images else ""
                    record_img_name = raw_record_img_path.split('/')[-1]
                    for part in parts:
                        if part == '':
                            record_img_path = record_img_name
                        else:
                            record_img_path = "/".join([part, record_img_name.strip()])
                        if "Water_flow_scene_render" in data_dir:
                            # This scene stores zero-stripped integer filenames (e.g. "7.png").
                            record_img_path = "/".join([part, str(int(record_img_name.strip().split('.')[0])) + ".png"])
                        if record_img_path in image_files:
                            yield idx, {
                                "image": image_files[record_img_path],
                                "file_name": fname,
                                "metadata": row.to_json(),
                            }
                            break
                        else:
                            # NOTE(review): yields without an "image" key even though the
                            # declared features include one, and breaks after the first
                            # shard candidate — preserved exactly as in the original.
                            yield idx, {
                                "file_name": fname,
                                "metadata": row.to_json(),
                            }
                            break
        except Exception as e:
            print(color(f"Error processing CSV rows: {e}", "31"))