[email protected] committed on
Commit
e914239
·
1 Parent(s): 7d13bcb

update dataset

Files changed (1)
  1. Cuasal3D.py +175 -0
Cuasal3D.py ADDED
@@ -0,0 +1,175 @@
+ import os
+ import glob
+ from pathlib import Path
+ from typing import List
+ import pandas as pd
+ import numpy as np
+ from tqdm import tqdm
+ import datasets
+
+
+ _CITATION = """\
+ @article{liu2025causal3d,
+ title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},
+ author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},
+ journal={arXiv preprint arXiv:2503.04852},
+ year={2025}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes.
+ It includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/LLDDSS/Causal3D"
+ _LICENSE = "CC-BY-4.0"
+
+ class Causal3D(datasets.GeneratorBasedBuilder):
+     DEFAULT_CONFIG_NAME = "real_scenes_Real_magnet_v3"
+     BUILDER_CONFIGS = [
+         # hypothetical_scenes
+         datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_linear scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_nonlinear scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v3_fully_connected_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v3_fully_connected_linear scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_full_connected scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_v scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_nonlinear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_nonlinear_v scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear_full_connected scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_linear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_linear_128P scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_nonlinear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_nonlinear_128P scene"),
+         datasets.BuilderConfig(name="hypothetical_scenes_rendered_h5_nonlinear", version=datasets.Version("1.0.0"), description="rendered_h5_nonlinear scene"),
+
+         # real_scenes
+         datasets.BuilderConfig(name="real_scenes_Real_Parabola", version=datasets.Version("1.0.0"), description="Real_Parabola scene"),
+         datasets.BuilderConfig(name="real_scenes_Real_magnet_v3", version=datasets.Version("1.0.0"), description="Real_magnet_v3 scene"),
+         datasets.BuilderConfig(name="real_scenes_Real_magnet_v3_5", version=datasets.Version("1.0.0"), description="Real_magnet_v3_5 scene"),
+         datasets.BuilderConfig(name="real_scenes_Real_parabola_multi_view", version=datasets.Version("1.0.0"), description="Real_parabola_multi_view scene"),
+         datasets.BuilderConfig(name="real_scenes_Real_spring_v3_256P", version=datasets.Version("1.0.0"), description="Real_spring_v3_256P scene"),
+         datasets.BuilderConfig(name="real_scenes_Water_flow_scene_render", version=datasets.Version("1.0.0"), description="Water_flow_scene_render scene"),
+         datasets.BuilderConfig(name="real_scenes_convex_len_render_images", version=datasets.Version("1.0.0"), description="convex_len_render_images scene"),
+         datasets.BuilderConfig(name="real_scenes_real_pendulum", version=datasets.Version("1.0.0"), description="real_pendulum scene"),
+         datasets.BuilderConfig(name="real_scenes_rendered_magnetic_128", version=datasets.Version("1.0.0"), description="rendered_magnetic_128 scene"),
+         datasets.BuilderConfig(name="real_scenes_rendered_reflection_128P", version=datasets.Version("1.0.0"), description="rendered_reflection_128P scene"),
+         datasets.BuilderConfig(name="real_scenes_seesaw_scene_128P", version=datasets.Version("1.0.0"), description="seesaw_scene_128P scene"),
+         datasets.BuilderConfig(name="real_scenes_spring_scene_128P", version=datasets.Version("1.0.0"), description="spring_scene_128P scene"),
+     ]
+
+     def _info(self):
+         print(">>> Loaded config:", self.config.name)  # debug output: show which config was selected
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 "image": datasets.Image(),
+                 "file_name": datasets.Value("string"),
+                 "metadata": datasets.Value("string"),  # optionally replace with structured fields
+             }),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         parts = self.config.name.split("_", 2)
+         category = parts[0] + "_" + parts[1]  # "real_scenes" or "hypothetical_scenes"
+
+         if category not in ["real_scenes", "hypothetical_scenes"]:
+             raise ValueError(f"Invalid category '{category}'. Must be one of ['real_scenes', 'hypothetical_scenes']")
+
+         scene = parts[2]
+         data_dir = os.path.join(category, scene)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_dir": data_dir},
+             )
+         ]
+
+     def _generate_examples(self, data_dir):
+         # Find the metadata CSV, ignoring macOS "._" resource-fork files
+         csv_files = [f for f in Path(data_dir).rglob("*.csv") if not f.name.startswith("._")]
+         if not csv_files:
+             print(f"\033[33m[SKIP] No CSV found in {data_dir}, skipping this config.\033[0m")
+             return  # skip this config instead of raising an error
+         csv_path = csv_files[0]
+         df = pd.read_csv(csv_path)
+         if "image" not in df.columns:
+             print(f"\033[31m[SKIP] 'image' column not found in {csv_path}, skipping this config.\033[0m")
+             return
+
+         def color(text, code):
+             return f"\033[{code}m{text}\033[0m"
+
+         # Base names of the images referenced by the CSV
+         images = df["image"].tolist()
+         images = [i.split('/')[-1].split('.')[0] for i in images if i.endswith(('.png', '.jpg', '.jpeg'))]
+
+         # Load image paths
+         try:
+             image_files = {}
+             for ext in ("*.png", "*.jpg", "*.jpeg"):
+                 for img_path in Path(data_dir).rglob(ext):
+                     relative_path = str(img_path.relative_to(data_dir))
+                     image_files[relative_path] = str(img_path)
+             # Some scenes shard images into "part_000"-style subfolders;
+             # otherwise match files at the top level of data_dir.
+             parts = set(i.split('/')[0] for i in image_files.keys())
+             if "part_000" not in parts:
+                 parts = ['']
+
+         except Exception as e:
+             print(color(f"Error loading images: {e}", "31"))  # Red
+             return
+         try:
+             # Match CSV rows with image paths
+             for idx, row in tqdm(df.iterrows(), total=len(df), desc="Processing rows", unit="row"):
+                 fname = row["ID"]
+                 raw_record_img_path = row["image"]
+                 record_img_name = raw_record_img_path.split('/')[-1]
+                 for part in parts:
+                     if part == '':
+                         record_img_path = record_img_name
+                     else:
+                         record_img_path = "/".join([part, record_img_name.strip()])
+                     if "Water_flow_scene_render" in data_dir:
+                         # Water_flow files are keyed by unpadded integer names with a ".png" extension
+                         record_img_path = "/".join([part, str(int(record_img_name.strip().split('.')[0])) + ".png"])
+
+                     if record_img_path in image_files:
+                         yield idx, {
+                             "image": image_files[record_img_path],
+                             "file_name": fname,
+                             "metadata": row.to_json(),
+                         }
+                         break
+
+         except Exception as e:
+             print(color(f"Error processing CSV rows: {e}", "31"))
+
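
For context, a minimal usage sketch (not part of this commit) of how the configs defined above would typically be consumed: the repo id is taken from _HOMEPAGE, the config name from BUILDER_CONFIGS, and trust_remote_code=True is assumed to be required because the dataset ships this Python loading script.

from datasets import load_dataset

# Load one scene config; any name from BUILDER_CONFIGS works here.
ds = load_dataset(
    "LLDDSS/Causal3D",
    name="real_scenes_real_pendulum",
    split="train",              # the script only defines a TRAIN split
    trust_remote_code=True,     # needed for repos with a custom loading script
)
print(ds[0]["file_name"], ds[0]["metadata"])

Each yielded example carries the image, the CSV row's ID as file_name, and the full CSV row serialized to JSON in metadata.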