LLDDSS committed on
Commit
c0ea771
·
verified ·
1 Parent(s): e30c01c

Delete loading script

Browse files
Files changed (1) hide show
  1. Causal3D.py +0 -165
Causal3D.py DELETED
@@ -1,165 +0,0 @@
1
- import datasets
2
- import pandas as pd
3
- import os
4
- from pathlib import Path
5
- from tqdm import tqdm
6
-
7
- print("✅ Custom Causal3D loaded: outside Causal3D.py")
8
- _CITATION = """\
9
- @article{liu2025causal3d,
10
- title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},
11
- author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},
12
- journal={arXiv preprint arXiv:2503.04852},
13
- year={2025}
14
- }
15
- """
16
-
17
- _DESCRIPTION = """\
18
- Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes.
19
- It includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.
20
- """
21
-
22
- _HOMEPAGE = "https://huggingface.co/datasets/LLDDSS/Causal3D"
23
- _LICENSE = "CC-BY-4.0"
24
-
25
class Causal3D(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Causal3D benchmark.

    Each config corresponds to one scene, named ``<category>_<scene>`` where
    category is ``real_scenes`` or ``hypothetical_scenes``. Examples pair an
    image with its file name and the matching CSV row serialized as JSON.
    """

    DEFAULT_CONFIG_NAME = "real_scenes_Water_flow_scene_render"

    # Scene names per category; configs are generated uniformly below so the
    # 22 near-identical BuilderConfig literals are not repeated by hand.
    _HYPOTHETICAL_SCENES = [
        "Hypothetic_v2_linear",
        "Hypothetic_v2_nonlinear",
        "Hypothetic_v3_fully_connected_linear",
        "Hypothetic_v4_linear_full_connected",
        "Hypothetic_v4_linear_v",
        "Hypothetic_v4_nonlinear_v",
        "Hypothetic_v5_linear",
        "Hypothetic_v5_linear_full_connected",
        "rendered_h3_linear_128P",
        "rendered_h3_nonlinear_128P",
        "rendered_h5_nonlinear",
    ]
    _REAL_SCENES = [
        "Real_Parabola",
        "Real_magnet_v3",
        "Real_magnet_v3_5",
        # "Real_Parabola_multi_view" was commented out in the original config list.
        "Real_spring_v3_256P",
        "Water_flow_scene_render",
        "convex_len_render_images",
        "real_pendulum",
        "rendered_magnetic_128",
        "rendered_reflection_128P",
        "seesaw_scene_128P",
        "spring_scene_128P",
    ]

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=f"hypothetical_scenes_{scene}",
            version=datasets.Version("1.0.0"),
            description=f"{scene} scene",
        )
        for scene in _HYPOTHETICAL_SCENES
    ] + [
        datasets.BuilderConfig(
            name=f"real_scenes_{scene}",
            version=datasets.Version("1.0.0"),
            description=f"{scene} scene",
        )
        for scene in _REAL_SCENES
    ]

    def _info(self):
        """Return DatasetInfo: image + file name + raw metadata string."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "file_name": datasets.Value("string"),
                "metadata": datasets.Value("string"),  # JSON of the CSV row; could become structured fields
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Map the config name back to ``<category>/<scene>`` on disk.

        Config names are ``{real|hypothetical}_scenes_<scene>``, so splitting
        on the first two underscores recovers category and scene.
        """
        parts = self.config.name.split("_", 2)
        category = parts[0] + "_" + parts[1]  # "real_scenes" or "hypothetical_scenes"

        if category not in ["real_scenes", "hypothetical_scenes"]:
            raise ValueError(f"Invalid category '{category}'. Must be one of ['real_scenes', 'hypothetical_scenes']")

        scene = parts[2]
        data_dir = os.path.join(category, scene)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir):
        """Yield (key, example) pairs for one scene directory.

        With no CSV present, every discovered image is yielded with null
        metadata. With a CSV, each row is matched to an image by file name
        (optionally under a ``part_XXX`` subdirectory).
        """
        def color(text, code):
            # Wrap text in an ANSI escape for colored terminal output.
            return f"\033[{code}m{text}\033[0m"

        # Index every image under data_dir by its relative path.
        # FIX: use as_posix() so keys always use '/', matching the '/'-joined
        # lookups below regardless of OS path separator.
        try:
            image_files = {}
            for ext in ("*.png", "*.jpg", "*.jpeg"):
                for img_path in Path(data_dir).rglob(ext):
                    relative_path = img_path.relative_to(data_dir).as_posix()
                    image_files[relative_path] = str(img_path)
            # Top-level directory components; scenes sharded into part_000/...
            # keep the set, otherwise fall back to a single "no subdir" entry.
            parts = {rel.split('/')[0] for rel in image_files}
            if "part_000" not in parts:
                parts = ['']
        except Exception as e:
            print(color(f"Error loading images: {e}", "31"))  # red
            return

        # Locate the metadata CSV, skipping macOS "._" resource-fork files.
        # FIX: the original computed this rglob twice and had a dead
        # `if not csv_files: pass` branch; both removed.
        csv_files = [f for f in Path(data_dir).rglob("*.csv") if not f.name.startswith("._")]
        csv_path = csv_files[0] if csv_files else None
        df = pd.read_csv(csv_path) if csv_path else None

        # Basenames (without extension) from the CSV's optional "image" column.
        images = []
        if df is not None and "image" in df.columns:
            images = [
                i.split('/')[-1].split('.')[0]
                for i in df["image"].tolist()
                if i.endswith(('.png', '.jpg', '.jpeg'))
            ]

        try:
            if df is None:
                # No CSV: emit every image keyed by its relative path.
                for rel_path, abs_path in tqdm(image_files.items(), desc="Processing images", unit="image"):
                    yield rel_path, {
                        "image": abs_path,
                        "file_name": rel_path,
                        "metadata": None,
                    }
            else:
                # CSV present: one example per row, matched to an image file.
                # NOTE(review): assumes the CSV has an "ID" column and that
                # `idx` (the row label) is positional into `images` — holds for
                # a default RangeIndex; confirm for re-indexed CSVs.
                for idx, row in tqdm(df.iterrows(), total=len(df), desc="Processing rows", unit="row"):
                    fname = row["ID"]
                    raw_record_img_path = images[idx] if images else ""
                    record_img_name = raw_record_img_path.split('/')[-1]
                    # NOTE(review): both branches below break, so only the
                    # first element of `parts` is ever tried — preserved from
                    # the original implementation.
                    for part in parts:
                        if part == '':
                            record_img_path = record_img_name
                        else:
                            record_img_path = "/".join([part, record_img_name.strip()])
                        if "Water_flow_scene_render" in data_dir:
                            # This scene's files are zero-stripped integers + ".png".
                            record_img_path = "/".join([part, str(int(record_img_name.strip().split('.')[0])) + ".png"])
                        if record_img_path in image_files:
                            yield idx, {
                                "image": image_files[record_img_path],
                                "file_name": fname,
                                "metadata": row.to_json(),
                            }
                            break
                        else:
                            # No matching file: emit metadata only (original
                            # behavior — the "image" key is intentionally absent).
                            yield idx, {
                                "file_name": fname,
                                "metadata": row.to_json(),
                            }
                            break
        except Exception as e:
            print(color(f"Error processing CSV rows: {e}", "31"))