add random splits to filtered data
ppb_affinity.py  +87 -61
ppb_affinity.py
CHANGED
@@ -3,76 +3,102 @@ import csv
 import random
 
 class ppb_affinity(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.0.0")
+    VERSION = datasets.Version("1.0.1")
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="raw", description="Raw parsed PDBs dataset with critical filtrations only."),
         datasets.BuilderConfig(name="filtered", description="Raw dataset with additional cleaning and train/val/test splits."),
+        datasets.BuilderConfig(name="filtered_random", description="Filtered dataset with random 80-10-10 splits."),
     ]
 
     def _info(self):
         return datasets.DatasetInfo()
 
     def _split_generators(self, dl_manager):
-        return [
-            datasets.SplitGenerator(
+        if self.config.name == "raw":
+            filepath = dl_manager.download_and_extract("raw.csv")
+            return [datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": filepath},
-            )
+                gen_kwargs={"filepath": filepath}
+            )]
+        elif self.config.name == "filtered":
+            filepath = dl_manager.download_and_extract("filtered.csv")
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"filepath": filepath, "split": "train"},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={"filepath": filepath, "split": "val"},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"filepath": filepath, "split": "test"},
+                ),
+            ]
+        elif self.config.name == "filtered_random":
+            filepath = dl_manager.download_and_extract("filtered.csv")
+            # Read all rows to determine splits
+            with open(filepath, encoding="utf-8") as f:
+                reader = csv.DictReader(f)
+                rows = list(reader)
+            n_total = len(rows)
+            # Generate shuffled indices with fixed seed
+            indices = list(range(n_total))
+            rng = random.Random(42)  # Fixed seed for reproducibility
+            rng.shuffle(indices)
+            # Calculate split sizes
+            n_train = int(0.8 * n_total)
+            n_val = int(0.1 * n_total)
+            n_test = n_total - n_train - n_val  # Handle remainder
+            # Split the shuffled indices into contiguous ranges
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.NamedSplit("train_rand"),
+                    gen_kwargs={
+                        "filepath": filepath,
+                        "shuffled_indices": indices,
+                        "split_start": 0,
+                        "split_end": n_train,
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.NamedSplit("validation_rand"),
+                    gen_kwargs={
+                        "filepath": filepath,
+                        "shuffled_indices": indices,
+                        "split_start": n_train,
+                        "split_end": n_train + n_val,
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.NamedSplit("test_rand"),
+                    gen_kwargs={
+                        "filepath": filepath,
+                        "shuffled_indices": indices,
+                        "split_start": n_train + n_val,
+                        "split_end": n_total,
+                    },
+                ),
+            ]
+
+    def _generate_examples(self, filepath, split=None, shuffled_indices=None, split_start=None, split_end=None):
         with open(filepath, encoding="utf-8") as f:
             reader = csv.DictReader(f)
-            train_end = int(0.8 * total)
-            val_end = train_end + int(0.1 * total)
-
-            split_map = {
-                "train_rand": data[:train_end],
-                "val_rand": data[train_end:val_end],
-                "test_rand": data[val_end:]
-            }
-
-            for idx, row in enumerate(split_map[split]):
-                yield idx, row
+            rows = list(reader)  # Materialize once; the reader is exhausted after this line
+            if self.config.name == "raw":
+                for idx, row in enumerate(rows):
+                    yield idx, row
+            elif self.config.name == "filtered":
+                for idx, row in enumerate(rows):
+                    if row["split"] == split:
+                        del row["split"]
+                        yield idx, row
+            elif self.config.name == "filtered_random":
+                # Iterate over the range [split_start, split_end) of the shuffled order
+                for global_idx in range(split_start, split_end):
+                    original_idx = shuffled_indices[global_idx]
+                    row = rows[original_idx]
+                    del row["split"]  # Remove the original split column
+                    yield global_idx, row  # Key is the global shuffled index
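The split boundaries are consistent across the three SplitGenerators because random.Random(42) yields the same permutation on every run, so each generator slices a disjoint range of one shuffled index list. A standalone sketch of that invariant (the row count is invented for illustration):

import random

n_total = 1234  # illustrative row count, not from the dataset
indices = list(range(n_total))
random.Random(42).shuffle(indices)  # fixed seed -> identical permutation every run

n_train = int(0.8 * n_total)
n_val = int(0.1 * n_total)
train = indices[:n_train]
val = indices[n_train:n_train + n_val]
test = indices[n_train + n_val:]  # remainder rows land in test

# Disjoint and exhaustive: every row appears in exactly one split.
assert len(train) + len(val) + len(test) == n_total
assert set(train) | set(val) | set(test) == set(range(n_total))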