shaffei committed (verified)
Commit 1119679 · Parent(s): 883c75e

add random splits to filtered data
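The new "filtered_random" config exposes deterministic 80/10/10 splits of the filtered data under the split names train_rand, validation_rand, and test_rand. A minimal loading sketch, assuming the script is hosted on the Hub under the author's namespace (the repo id below is a guess; substitute the actual path) and that your datasets version still supports script-based loaders via trust_remote_code:

    from datasets import load_dataset

    # "shaffei/ppb_affinity" is a hypothetical repo id, used for illustration only.
    ds = load_dataset("shaffei/ppb_affinity", name="filtered_random", trust_remote_code=True)

    train = ds["train_rand"]       # ~80% of the filtered rows
    val = ds["validation_rand"]    # ~10%
    test = ds["test_rand"]         # remaining rows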

Files changed (1):
  1. ppb_affinity.py (+87 -61)
ppb_affinity.py CHANGED
@@ -3,76 +3,102 @@ import csv
 import random
 
 class ppb_affinity(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.0.2")
+    VERSION = datasets.Version("1.0.1")
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="raw", description="Raw parsed PDBs dataset with critical filtrations only."),
         datasets.BuilderConfig(name="filtered", description="Raw dataset with additional cleaning and train/val/test splits."),
+        datasets.BuilderConfig(name="filtered_random", description="Filtered dataset with random 80-10-10 splits."),
     ]
 
     def _info(self):
         return datasets.DatasetInfo()
 
     def _split_generators(self, dl_manager):
-        """Downloads and defines dataset splits"""
-        filepath = dl_manager.download_and_extract("filtered.csv")
-
-        return [
-            datasets.SplitGenerator(
+        if self.config.name == "raw":
+            filepath = dl_manager.download_and_extract("raw.csv")
+            return [datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": filepath, "split": "train"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": filepath, "split": "val"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepath": filepath, "split": "test"},
-            ),
-
-            datasets.SplitGenerator(
-                name=datasets.NamedSplit("train_rand"),
-                gen_kwargs={"filepath": filepath, "split": "train_rand", "random_split": True},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.NamedSplit("validation_rand"),
-                gen_kwargs={"filepath": filepath, "split": "val_rand", "random_split": True},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.NamedSplit("test_rand"),
-                gen_kwargs={"filepath": filepath, "split": "test_rand", "random_split": True},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split=None, random_split=False):
-        """Generates examples, either using predefined splits or random splits"""
+                gen_kwargs={"filepath": filepath}
+            )]
+        elif self.config.name == "filtered":
+            filepath = dl_manager.download_and_extract("filtered.csv")
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"filepath": filepath, "split": "train"},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={"filepath": filepath, "split": "val"},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"filepath": filepath, "split": "test"},
+                ),
+            ]
+        elif self.config.name == "filtered_random":
+            filepath = dl_manager.download_and_extract("filtered.csv")
+            # Read all rows to determine splits
+            with open(filepath, encoding="utf-8") as f:
+                reader = csv.DictReader(f)
+                rows = list(reader)
+            n_total = len(rows)
+            # Generate shuffled indices with fixed seed
+            indices = list(range(n_total))
+            rng = random.Random(42)  # Fixed seed for reproducibility
+            rng.shuffle(indices)
+            # Calculate split sizes
+            n_train = int(0.8 * n_total)
+            n_val = int(0.1 * n_total)
+            n_test = n_total - n_train - n_val  # Handle remainder
+            # Split indices into ranges
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.NamedSplit("train_rand"),
+                    gen_kwargs={
+                        "filepath": filepath,
+                        "shuffled_indices": indices,
+                        "split_start": 0,
+                        "split_end": n_train,
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.NamedSplit("validation_rand"),
+                    gen_kwargs={
+                        "filepath": filepath,
+                        "shuffled_indices": indices,
+                        "split_start": n_train,
+                        "split_end": n_train + n_val,
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.NamedSplit("test_rand"),
+                    gen_kwargs={
+                        "filepath": filepath,
+                        "shuffled_indices": indices,
+                        "split_start": n_train + n_val,
+                        "split_end": n_total,
+                    },
+                ),
+            ]
+
+    def _generate_examples(self, filepath, split=None, shuffled_indices=None, split_start=None, split_end=None):
         with open(filepath, encoding="utf-8") as f:
             reader = csv.DictReader(f)
-            data = list(reader)
-
-            if random_split:
-                return self._generate_examples_rand(data, split)
-
-            for idx, row in enumerate(data):
-                if row["split"] == split:
-                    del row["split"]  # Remove split column from examples
-                    yield idx, row
-
-    def _generate_examples_rand(self, data, split):
-        """Randomly splits the dataset into 80% train, 10% val, 10% test with a fixed seed"""
-        random.seed(42)
-        random.shuffle(data)
-
-        total = len(data)
-        train_end = int(0.8 * total)
-        val_end = train_end + int(0.1 * total)
-
-        split_map = {
-            "train_rand": data[:train_end],
-            "val_rand": data[train_end:val_end],
-            "test_rand": data[val_end:]
-        }
-
-        for idx, row in enumerate(split_map[split]):
-            yield idx, row
+            rows = list(reader)
+            if self.config.name == "raw":
+                for idx, row in enumerate(rows):  # iterate materialized rows; reader is already exhausted
+                    yield idx, row
+            elif self.config.name == "filtered":
+                for idx, row in enumerate(rows):
+                    if row["split"] == split:
+                        del row["split"]
+                        yield idx, row
+            elif self.config.name == "filtered_random":
+                # Iterate over the range [split_start, split_end) in shuffled_indices
+                for global_idx in range(split_start, split_end):
+                    original_idx = shuffled_indices[global_idx]
+                    row = rows[original_idx]
+                    del row["split"]  # Remove original split column
+                    yield global_idx, row  # Key is global shuffled index
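Because both split sizes use truncating int() casts, any remainder rows fall into test_rand (the n_test value computed in _split_generators). A standalone sketch of that arithmetic, using a hypothetical row count to check that the three index ranges are disjoint and cover the whole file:

    import random

    n_total = 1005                      # hypothetical number of filtered rows
    indices = list(range(n_total))
    random.Random(42).shuffle(indices)  # same fixed seed as the loader

    n_train = int(0.8 * n_total)        # 804
    n_val = int(0.1 * n_total)          # 100
    train = indices[:n_train]
    val = indices[n_train:n_train + n_val]
    test = indices[n_train + n_val:]    # 101 rows; the remainder lands in test

    assert len(train) + len(val) + len(test) == n_total
    assert set(train).isdisjoint(val) and set(val).isdisjoint(test)

Since every split slices the same shared permutation passed through gen_kwargs, the three random splits cannot overlap even if the split fractions are later changed.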