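"""Hugging Face `datasets` loading script for the ppb_affinity dataset.

Three configurations are exposed (summarized from the config descriptions below):
  * raw             -- every parsed PDB row, critical filtrations only, as one TRAIN split.
  * filtered        -- cleaned rows routed into train/val/test via the CSV's "split" column.
  * filtered_random -- the same cleaned rows, reshuffled into seeded random 80/10/10 splits.
"""
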
import csv
import random

import datasets


class ppb_affinity(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.2")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="raw", description="Raw parsed PDBs dataset with critical filtrations only."),
        datasets.BuilderConfig(name="filtered", description="Raw dataset with additional cleaning and train/val/test splits."),
        datasets.BuilderConfig(name="filtered_random", description="Filtered dataset with random 80-10-10 splits."),
    ]
    def _info(self):
        # No explicit schema is declared; features are inferred from the CSV
        # header when examples are generated.
        return datasets.DatasetInfo()
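    # A hypothetical explicit schema could be declared instead of relying on
    # inference; the column names and types below are illustrative assumptions,
    # not confirmed by this file:
    #
    #   datasets.DatasetInfo(
    #       features=datasets.Features({
    #           "pdb_id": datasets.Value("string"),
    #           "affinity": datasets.Value("float32"),
    #       })
    #   )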
    def _split_generators(self, dl_manager):
        if self.config.name == "raw":
            # The raw config exposes every row as a single TRAIN split.
            filepath = dl_manager.download_and_extract("raw.csv")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": filepath},
                )
            ]
        elif self.config.name == "filtered":
            # filtered.csv carries a "split" column; each generator filters
            # rows on its value.
            filepath = dl_manager.download_and_extract("filtered.csv")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": filepath, "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": filepath, "split": "val"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": filepath, "split": "test"},
                ),
            ]
        elif self.config.name == "filtered_random":
            filepath = dl_manager.download_and_extract("filtered.csv")
            # Count the data rows once so the 80/10/10 boundaries can be
            # computed; the fixed seed makes the shuffle reproducible across
            # runs, so all three generators see the same index order.
            with open(filepath, encoding="utf-8", newline="") as f:
                n_total = sum(1 for _ in csv.DictReader(f))
            indices = list(range(n_total))
            rng = random.Random(42)
            rng.shuffle(indices)
            n_train = int(0.8 * n_total)
            n_val = int(0.1 * n_total)
            # The test split takes the remainder, so the three windows always
            # cover all n_total rows.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": filepath,
                        "shuffled_indices": indices,
                        "split_start": 0,
                        "split_end": n_train,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": filepath,
                        "shuffled_indices": indices,
                        "split_start": n_train,
                        "split_end": n_train + n_val,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": filepath,
                        "shuffled_indices": indices,
                        "split_start": n_train + n_val,
                        "split_end": n_total,
                    },
                ),
            ]
    def _generate_examples(self, filepath, split=None, shuffled_indices=None, split_start=None, split_end=None):
        # newline="" is the csv-module convention for files opened in text mode.
        with open(filepath, encoding="utf-8", newline="") as f:
            reader = csv.DictReader(f)
            rows = list(reader)
        if self.config.name == "raw":
            for idx, row in enumerate(rows):
                yield idx, row
        elif self.config.name == "filtered":
            # Keep only rows belonging to the requested split and drop the
            # bookkeeping column before yielding.
            for idx, row in enumerate(rows):
                if row["split"] == split:
                    del row["split"]
                    yield idx, row
        elif self.config.name == "filtered_random":
            # Walk the shuffled index order within this split's [start, end) window.
            for global_idx in range(split_start, split_end):
                row = rows[shuffled_indices[global_idx]]
                del row["split"]
                yield global_idx, row
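
# Usage sketch (illustrative only; the repository id below is a placeholder,
# not confirmed by this file). Recent versions of `datasets` also require
# trust_remote_code=True to run a script-based dataset like this one:
#
#   from datasets import load_dataset
#
#   raw = load_dataset("user/ppb_affinity", "raw")            # single "train" split
#   filtered = load_dataset("user/ppb_affinity", "filtered")  # train/validation/test
#   print(filtered["train"][0])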