# ppb_affinity/ppb_affinity.py
import csv
import random

import datasets

class ppb_affinity(datasets.GeneratorBasedBuilder):
    """Builder for the ppb_affinity dataset: binding affinity records parsed
    from PDBs, exposed in raw and filtered configurations."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="raw", description="Raw parsed PDBs dataset with critical filtrations only."),
        datasets.BuilderConfig(name="raw_rec", description="Raw parsed PDBs dataset with critical filtrations and missing residues recovered."),
        datasets.BuilderConfig(name="filtered", description="Raw dataset with additional cleaning and train/val/test splits."),
        datasets.BuilderConfig(name="filtered_random", description="Filtered dataset with random 80-10-10 splits."),
    ]
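
    # Config selection sketch (the repo id is an assumption inferred from the
    # file path; substitute the actual Hub dataset id):
    #
    #     ds = datasets.load_dataset("shaffei/ppb_affinity", name="filtered")
    #     train, val, test = ds["train"], ds["validation"], ds["test"]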

    def _info(self):
        # No explicit schema is declared; `datasets` infers the features from
        # the first rows yielded by `_generate_examples`.
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        # Single-split configs: each raw CSV is exposed in full as "train".
        if self.config.name in ("raw", "raw_rec"):
            data_file = "raw.csv" if self.config.name == "raw" else "raw_recover_missing_res.csv"
            filepath = dl_manager.download_and_extract(data_file)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": filepath},
                )
            ]
        elif self.config.name == "filtered":
            # Predefined splits: a "split" column in filtered.csv assigns each
            # row to train, val, or test.
            filepath = dl_manager.download_and_extract("filtered.csv")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": filepath, "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": filepath, "split": "val"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": filepath, "split": "test"},
                ),
            ]
        elif self.config.name == "filtered_random":
            # Random 80/10/10 splits: shuffle the row indices once with a
            # fixed seed, then give each split a contiguous slice of the
            # shuffled index list.
            filepath = dl_manager.download_and_extract("filtered.csv")
            with open(filepath, encoding="utf-8") as f:
                rows = list(csv.DictReader(f))
            n_total = len(rows)
            indices = list(range(n_total))
            rng = random.Random(42)  # fixed seed keeps the partition reproducible
            rng.shuffle(indices)
            n_train = int(0.8 * n_total)
            n_val = int(0.1 * n_total)
            # The test split takes whatever remains after train and val.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": filepath,
                        "shuffled_indices": indices,
                        "split_start": 0,
                        "split_end": n_train,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": filepath,
                        "shuffled_indices": indices,
                        "split_start": n_train,
                        "split_end": n_train + n_val,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": filepath,
                        "shuffled_indices": indices,
                        "split_start": n_train + n_val,
                        "split_end": n_total,
                    },
                ),
            ]
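
    # The 80/10/10 partition above is deterministic: reshuffling range(n)
    # with random.Random(42) reproduces the same order, so e.g. the test rows
    # can be recovered outside the builder (illustrative sketch, assumes a
    # local copy of filtered.csv):
    #
    #     with open("filtered.csv", encoding="utf-8") as f:
    #         n = len(list(csv.DictReader(f)))
    #     order = list(range(n))
    #     random.Random(42).shuffle(order)
    #     test_rows = order[int(0.8 * n) + int(0.1 * n):]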

    def _generate_examples(self, filepath, split=None, shuffled_indices=None, split_start=None, split_end=None):
        with open(filepath, encoding="utf-8") as f:
            rows = list(csv.DictReader(f))
        if self.config.name in ("raw", "raw_rec"):
            # Raw configs: yield every row as-is.
            for idx, row in enumerate(rows):
                yield idx, row
        elif self.config.name == "filtered":
            # Keep only rows whose "split" column matches the requested split,
            # and drop the bookkeeping column before yielding.
            for idx, row in enumerate(rows):
                if row["split"] == split:
                    del row["split"]
                    yield idx, row
        elif self.config.name == "filtered_random":
            # Emit the rows addressed by the [split_start, split_end) slice of
            # the shuffled index list; global_idx keys stay unique per split.
            for global_idx in range(split_start, split_end):
                row = rows[shuffled_indices[global_idx]]
                del row["split"]
                yield global_idx, row
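
# Minimal usage sketch (the repo id is an assumption inferred from this file's
# path on the Hub; substitute the real dataset id):
#
#     from datasets import load_dataset
#
#     raw = load_dataset("shaffei/ppb_affinity", name="raw")               # single "train" split
#     rand = load_dataset("shaffei/ppb_affinity", name="filtered_random")  # seeded 80/10/10 splits
#     print({s: rand[s].num_rows for s in rand})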