import csv
import random

import datasets


class ppb_affinity(datasets.GeneratorBasedBuilder):
    """Dataset builder for the PPB-Affinity protein-protein binding data.

    Two configurations are declared:
      * ``raw``      — raw parsed PDBs with critical filtrations only.
      * ``filtered`` — additionally cleaned data with train/val/test splits.

    Six splits are exposed: ``train``/``validation``/``test`` use the
    precomputed ``split`` column of the CSV, while ``train_rand``/
    ``validation_rand``/``test_rand`` are drawn from a deterministic
    80/10/10 random shuffle (fixed seed 42).
    """

    VERSION = datasets.Version("1.0.2")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="raw",
            description="Raw parsed PDBs dataset with critical filtrations only.",
        ),
        datasets.BuilderConfig(
            name="filtered",
            description="Raw dataset with additional cleaning and train/val/test splits.",
        ),
    ]

    def _info(self):
        # Features are not declared explicitly; they are inferred from the
        # CSV columns at generation time.
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        """Download the CSV and define the six dataset splits."""
        filepath = dl_manager.download_and_extract("filtered.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": filepath, "split": "val"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepath, "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.NamedSplit("train_rand"),
                gen_kwargs={"filepath": filepath, "split": "train_rand", "random_split": True},
            ),
            datasets.SplitGenerator(
                name=datasets.NamedSplit("validation_rand"),
                gen_kwargs={"filepath": filepath, "split": "val_rand", "random_split": True},
            ),
            datasets.SplitGenerator(
                name=datasets.NamedSplit("test_rand"),
                gen_kwargs={"filepath": filepath, "split": "test_rand", "random_split": True},
            ),
        ]

    def _generate_examples(self, filepath, split=None, random_split=False):
        """Yield ``(key, example)`` pairs for the requested split.

        Args:
            filepath: Path to the downloaded CSV file.
            split: Split name ("train"/"val"/"test" or "*_rand").
            random_split: If True, delegate to the random 80/10/10 splitter.
        """
        with open(filepath, encoding="utf-8") as f:
            data = list(csv.DictReader(f))
        if random_split:
            # BUG FIX: the original used `return self._generate_examples_rand(...)`.
            # Because this function contains `yield`, it is a generator function,
            # so that `return` terminated the generator immediately and the
            # random splits produced zero examples. `yield from` delegates
            # to the sub-generator correctly.
            yield from self._generate_examples_rand(data, split)
            return
        for idx, row in enumerate(data):
            if row["split"] == split:
                del row["split"]  # Remove bookkeeping column from examples
                yield idx, row

    def _generate_examples_rand(self, data, split):
        """Yield examples from a deterministic 80/10/10 random split.

        Args:
            data: All CSV rows as a list of dicts.
            split: One of "train_rand", "val_rand", "test_rand".
        """
        # Use a local Random instance so global random state is not mutated;
        # seeding with 42 reproduces the exact same shuffle as the original
        # `random.seed(42); random.shuffle(data)` sequence.
        rng = random.Random(42)
        rng.shuffle(data)
        total = len(data)
        train_end = int(0.8 * total)
        val_end = train_end + int(0.1 * total)
        split_map = {
            "train_rand": data[:train_end],
            "val_rand": data[train_end:val_end],
            "test_rand": data[val_end:],
        }
        for idx, row in enumerate(split_map[split]):
            # Drop the precomputed split column for consistency with the
            # non-random splits; after shuffling it refers to the original
            # assignment, not this random one, so keeping it would mislead.
            row.pop("split", None)
            yield idx, row