import datasets
import os
import numpy as np


class DBP1M_EN_FR_Config(datasets.BuilderConfig):
    """BuilderConfig for the DBP1M EN-FR entity-alignment dataset.

    Carries the per-config feature names and the URL of the zip archive
    holding that config's raw files.
    """

    def __init__(self, features, data_url, citation, url,
                 label_classes=("False", "True"), **kwargs):
        """
        Args:
            features: list of column names exposed by this config.
            data_url: URL of the zip archive with the raw data files.
            citation: citation string for the dataset (currently "TODO").
            url: homepage URL for the dataset (currently "TODO").
            label_classes: class labels; unused by the loader itself but
                kept for interface compatibility.
            **kwargs: forwarded to ``datasets.BuilderConfig``.
        """
        super(DBP1M_EN_FR_Config, self).__init__(
            version=datasets.Version("0.0.1"), **kwargs
        )
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class DBP1M_EN_FR_(datasets.GeneratorBasedBuilder):
    """Loader for DBP1M EN-FR knowledge-graph alignment data.

    Three configs:
      * ``source`` / ``target`` -- entity ids, relation ids and relation
        triples of each knowledge graph (tab-separated text files).
      * ``pairs``  -- aligned entity-id pairs (train/valid/test).
    """

    BUILDER_CONFIGS = [
        DBP1M_EN_FR_Config(
            name="source",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url="https://huggingface.co/datasets/matchbench/DBP1M-EN-FR/resolve/main/DBP1M-EN-FR-src.zip",
        ),
        DBP1M_EN_FR_Config(
            name="target",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url="https://huggingface.co/datasets/matchbench/DBP1M-EN-FR/resolve/main/DBP1M-EN-FR-tgt.zip",
        ),
        DBP1M_EN_FR_Config(
            name="pairs",
            features=["left_id", "right_id"],
            citation="TODO",
            url="TODO",
            data_url="https://huggingface.co/datasets/matchbench/DBP1M-EN-FR/resolve/main/DBP1M-EN-FR-pairs.zip",
        ),
    ]

    def _info(self):
        """Declare the feature schema: strings for graph files, int32 for pairs."""
        if self.config.name in ["source", "target"]:
            features = {
                feature: datasets.Value("string")
                for feature in self.config.features
            }
        elif self.config.name == "pairs":
            features = {
                feature: datasets.Value("int32")
                for feature in self.config.features
            }
        return datasets.DatasetInfo(features=datasets.Features(features))

    def _split_generators(self, dl_manager):
        """Download/extract the config's archive and map files to splits."""
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
        if self.config.name == "source":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ent_ids_1"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "rel_ids_1"),
                        "split": "rel_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_1"),
                        "split": "rel_triples",
                    },
                ),
            ]
        elif self.config.name == "target":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ent_ids_2"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "rel_ids_2"),
                        "split": "rel_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_2"),
                        "split": "rel_triples",
                    },
                ),
            ]
        elif self.config.name == "pairs":
            # NOTE(review): "valid" and "test" both read the same file
            # "ill_ent_ids" while "train" reads "ill_ent_ids_train" —
            # confirm this matches the archive layout upstream.
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ill_ent_ids_train"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name="valid",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ill_ent_ids"),
                        "split": "valid",
                    },
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ill_ent_ids"),
                        "split": "test",
                    },
                ),
            ]

    def _generate_examples(self, data_file, split):
        """Yield (key, example) pairs from one tab-separated data file.

        Fixes over the previous version:
          * the file is opened with a context manager (it was previously
            never closed — a file-handle leak);
          * for the "pairs" config the ids are cast to int to match the
            ``Value("int32")`` schema declared in ``_info``.
        """
        with open(data_file, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                row = line.strip("\n").split("\t")
                if self.config.name in ["source", "target"]:
                    if split in ["ent_ids", "rel_ids"]:
                        # id files have two columns; column3 is padded with
                        # None (schema declares it as a nullable string).
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": None,
                        }
                    elif split in ["rel_triples"]:
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2],
                        }
                if self.config.name == "pairs":
                    yield i, {
                        "left_id": int(row[0]),
                        "right_id": int(row[1]),
                    }