# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SuperEurlex: EUR-Lex legal documents organised by CELEX sector and language,
paired with per-document metadata."""

import numpy as np
import pandas as pd

import datasets

from constants import _CITATION, _HOMEPAGE, _LICENSE, _DESCRIPTION
from constants import SECTOR_DESCRIPTIONS, AVAILABLE_FEATURES, AVAILABLE_LANGUAGES, SECTORS
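
# NOTE: `constants.py` is assumed to sit next to this script (it is imported as
# a plain module above); it provides the citation, homepage, license, and
# description text, plus the sector/language/feature tables used below.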


class SuperEurlexConfig(datasets.BuilderConfig):
    """BuilderConfig for SuperEurlex."""

    def __init__(self, sector, language, features, citation, url, **kwargs):
        """BuilderConfig for SuperEurlex.

        Args:
            sector: *string*, CELEX sector the documents are drawn from.
            language: *string*, language code of the documents' text.
            features: *list[string]*, list of the features that will appear in
                the feature dict.
            citation: *string*, citation for the dataset.
            url: *string*, URL with information about the dataset.
            **kwargs: keyword arguments forwarded to super.
        """
        name = f"{sector}.{language}"
        super().__init__(name=name, version=datasets.Version("0.1.0"), **kwargs)
        self.features = features
        self.language = language
        self.sector = sector
        self.text_data_url = f"text_data/{language}/{sector}.parquet"
        self.meta_data_url = f"meta_data/{sector}.parquet"
        self.citation = citation
        self.url = url


class SuperEurlex(datasets.GeneratorBasedBuilder):
    """EUR-Lex legal documents with metadata. Each (sector, language) pair is
    exposed as its own configuration, built by SuperEurlexConfig above."""

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        SuperEurlexConfig(
            sector=sect,
            language=lang,
            description=SECTOR_DESCRIPTIONS[sect],
            features=AVAILABLE_FEATURES[sect],
            citation=_CITATION,
            url=_HOMEPAGE,
        )
        for lang in AVAILABLE_LANGUAGES
        for sect in SECTORS
    ]
    DEFAULT_CONFIG_NAME = "3.DE"  # A default configuration is not mandatory; set one only if it makes sense.
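
    # A minimal usage sketch (assuming this script is saved as `super_eurlex.py`;
    # point `load_dataset` at the actual script path or repository id otherwise):
    #
    #   import datasets
    #   data = datasets.load_dataset("super_eurlex.py", "3.DE")  # sector 3, German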

    def _info(self):
        # Build the datasets.DatasetInfo object, which carries the metadata and
        # feature typings for the dataset.
        features = AVAILABLE_FEATURES[self.config.sector]
        info = datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the columns of the dataset and their types. The
            # features are looked up per sector because they differ between
            # configurations.
            features=features,
            # If there is a common (input, target) tuple among the features,
            # uncomment the supervised_keys line below and specify them; they
            # will be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset, for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset, if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )
        return info

    def _split_generators(self, dl_manager):
        # Downloads/extracts the data and defines the splits. The configuration
        # selected by the user (from BUILDER_CONFIGS) is available as self.config.
        # dl_manager is a datasets.download.DownloadManager: it accepts any
        # nested list/dict of URLs and returns the same structure with each URL
        # replaced by a local file path; archives are extracted and the cached
        # extraction folder is returned instead of the archive.
        urls = {
            "text": self.config.text_data_url,
            "meta": self.config.meta_data_url,
        }
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "text": data_dir["text"],
                    "meta": data_dir["meta"],
                    "language": self.config.language,
                    "sector": self.config.sector,
                    "split": "train",
                },
            )
        ]
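
    # Only a single "train" split is published for every configuration; if
    # validation/test sets are needed, re-split downstream, e.g. with the
    # standard `Dataset.train_test_split(...)` method.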

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, text, meta, sector, language, split):
        # Yields (key, example) tuples. The key exists for legacy (tfds) reasons
        # and is not important in itself, but must be unique for each example.
        #
        # Depending on how the parquet files were written, `celex_id` may come
        # back as a scalar, a list, or a numpy array, so normalise every variant
        # to a plain string before merging.
        text_data = pd.read_parquet(text)
        text_data['celex_id'] = text_data['celex_id'].apply(
            lambda x: str(x[0]) if isinstance(x, (list, np.ndarray)) else x)
        meta_data = pd.read_parquet(meta)
        meta_data['celex_id'] = meta_data['celex_id'].apply(
            lambda x: str(x[0]) if isinstance(x, (list, np.ndarray)) else x)

        # Join text and metadata on the document's CELEX identifier.
        combined_data = pd.merge(text_data, meta_data, on='celex_id')
        dataset = datasets.Dataset.from_pandas(combined_data)
        # `from_pandas` only adds this bookkeeping column for some indexes, so
        # drop it defensively rather than unconditionally.
        if '__index_level_0__' in dataset.column_names:
            dataset = dataset.remove_columns('__index_level_0__')
        for i, sample in enumerate(dataset):
            yield i, sample
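
    # NOTE: Round-tripping through `datasets.Dataset.from_pandas` keeps Arrow's
    # type inference for the merged frame. An (untested) leaner alternative
    # would be to yield rows straight from the DataFrame:
    #
    #   for i, row in combined_data.iterrows():
    #       yield i, row.to_dict()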


if __name__ == '__main__':
    # Smoke test: run this file directly to load every (sector, language)
    # configuration through `load_dataset` and print the result.
    import sys

    import datasets as ds

    print(sys.argv[0])
    for sector in SECTORS:
        for lang in AVAILABLE_LANGUAGES:
            print(f'{sector}.{lang}')
            # sys.argv[0] is the path of this script, so the loader executes it
            # as a local dataset loading script.
            dataset = ds.load_dataset(sys.argv[0], f'{sector}.{lang}')
            print(dataset)
            print('\n')