# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SuperEurlex: EUR-Lex legal acts (text and metadata), organised by sector and language."""


import datasets
import pandas as pd


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """ """

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """ """

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

AVAILABLE_LANGUAGES = ['DE']  # further languages, e.g. 'EN', can be added once their data is available
SECTORS = ['1']  # further sectors: '2', '3', '4', '5', '6', '7', '8', '9', 'C', 'E'
AVAILABLE_FEATURES = {
    '1': datasets.Features({
        'celex_id': datasets.Value("string"),
        'text_html_raw': datasets.Value("string"),
        'text_html_cleaned': datasets.Value("string"),
        'text_cleaned': datasets.Value("string"),
        'form': datasets.Sequence(datasets.Value("string")),
        'subject_matter': datasets.Sequence(datasets.Value("string")),
        'current_consolidated_version': datasets.Sequence(datasets.Value("string")),
        'harmonisation_of_customs_law_community_transit': datasets.Sequence(datasets.Value("string")),
        'harmonisation_of_customs_law_customs_territory': datasets.Sequence(datasets.Value("string")),
        'harmonisation_of_customs_law_value_for_customs_purposes': datasets.Sequence(datasets.Value("string")),
        'directory_code': datasets.Sequence(datasets.Value("string")),
        'eurovoc': datasets.Sequence(datasets.Value("string")),
        'customs_duties_community_tariff_quotas': datasets.Sequence(datasets.Value("string")),
        'customs_duties_authorisation_to_defer_application_of_cct': datasets.Sequence(datasets.Value("string")),
        'harmonisation_of_customs_law_various': datasets.Sequence(datasets.Value("string")),
        'customs_duties_suspensions': datasets.Sequence(datasets.Value("string")),
    })
}
SECTOR_DESCRIPTIONS = {
    '1': ""
}
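
# To enable another sector, add its key to SECTORS together with matching entries
# in AVAILABLE_FEATURES and SECTOR_DESCRIPTIONS, e.g. (a sketch; assumes sector '2'
# also carries the basic text columns):
#
#   SECTORS.append('2')
#   AVAILABLE_FEATURES['2'] = datasets.Features({
#       'celex_id': datasets.Value("string"),
#       'text_html_raw': datasets.Value("string"),
#   })
#   SECTOR_DESCRIPTIONS['2'] = ""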


class SuperEurlexConfig(datasets.BuilderConfig):
    """BuilderConfig for SuperEurlex."""

    def __init__(self, sector, language, features, citation, url, **kwargs):
        """BuilderConfig for SuperEurlex.

        Args:
            sector: *string*, sector of the data to load, e.g. '1'
            language: *string*, language code of the texts, e.g. 'DE'
            features: *datasets.Features*, feature schema of this sector
            citation: *string*, citation for the data set.
            url: *string*, url for information about the data set.
            **kwargs: keyword arguments forwarded to super.
        """
        name = f"{sector}.{language}"
        super().__init__(name=name, version=datasets.Version("0.1.0"), **kwargs)
        self.features = features
        self.language = language
        self.sector = sector
        self.text_data_url = f"text_data/{language}/{sector}.parquet"
        self.meta_data_url = f"meta_data/{sector}.parquet"
        self.citation = citation
        self.url = url
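
# Example (a sketch): a config's name and parquet paths follow directly from
# its sector and language:
#
#   cfg = SuperEurlexConfig(sector='1', language='DE',
#                           features=AVAILABLE_FEATURES['1'],
#                           citation=_CITATION, url=_HOMEPAGE)
#   assert cfg.name == '1.DE'
#   assert cfg.text_data_url == 'text_data/DE/1.parquet'
#   assert cfg.meta_data_url == 'meta_data/1.parquet'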

class SuperEurlex(datasets.GeneratorBasedBuilder):
    """EUR-Lex legal acts with their metadata, organised by sector and language."""

    VERSION = datasets.Version("1.1.0")

    # Each sector/language pair is exposed as its own configuration and can be
    # loaded by name:
    # data = datasets.load_dataset('super_eurlex', '1.DE')

    BUILDER_CONFIGS = [
        SuperEurlexConfig(
            sector=sect,
            language=lang,
            description=SECTOR_DESCRIPTIONS[sect],
            features=AVAILABLE_FEATURES[sect],
            citation=_CITATION,
            url=_HOMEPAGE,
        )
        for lang in AVAILABLE_LANGUAGES
        for sect in SECTORS
    ]

    DEFAULT_CONFIG_NAME = "3.DE"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    def _info(self):
        # The feature schema depends on the sector selected via the configuration.
        features = AVAILABLE_FEATURES[self.config.sector]
        return datasets.DatasetInfo(
            # Description shown on the dataset page
            description=_DESCRIPTION,
            # Column names and types; these differ between sectors (see AVAILABLE_FEATURES)
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The configuration selected by the user (self.config) determines which
        # parquet files to fetch. dl_manager.download_and_extract accepts any
        # nested dict/list of URLs and returns the same structure with local paths.
        urls = {'text': self.config.text_data_url,
                'meta': self.config.meta_data_url}
        data_dir = dl_manager.download_and_extract(urls)
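        # Example (a sketch): for the '1.DE' configuration, `urls` is
        #   {'text': 'text_data/DE/1.parquet', 'meta': 'meta_data/1.parquet'}
        # and `data_dir` mirrors this dict with local cache paths as values.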
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "text": data_dir['text'],
                    "meta": data_dir['meta'],
                    "language": self.config.language,
                    "sector": self.config.sector,
                    "split": "train",
                },
            )
        ]
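
    # If validation or test data is ever added, further splits would follow the
    # same pattern (a sketch; assumes matching parquet files exist):
    #
    #   datasets.SplitGenerator(
    #       name=datasets.Split.VALIDATION,
    #       gen_kwargs={"text": ..., "meta": ..., "language": self.config.language,
    #                   "sector": self.config.sector, "split": "validation"},
    #   )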

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, text, meta, sector, language, split):
        # Yields (key, example) tuples. The key exists for legacy reasons (tfds)
        # and is not important in itself, but must be unique for each example.

        print("Reading Text Data...")
        text_data = pd.read_parquet(text)#, lines=True)
        text_data['celex_id'] = text_data['celex_id'].apply(lambda x: x[0] if isinstance(x,list) else x)
        print("Reading Meta Data...")
        meta_data = pd.read_parquet(meta)#, lines=True)
        meta_data['celex_id'] = meta_data['celex_id'].apply(lambda x: x[0] if isinstance(x, list) else x)
        print("Combining Text & Meta Data...")
        combined_data = pd.merge(text_data, meta_data, on='celex_id')
        print("Converting To final dataset...")
        dataset = datasets.Dataset.from_pandas(combined_data)
        dataset = dataset.remove_columns('__index_level_0__')#.cache_files()
        for i, sample in enumerate(dataset):
            yield i, sample
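
    # Design note: materialising an intermediate datasets.Dataset is convenient but
    # not required; an equivalent variant (a sketch) would yield directly from the
    # merged dataframe:
    #
    #   for i, row in enumerate(combined_data.to_dict(orient='records')):
    #       yield i, row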



print("Hello World")
if __name__ == '__main__':
    # Smoke test: load this script as a dataset and iterate over the train split
    # to make sure every example can be generated.
    import sys

    import datasets as ds

    dataset = ds.load_dataset(sys.argv[0], '1.DE')
    print(dataset)
    for sample in dataset['train']:
        pass