Dataset: ccdv/patent-classification
Tasks: Text Classification
Modalities: Text
Formats: parquet
Languages: English
Size: 10K - 100K
Tags: long context
import json
import os

import datasets
from datasets.tasks import TextClassification


_CITATION = None

_DESCRIPTION = """
Patent Classification Dataset: a classification of Patents (9 classes).
It contains 9 slightly unbalanced classes and 35k Patents and abstracts divided into 3 splits: train (25k), val (5k) and test (5k).
Copied from "Long Document Classification From Local Word Glimpses via Recurrent Attention Learning" by Jun He, Liqun Wang, Liu Liu, Jiao Feng and Hao Wu.
See: https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8675939
See: https://github.com/LiqunW/Long-document-dataset
"""
# The nine class names; the label strings must match the "label" field in the data files exactly.
_LABELS = [
    "Human Necessities",
    "Performing Operations; Transporting",
    "Chemistry; Metallurgy",
    "Textiles; Paper",
    "Fixed Constructions",
    "Mechanical Engineering; Lightning; Heating; Weapons; Blasting",
    "Physics",
    "Electricity",
    "General tagging of new or cross-sectional technology",
]
class PatentClassificationConfig(datasets.BuilderConfig):
    """BuilderConfig for PatentClassification."""

    def __init__(self, **kwargs):
        """BuilderConfig for PatentClassification.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(PatentClassificationConfig, self).__init__(**kwargs)
class PatentClassificationDataset(datasets.GeneratorBasedBuilder):
    """PatentClassification Dataset: classification of Patents (9 classes)."""

    _DOWNLOAD_URL = "https://huggingface.co/datasets/ccdv/patent-classification/resolve/main/"
    _TRAIN_FILE = "train_data.txt"
    _VAL_FILE = "val_data.txt"
    _TEST_FILE = "test_data.txt"
    _LABELS_DICT = {label: i for i, label in enumerate(_LABELS)}

    BUILDER_CONFIGS = [
        PatentClassificationConfig(
            name="patent",
            version=datasets.Version("1.0.0"),
            description="Patent Classification Dataset: A classification task of Patents (9 classes)",
        ),
        PatentClassificationConfig(
            name="abstract",
            version=datasets.Version("1.0.0"),
            description="Patent Classification Dataset: A classification task of Patents with abstracts (9 classes)",
        ),
    ]

    DEFAULT_CONFIG_NAME = "patent"
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=_LABELS),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )
    def _split_generators(self, dl_manager):
        train_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._TRAIN_FILE)
        val_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._VAL_FILE)
        test_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._TEST_FILE)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
        ]
    def _generate_examples(self, filepath):
        """Generate PatentClassification examples."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                # Each line is a JSON record with "label", "abstract" and "description" fields.
                data = json.loads(row)
                label = self._LABELS_DICT[data["label"]]
                # The "abstract" config yields the short abstract; the default "patent" config yields the full description.
                if self.config.name == "abstract":
                    text = data["abstract"]
                else:
                    text = data["description"]
                yield id_, {"text": text, "label": label}
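
For reference, a minimal loading sketch using the `datasets` library, based on the repository id and config names in the script above (the `trust_remote_code=True` argument is an assumption for recent `datasets` versions that gate script-based datasets; older versions do not need it):

```python
from datasets import load_dataset

# The default "patent" config returns full descriptions; "abstract" returns the shorter abstracts.
dataset = load_dataset("ccdv/patent-classification", "patent", trust_remote_code=True)

print(dataset)                                   # DatasetDict with train/validation/test splits
print(dataset["train"].features["label"].names)  # the 9 class names listed in _LABELS
print(dataset["train"][0]["text"][:200])         # first 200 characters of one document
```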