Datasets:
File size: 3,595 Bytes
d5a7eca 4cb2660 d5a7eca d498db6 4cb2660 d498db6 4cb2660 d498db6 d5a7eca 4cb2660 d5a7eca 4bd5cd3 d5a7eca c4c086e d5a7eca 416b705 d5a7eca caa450f d5a7eca 4cb2660 df2e7e1 4cb2660 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 |
import datasets
from datasets import load_dataset, Dataset, Value, Sequence, Features, DatasetInfo, GeneratorBasedBuilder, Image
from pathlib import Path
import os
import pandas as pd
import json
_DESCRIPTION = """\ The PatFig Dataset is a curated collection of over 18,000 patent images from more than 7,
000 European patent applications, spanning the year 2020. It aims to provide a comprehensive resource for research
and applications in image captioning, abstract reasoning, patent analysis, and automated documentprocessing. The
overarching goal of this dataset is to advance the research in visually situated language understanding towards more
hollistic consumption of the visual and textual data.
"""
_BASE_URL = "https://huggingface.co/datasets/lcolonn/patfig/resolve/main/"
_METADATA_URLS = {
"annotations_train": "train/annotations_train.zip",
"annotations_test": "test/annotations_test.zip"
}
_IMAGES_URLS = {
"test_images": "train/train_images.tar.gz",
"train_images": "test/test_images.tar.gz",
}
_URLS = {
"train_images": "train/train_images.tar.gz",
"test_images": "test/test_images.tar.gz",
"annotations_train": "train/annotations_train.zip",
"annotations_test": "test/annotations_test.zip",
}
class PatFig(GeneratorBasedBuilder):
    """DatasetBuilder for the PatFig patent-figure dataset.

    Downloads per-split image tarballs and JSON annotation archives from the
    Hugging Face Hub and yields one example per figure image.
    """

    def _info(self):
        """Return the dataset metadata, including the full feature schema."""
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "image": Image(),
                "image_name": Value("string"),
                "pub_number": Value("string"),
                "title": Value("string"),
                "figs_norm": Sequence(feature=Value("string"), length=-1),
                "short_description": Sequence(feature=Value("string"), length=-1),
                "long_description": Sequence(feature=Value("string"), length=-1),
                "short_description_token_count": Value("int64"),
                "long_description_token_count": Value("int64"),
                "draft_class": Value("string"),
                "cpc_class": Value("string"),
                # List of {element_identifier, terms} records per figure.
                "relevant_terms": [{'element_identifier': Value("string"), "terms": Sequence(feature=Value("string"), length=-1)}],
                "associated_claims": Value("string"),
                "compound": Value("bool"),
                "references": Sequence(feature=Value(dtype='string'), length=-1),
            }),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download and extract all archives, then define the train/test splits.

        Each split receives the extracted image directory and the path of its
        annotations JSON file as gen_kwargs for `_generate_examples`.
        """
        urls_to_download = {key: _BASE_URL + fname for key, fname in _URLS.items()}
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"images_dir": f'{downloaded_files["train_images"]}/train', "annotations_dir": f'{downloaded_files["annotations_train"]}/annotations_train.json'}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"images_dir": f'{downloaded_files["test_images"]}/test', "annotations_dir": f'{downloaded_files["annotations_test"]}/annotations_test.json'}
            ),
        ]

    def _generate_examples(self, images_dir: str, annotations_dir: str):
        """Yield (key, example) pairs for one split.

        Args:
            images_dir: Root directory of the extracted images; figures are
                stored under ``<images_dir>/<pub_number>/<image_name>``.
            annotations_dir: Path to the split's annotations JSON file, mapping
                an example id to its record dict (despite the ``_dir`` name,
                this is a file path).
        """
        # FIX: force UTF-8 — JSON is UTF-8 by spec, and the platform default
        # encoding (e.g. cp1252 on Windows) could make this load fail.
        with open(annotations_dir, "r", encoding="utf-8") as f:
            data = json.load(f)
        for idx, record in data.items():
            image_path = os.path.join(images_dir, record["pub_number"], record["image_name"])
            # "image" first so the record's own fields (image_name, title, ...)
            # are spread in after it; record keys would win on any overlap.
            yield idx, {
                "image": image_path,
                **record,
            }
|