lcolonn committed on
Commit
d5a7eca
·
unverified ·
1 Parent(s): f4fb9aa

feat: add loading script

Browse files
Files changed (1) hide show
  1. patfig.py +69 -0
patfig.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ from datasets import load_dataset, Dataset, Value, Sequence, Features, DatasetInfo, GeneratorBasedBuilder, Image
3
+
4
+ from pathlib import Path
5
+ import os
6
+ import pandas as pd
7
+
8
+ _DESCRIPTION = """\ The PatFig Dataset is a curated collection of over 18,000 patent images from more than 7,
9
+ 000 European patent applications, spanning the year 2020. It aims to provide a comprehensive resource for research
10
+ and applications in image captioning, abstract reasoning, patent analysis, and automated documentprocessing. The
11
+ overarching goal of this dataset is to advance the research in visually situated language understanding towards more
12
+ hollistic consumption of the visual and textual data.
13
+ """
14
+
15
+ _URL = "https://huggingface.co/datasets/lcolonn/patfig/resolve/main/"
16
+ _URLS = {
17
+ "train_images": "train/train_images.tar.gz",
18
+ "test_images": "test/test_images.tar.gz",
19
+ "annotations_train": "train/annotations_train.parquet",
20
+ "annotations_test": "test/annotations_test.parquet",
21
+ }
22
+
23
+
24
class PatFig(GeneratorBasedBuilder):
    """DatasetBuilder for the PatFig patent-figure dataset.

    Downloads per-split image tarballs and parquet annotation tables
    (see ``_URLS``) and yields one example per annotated figure.
    """

    def _info(self):
        """Return the DatasetInfo with the feature schema.

        The schema must match the columns stored in the annotation parquet
        files, plus the ``image`` feature resolved from the extracted tarball.
        """
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "image": Image(),
                "image_name": Value("string"),
                "pub_number": Value("string"),
                "title": Value("string"),
                "figs_norm": Sequence(feature=Value("string"), length=-1),
                "short_description": Sequence(feature=Value("string"), length=-1),
                "long_description": Sequence(feature=Value("string"), length=-1),
                "short_description_token_count": Value("int64"),
                "long_description_token_count": Value("int64"),
                "draft_class": Value("string"),
                "cpc_class": Value("string"),
                # List of {element_identifier, terms} records per figure.
                "relevant_terms": [{"element_identifier": Value("string"), "terms": Sequence(feature=Value("string"), length=-1)}],
                "associated_claims": Value("string"),
                "compound": Value("bool"),
                "references": Sequence(feature=Value(dtype="string"), length=-1),
            }),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download/extract all artifacts and define the train/test splits."""
        # Resolve the relative artifact paths against the repo base URL.
        urls_to_download = {key: _URL + fname for key, fname in _URLS.items()}
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images_dir": downloaded_files["train_images"],
                    "annotations_dir": downloaded_files["annotations_train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images_dir": downloaded_files["test_images"],
                    "annotations_dir": downloaded_files["annotations_test"],
                },
            ),
        ]

    def _generate_examples(self, images_dir: str, annotations_dir: str):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            images_dir: Directory with the extracted image tarball contents.
            annotations_dir: Path to the split's parquet annotation file.
        """
        # BUG FIX: the annotation files are parquet (see _URLS), so they must
        # be read with read_parquet; the original used read_csv, which cannot
        # parse a parquet file.
        df = pd.read_parquet(annotations_dir)

        for idx, row in df.iterrows():
            # Images appear to be laid out as <pub_number>/<image_name> under
            # the extracted archive root — TODO confirm against the tarball.
            image_path = os.path.join(images_dir, row["pub_number"], row["image_name"])
            # NOTE(review): if the annotation table ever contains an "image"
            # column, row.to_dict() would override image_path here.
            yield idx, {
                "image": image_path,
                **row.to_dict(),
            }