Connor Hoehn committed on
Commit a3b3c95 · Parent: d29ba5e

Initial commit.

hugging_face_dataset/README.md ADDED
@@ -0,0 +1,40 @@
+ ---
+ language:
+ - en
+ dataset_info:
+   features:
+   - name: image_id
+     dtype: int64
+   - name: image
+     dtype: image
+   - name: width
+     dtype: int32
+   - name: height
+     dtype: int32
+   - name: objects
+     list:
+     - name: category_id
+       dtype:
+         class_label:
+           names:
+             0: boxed
+             1: grid
+             2: spread
+             3: stack
+     - name: image_id
+       dtype: string
+     - name: id
+       dtype: int64
+     - name: area
+       dtype: int64
+     - name: bbox
+       sequence: float32
+       length: 4
+     - name: iscrowd
+       dtype: bool
+   config_name: card-detection
+   splits:
+   - name: train
+   download_size: 96890427
+   dataset_size: 0
+ ---
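For context, this card pairs with the loading script added below; here is a minimal, hedged usage sketch. The script path and the `card-detection` config name come from this commit, while the local-path invocation and split access are assumptions:

```python
from datasets import load_dataset

# Load via the loading script shipped in this commit; the local path is an
# assumption -- substitute the Hub repo id once the dataset is pushed.
ds = load_dataset("hugging_face_dataset/card_detector_dataset.py", name="card-detection")

example = ds["train"][0]
print(example["width"], example["height"])
print(example["objects"][0]["bbox"])  # 4-float bbox per the feature spec above
```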
hugging_face_dataset/card_detector_dataset.py ADDED
@@ -0,0 +1,137 @@
+ # Copyright 2022 Daniel van Strien.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Card Display Detection dataset."""
+
+ import collections
+ import json
+ import os
+ from typing import Any, Dict, List
+
+ import datasets
+
+ _CITATION = """Connor Hoehn"""
+
+ _DESCRIPTION = "This dataset consists of card display images from the public domain."
+
+ _HOMEPAGE = "https://www.connorhoehn.com"
+
+ _LICENSE = "Public Domain Mark 1.0"
+
+ _DATASET_URL = "https://www.connorhoehn.com/object_detection_dataset_v1.zip"
+
+ _CATEGORIES = ["boxed", "grid", "spread", "stack"]
+
+
+ class CardDisplayDetectorConfig(datasets.BuilderConfig):
+     """BuilderConfig for the card display dataset."""
+
+     def __init__(self, name, **kwargs):
+         super().__init__(
+             version=datasets.Version("1.0.0"),
+             name=name,
+             description="Card Display Detector",
+             **kwargs,
+         )
+
+
+ class CardDisplayDetector(datasets.GeneratorBasedBuilder):
+     """Card display detection dataset."""
+
+     BUILDER_CONFIGS = [
+         CardDisplayDetectorConfig("card-detection"),
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "image_id": datasets.Value("int64"),
+                 "image": datasets.Image(),
+                 "width": datasets.Value("int32"),
+                 "height": datasets.Value("int32"),
+             }
+         )
+         # Each image carries a list of COCO-style object annotations.
+         object_dict = {
+             "category_id": datasets.ClassLabel(names=_CATEGORIES),
+             "image_id": datasets.Value("string"),
+             "id": datasets.Value("int64"),
+             "area": datasets.Value("int64"),
+             "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+             "iscrowd": datasets.Value("bool"),
+         }
+         features["objects"] = [object_dict]
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dataset_zip = dl_manager.download_and_extract(_DATASET_URL)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # COCO layout -> result.json plus an images/ directory.
+                 gen_kwargs={
+                     "annotations_file": os.path.join(dataset_zip, "result.json"),
+                     "image_dir": os.path.join(dataset_zip, "images"),
+                 },
+             )
+         ]
+
+     def _get_image_id_to_annotations_mapping(
+         self, annotations: List[Dict]
+     ) -> Dict[int, List[Dict[Any, Any]]]:
+         """Build a mapping from image ids to their (possibly multiple) annotations."""
+         image_id_to_annotations = collections.defaultdict(list)
+         for annotation in annotations:
+             image_id_to_annotations[annotation["image_id"]].append(annotation)
+         return image_id_to_annotations
+
+     def _generate_examples(self, annotations_file, image_dir):
+         def _image_info_to_example(image_info, image_dir):
+             # image_info comes straight from the COCO annotation file.
+             image = image_info["file_name"]
+             return {
+                 "image_id": image_info["id"],
+                 "image": os.path.join(image_dir, image),
+                 "width": image_info["width"],
+                 "height": image_info["height"],
+             }
+
+         with open(annotations_file, encoding="utf8") as annotation_json:
+             annotation_data = json.load(annotation_json)
+
+         images = annotation_data["images"]
+         annotations = annotation_data["annotations"]
+
+         # Dictionary of image_ids with all related annotations (bboxes).
+         image_id_to_annotations = self._get_image_id_to_annotations_mapping(annotations)
+
+         if self.config.name == "card-detection":
+             for image_id, image_info in enumerate(images):
+                 # image_info -> (width, height, id, file_name)
+                 image_details = _image_info_to_example(image_info, image_dir)
+
+                 # Attach every annotation for this image as a nested list.
+                 objects = []
+                 for annotation in image_id_to_annotations[image_info["id"]]:
+                     objects.append(annotation)
+                 image_details["objects"] = objects
+
+                 yield image_id, image_details
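The loader reads `result.json` in COCO layout, as the comment in `_split_generators` notes. A hedged sketch of the minimal structure `_generate_examples` consumes; only the keys below are actually read, and every value is illustrative:

```python
# Illustrative only: keys mirror what _generate_examples reads from
# result.json; the concrete values are made up.
annotation_data = {
    "images": [
        {"id": 0, "file_name": "card_0001.jpg", "width": 1024, "height": 768},
    ],
    "annotations": [
        {
            "id": 7,
            "image_id": 0,
            "category_id": 3,  # index into _CATEGORIES -> "stack"
            "bbox": [112.0, 64.0, 300.0, 180.0],  # COCO [x, y, width, height]
            "area": 54000,
            "iscrowd": 0,
        },
    ],
}
```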
hugging_face_dataset/trainer_example.ipynb ADDED
@@ -0,0 +1,34 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "5a6c5d60-6e65-4a51-a0cf-81f96d540d05",
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "instance_type": "ml.t3.medium",
+   "kernelspec": {
+    "display_name": "Python 3 (PyTorch 1.6 Python 3.6 CPU Optimized)",
+    "language": "python",
+    "name": "python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/pytorch-1.6-cpu-py36-ubuntu16.04-v1"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.6.13"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
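The notebook ships with a single empty cell. A plausible first cell, sketched under the assumption that it will consume the dataset defined above; the PIL drawing step and the local script path are assumptions, not part of this commit:

```python
from datasets import load_dataset
from PIL import ImageDraw

ds = load_dataset("hugging_face_dataset/card_detector_dataset.py", name="card-detection")

# Draw the first bounding box of the first example, assuming COCO [x, y, w, h].
ex = ds["train"][0]
img = ex["image"].copy()
x, y, w, h = ex["objects"][0]["bbox"]
ImageDraw.Draw(img).rectangle([x, y, x + w, y + h], outline="red", width=3)
img
```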