shamikbose89 committed on
Commit
0a1e07a
·
1 Parent(s): 49be6ad

Upload old_bailey_proceedings.py

Browse files
Files changed (1) hide show
  1. old_bailey_proceedings.py +163 -0
old_bailey_proceedings.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import datasets
17
+ import glob
18
+ import xml.etree.ElementTree as ET
19
+
20
# BibTeX entry for the paper associated with this release of the corpus
# ("Living Machines: A study of atypical animacy", Coll Ardanuy et al., 2020).
_CITATION = """
@misc{Old Bailey Proceedings,
  author = {Mariona Coll Ardanuy and
            Federico Nanni and
            Kaspar Beelen and
            Kasra Hosseini and
            Ruth Ahnert and
            Jon Lawrence and
            Katherine McDonough and
            Giorgia Tolfo and
            Daniel C. S. Wilson and
            Barbara McGillivray},
  title = {Living Machines: {A} study of atypical animacy},
  journal = {CoRR},
  volume = {abs/2005.11140},
  year = {2020},
  url = {https://arxiv.org/abs/2005.11140},
  eprinttype = {arXiv},
  eprint = {2005.11140},
  timestamp = {Sat, 23 Jan 2021 01:12:25 +0100},
  biburl = {https://dblp.org/rec/journals/corr/abs-2005-11140.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""


# Short human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """The dataset consists of 2,163 transcriptions of the Proceedings and 475 Ordinary's Accounts marked up in TEI-XML,
and contains some documentation covering the data structure and variables. Each Proceedings file represents one session of the court (1674-1913),
and each Ordinary's Account file represents a single pamphlet (1676-1772)
"""

# Project homepage for the Old Bailey Online corpus.
_HOMEPAGE = "https://www.dhi.ac.uk/projects/old-bailey/"

_DATASETNAME = "old_bailey_proceedings"

_LICENSE = "Creative Commons Attribution 4.0 International"

# Single zip archive containing both subcorpora (sessionsPapers/ and ordinarysAccounts/).
_URLS = {
    _DATASETNAME: "https://www.dhi.ac.uk/san/data/oldbailey/oldbailey.zip",
}

# Module-level logger routed through the `datasets` logging machinery.
logger = datasets.utils.logging.get_logger(__name__)
62
+
63
+
64
class OldBaileyProceedings(datasets.GeneratorBasedBuilder):
    """The dataset consists of 2,163 transcriptions of the Proceedings and 475 Ordinary's Accounts marked up in TEI-XML,
    and contains some documentation covering the data structure and variables. Each Proceedings file represents one session of the court (1674-1913),
    and each Ordinary's Account file represents a single pamphlet (1676-1772)"""

    VERSION = datasets.Version("7.2.0")

    def _info(self):
        """Return DatasetInfo: string id/text/type/date plus string sequences of places and persons."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "places": datasets.Sequence(datasets.Value("string")),
                "type": datasets.Value("string"),
                "persons": datasets.Sequence(datasets.Value("string")),
                "date": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; expose one TRAIN split covering both subcorpora."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)
        oa_dir = "ordinarysAccounts"
        obp_dir = "sessionsPapers"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dirs": {
                        "OA": os.path.join(data_dir, oa_dir),
                        "OBP": os.path.join(data_dir, obp_dir),
                    },
                },
            ),
        ]

    def convert_text_to_features(self, file, key):
        """Parse one TEI-XML file into an example dict.

        Args:
            file: path of the XML file to parse.
            key: subcorpus tag, "OA" (Ordinary's Accounts) or "OBP" (Proceedings);
                selects where the text paragraphs live in the TEI tree.

        Returns:
            (0, example_dict) on success, or (-1, repr(error)) when the file
            cannot be parsed — callers decide how to handle failures.
        """
        # Ordinary's Accounts keep <p> directly under div0; Proceedings nest them in <div1>.
        root_tag = "p" if key == "OA" else "div1/p"
        try:
            root = ET.parse(file).getroot()
            start = root.find("./text/body/div0")
            doc_id = start.attrib["id"]  # renamed from `id` to avoid shadowing the builtin
            date = start.find("interp[@type='date']").attrib["value"]
            text_parts = []
            places, persons = [], []
            for content in start.findall(root_tag):
                for place in content.findall("placeName"):
                    if place.text:
                        place_name = place.text.replace("\n", "").strip()
                        if place_name:
                            # Fix: append the cleaned name; the original appended the raw
                            # place.text, leaking the newlines/whitespace it had just stripped.
                            places.append(place_name)
                for person in content.findall("persName"):
                    # A persName may be split across nested tags; reassemble the full name.
                    full_name = []
                    for name_part in person.itertext():
                        name_part = name_part.replace("\n", "").replace("\t", "").strip()
                        if name_part:
                            full_name.append(name_part)
                    if full_name:
                        persons.append(" ".join(full_name))
                for text_snippet in content.itertext():
                    text_snippet = text_snippet.replace("\n", "").replace("\t", "").strip()
                    if text_snippet:
                        text_parts.append(text_snippet)
            full_text = " ".join(text_parts)
            return 0, {
                "id": doc_id,
                "date": date,
                "type": key,
                "places": places,
                "persons": persons,
                "text": full_text,
            }
        except Exception as e:
            # Report malformed files to the caller instead of aborting the whole run.
            return -1, repr(e)

    def _generate_examples(self, data_dirs):
        """Yield (id, example) pairs from every *.xml file in both subcorpus directories."""
        for key, data_dir in data_dirs.items():
            for file in glob.glob(os.path.join(data_dir, "*.xml")):
                status_code, ret_val = self.convert_text_to_features(file, key)
                if status_code:
                    # Log and skip unparseable files. (Removed a leftover input() call
                    # that blocked generation on stdin for every bad file, and switched
                    # the deprecated logger.warn alias to logger.warning.)
                    logger.warning(f"{file}:{ret_val}")
                    continue
                yield ret_val["id"], ret_val