Felix-ML committed on
Commit
9ae7bab
·
1 Parent(s): 40097d3

First version of quoteli3 dataset

Browse files
Files changed (1) hide show
  1. QuoteLi3.py +245 -0
QuoteLi3.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TODO: Add a description here."""
16
+
17
+
18
+ import csv
19
+ import json
20
+ import os
21
+
22
+ import datasets
23
+ from nlp import DatasetInfo, BuilderConfig, SplitGenerator, Split, utils
24
+
25
+ import xml.etree.ElementTree as ET
26
+ import re
27
+
28
# BibTeX entry for the originating EACL 2017 paper (Muzny et al.).
_CITATION = """\
@inproceedings{muzny2017two,
title={A two-stage sieve approach for quote attribution},
author={Muzny, Grace and Fang, Michael and Chang, Angel and Jurafsky, Dan},
booktitle={Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers},
pages={460--470},
year={2017}
}
"""

# Short human-readable description surfaced in DatasetInfo.
_DESCRIPTION = """\
This dataset is a representation of Muzny et al.'s QuoteLi3 dataset as a Huggingface dataset
"""

# Project page of the QuoteLi3 corpus.
_HOMEPAGE = "https://nlp.stanford.edu/~muzny/quoteli.html"

# No explicit license is published with the corpus.
_LICENSE = ""

# Base URL hosting the full-novel annotated XML files.
_URL = 'http://downloads.cs.stanford.edu/nlp/data/quoteattribution/'
# Per-split download map (nested: split -> book key -> URL).
# train: Pride & Prejudice (full); test: P&P test split plus Emma and The Steppe.
_URLs = {
    'train': {'pp': _URL + 'pp_full.xml'},
    'test': {'pp': 'https://nlp.stanford.edu/~muzny/data/pp_test.xml',
             'emma': _URL + 'austen_emma_full.xml',
             'steppe': _URL + 'chekhov_steppe_full.xml'}
}
53
+
54
class QuoteLi3(datasets.GeneratorBasedBuilder):
    """Quote-attribution dataset builder for Muzny et al.'s QuoteLi3 corpus.

    Two configurations:
      * ``quotes``     -- one example per <quote> element, with speaker,
                          mention and a ~1000-character text context.
      * ``characters`` -- one example per <character> element, with name,
                          aliases, gender and description.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="quotes", version=VERSION, description="Returns Quotes"),
        datasets.BuilderConfig(name="characters", version=VERSION, description="Returns Characters"),
    ]

    DEFAULT_CONFIG_NAME = "quotes"

    def _info(self):
        """Declare the feature schema for the active configuration."""
        if self.config.name == "quotes":  # returns quotes
            features = datasets.Features(
                {
                    "mention": datasets.Value("string"),
                    "oid": datasets.Value("string"),
                    "speaker": datasets.Value("string"),
                    "connection": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "answer_mention": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "context": datasets.Value("string"),
                }
            )
        else:  # returns characters
            features = datasets.Features(
                {
                    "aliases": datasets.Sequence(datasets.Value("string")),
                    "description": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "name": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the annotated XML corpora and declare train/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"], "split": "test"},
            ),
        ]

    def _get_root(self, filename):
        """Parse *filename* once and cache its XML root.

        The original code re-parsed the same file for every quote (in
        ``find_mention``, ``get_context`` and ``get_texts_by_file``), which
        made generation accidentally quadratic in file size.
        """
        cache = getattr(self, "_tree_cache", None)
        if cache is None:
            cache = self._tree_cache = {}
        if filename not in cache:
            cache[filename] = ET.parse(filename).getroot()
        return cache[filename]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples.

        ``filepath`` is a dict mapping a book key (e.g. 'pp') to a local XML
        path; ``split`` is 'train' or 'test' (answers are blanked for test).
        """
        for key, path in filepath.items():
            base_tree = self._get_root(path)
            text_node = base_tree.find("text")
            # Quotes are either nested under <chapter> elements or direct
            # children of <text>, depending on the book.
            chapter_list = text_node.findall("chapter")
            if chapter_list:
                quote_list = [q for chapter in chapter_list for q in chapter.findall("quote")]
            else:
                quote_list = text_node.findall("quote")

            if self.config.name == "quotes":
                for quote in quote_list:
                    quote_key = key + "_" + quote.attrib["id"]
                    # Resolve the mention once per quote (was computed twice,
                    # and a dead debug-string branch has been removed).
                    answer_mention = self.find_mention(quote, path)
                    yield quote_key, {
                        "mention": quote.attrib.get("mention", "no_mention"),
                        "oid": quote.attrib.get("oid", "no_oid"),
                        "speaker": quote.attrib.get("speaker", "no_speaker"),
                        "connection": quote.attrib.get("connection", "no_connection"),
                        "id": quote.attrib.get("id", "no_id"),
                        # Gold speaker is withheld on the test split.
                        "answer": "" if split == "test" else quote.attrib["speaker"],
                        "answer_mention": answer_mention,
                        "question": "Who says 'QUOTE'",
                        "context": self.get_context(quote, path),
                    }
            else:
                character_list = base_tree.find("characters").findall("character")
                for character in character_list:
                    character_key = key + "_" + character.attrib["id"]
                    yield character_key, {
                        # The schema declares Sequence(string): the fallback
                        # must be a list, not the bare string 'no_aliases'.
                        "aliases": (
                            character.attrib["aliases"].split()
                            if "aliases" in character.attrib
                            else ["no_aliases"]
                        ),
                        "description": character.attrib.get("description", "no_description"),
                        "gender": character.attrib.get("gender", "no_gender"),
                        "name": character.attrib.get("name", "no_name"),
                        "id": character.attrib.get("id", "no_id"),
                    }

    def find_mention(self, quote_element, filename):
        """Return the text of the mention referenced by *quote_element*'s
        'connection' attribute, or 'NO_MENTION' when none can be resolved.

        Mentions are collected both as direct children of <text>/<chapter>
        and nested inside <quote> elements, preserving document order.
        """
        connection = quote_element.attrib["connection"]
        base_tree = self._get_root(filename)
        text = base_tree.find("text")
        chapters = text.findall("chapter")
        # With chapters, scan each chapter; otherwise scan <text> directly.
        scopes = chapters if chapters else [text]
        mentions_list = []
        for scope in scopes:
            mentions_list.extend(scope.findall("mention"))
            # mentions that occur inside quotes
            for quote in scope.findall("quote"):
                mentions_list.extend(quote.findall("mention"))

        mention_text = ""
        mention_tail = ""
        for mention in mentions_list:
            # ElementTree attribute values are always str, so the original
            # per-character fallback branch was unreachable and is dropped.
            # Substring matching keeps the original loose id-in-connection
            # semantics.
            if mention.attrib["id"] in connection:
                mention_text = mention.text or ""  # text can be None
                mention_tail = mention.tail or ""  # tail is None at element ends (crashed len() before)
                break
        # An empty tail doubles as the "nothing matched" sentinel, as before.
        if mention_tail == "":
            return "NO_MENTION"
        return mention_text

    def get_context(self, quote_element, filename):
        """Return ~1000 characters of surrounding text, with the quote itself
        replaced by the placeholder '"QUOTE"' and newlines flattened."""
        chapter_text = self.get_texts_by_file(filename)
        # maximum range for the context in characters
        max_range = 1000
        pre = 500  # characters kept before the quote start
        post = max_range - pre
        quote = self.get_quote_content(quote_element)
        # str.find returns -1 when absent, which falls into the
        # start-of-document branch below (context = first max_range chars).
        start_index = chapter_text.find(quote)
        if start_index < pre:
            start, end = 0, max_range
        else:
            start, end = start_index - pre, start_index + post

        chapter_text = chapter_text.replace(quote, '"QUOTE"').replace("\n", " ")
        return chapter_text[start:end]

    def get_texts_by_file(self, filename):
        """Return the full document text of *filename* with all XML tags stripped."""
        base_tree = self._get_root(filename)
        text_with_tags = ET.tostring(base_tree, encoding="unicode", method="xml")
        return re.sub("<.*?>", "", text_with_tags)  # delete all tags

    def get_quote_content(self, quote):
        """Return the plain-text content of a <quote> element, dropping the
        enclosing tag and any nested markup."""
        quote_text_tags = ET.tostring(quote, encoding="unicode", method="xml")
        quote_text = re.sub("<quote.*?>", "", quote_text_tags)
        # Everything up to the closing tag (matches original slicing).
        quote_text = quote_text[: quote_text.find("</quote>")]
        return re.sub("<.*?>", "", quote_text)