import re
import xml.etree.ElementTree as ET

import datasets
|
_CITATION = """\
@inproceedings{muzny2017two,
    title={A two-stage sieve approach for quote attribution},
    author={Muzny, Grace and Fang, Michael and Chang, Angel and Jurafsky, Dan},
    booktitle={Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers},
    pages={460--470},
    year={2017}
}
"""
|
_DESCRIPTION = """\
This dataset is a representation of Muzny et al.'s QuoteLi3 dataset as a Hugging Face dataset. It is best suited for
quote attribution.
"""
|
_HOMEPAGE = "https://nlp.stanford.edu/~muzny/quoteli.html"

_LICENSE = ""

_URL = "http://downloads.cs.stanford.edu/nlp/data/quoteattribution/"
# "pp" is Pride and Prejudice; the test split also includes the full texts of
# Austen's Emma and Chekhov's The Steppe.
_URLs = {
    "train": {"pp": _URL + "pp_full.xml"},
    "test": {
        "pp": "https://nlp.stanford.edu/~muzny/data/pp_test.xml",
        "emma": _URL + "austen_emma_full.xml",
        "steppe": _URL + "chekhov_steppe_full.xml",
    },
}
|
|
class QuoteLi3(datasets.GeneratorBasedBuilder): |
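    """Builder for the QuoteLi3 quote-attribution dataset; yields quotes or characters depending on the config."""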
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="quotes", version=VERSION, description="Returns quotes"),
        datasets.BuilderConfig(name="characters", version=VERSION, description="Returns characters"),
    ]

    DEFAULT_CONFIG_NAME = "quotes"
|
def _info(self): |
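        """Defines the feature schema for the selected config ("quotes" or "characters")."""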
        if self.config.name == "quotes":
            features = datasets.Features(
                {
                    "mention": datasets.Value("string"),
                    "oid": datasets.Value("string"),
                    "speaker": datasets.Value("string"),
                    "connection": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "answer_mention": {
                        "answer": datasets.Value("string"),
                        "answer_start": datasets.Value("int16"),
                        "answer_end": datasets.Value("int16"),
                        "answer_in_context": datasets.Value("bool"),
                    },
                    "question": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "large_context": datasets.Value("string"),
                    "book_title": datasets.Value("string"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "aliases": datasets.Sequence(datasets.Value("string")),
                    "description": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "book_title": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
def _split_generators(self, dl_manager): |
        """Returns SplitGenerators."""
        downloaded_files = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"], "split": "test"},
            ),
        ]
|
    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        for key, path in filepath.items():
            with open(path, encoding="utf-8") as f:
                quote_list = []
                file_tree = ET.parse(f)
                base_tree = file_tree.getroot()
                chapter_list = base_tree.find("text").findall("chapter")
                # Quotes either live inside <chapter> elements or directly under <text>.
                if len(chapter_list) != 0:
                    for chapter in chapter_list:
                        quote_list.extend(chapter.findall("quote"))
                else:
                    quote_list = base_tree.find("text").findall("quote")

            if self.config.name == "quotes":
                for quote in quote_list:
                    quote_key = key + "_" + quote.attrib["id"]
                    mention, search_text = self.find_mention(quote, path)
                    context = self.get_context(quote, path)
                    large_context = self.get_context(quote, path, 4000)
                    answer_mention_start = context.find(search_text)
                    answer_mention_end = answer_mention_start + len(mention)
                    if mention != "NO_MENTION" and answer_mention_start >= 0:
                        answer_mention = {
                            "answer": mention,
                            "answer_start": answer_mention_start,
                            "answer_end": answer_mention_end,
                            "answer_in_context": True,
                        }
                    else:
                        answer_mention = {
                            "answer": mention,
                            "answer_start": 0,
                            "answer_end": 0,
                            "answer_in_context": False,
                        }
                    yield quote_key, {
                        "mention": quote.attrib.get("mention", "no_mention"),
                        "oid": quote.attrib.get("oid", "no_oid"),
                        "speaker": quote.attrib.get("speaker", "no_speaker"),
                        "connection": quote.attrib.get("connection", "no_connection"),
                        "id": quote.attrib.get("id", "no_id"),
                        "answer": "" if split == "test" else quote.attrib.get("speaker", "no_speaker"),
                        "answer_mention": answer_mention,
                        "question": "Who says 'QUOTE'",
                        "context": context,
                        "large_context": large_context,
                        "book_title": key,
                    }
            else:
                character_list = base_tree.find("characters").findall("character")
                for character in character_list:
                    character_key = key + "_" + character.attrib["id"]
                    yield character_key, {
                        # An empty list keeps the Sequence(string) feature type intact.
                        "aliases": character.attrib["aliases"].split() if "aliases" in character.attrib else [],
                        "description": character.attrib.get("description", "no_description"),
                        "gender": character.attrib.get("gender", "no_gender"),
                        "name": character.attrib.get("name", "no_name"),
                        "id": character.attrib.get("id", "no_id"),
                        "book_title": key,
                    }
|
def find_mention(self, quote_element, filename): |
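        """Finds the mention linked to the quote via its `connection` attribute.

        Returns a (mention_text, search_text) pair, where search_text adds a
        little trailing text so the mention can be located in the context, or
        ('NO_MENTION', 'NO_MENTION') if no linked mention is found.
        """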
        connection = quote_element.attrib["connection"]
        file_tree = ET.parse(filename)
        base_tree = file_tree.getroot()
        mentions_list = []
        text = base_tree.find("text")
        chapters = text.findall("chapter")
        # Mentions can appear at chapter level, at text level, and nested
        # inside <quote> elements; collect them all.
        if len(chapters) > 0:
            for chapter in chapters:
                mentions_list.extend(chapter.findall("mention"))
                for quote in chapter.findall("quote"):
                    mentions_list.extend(quote.findall("mention"))
        else:
            mentions_list = text.findall("mention")
            for quote in text.findall("quote"):
                mentions_list.extend(quote.findall("mention"))
        mention_text = ""
        mention_tail = ""
        for mention in mentions_list:
            if mention.attrib["id"] in connection:
                # `text` and `tail` can be None in ElementTree; fall back to "".
                mention_text = mention.text or ""
                mention_tail = mention.tail or ""
                break
        if mention_text == "":
            return "NO_MENTION", "NO_MENTION"
        # Append at most 25 characters of trailing text for disambiguation.
        if len(mention_tail) > 25:
            mention_tail = mention_tail[:25]
        search_text = mention_text + mention_tail
        return mention_text, search_text
|
def get_context(self, quote_element, filename, max_range=1000): |
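        """Returns roughly `max_range` characters of context around the quote, which is replaced by a "QUOTE" marker."""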
        chapter_text = self.get_texts_by_file(filename)
        quote = self.get_quote_content(quote_element)
        # Replace the quote with the marker *before* locating it, so the
        # computed indices refer to the same string that gets sliced below.
        chapter_text = chapter_text.replace(quote, '"QUOTE"').replace("\n", " ")
        start_index = chapter_text.find('"QUOTE"')

        pre = max_range // 2
        post = max_range - pre
        if start_index < pre:
            # The quote is near the start (or was not found): take the leading window.
            start = 0
            end = max_range
        else:
            start = start_index - pre
            end = start_index + post
        return chapter_text[start:end]
|
def get_texts_by_file(self, filename): |
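        """Returns the plain text of the XML file with all tags stripped."""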
        file_tree = ET.parse(filename)
        base_tree = file_tree.getroot()
        text_with_tags = ET.tostring(base_tree, encoding="unicode", method="xml")
        # Strip all XML tags, keeping only the document text.
        text_without_tags = re.sub("<.*?>", "", text_with_tags)
        return text_without_tags
|
def get_quote_content(self, quote): |
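        """Returns the text content of a <quote> element, including text inside nested tags."""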
        quote_text_tags = ET.tostring(quote, encoding="unicode", method="xml")
        # Drop the opening <quote ...> tag, cut at the closing tag, then strip
        # nested tags (e.g. <mention>) while keeping their text.
        quote_text = re.sub("<quote.*?>", "", quote_text_tags)
        end_of_quote = quote_text.find("</quote>")
        quote_text = quote_text[:end_of_quote]
        quote_text = re.sub("<.*?>", "", quote_text)
        return quote_text