# csqa-sparqltotext / csqa-sparqltotext.py
import os
import json

import datasets

_CITATION = r"""@inproceedings{lecorve2022sparql2text,
  title={SPARQL-to-Text Question Generation for Knowledge-Based Conversational Applications},
  author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
  booktitle={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
  year={2022}
}
"""
_HOMEPAGE = ""

_DESCRIPTION = """\
Special version of CSQA for the SPARQL-to-Text task
"""

_URLS = {
    "all": "json/csqa_sparql_to_text.tar.gz"
}

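# Minimal usage sketch (illustrative, not part of the original script; the
# path below stands for wherever this file is checked out):
#
#     from datasets import load_dataset
#     ds = load_dataset("path/to/csqa-sparqltotext.py")
#     print(ds["train"][0]["turns"][0]["utterance"])
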
class CSQA(datasets.GeneratorBasedBuilder):
    """
    Complex Sequential Question Answering dataset
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # datasets.features.FeatureConnectors
            # Raw per-turn fields observed in the JSON files:
            #   "active_set", "all_entities", "bool_ques_type",
            #   "count_ques_sub_type", "count_ques_type", "description",
            #   "entities", "entities_in_utterance", "gold_actions",
            #   "inc_ques_type", "is_inc", "is_incomplete", "is_spurious",
            #   "masked_verbalized_answer", "parsed_active_set",
            #   "ques_type_id", "question-type", "relations",
            #   "sec_ques_sub_type", "sec_ques_type", "set_op_choice",
            #   "set_op", "sparql_query", "speaker", "type_list", "utterance",
            #   "verbalized_all_entities", "verbalized_answer",
            #   "verbalized_entities_in_utterance", "verbalized_gold_actions",
            #   "verbalized_parsed_active_set", "verbalized_sparql_query",
            #   "verbalized_triple", "verbalized_type_list"
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "turns": [
                        {
                            "id": datasets.Value("int64"),
                            "ques_type_id": datasets.Value("int64"),
                            "question-type": datasets.Value("string"),
                            "description": datasets.Value("string"),
                            "entities_in_utterance": [datasets.Value("string")],
                            "relations": [datasets.Value("string")],
                            "type_list": [datasets.Value("string")],
                            "speaker": datasets.Value("string"),
                            "utterance": datasets.Value("string"),
                            "all_entities": [datasets.Value("string")],
                            "active_set": [datasets.Value("string")],
                            "sec_ques_sub_type": datasets.Value("int64"),
                            "sec_ques_type": datasets.Value("int64"),
                            "set_op_choice": datasets.Value("int64"),
                            "is_inc": datasets.Value("int64"),
                            "count_ques_sub_type": datasets.Value("int64"),
                            "count_ques_type": datasets.Value("int64"),
                            "is_incomplete": datasets.Value("int64"),
                            "inc_ques_type": datasets.Value("int64"),
                            "set_op": datasets.Value("int64"),
                            "bool_ques_type": datasets.Value("int64"),
                            "entities": [datasets.Value("string")],
                            "clarification_step": datasets.Value("int64"),
                            "gold_actions": [[datasets.Value("string")]],
                            "is_spurious": datasets.Value("bool"),
                            "masked_verbalized_answer": datasets.Value("string"),
                            "parsed_active_set": [datasets.Value("string")],
                            "sparql_query": datasets.Value("string"),
                            "verbalized_all_entities": [datasets.Value("string")],
                            "verbalized_answer": datasets.Value("string"),
                            "verbalized_entities_in_utterance": [datasets.Value("string")],
                            "verbalized_gold_actions": [[datasets.Value("string")]],
                            "verbalized_parsed_active_set": [datasets.Value("string")],
                            "verbalized_sparql_query": datasets.Value("string"),
                            "verbalized_triple": datasets.Value("string"),
                            "verbalized_type_list": [datasets.Value("string")]
                        }
                    ]
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
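    # Shape of one yielded example (illustrative values only, not taken from
    # the data):
    #
    #     ("some_dialog_id:0", {
    #         "id": "some_dialog_id:0",
    #         "turns": [
    #             {"speaker": "USER", "utterance": "...",
    #              "sparql_query": "...", ...},
    #             {"speaker": "SYSTEM", "utterance": "...", ...},
    #         ],
    #     })
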
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Downloads the data and defines the splits.
        # dl_manager is a datasets.download.DownloadManager that can be used
        # to download and extract URLs.
        downloaded_files = dl_manager.download_and_extract(_URLS)
        train_path = os.path.join(downloaded_files["all"], "csqa_sparql_to_text/train/")
        test_path = os.path.join(downloaded_files["all"], "csqa_sparql_to_text/test/")
        valid_path = os.path.join(downloaded_files["all"], "csqa_sparql_to_text/valid/")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path, "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": valid_path, "split": "valid"},
            ),
        ]
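    # Layout of the extracted archive, as implied by the paths above and by
    # the os.walk in _generate_examples (one directory per dialogue, holding
    # one or more JSON files):
    #
    #     csqa_sparql_to_text/
    #         train/<dialog_id>/*.json
    #         valid/<dialog_id>/*.json
    #         test/<dialog_id>/*.json
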
    def _generate_examples(self, filepath, split):
        """Yields examples."""
        # Yields (key, example) tuples from the dataset.
        def _transform(x):
            # Default value for every field declared in _info, so that all
            # turns expose the same set of keys regardless of question type.
            pattern = {
                "id": None,
                "ques_type_id": None,
                "question-type": "",
                "description": "",
                "entities_in_utterance": [],
                "relations": [],
                "type_list": [],
                "speaker": "",
                "utterance": "",
                "all_entities": [],
                "active_set": [],
                "sec_ques_sub_type": None,
                "sec_ques_type": None,
                "set_op_choice": None,
                "is_inc": None,
                "count_ques_sub_type": None,
                "count_ques_type": None,
                "is_incomplete": None,
                "inc_ques_type": None,
                "set_op": None,
                "bool_ques_type": None,
                "entities": [],
                "clarification_step": None,
                "gold_actions": [],
                "is_spurious": None,
                "masked_verbalized_answer": None,
                "parsed_active_set": [],
                "sparql_query": None,
                "verbalized_all_entities": [],
                "verbalized_answer": None,
                "verbalized_entities_in_utterance": [],
                "verbalized_gold_actions": [],
                "verbalized_parsed_active_set": [],
                "verbalized_sparql_query": None,
                # Declared as a string feature in _info, so default to None
                # rather than [] (an empty list would not cast to a string).
                "verbalized_triple": None,
                "verbalized_type_list": []
            }
            pattern.update(x)
            return pattern
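        # For instance (illustrative values, not taken from the data):
        #     _transform({"utterance": "Yes"})
        # returns a dict where "utterance" is "Yes" and every other declared
        # field holds its default ("" / [] / None).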
        for root, dirs, files in os.walk(filepath):
            # Each leaf directory holds the JSON files of one dialogue.
            dialog_id = os.path.basename(root)
            for i, filename in enumerate(files):
                sample_id = "%s:%s" % (dialog_id, i)
                with open(os.path.join(root, filename), "r") as f:
                    data = json.load(f)
                # Normalize every turn to the full schema declared in _info.
                data = [_transform(x) for x in data]
                yield sample_id, {
                    "id": sample_id,
                    "turns": data
                }
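
if __name__ == "__main__":
    # Local smoke test (a sketch, not part of the original loader): build all
    # splits from this script and print them. Assumes the tarball referenced
    # in _URLS is reachable relative to this script.
    ds = datasets.load_dataset(__file__)
    print(ds)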