albertvillanova HF staff committed on
Commit
f001950
·
verified ·
1 Parent(s): 24c2ae4

Delete loading script

Browse files
Files changed (1) hide show
  1. squad_it.py +0 -116
squad_it.py DELETED
@@ -1,116 +0,0 @@
1
- """TODO(squad_it): Add a description here."""
2
-
3
-
4
- import json
5
-
6
- import datasets
7
- from datasets.tasks import QuestionAnsweringExtractive
8
-
9
-
10
- # TODO(squad_it): BibTeX citation
11
- _CITATION = """\
12
- @InProceedings{10.1007/978-3-030-03840-3_29,
13
- author={Croce, Danilo and Zelenanska, Alexandra and Basili, Roberto},
14
- editor={Ghidini, Chiara and Magnini, Bernardo and Passerini, Andrea and Traverso, Paolo",
15
- title={Neural Learning for Question Answering in Italian},
16
- booktitle={AI*IA 2018 -- Advances in Artificial Intelligence},
17
- year={2018},
18
- publisher={Springer International Publishing},
19
- address={Cham},
20
- pages={389--402},
21
- isbn={978-3-030-03840-3}
22
- }
23
- """
24
-
25
- # TODO(squad_it):
26
- _DESCRIPTION = """\
27
- SQuAD-it is derived from the SQuAD dataset and it is obtained through semi-automatic translation of the SQuAD dataset
28
- into Italian. It represents a large-scale dataset for open question answering processes on factoid questions in Italian.
29
- The dataset contains more than 60,000 question/answer pairs derived from the original English dataset. The dataset is
30
- split into training and test sets to support the replicability of the benchmarking of QA systems:
31
- """
32
-
33
- _URL = "https://github.com/crux82/squad-it/raw/master/"
34
- _URLS = {
35
- "train": _URL + "SQuAD_it-train.json.gz",
36
- "test": _URL + "SQuAD_it-test.json.gz",
37
- }
38
-
39
-
40
class SquadIt(datasets.GeneratorBasedBuilder):
    """Dataset builder for SQuAD-it, the Italian translation of SQuAD.

    TODO(squad_it): Short description of my dataset.
    """

    # TODO(squad_it): Set up version.
    VERSION = datasets.Version("0.1.0")

    def _info(self):
        """Return the DatasetInfo describing features, homepage and citation."""
        # TODO(squad_it): Specifies the datasets.DatasetInfo object
        answer_feature = datasets.features.Sequence(
            {
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            }
        )
        qa_features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": answer_feature,
            }
        )
        extractive_task = QuestionAnsweringExtractive(
            question_column="question", context_column="context", answers_column="answers"
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=qa_features,
            # No canonical (input, target) pairing is declared for as_supervised=True.
            supervised_keys=None,
            homepage="https://github.com/crux82/squad-it",
            citation=_CITATION,
            task_templates=[extractive_task],
        )

    def _split_generators(self, dl_manager):
        """Download both splits and return one SplitGenerator per split."""
        # dl_manager resolves the remote gzipped JSON files to local paths.
        local_paths = dl_manager.download_and_extract(_URLS)
        split_plan = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
        )
        return [
            datasets.SplitGenerator(name=split_name, gen_kwargs={"filepath": local_paths[key]})
            for split_name, key in split_plan
        ]

    def _generate_examples(self, filepath):
        """Yield (id, example) pairs from one SQuAD-format JSON file."""
        with open(filepath, encoding="utf-8") as handle:
            payload = json.load(handle)
        for article in payload["data"]:
            for paragraph in article["paragraphs"]:
                passage = paragraph["context"].strip()
                for qa_item in paragraph["qas"]:
                    example_id = qa_item["id"]
                    starts = [ans["answer_start"] for ans in qa_item["answers"]]
                    texts = [ans["text"].strip() for ans in qa_item["answers"]]
                    yield example_id, {
                        "context": passage,
                        "question": qa_item["question"].strip(),
                        "id": example_id,
                        "answers": {
                            "answer_start": starts,
                            "text": texts,
                        },
                    }