Modalities: Tabular, Text
Formats: parquet
Languages: English
ArXiv: 2011.04124
Libraries: Datasets, pandas

Commit 26c68f2 (verified) by albertvillanova (HF staff) · Parent: c0bab4d

Delete loading script

Files changed (1):
  1. gutenberg_time.py +0 -108
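
With the loading script deleted, the repository no longer ships Python that runs at load time; the data is served as Hub-hosted files (the card lists parquet as the format) and loads through the standard `datasets` API. A minimal sketch, assuming this repository's dataset id is `gutenberg_time` and that the single train split defined by the old script is preserved:

from datasets import load_dataset

# Load directly from the Hub-hosted data files; no repository code is executed.
# "gutenberg_time" is an assumed dataset id for this repo.
ds = load_dataset("gutenberg_time", split="train")

# Columns mirror the features declared in the deleted script:
# guten_id, hour_reference, time_phrase, is_ambiguous,
# time_pos_start, time_pos_end, tok_context
print(ds[0]["time_phrase"], ds[0]["hour_reference"])
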
gutenberg_time.py DELETED
@@ -1,108 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Recognizing the flow of time in a story is a crucial aspect of understanding it. Prior work related to time has primarily focused on identifying temporal expressions or relative sequencing of events, but here we propose computationally annotating each line of a book with wall clock times, even in the absence of explicit time-descriptive phrases. To do so, we construct a data set of hourly time phrases from 52,183 fictional books."""
-
-
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """\
-@misc{kim2020time,
-      title={What time is it? Temporal Analysis of Novels},
-      author={Allen Kim and Charuta Pethe and Steven Skiena},
-      year={2020},
-      eprint={2011.04124},
-      archivePrefix={arXiv},
-      primaryClass={cs.CL}
-}
-"""
-
-_DESCRIPTION = """\
-A clean data resource containing all explicit time references in a dataset of 52,183 novels whose full text is available via Project Gutenberg.
-"""
-
-_HOMEPAGE = "https://github.com/allenkim/what-time-is-it"
-
-_LICENSE = "[More Information needed]"
-
-# The HuggingFace dataset library don't host the datasets but only point to the original files
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLs = {
-    "gutenberg": "https://github.com/TevenLeScao/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true",
-}
-
-
-class GutenbergTime(datasets.GeneratorBasedBuilder):
-    """Novel extracts with time-of-the-day information"""
-
-    VERSION = datasets.Version("1.1.3")
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="gutenberg", description="Data pulled from the Gutenberg project"),
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "guten_id": datasets.Value("string"),
-                "hour_reference": datasets.Value("string"),
-                "time_phrase": datasets.Value("string"),
-                "is_ambiguous": datasets.Value("bool_"),
-                "time_pos_start": datasets.Value("int64"),
-                "time_pos_end": datasets.Value("int64"),
-                "tok_context": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        my_urls = _URLs[self.config.name]
-        data = dl_manager.download_and_extract(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data, "gutenberg_time_phrases.csv"),
-                    "split": "train",
-                },
-            )
-        ]
-
-    def _generate_examples(self, filepath, split):
-
-        with open(filepath, encoding="utf8") as f:
-            data = csv.reader(f)
-            next(data)
-            for id_, row in enumerate(data):
-                yield id_, {
-                    "guten_id": row[0],
-                    "hour_reference": row[1],
-                    "time_phrase": row[2],
-                    "is_ambiguous": row[3],
-                    "time_pos_start": row[4],
-                    "time_pos_end": row[5],
-                    "tok_context": row[6],
-                }
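
For reference, the deleted builder did nothing more than download a zip archive from the GitHub mirror hard-coded in `_URLs` and read a single CSV, skipping its header row and mapping the seven columns positionally onto the features above. A rough equivalent using pandas, taking the same URL and file name from the script; treat this as a sketch, since it assumes the archive is still available at that address:

import io
import zipfile

import pandas as pd
import requests

# The archive the deleted script fetched via dl_manager.download_and_extract.
URL = "https://github.com/TevenLeScao/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true"

resp = requests.get(URL)
resp.raise_for_status()
with zipfile.ZipFile(io.BytesIO(resp.content)) as zf:
    with zf.open("gutenberg_time_phrases.csv") as f:
        df = pd.read_csv(f)

# The script read these seven columns positionally:
# guten_id, hour_reference, time_phrase, is_ambiguous,
# time_pos_start, time_pos_end, tok_context
print(df.shape)
print(df.head())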