Datasets:

Modalities:
Text
Formats:
parquet
Languages:
Catalan
DOI:
Libraries:
Datasets
pandas
License:
LuciaTormo commited on
Commit
1f4d4e1
1 Parent(s): 0a25c7c

Upload 4 files

Browse files
Files changed (4) hide show
  1. data/caBreu.py +137 -0
  2. data/dev.json +0 -0
  3. data/test.json +0 -0
  4. data/train.json +3 -0
data/caBreu.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Loading script for the CaSum dataset.
2
+ import json
3
+ import datasets
4
+
5
+ logger = datasets.logging.get_logger(__name__)
6
+
7
+ _CITATION = """"""
8
+
9
+ _DESCRIPTION = """caBreu is a summarization dataset.
10
+ It consists of 3,000 articles, each averaging about 700 words in length, along with extreme, abstractive and extractive summaries,
11
+ manually generated by three annotators.
12
+
13
+ The source material for the articles was gathered from various Catalan news sources, including the Catalan News Agency ([Agència Catalana de Notícies; ACN](https://www.acn.cat/)),
14
+ [VilaWeb](https://www.vilaweb.cat/) and [NacióDigital](https://www.naciodigital.cat/).
15
+ """
16
+
17
+ _HOMEPAGE = """https://github.com/TeMU-BSC/seq-to-seq-catalan"""
18
+
19
+ _URL = "https://huggingface.co/datasets/projecte-aina/caBreu/resolve/main/"
20
+ _TRAIN_FILE = "train.json"
21
+ _VAL_FILE = "dev.json"
22
+ _TEST_FILE = "test.json"
23
+
24
class caBreuConfig(datasets.BuilderConfig):
    """BuilderConfig for the caBreu summarization dataset."""

    def __init__(self, **kwargs):
        """Create a config, forwarding every keyword argument unchanged
        to :class:`datasets.BuilderConfig`.
        """
        super().__init__(**kwargs)
35
class caBreu(datasets.GeneratorBasedBuilder):
    """caBreu dataset builder.

    Exposes train/validation/test splits of Catalan news articles, each
    paired with extreme, abstractive and extractive summaries written by
    three annotators (a1/a2/a3).
    """

    BUILDER_CONFIGS = [
        caBreuConfig(
            name="caBreu",
            version=datasets.Version("1.0.0"),
            description="caBreu dataset",
        ),
    ]

    # Summary types present in the data, and the annotators per type.
    _SUMMARY_TYPES = ("extreme", "abstractive", "extractive")
    _ANNOTATORS = ("a1", "a2", "a3")

    def _info(self):
        """Return the dataset metadata and feature schema."""
        # One summary string per annotator; the same sub-schema is reused
        # for each of the three summary types.
        def annotator_schema():
            return {a: datasets.Value("string") for a in self._ANNOTATORS}

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "subtitle": datasets.Value("string"),
                    "content": datasets.Value("string"),
                    "category": datasets.Sequence(datasets.Value("string")),
                    "source": datasets.Value("string"),
                    "summaries": {
                        s_type: annotator_schema() for s_type in self._SUMMARY_TYPES
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSON files and return one SplitGenerator per split."""
        urls_to_download = {
            "train": f"{_URL}{_TRAIN_FILE}",
            "dev": f"{_URL}{_VAL_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs from one JSON split file.

        Args:
            filepath: Local path to a split's JSON file (a list of article
                objects).
        """
        logger.info("generating examples from = %s", filepath)
        # Explicit encoding: the corpus is Catalan text stored as UTF-8 and
        # must not be decoded with the platform's default locale encoding.
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for article in data:
            category = article["category"]
            # NOTE(review): a plain-string category is coerced to an empty
            # list (original behavior, preserved) — presumably it encodes a
            # missing value; confirm against the raw data before changing.
            if isinstance(category, str):
                category = []
            # Re-nest the per-annotator summaries explicitly so a schema
            # mismatch fails here (KeyError) rather than deep inside the
            # Arrow conversion.
            summaries = {
                s_type: {
                    annotator: article["summaries"][s_type][annotator]
                    for annotator in self._ANNOTATORS
                }
                for s_type in self._SUMMARY_TYPES
            }
            yield article["id"], {
                "id": article["id"],
                "title": article["title"],
                "subtitle": article["subtitle"],
                "content": article["content"],
                "category": category,
                "source": article["source"],
                "summaries": summaries,
            }
data/dev.json ADDED
The diff for this file is too large to render. See raw diff
 
data/test.json ADDED
The diff for this file is too large to render. See raw diff
 
data/train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6c7d6909f15429b4f94d60f9c81d517fc23fde93bdbe5070213f1fd35578ad6
3
+ size 22825376