Rohambarack committed
Commit da633ea · 1 Parent(s): ba61a96

ncc_newspaper separated from ncc

CHANGELOG.md CHANGED
@@ -6,12 +6,12 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 
- ## [v1.0.12] - 2025-04-16
+ ## [v1.0.12] - 2025-05-05
 
  ### Added
 
- - Added new dataset (~1.61B tokens)
- - Norwegian Colossal Corpus (ncc)
+ - Added new dataset
+ - Norwegian Colossal Corpus (newspaper) (~1M tokens)
 
 
  ## [v1.0.11] - 2025-03-29
README.md CHANGED
@@ -125,10 +125,10 @@ configs:
  data_files:
  - split: train
  path: data/nota/*.parquet
- - config_name: ncc
+ - config_name: ncc_newspaper
  data_files:
  - split: train
- path: data/ncc/*.parquet
+ path: data/ncc_newspaper/*.parquet
  annotations_creators:
  - no-annotation
  language_creators:
@@ -334,8 +334,8 @@ Below follows a brief overview of the sources in the corpus along with their ind
  | [hest] | Samples from the Danish debate forum www.heste-nettet.dk | 389.33M | [CC-0] |
  | [retsinformationdk] | [retsinformation.dk](https://www.retsinformation.dk) (legal-information.dk) the official legal information system of Denmark | 516.54M | [Danish Copyright Law] |
  | [cellar] | The official digital repository for European Union legal documents and open data | 1.28B | [CC-BY-SA 4.0] |
- | [ncc] | Danish subset of [NCC](https://huggingface.co/datasets/NbAiLab/NCC), The Norwegian Colossal Corpus | 1.61B | [CC-0] [NLOD 2.0] [CC-BY-SA 3.0] |
- | **Total** | | 5.1B | |
+ | [ncc_newspaper] | Danish subset of [NCC](https://huggingface.co/datasets/NbAiLab/NCC), The Norwegian Colossal Corpus (newspaper) | 1.05M | [CC-0] |
+ | **Total** | | 3.49B | |
 
  [ai-aktindsigt]: data/ai-aktindsigt/ai-aktindsigt.md
  [cellar]: data/cellar/cellar.md
@@ -367,7 +367,7 @@ Below follows a brief overview of the sources in the corpus along with their ind
  [nordjyllandnews]: data/nordjyllandnews/nordjyllandnews.md
  [relig]: data/relig/relig.md
  [nota]: data/nota/nota.md
- [ncc]: data/ncc/ncc.md
+ [ncc_newspaper]: data/ncc_newspaper/ncc_newspaper.md
 
 
  [CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en
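A config block declared this way is loaded by its `config_name`. A minimal sketch of reading the new subset, assuming a placeholder repository id (only the `ncc_newspaper` config name and its parquet path come from this change):

```py
# Minimal sketch: load the ncc_newspaper config added above.
# "<dataset-repo>" is a placeholder for the repository this README belongs to.
from datasets import load_dataset

ds = load_dataset("<dataset-repo>", name="ncc_newspaper", split="train")
print(ds.column_names)  # expected: text, source, id, added, created, license, domain, metadata
```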
data/ncc/create.py DELETED
@@ -1,256 +0,0 @@
1
- # /// script
2
- # requires-python = ">=3.12"
3
- # dependencies = [
4
- # "datasets>=3.2.0",
5
- # ]
6
- # ///
7
- # setup
8
- from pathlib import Path
9
- from datetime import datetime
10
- from datasets import Dataset, load_dataset
11
-
12
- source = "ncc"
13
-
14
-
15
- # functions
16
- def word_tokenize(text: str) -> list[str]:
17
- """
18
- Tokenizes a string into words, splitting on whitespace and punctuation.
19
-
20
- Example:
21
- >>> word_tokenize("Hello, world!")
22
- ['Hello', ',', 'world', '!']
23
- >>> word_tokenize("This is a test.")
24
- ['This', 'is', 'a', 'test', '.']
25
- >>> word_tokenize("Many spaces between words.")
26
- ['Many', 'spaces', 'between', 'words', '.']
27
- """
28
-
29
- punkt = [",", ".", "!", "?", ":", ";", "(", ")", "[", "]", "{", "}", '"', "'"]
30
- for p in punkt:
31
- text = text.replace(p, f" {p} ")
32
- return text.split()
33
-
34
-
35
- def count_min_target(given_list: list, target_list: list, min: int) -> bool:
36
- """
37
- Iterates through given list, until at least min items match any items from target list
38
-
39
- """
40
- c_item = 0
41
- given_list_iter = iter(given_list)
42
- while c_item < min:
43
- try:
44
- current_item = next(given_list_iter)
45
- if current_item in target_list:
46
- c_item += 1
47
- except StopIteration:
48
- break
49
-
50
- return c_item == min
51
-
52
-
53
- def min_alpha_ratio(text: str | list[str], min: float = 0.7) -> bool:
54
- """
55
- If not split already to words, splits text with word_tokenize()
56
- Calculates ratio of words with only alphabetical characters
57
- Compares it to min
58
-
59
- """
60
- if type(text) is str:
61
- text = word_tokenize(text)
62
- else:
63
- pass
64
-
65
- alpha_ratio = 1 - sum(not word.isalpha() for word in text) / len(text)
66
-
67
- return alpha_ratio >= min
68
-
69
-
70
- def lookup_ref_dict(ref_dictionary: dict[str, list[str]], string_item: str) -> str:
71
- """
72
- Takes a reference dictionary and an item,
73
- Outputs the key, where the item contains any element in the value list.
74
- e.g:
75
- ref_dictionary = {"ab": ["a","b"],
76
- "cd": ["c","d"]
77
- }
78
- string_item = "*a*" | "*b*"
79
- output = "ab"
80
-
81
- !!! WARNING: will return last match !!!
82
- string_item = "*a*d*"
83
- output = "cd"
84
-
85
- """
86
- for key, values in ref_dictionary.items():
87
- for each_value in values:
88
- if each_value in string_item:
89
- output = key
90
- else:
91
- pass
92
-
93
- try:
94
- return output
95
- except UnboundLocalError:
96
- print(f"WARNING: ref_lookup_dict() unknown value in data --> {string_item}")
97
-
98
-
99
- class document_filter:
100
- """
101
- Document filtering from a dictionary
102
- Made for https://huggingface.co/datasets/NbAiLab/NCC
103
-
104
- confidence in language > 0.5, -> below mostly noise
105
- check language == da, -> unwanted data if not
106
- text length > 10 words, -> short text, likely noise
107
- check alpha > 0.7, -> too many words with numbers in them, likely
108
- noise
109
- stopwords > 2, -> no stopwords, likely not coherent text, likely
110
- noise
111
-
112
- """
113
-
114
- def __init__(
115
- self,
116
- req_language: str = "da",
117
- min_conf: float = 0.5,
118
- min_w_l: int = 10,
119
- min_alpha: float = 0.7,
120
- min_s_w_l: int = 2,
121
- ):
122
- self.req_language = req_language
123
- self.min_conf = min_conf
124
- self.min_w_l = min_w_l
125
- self.min_alpha = min_alpha
126
- self.min_s_w_l = min_s_w_l
127
- self.date_today = datetime.now().strftime("%Y-%m-%d")
128
-
129
- def first_layer_filter(self, meta_document: dict[str, str | int]) -> bool:
130
- """
131
- Filtering based on already available data in the dictionary:
132
- Language
133
- Confidence in language classification
134
-
135
- """
136
- language = meta_document.get("lang_fasttext")
137
- confidence = float(meta_document.get("lang_fasttext_conf"))
138
-
139
- return (confidence >= self.min_conf) and (language == self.req_language)
140
-
141
- def second_layer_filter(self, text: str, stop_words: list[str]) -> bool:
142
- """
143
- Filtering based on data derived from the document text:
144
- text length:
145
- text is segmented to words by word_tokenize()
146
- measured by len()
147
- alpha ratio:
148
- by min_alpha_ratio()
149
- minimum stop words present:
150
- by count_min_target()
151
- """
152
-
153
- word_list = word_tokenize(text)
154
-
155
- text_length_pass = len(word_list) >= self.min_w_l
156
- alpha_pass = min_alpha_ratio(word_list, self.min_alpha)
157
- s_w_pass = count_min_target(word_list, stop_words, self.min_s_w_l)
158
-
159
- return text_length_pass and alpha_pass and s_w_pass
160
-
161
- def dynaword_format(
162
- self,
163
- meta_document: dict[str, str | int],
164
- ref_dictionary_license: dict[str, list[str]],
165
- ref_dictionary_domain: dict[str, list[str]],
166
- ) -> dict[str, str | dict[str, str]]:
167
- """Reformats data to fit dynaword standards"""
168
-
169
- text = meta_document.get("text")
170
- id = meta_document.get("id")
171
- date = meta_document.get("publish_year")
172
- doc_type = meta_document.get("doc_type")
173
-
174
- newdata = {
175
- "text": text,
176
- "source": "ncc",
177
- "id": id,
178
- "added": self.date_today,
179
- "created": f"{date}-01-01, {date}-12-31",
180
- "license": lookup_ref_dict(ref_dictionary_license, doc_type),
181
- "domain": lookup_ref_dict(ref_dictionary_domain, doc_type),
182
- "metadata": {
183
- "source-pretty": "Norwegian Colossal Corpus",
184
- "source-type": doc_type,
185
- },
186
- }
187
-
188
- return newdata
189
-
190
-
191
- # main
192
- def main():
193
- # filtering setup
194
- stop_words = [
195
- 'ad', 'af', 'alle', 'alt', 'anden', 'at', 'blev', 'blive', 'bliver', 'da', 'de', 'dem', 'den', 'denne', 'der', 'deres', 'det', 'dette', 'dig', 'din', 'disse', 'dog', 'du', 'efter', 'eller', 'en', 'end', 'er', 'et', 'for', 'fra', 'ham', 'han', 'hans', 'har', 'havde', 'have', 'hende', 'hendes', 'her', 'hos', 'hun', 'hvad', 'hvis', 'hvor', 'i', 'ikke', 'ind', 'jeg', 'jer', 'jo', 'kunne', 'man', 'mange', 'med', 'meget', 'men', 'mig', 'min', 'mine', 'mit', 'mod', 'ned', 'noget', 'nogle', 'nu', 'når', 'og', 'også', 'om', 'op', 'os', 'over', 'på', 'selv', 'sig', 'sin', 'sine', 'sit', 'skal', 'skulle', 'som', 'sådan', 'thi', 'til', 'ud', 'under', 'var', 'vi', 'vil', 'ville', 'vor', 'være', 'været'
196
- ]
197
- doc_filter = document_filter()
198
- da_data = []
199
-
200
- # formatting setup
201
- ref_dictionary_license = {
202
- "other": ["government", "parliament", "publicreport", "lovdata", "maalfrid","wikipedia"],
203
- "cc0-1.0": ["newspaper", "book"]
204
- }
205
-
206
- ref_dictionary_domain = {
207
- "Legal": ["government", "parliament", "publicreport", "lovdata", "maalfrid"],
208
- "News": ["newspaper"],
209
- "Wiki & Books": ["book", "wikipedia"],
210
- }
211
-
212
-
213
- ## load all data first to get splits, then load and filter by split
214
- data = load_dataset("NbAiLab/NCC", streaming=True)
215
- data_splits=list(reversed(data.keys()))
216
-
217
-
218
- for current_split in data_splits:
219
- data = load_dataset("NbAiLab/NCC", streaming=True, split=current_split)
220
- data_iter = iter(data)
221
-
222
- # filtering and formatting
223
- while True:
224
- try:
225
- current_text = next(data_iter)
226
-
227
- meta_data_filtering = doc_filter.first_layer_filter(current_text)
228
-
229
- if meta_data_filtering:
230
- text_filtering = doc_filter.second_layer_filter(
231
- current_text.get("text"), stop_words
232
- )
233
-
234
- if meta_data_filtering and text_filtering:
235
- # formatting
236
- dynaform = doc_filter.dynaword_format(
237
- current_text, ref_dictionary_license, ref_dictionary_domain
238
- )
239
-
240
- da_data.append(dynaform)
241
- else:
242
- pass
243
- else:
244
- pass
245
-
246
- except StopIteration:
247
- break
248
-
249
- ### saving
250
- ds = Dataset.from_list(da_data)
251
- save_path = Path(__file__).parent / f"{source}.parquet"
252
- ds.to_parquet(save_path)
253
-
254
-
255
- if __name__ == "__main__":
256
- main()
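For context on what is being removed: `lookup_ref_dict` mapped a document's `doc_type` to a license or domain by substring matching against the reference dictionaries defined in `main()`, with a later match overwriting an earlier one (the caveat its docstring warns about). A small standalone sketch of that behaviour, with the license dictionary copied from above and example `doc_type` values taken from this diff:

```py
# Standalone sketch of the deleted lookup_ref_dict() mapping logic.
def lookup_ref_dict(ref_dictionary: dict[str, list[str]], string_item: str) -> str | None:
    output = None
    for key, values in ref_dictionary.items():
        for value in values:
            if value in string_item:
                output = key  # a later match overwrites an earlier one
    return output

ref_dictionary_license = {
    "other": ["government", "parliament", "publicreport", "lovdata", "maalfrid", "wikipedia"],
    "cc0-1.0": ["newspaper", "book"],
}

print(lookup_ref_dict(ref_dictionary_license, "newspaper_ocr"))         # cc0-1.0
print(lookup_ref_dict(ref_dictionary_license, "maalfrid_regjeringen"))  # other
```

The replacement script below no longer needs this lookup, because each source (here: newspaper) carries a single license and domain set as module-level constants.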
data/ncc/descriptive_stats.json DELETED
@@ -1,7 +0,0 @@
- {
- "number_of_samples": 65301,
- "average_document_length": 70916.04932543146,
- "number_of_tokens": 1606197164,
- "language": "dan, dansk, Danish",
- "revision": "72ebab94b5331169630c823308470471687bb921"
- }
data/ncc_newspaper/create.py ADDED
@@ -0,0 +1,333 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "datasets>=3.2.0"
5
+ # ]
6
+ # ///
7
+ # setup
8
+ import logging
9
+ import re
10
+ import inspect
11
+
12
+ from pathlib import Path
13
+ from datetime import datetime
14
+ from collections import defaultdict
15
+ from collections.abc import Callable
16
+
17
+ import pandas as pd
18
+ from datasets import Dataset, load_dataset
19
+
20
+ logger = logging.getLogger(__name__)
21
+ ########## edit manually for each source
22
+ hf_path = "NbAiLab/NCC"
23
+ source = "ncc_newspaper"
24
+ license = "cc0-1.0"
25
+ domain = "News"
26
+ num_proc = 8
27
+ ##########
28
+ today = datetime.now().strftime("%Y-%m-%d")
29
+
30
+ #stop words taken from spaCy
31
+ #https://github.com/explosion/spaCy/blob/master/spacy/lang/da/stop_words.py
32
+ # Source: Handpicked by Jens Dahl Møllerhøj.
33
+ spacy_sw = set(
34
+ """
35
+ af aldrig alene alle allerede alligevel alt altid anden andet andre at
36
+
37
+ bag begge blandt blev blive bliver burde bør
38
+
39
+ da de dem den denne dens der derefter deres derfor derfra deri dermed derpå derved det dette dig din dine disse dog du
40
+
41
+ efter egen eller ellers en end endnu ene eneste enhver ens enten er et
42
+
43
+ flere flest fleste for foran fordi forrige fra få før først
44
+
45
+ gennem gjorde gjort god gør gøre gørende
46
+
47
+ ham han hans har havde have hel heller hen hende hendes henover her herefter heri hermed herpå hun hvad hvem hver hvilke hvilken hvilkes hvis hvor hvordan hvorefter hvorfor hvorfra hvorhen hvori hvorimod hvornår hvorved
48
+
49
+ i igen igennem ikke imellem imens imod ind indtil ingen intet
50
+
51
+ jeg jer jeres jo
52
+
53
+ kan kom kommer kun kunne
54
+
55
+ lad langs lav lave lavet lidt lige ligesom lille længere
56
+
57
+ man mange med meget mellem men mens mere mest mig min mindre mindst mine mit må måske
58
+
59
+ ned nemlig nogen nogensinde noget nogle nok nu ny nyt nær næste næsten
60
+
61
+ og også om omkring op os over overalt
62
+
63
+
64
+
65
+ samme sammen selv selvom senere ses siden sig sige skal skulle som stadig synes syntes så sådan således
66
+
67
+ temmelig tidligere til tilbage tit
68
+
69
+ ud uden udover under undtagen
70
+
71
+ var ved vi via vil ville vore vores vær være været
72
+
73
+ øvrigt
74
+ """.split()
75
+ )
76
+
77
+ # functions
78
+ def word_tokenize(text: str) -> list[str]:
79
+ """
80
+ Tokenizes a string into words, splitting on whitespace and punctuation.
81
+
82
+ Example:
83
+ >>> word_tokenize("Hello, world!")
84
+ ['Hello', ',', 'world', '!']
85
+ >>> word_tokenize("This is a test.")
86
+ ['This', 'is', 'a', 'test', '.']
87
+ >>> word_tokenize("Many spaces between words.")
88
+ ['Many', 'spaces', 'between', 'words', '.']
89
+ """
90
+
91
+ punkt = [",", ".", "!", "?", ":", ";", "(", ")", "[", "]", "{", "}", '"', "'"]
92
+ for p in punkt:
93
+ text = text.replace(p, f" {p} ")
94
+ return text.split()
95
+
96
+ def alpha_ratio(text: str | list[str]) -> float:
97
+ """
98
+ If the text is not already split into words, splits it with word_tokenize()
99
+ Calculates ratio of words with only alphabetical characters
100
+
101
+ """
102
+ if type(text) is str:
103
+ text = word_tokenize(text)
104
+ else:
105
+ pass
106
+
107
+ alpha_ratio = 1 - sum(not word.isalpha() for word in text) / len(text)
108
+
109
+ return alpha_ratio
110
+
111
+ def count_min_target(given_list: list, target_list: list, min: int) -> bool:
112
+ """
113
+ Iterates through the given list until at least `min` items match any item from the target list
114
+
115
+ """
116
+ c_item = 0
117
+ given_list_iter = iter(given_list)
118
+ while c_item < min:
119
+ try:
120
+ current_item = next(given_list_iter)
121
+ if current_item in target_list:
122
+ c_item += 1
123
+ except StopIteration:
124
+ break
125
+
126
+ return c_item == min
127
+
128
+ def dynaword_format(
129
+ meta_document: dict[str, str | int]
130
+ ) -> dict[str, str | dict[str, str]]:
131
+ """Reformats data to fit dynaword standards"""
132
+
133
+ text = meta_document.get("text")
134
+ id = meta_document.get("id")
135
+ date = meta_document.get("publish_year")
136
+ doc_type = meta_document.get("doc_type")
137
+
138
+ newdata = {
139
+ "text": text,
140
+ "source": source,
141
+ "id": id,
142
+ "added": today,
143
+ "created": f"{date}-01-01, {date}-12-31",
144
+ "license": license,
145
+ "domain": domain,
146
+ "metadata": {
147
+ "source-pretty": f"Norwegian Colossal Corpus ({re.sub("ncc_","",source)})",
148
+ "source-type": doc_type,
149
+ },
150
+ }
151
+
152
+ return newdata
153
+
154
+ def log_pre_filter_lang_data(lang_metadata : dict[str,dict[str,int]],
155
+ filtered_ds: Dataset):
156
+ """
157
+ Function for logging changes in a large dataset,
158
+ based on the metadata pre-filtering and the filtered dataset,
159
+ used for language filtering
160
+ """
161
+ all_docs = sum(lang_metadata[source].values())
162
+ no_docs = lang_metadata[source].get("no")
163
+ da_docs = lang_metadata[source].get("da")
164
+ no_perc = round(no_docs/all_docs*100,4)
165
+ da_perc = round(da_docs/all_docs*100,4)
166
+
167
+ f_length = len(filtered_ds)
168
+ f_perc = round(f_length/da_docs*100,4)
169
+ f_total_perc = round(f_length/all_docs*100,4)
170
+
171
+ logger.info(f"Documents of {source}:")
172
+ logger.info(f"NO: {no_docs}, {no_perc}% ; DA: {da_docs}, {da_perc}%")
173
+ logger.info(f"After language confidence filtering:")
174
+ logger.info(f"DA: {f_length}, lost: {100-f_perc}%")
175
+ logger.info(f"Total document change:")
176
+ logger.info(f"{all_docs} -> {f_length}, loss: {100-f_total_perc}%")
177
+
178
+ def get_var_name(var):
179
+ """ outputs the variable name """
180
+ callers_local_vars = inspect.currentframe().f_back.f_back.f_back.f_locals.items()
181
+ return [var_name for var_name, var_val in callers_local_vars if var_val is var]
182
+
183
+ def filter_with_changelog(filter_func:Callable[[Dataset],Dataset],
184
+ dataset:Dataset) -> Dataset:
185
+ """
186
+ Function, which takes a filter and a dataset.
187
+ Counts text docs and tokens before and after filtering,
188
+ Saves filtering changes to log.
189
+ """
190
+
191
+ filter_name = get_var_name(filter_func)
192
+ pre_filter_docs = len(dataset)
193
+ pre_filter_tokens= sum(len(word_tokenize(i["text"])) for i in dataset)
194
+
195
+ dataset = dataset.filter(filter_func,num_proc=num_proc)
196
+
197
+ post_filter_docs = len(dataset)
198
+ post_filter_tokens= sum(len(word_tokenize(i["text"])) for i in dataset)
199
+ tokens_removed = round((1-(post_filter_tokens/pre_filter_tokens))*100,2)
200
+ docs_removed = round((1-(post_filter_docs/pre_filter_docs))*100,2)
201
+
202
+ logger.info(f"FILTER: {filter_name}")
203
+ logger.info(f"TOKENS: pre: {pre_filter_tokens}, post: {post_filter_tokens}, loss: {tokens_removed}%")
204
+ logger.info(f"DOCUMENTS: pre: {pre_filter_docs}, post: {post_filter_docs}, loss: {docs_removed}%")
205
+
206
+ return dataset
207
+
208
+
209
+ # filters
210
+ source_filter = lambda ds : re.sub("ncc_","",source) in ds["doc_type"]
211
+ length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10
212
+ too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5
213
+ alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7
214
+ stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]),spacy_sw,2)
215
+
216
+ samples_pr_source: dict = defaultdict(lambda: defaultdict(int))
217
+ def language_filter_with_desc_stats(ds:Dataset) -> bool:
218
+ """
219
+ Language filtering in a streamed dataset while logging all languages
220
+ """
221
+ s = source
222
+ language = ds["lang_fasttext"]
223
+ samples_pr_source[s][language] += 1
224
+
225
+ language_filter = ds["lang_fasttext"] == "da" and float(ds["lang_fasttext_conf"]) >= 0.5
226
+
227
+ return language_filter
228
+
229
+ #quality checks
230
+ def quality_checks(ds:Dataset) -> Dataset:
231
+ """
232
+ Quality checks for:
233
+ - no duplicate ids
234
+ - no duplicate texts
235
+ - logs texts > 1e5 tokens
236
+ """
237
+ #convert to pandas for the drop_duplicates()
238
+ df = pd.DataFrame(ds)
239
+ # remove duplicate ids
240
+ len_df = len(df)
241
+ df = df.drop_duplicates(subset=["id"])
242
+ logger.info(f"Removed {len_df - len(df)} duplicate ids")
243
+ # remove rows with duplicate text
244
+ len_df = len(df)
245
+ df = df.drop_duplicates(subset=["text"])
246
+ logger.info(f"Removed {len_df - len(df)} rows with duplicate text")
247
+ #reconvert and remove index
248
+ ds_f = Dataset.from_pandas(df,preserve_index=False)
249
+ try:
250
+ ds_f["__index_level_0__"]
251
+ ds_f = ds_f.remove_columns("__index_level_0__")
252
+ except KeyError:
253
+ pass
254
+
255
+ assert len(set(ds_f["id"])) == len(ds_f), "IDs are not unique"
256
+ assert len(set(ds_f["text"])) == len(ds_f), "Texts are not unique"
257
+
258
+ long_texts = ds_f.filter(too_long_filter,num_proc=num_proc)
259
+ if len(long_texts["id"]) > 0:
260
+ logger.info(f"{len(long_texts["id"])} Long texts (>~1e5 tokens) found")
261
+ for id in long_texts["id"]:
262
+ logger.info(f"id: {id}")
263
+ else:
264
+ logger.info("No long texts (>~1e5 tokens) found")
265
+
266
+ return ds_f
267
+
268
+ #main
269
+ def main():
270
+ #load all splits
271
+ logger.info(f"Loading data from: {hf_path}")
272
+ data = load_dataset(hf_path, streaming=True)
273
+ data_list = []
274
+
275
+ for split in data:
276
+ #filter by metadata
277
+ logger.info(f"Processing source: {source}, split: {split}")
278
+ s_data=data[split].filter(source_filter)
279
+
280
+
281
+ logger.info(f"Processing language, split: {split}")
282
+ s_data=s_data.filter(language_filter_with_desc_stats)
283
+
284
+ #convert from iterable dataset
285
+ data_iter = iter(s_data)
286
+ while True:
287
+ try:
288
+ data_list.append(next(data_iter))
289
+ except StopIteration:
290
+ break
291
+ danish_data = Dataset.from_list(data_list)
292
+ del data_list
293
+
294
+ #log language changes
295
+ log_pre_filter_lang_data(samples_pr_source,danish_data)
296
+
297
+ #convert to dynaword format
298
+ logger.info("Assembling whole dataset for filtering")
299
+ danish_data = danish_data.map(dynaword_format)
300
+ danish_data = danish_data.select_columns(["text",
301
+ "source",
302
+ "id",
303
+ "added",
304
+ "created",
305
+ "license",
306
+ "domain",
307
+ "metadata"])
308
+
309
+ #filter and log changes
310
+ danish_data = filter_with_changelog(length_filter,danish_data)
311
+ danish_data = filter_with_changelog(alpha_filter,danish_data)
312
+ danish_data = filter_with_changelog(stop_word_filter,danish_data)
313
+
314
+ #Quality checks
315
+ danish_data = quality_checks(danish_data)
316
+
317
+ ### saving
318
+ save_path = Path(__file__).parent / f"{source}.parquet"
319
+ danish_data.to_parquet(save_path)
320
+
321
+
322
+
323
+ if __name__ == "__main__":
324
+ log_path = Path(__file__).parent / f"{source}.log"
325
+ logging.basicConfig(
326
+ level=logging.INFO,
327
+ format="%(asctime)s - %(levelname)s - %(message)s",
328
+ handlers=[
329
+ logging.StreamHandler(),
330
+ logging.FileHandler(log_path),
331
+ ],
332
+ )
333
+ main()
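For readers skimming the pipeline above: after the source and language filters, a document has to pass three content checks (at least 10 tokens, an alphabetic-word ratio of at least 0.7, and at least two Danish stop words). A self-contained sketch of those checks on toy inputs, using a deliberately tiny stop-word set rather than the full spaCy list:

```py
# Self-contained sketch of the three content filters applied in create.py above.
def word_tokenize(text: str) -> list[str]:
    # Same tokenizer as the script: pad punctuation with spaces, then split on whitespace.
    for p in [",", ".", "!", "?", ":", ";", "(", ")", "[", "]", "{", "}", '"', "'"]:
        text = text.replace(p, f" {p} ")
    return text.split()

def passes_content_filters(text: str, stop_words: set[str]) -> bool:
    words = word_tokenize(text)
    length_ok = len(words) >= 10
    alpha_ok = (1 - sum(not w.isalpha() for w in words) / len(words)) >= 0.7
    stop_word_ok = sum(w in stop_words for w in words) >= 2
    return length_ok and alpha_ok and stop_word_ok

toy_stop_words = {"af", "det", "er", "en", "og", "til"}  # tiny subset of the spaCy list
print(passes_content_filters("Det er en kort artikel om vejret i Danmark i dag.", toy_stop_words))  # True
print(passes_content_filters("123 456 789", toy_stop_words))  # False: too short, no letters
```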
data/ncc_newspaper/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "number_of_samples": 5373,
+ "average_document_length": 571.6929089893914,
+ "number_of_tokens": 1052890,
+ "language": "dan, dansk, Danish",
+ "revision": "ba61a9679152b7e3b74cf8f5b5fb36515c90e8d0"
+ }
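The figures above can be recomputed from the generated parquet file; a minimal sketch, assuming the file has already been produced by `create.py` in the same directory (the Llama 3 token count is left out because it requires that tokenizer):

```py
# Recompute number_of_samples and average_document_length from the parquet file.
import pandas as pd

df = pd.read_parquet("data/ncc_newspaper/ncc_newspaper.parquet")
print(len(df))                      # number_of_samples, expected 5373
print(df["text"].str.len().mean())  # average_document_length in characters, expected ~571.69
```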
data/{ncc → ncc_newspaper}/images/dist_document_length.png RENAMED
File without changes
data/ncc_newspaper/ncc_newspaper.log ADDED
@@ -0,0 +1,39 @@
1
+ 2025-05-01 07:46:31,692 - INFO - Loading data from: NbAiLab/NCC
2
+ 2025-05-01 07:46:35,756 - INFO - Processing source: ncc_newspaper, split: train
3
+ 2025-05-01 07:46:35,757 - INFO - Processing language, split: train
4
+ 2025-05-01 09:08:21,490 - INFO - Loading data from: NbAiLab/NCC
5
+ 2025-05-01 09:08:35,451 - INFO - Processing source: ncc_newspaper, split: train
6
+ 2025-05-01 09:08:35,453 - INFO - Processing language, split: train
7
+ 2025-05-01 09:51:35,309 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0676B40>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 8c7cfa56-5cbe-4113-ae0b-9b9192c59c61)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
8
+ 2025-05-01 09:51:35,330 - WARNING - Retrying in 1s [Retry 1/5].
9
+ 2025-05-01 09:51:36,342 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216EF3EC3E0>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 8a52f0ab-4507-4af3-9de8-44600dcbe92b)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
10
+ 2025-05-01 09:51:36,343 - WARNING - Retrying in 2s [Retry 2/5].
11
+ 2025-05-01 09:51:38,346 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0D0B920>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 7004c6e6-9238-4ae1-9b2c-625361ec2495)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
12
+ 2025-05-01 09:51:38,347 - WARNING - Retrying in 4s [Retry 3/5].
13
+ 2025-05-01 10:34:26,967 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0D08CE0>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: e818f8c8-4815-4b64-95f1-1ea5d68005b7)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
14
+ 2025-05-01 10:34:26,976 - WARNING - Retrying in 8s [Retry 4/5].
15
+ 2025-05-01 10:34:34,996 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0D089E0>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 3b7514c2-ff9c-4634-b738-535764ff6b86)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
16
+ 2025-05-01 10:34:34,996 - WARNING - Retrying in 8s [Retry 5/5].
17
+ 2025-05-01 10:34:43,000 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0D0BE90>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 430a9373-6149-47c5-a660-aa1b82df18d3)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
18
+ 2025-05-01 10:34:43,001 - WARNING - Got disconnected from remote data host. Retrying in 5sec [1/20]
19
+ 2025-05-01 11:24:41,107 - INFO - Processing source: ncc_newspaper, split: validation
20
+ 2025-05-01 11:24:41,121 - INFO - Processing language, split: validation
21
+ 2025-05-01 11:26:07,574 - INFO - Documents of ncc_newspaper:
22
+ 2025-05-01 11:26:07,575 - INFO - NO: 487086, 73.2081% ; DA: 17516, 2.6326%
23
+ 2025-05-01 11:26:07,575 - INFO - After language confidence filtering:
24
+ 2025-05-01 11:26:07,577 - INFO - DA: 7632, lost: 56.4284%
25
+ 2025-05-01 11:26:07,577 - INFO - Total document change:
26
+ 2025-05-01 11:26:07,578 - INFO - 665344 -> 7632, loss: 98.8529%
27
+ 2025-05-01 11:26:07,578 - INFO - Assembling whole dataset for filtering
28
+ 2025-05-01 11:26:24,562 - INFO - FILTER: ['length_filter']
29
+ 2025-05-01 11:26:24,562 - INFO - TOKENS: pre: 669129, post: 661484, loss: 1.14%
30
+ 2025-05-01 11:26:24,563 - INFO - DOCUMENTS: pre: 7632, post: 6401, loss: 16.13%
31
+ 2025-05-01 11:26:31,510 - INFO - FILTER: ['alpha_filter']
32
+ 2025-05-01 11:26:31,511 - INFO - TOKENS: pre: 661484, post: 616869, loss: 6.74%
33
+ 2025-05-01 11:26:31,511 - INFO - DOCUMENTS: pre: 6401, post: 5439, loss: 15.03%
34
+ 2025-05-01 11:26:37,466 - INFO - FILTER: ['stop_word_filter']
35
+ 2025-05-01 11:26:37,467 - INFO - TOKENS: pre: 616869, post: 616059, loss: 0.13%
36
+ 2025-05-01 11:26:37,467 - INFO - DOCUMENTS: pre: 5439, post: 5374, loss: 1.2%
37
+ 2025-05-01 11:26:38,121 - INFO - Removed 0 duplicate ids
38
+ 2025-05-01 11:26:38,129 - INFO - Removed 1 rows with duplicate text
39
+ 2025-05-01 11:26:42,145 - INFO - No long texts (>~1e5 tokens) found
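The loss percentages in this log come from `filter_with_changelog` in `create.py`, i.e. `round((1 - post / pre) * 100, 2)`; a quick check against the `length_filter` lines above:

```py
# Reproduce the logged loss percentages for length_filter.
pre_tokens, post_tokens = 669129, 661484
pre_docs, post_docs = 7632, 6401
print(round((1 - post_tokens / pre_tokens) * 100, 2))  # 1.14 (token loss)
print(round((1 - post_docs / pre_docs) * 100, 2))      # 16.13 (document loss)
```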
data/{ncc/ncc.md → ncc_newspaper/ncc_newspaper.md} RENAMED
@@ -1,9 +1,9 @@
1
  ---
2
- pretty_name: Norwegian Colossal Corpus
3
  language:
4
  - da
5
- license: other
6
- license_name: CC0 1.0, NLOD 2.0, CC BY-SA 3.0
7
  task_categories:
8
  - text-generation
9
  - fill-mask
@@ -11,16 +11,18 @@ task_ids:
11
  - language-modeling
12
  ---
13
 
14
- # Dataset Card for Norwegian Colossal Corpus
15
 
16
  <!-- START-SHORT DESCRIPTION -->
17
- Danish language subset of [NCC](https://huggingface.co/datasets/NbAiLab/NCC)
 
18
  <!-- END-SHORT DESCRIPTION -->
19
 
20
  The Norwegian Colossal Corpus is a collection of multiple smaller Norwegian corpuses suitable for training large language models. \
21
  (desc. taken from [NCC](https://huggingface.co/datasets/NbAiLab/NCC))
22
 
23
  This subset is the result of the following filtering from all available data splits:
 
24
  - Document is marked as Danish
25
  - Confidence of the language classification is at least 0.5
26
  - Document has at least 10 words (whitespace separated strings + punctuation)
@@ -33,30 +35,29 @@ This subset is the result of the following filtering from all availabel data spl
33
 
34
  <!-- START-DESC-STATS -->
35
  - **Language**: dan, dansk, Danish
36
- - **Number of samples**: 65.30K
37
- - **Number of tokens (Llama 3)**: 1.61B
38
- - **Average document length (characters)**: 70916.05
39
  <!-- END-DESC-STATS -->
40
 
41
 
42
  ## Dataset Structure
43
  An example from the dataset looks as follows.
44
-
45
-
46
  <!-- START-SAMPLE -->
47
  ```py
48
  {
49
- "text": "h) ved beregningen omhandlet i litra f) kan pengemarkedsinstrumenter eller andele eller kapitalandel[...]",
50
- "source": "ncc",
51
- "id": "maalfrid_2ede28a2c9ba7b4c0162681385ab60f99e021bfa_25",
52
- "added": "2025-04-15",
53
- "created": "2021-01-01, 2021-12-31",
54
- "license": "other",
55
- "domain": "Legal",
56
  "metadata": {
57
  "source-pretty": "Norwegian Colossal Corpus",
58
- "source-type": "maalfrid_regjeringen"
59
- }
 
60
  }
61
  ```
62
 
@@ -72,10 +73,11 @@ An entry in the dataset consists of the following fields:
72
  - `license` (`str`): The license of the document. The licenses vary according to the source.
73
  - `domain` (`str`): The domain of the source
74
  - `metadata/source-pretty` (`str`): The long form version of the short-form source name
75
- - `metadata/source-type`: (`str`) The exact document identifier from the original data
76
  <!-- END-SAMPLE -->
77
 
78
 
 
79
  ### Dataset Statistics
80
 
81
  <!-- START-DATASET PLOTS -->
@@ -83,23 +85,53 @@ An entry in the dataset consists of the following fields:
83
  <img>
84
  <!-- END-DATASET PLOTS -->
85
 
86
-
87
-
88
  ## Additional Information
89
 
90
  ## License Information
91
- The dataset consists of multiple types of documents, with various licenses:
92
- - [NLOD 2.0](https://data.norge.no/nlod/en/2.0) Norwegian government, parliament and legal documents.
93
- In this subset of the NCC, license is marked as "other" for the documents, which are under this license. To filter documents with this license, look for:
94
- - license == "other"
95
- - domain == "Legal"
96
- - metadata/source-type == "government*" or "parliament*" or "maalfrid*" or "publireport*" or "lovdata"
97
- - [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)
98
- - [CC BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0/) Wikipedia articles. In this subset of the NCC, license is marked as "other" for the documents, which are under this license. To filter documents with this license, look for:
99
- - license == "other"
100
- - domain == "Wiki & Books"
101
- - metadata/source-type == "wikipedia*"
102
-
103
 
104
  ### Citation Information
105
  ```
 
1
  ---
2
+ pretty_name: Norwegian Colossal Corpus (newspaper)
3
  language:
4
  - da
5
+ license: cc0-1.0
6
+ license_name: CC0 1.0
7
  task_categories:
8
  - text-generation
9
  - fill-mask
 
11
  - language-modeling
12
  ---
13
 
14
+ # Dataset Card for Norwegian Colossal Corpus (newspaper)
15
 
16
  <!-- START-SHORT DESCRIPTION -->
17
+ Danish language subset of [NCC](https://huggingface.co/datasets/NbAiLab/NCC) \
18
+ Source: Newspaper articles
19
  <!-- END-SHORT DESCRIPTION -->
20
 
21
  The Norwegian Colossal Corpus is a collection of multiple smaller Norwegian corpuses suitable for training large language models. \
22
  (desc. taken from [NCC](https://huggingface.co/datasets/NbAiLab/NCC))
23
 
24
  This subset is the result of the following filtering from all available data splits:
25
+ - Document comes from newspaper articles
26
  - Document is marked as Danish
27
  - Confidence of the language classification is at least 0.5
28
  - Document has at least 10 words (whitespace separated strings + punctuation)
 
35
 
36
  <!-- START-DESC-STATS -->
37
  - **Language**: dan, dansk, Danish
38
+ - **Number of samples**: 5.37K
39
+ - **Number of tokens (Llama 3)**: 1.05M
40
+ - **Average document length (characters)**: 571.69
41
  <!-- END-DESC-STATS -->
42
 
43
 
44
  ## Dataset Structure
45
  An example from the dataset looks as follows.
 
 
46
  <!-- START-SAMPLE -->
47
  ```py
48
  {
49
+ "text": "STOCKHOLM: Det er kommet melding Ul den svenske turlst forenlng om at de to svenske ljellklatrerne s[...]",
50
+ "source": "ncc_newspaper",
51
+ "id": "fylkestidendeforsognogfjordane_null_null_19410723_69_54_1_MODSMD_ARTICLE5",
52
+ "added": "2025-04-30",
53
+ "created": "1941-01-01, 1941-12-31",
54
+ "license": "cc0-1.0",
55
+ "domain": "News",
56
  "metadata": {
57
  "source-pretty": "Norwegian Colossal Corpus",
58
+ "source-type": "newspaper_ocr"
59
+ },
60
+ "__index_level_0__": 0
61
  }
62
  ```
63
 
 
73
  - `license` (`str`): The license of the document. The licenses vary according to the source.
74
  - `domain` (`str`): The domain of the source
75
  - `metadata/source-pretty` (`str`): The long form version of the short-form source name
76
+ - `metadata/*`: Potentially additional metadata
77
  <!-- END-SAMPLE -->
78
 
79
 
80
+
81
  ### Dataset Statistics
82
 
83
  <!-- START-DATASET PLOTS -->
 
85
  <img>
86
  <!-- END-DATASET PLOTS -->
87
 
 
 
88
  ## Additional Information
89
 
90
  ## License Information
91
+ [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)
92
+
93
+ ## Filtering log
94
+ ```bash
95
+ 2025-05-01 07:46:31,692 - INFO - Loading data from: NbAiLab/NCC
96
+ 2025-05-01 07:46:35,756 - INFO - Processing source: ncc_newspaper, split: train
97
+ 2025-05-01 07:46:35,757 - INFO - Processing language, split: train
98
+ 2025-05-01 09:08:21,490 - INFO - Loading data from: NbAiLab/NCC
99
+ 2025-05-01 09:08:35,451 - INFO - Processing source: ncc_newspaper, split: train
100
+ 2025-05-01 09:08:35,453 - INFO - Processing language, split: train
101
+ 2025-05-01 09:51:35,309 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0676B40>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 8c7cfa56-5cbe-4113-ae0b-9b9192c59c61)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
102
+ 2025-05-01 09:51:35,330 - WARNING - Retrying in 1s [Retry 1/5].
103
+ 2025-05-01 09:51:36,342 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216EF3EC3E0>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 8a52f0ab-4507-4af3-9de8-44600dcbe92b)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
104
+ 2025-05-01 09:51:36,343 - WARNING - Retrying in 2s [Retry 2/5].
105
+ 2025-05-01 09:51:38,346 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0D0B920>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 7004c6e6-9238-4ae1-9b2c-625361ec2495)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
106
+ 2025-05-01 09:51:38,347 - WARNING - Retrying in 4s [Retry 3/5].
107
+ 2025-05-01 10:34:26,967 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0D08CE0>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: e818f8c8-4815-4b64-95f1-1ea5d68005b7)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
108
+ 2025-05-01 10:34:26,976 - WARNING - Retrying in 8s [Retry 4/5].
109
+ 2025-05-01 10:34:34,996 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0D089E0>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 3b7514c2-ff9c-4634-b738-535764ff6b86)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
110
+ 2025-05-01 10:34:34,996 - WARNING - Retrying in 8s [Retry 5/5].
111
+ 2025-05-01 10:34:43,000 - WARNING - '(MaxRetryError('HTTPSConnectionPool(host=\'huggingface.co\', port=443): Max retries exceeded with url: /datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x00000216E0D0BE90>: Failed to resolve \'huggingface.co\' ([Errno 11001] getaddrinfo failed)"))'), '(Request ID: 430a9373-6149-47c5-a660-aa1b82df18d3)')' thrown while requesting GET https://huggingface.co/datasets/NbAiLab/NCC/resolve/857a5832b73ef33c66b5674d970777c39d991c0e/data/train-shard-0010-of-0046.jsonl
112
+ 2025-05-01 10:34:43,001 - WARNING - Got disconnected from remote data host. Retrying in 5sec [1/20]
113
+ 2025-05-01 11:24:41,107 - INFO - Processing source: ncc_newspaper, split: validation
114
+ 2025-05-01 11:24:41,121 - INFO - Processing language, split: validation
115
+ 2025-05-01 11:26:07,574 - INFO - Documents of ncc_newspaper:
116
+ 2025-05-01 11:26:07,575 - INFO - NO: 487086, 73.2081% ; DA: 17516, 2.6326%
117
+ 2025-05-01 11:26:07,575 - INFO - After language confidence filtering:
118
+ 2025-05-01 11:26:07,577 - INFO - DA: 7632, lost: 56.4284%
119
+ 2025-05-01 11:26:07,577 - INFO - Total document change:
120
+ 2025-05-01 11:26:07,578 - INFO - 665344 -> 7632, loss: 98.8529%
121
+ 2025-05-01 11:26:07,578 - INFO - Assembling whole dataset for filtering
122
+ 2025-05-01 11:26:24,562 - INFO - FILTER: ['length_filter']
123
+ 2025-05-01 11:26:24,562 - INFO - TOKENS: pre: 669129, post: 661484, loss: 1.14%
124
+ 2025-05-01 11:26:24,563 - INFO - DOCUMENTS: pre: 7632, post: 6401, loss: 16.13%
125
+ 2025-05-01 11:26:31,510 - INFO - FILTER: ['alpha_filter']
126
+ 2025-05-01 11:26:31,511 - INFO - TOKENS: pre: 661484, post: 616869, loss: 6.74%
127
+ 2025-05-01 11:26:31,511 - INFO - DOCUMENTS: pre: 6401, post: 5439, loss: 15.03%
128
+ 2025-05-01 11:26:37,466 - INFO - FILTER: ['stop_word_filter']
129
+ 2025-05-01 11:26:37,467 - INFO - TOKENS: pre: 616869, post: 616059, loss: 0.13%
130
+ 2025-05-01 11:26:37,467 - INFO - DOCUMENTS: pre: 5439, post: 5374, loss: 1.2%
131
+ 2025-05-01 11:26:38,121 - INFO - Removed 0 duplicate ids
132
+ 2025-05-01 11:26:38,129 - INFO - Removed 1 rows with duplicate text
133
+ 2025-05-01 11:26:42,145 - INFO - No long texts (>~1e5 tokens) found
134
+ ```
135
 
136
  ### Citation Information
137
  ```
data/{ncc/ncc.parquet → ncc_newspaper/ncc_newspaper.parquet} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f0366a6adccb2a85dded98063eb9dada0e6ef1125e0d819fe9982fa41edea529
- size 2812773765
+ oid sha256:9c7c54cb4d95bafb3863b1a1560d8192adfb20e28fe26a02b78ddee4e08dd109
+ size 2409158
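The pointer above stores only the SHA-256 and byte size of the LFS-tracked parquet; once the actual file has been fetched (e.g. after an LFS checkout), both values can be verified locally with a short sketch like this (the path is assumed relative to the repository root):

```py
# Check a fetched LFS file against the oid/size recorded in its pointer.
import hashlib
from pathlib import Path

payload = Path("data/ncc_newspaper/ncc_newspaper.parquet").read_bytes()
print(hashlib.sha256(payload).hexdigest() == "9c7c54cb4d95bafb3863b1a1560d8192adfb20e28fe26a02b78ddee4e08dd109")
print(len(payload) == 2409158)
```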