KennethEnevoldsen committed on
Commit
84883d8
·
1 Parent(s): 62e1cea

initial addition

Files changed (35)
  1. .vscode/settings.json +1 -1
  2. README.md +66 -29
  3. data/ai-aktindsigt/ai-aktindsigt.md +90 -0
  4. data/ai-aktindsigt/ai-aktindsigt.parquet +3 -0
  5. data/ai-aktindsigt/create.py +63 -0
  6. data/ai-aktindsigt/descriptive_stats.json +7 -0
  7. data/ai-aktindsigt/images/dist_document_length.png +3 -0
  8. data/cellar/cellar.md +82 -0
  9. data/cellar/cellar.parquet +3 -0
  10. data/cellar/create.py +60 -0
  11. data/cellar/descriptive_stats.json +7 -0
  12. data/cellar/images/dist_document_length.png +3 -0
  13. data/danske-taler/create.py +219 -0
  14. data/danske-taler/danske-taler.md +138 -0
  15. data/danske-taler/danske-taler.parquet +3 -0
  16. data/danske-taler/descriptive_stats.json +7 -0
  17. data/danske-taler/images/dist_document_length.png +3 -0
  18. data/eur-lex-sum-da/create.py +50 -0
  19. data/eur-lex-sum-da/descriptive_stats.json +7 -0
  20. data/eur-lex-sum-da/eur-lex-sum-da.md +86 -0
  21. data/eur-lex-sum-da/eur-lex-sum-da.parquet +3 -0
  22. data/eur-lex-sum-da/images/dist_document_length.png +3 -0
  23. data/fm-udgivelser/create.py +50 -0
  24. data/fm-udgivelser/descriptive_stats.json +7 -0
  25. data/fm-udgivelser/fm-udgivelser.md +92 -0
  26. data/fm-udgivelser/fm-udgivelser.parquet +3 -0
  27. data/fm-udgivelser/images/dist_document_length.png +3 -0
  28. data/miljoeportalen/create.py +50 -0
  29. data/miljoeportalen/descriptive_stats.json +7 -0
  30. data/miljoeportalen/images/dist_document_length.png +3 -0
  31. data/miljoeportalen/miljoeportalen.md +103 -0
  32. data/miljoeportalen/miljoeportalen.parquet +3 -0
  33. src/tests/test_dataset_schema.py +1 -1
  34. src/update_descriptive_statistics.py +33 -11
  35. uv.lock +1 -1
.vscode/settings.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "python.testing.pytestArgs": [
3
- "src/tests"
4
  ],
5
  "python.testing.unittestEnabled": false,
6
  "python.testing.pytestEnabled": true,
 
1
  {
2
  "python.testing.pytestArgs": [
3
+ "src"
4
  ],
5
  "python.testing.unittestEnabled": false,
6
  "python.testing.pytestEnabled": true,
README.md CHANGED
@@ -5,6 +5,30 @@ configs:
5
  data_files:
6
  - split: train
7
  path: 'data/*/*.parquet'
8
  - config_name: memo
9
  data_files:
10
  - split: train
@@ -275,34 +299,46 @@ This data generally contains no annotation besides the metadata attached to each
275
  Below follows a brief overview of the sources in the corpus along with their individual license.
276
 
277
  <!-- START-MAIN TABLE -->
278
- | Source | Description | N. Tokens | License |
279
- |:--------------------|:-----------------------------------------------------------------------------------------------------------------------------|:------------|:-----------------------|
280
- | [memo] | The MeMo corpus comprising almost all Danish novels from the period 1870-1899, known as the Modern Breakthrough | 9.28M | [CC-BY-SA 4.0] |
281
- | [opensubtitles] | Danish subsection of [OpenSubtitles](https://opus.nlpl.eu/OpenSubtitles/corpus/version/OpenSubtitles) | 271.60M | [CC-0] |
282
- | [retsinformationdk] | [retsinformation.dk](https://www.retsinformation.dk) (legal-information.dk) the official legal information system of Denmark | 516.54M | [Danish Copyright Law] |
283
- | [ep] | The Danish subsection of [Europarl](https://aclanthology.org/2005.mtsummit-papers.11/) | 100.89M | [CC-0] |
284
- | [ft] | Records from all meetings of The Danish parliament (Folketinget) in the parliament hall | 114.09M | [CC-0] |
285
- | [wikisource] | The Danish subsection of [Wikisource](https://en.wikisource.org/wiki/Main_Page) | 5.34M | [CC-0] |
286
- | [spont] | Conversational samples collected as a part of research projects at Aarhus University | 1.56M | [CC-0] |
287
- | [tv2r] | Contemporary Danish newswire articles published between 2010 and 2019 | 21.67M | [CC-BY-SA 4.0] |
288
- | [adl] | Danish literature from 1700-2023 from the Archive for Danish Literature (ADL) | 58.49M | [CC-0] |
289
- | [hest] | Samples from the Danish debate forum www.heste-nettet.dk | 389.33M | [CC-0] |
290
- | [skat] | Skat is the Danish tax authority. This dataset contains content from its website skat.dk | 122.12M | [CC-0] |
291
- | [dannet] | [DanNet](https://cst.ku.dk/projekter/dannet) is a Danish WordNet | 1.52M | [DanNet 1.0 License] |
292
- | [retspraksis] | Case law or judical practice in Denmark derived from [Retspraksis](https://da.wikipedia.org/wiki/Retspraksis) | 57.08M | [CC-0] |
293
- | [wikibooks] | The Danish Subsection of [Wikibooks](https://www.wikibooks.org) | 6.24M | [CC-0] |
294
- | [jvj] | The works of the Danish author and poet, [Johannes V. Jensen](https://da.wikipedia.org/wiki/Johannes_V._Jensen) | 3.55M | [CC-BY-SA 4.0] |
295
- | [gutenberg] | The Danish subsection from Project [Gutenberg](https://www.gutenberg.org) | 6.76M | [Gutenberg License] |
296
- | [botxt] | The Bornholmsk Ordbog Dictionary Projec | 847.97K | [CC-0] |
297
- | [depbank] | The Danish subsection of the [Universal Dependencies Treebank](https://github.com/UniversalDependencies/UD_Danish-DDT) | 185.45K | [CC-BY-SA 4.0] |
298
- | [naat] | Danish speeches from 1930-2022 | 286.68K | [CC-0] |
299
- | [synne] | Dataset collected from [synnejysk forening's website](https://www.synnejysk.dk), covering the Danish dialect sønderjysk | 52.51K | [CC-0] |
300
- | [wiki] | The Danish subsection of [wikipedia](https://en.wikipedia.org/wiki/Main_Page) | 122.00M | [CC-0] |
301
- | [nordjyllandnews] | Articles from the Danish Newspaper [TV2 Nord](https://www.tv2nord.dk) | 37.91M | [CC-0] |
302
- | [relig] | Danish religious text from the 1700-2022 | 1.24M | [CC-0] |
303
- | [nota] | The text only part of the [Nota lyd- og tekstdata](https://sprogteknologi.dk/dataset/nota-lyd-og-tekstdata) dataset | 7.30M | [CC-0] |
304
- | **Total** | | 1.86B | |
305
-
306
  [memo]: data/memo/memo.md
307
  [opensubtitles]: data/opensubtitles/opensubtitles.md
308
  [retsinformationdk]: data/retsinformationdk/retsinformationdk.md
@@ -331,13 +367,14 @@ Below follows a brief overview of the sources in the corpus along with their ind
331
 
332
  [CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en
333
  [CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en
 
334
  [Danish Copyright Law]: ./data/retsinformationdk/retsinformationdk.md#license-information
335
  [DanNet 1.0 License]: ./data/dannet/dannet.md#license-information
336
  [Gutenberg License]: ./data/gutenberg/gutenberg.md#license-information
337
  <!-- END-MAIN TABLE -->
338
 
339
 
340
- You can learn more about each dataset by pressing
341
 
342
  <!-- ### Quality Control
343
 
 
5
  data_files:
6
  - split: train
7
  path: 'data/*/*.parquet'
8
+ - config_name: ai-aktindsigt
9
+ data_files:
10
+ - split: train
11
+ path: data/ai-aktindsigt/*.parquet
12
+ - config_name: cellar
13
+ data_files:
14
+ - split: train
15
+ path: data/cellar/*.parquet
16
+ - config_name: danske-taler
17
+ data_files:
18
+ - split: train
19
+ path: data/danske-taler/*.parquet
20
+ - config_name: eur-lex-sum-da
21
+ data_files:
22
+ - split: train
23
+ path: data/eur-lex-sum-da/*.parquet
24
+ - config_name: miljoeportalen
25
+ data_files:
26
+ - split: train
27
+ path: data/miljoeportalen/*.parquet
28
+ - config_name: fm-udgivelser
29
+ data_files:
30
+ - split: train
31
+ path: data/fm-udgivelser/*.parquet
32
  - config_name: memo
33
  data_files:
34
  - split: train
 
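Each added `config_name` maps one subset to its parquet files, so a subset can be loaded on its own. A minimal sketch of doing so (the repository id below is a placeholder for this dataset's id on the Hugging Face Hub; any config name from the list above can be substituted):

```py
from datasets import load_dataset

# Load a single subset by its config name, e.g. the "cellar" subset.
ds = load_dataset("<org>/<this-dataset>", "cellar", split="train")
print(ds[0]["source"], ds[0]["id"])
```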
299
  Below follows a brief overview of the sources in the corpus along with their individual license.
300
 
301
  <!-- START-MAIN TABLE -->
302
+ | Source | Description | N. Tokens | License |
303
+ |:--------------------|:----------------------------------------------------------------------------------------------------------------------------------------------|:------------|:-----------------------|
304
+ | [synne] | Dataset collected from [synnejysk forening's website](https://www.synnejysk.dk), covering the Danish dialect sønderjysk | 52.51K | [CC-0] |
305
+ | [depbank] | The Danish subsection of the [Universal Dependencies Treebank](https://github.com/UniversalDependencies/UD_Danish-DDT) | 185.45K | [CC-BY-SA 4.0] |
306
+ | [naat] | Danish speeches from 1930-2022 | 286.68K | [CC-0] |
307
+ | [botxt] | The Bornholmsk Ordbog Dictionary Project | 847.97K | [CC-0] |
308
+ | [relig] | Danish religious texts from 1700-2022 | 1.24M | [CC-0] |
309
+ | [dannet] | [DanNet](https://cst.ku.dk/projekter/dannet) is a Danish WordNet | 1.52M | [DanNet 1.0 License] |
310
+ | [spont] | Conversational samples collected as a part of research projects at Aarhus University | 1.56M | [CC-0] |
311
+ | [jvj] | The works of the Danish author and poet, [Johannes V. Jensen](https://da.wikipedia.org/wiki/Johannes_V._Jensen) | 3.55M | [CC-BY-SA 4.0] |
312
+ | [wikisource] | The Danish subsection of [Wikisource](https://en.wikisource.org/wiki/Main_Page) | 5.34M | [CC-0] |
313
+ | [wikibooks] | The Danish Subsection of [Wikibooks](https://www.wikibooks.org) | 6.24M | [CC-0] |
314
+ | [gutenberg] | The Danish subsection from Project [Gutenberg](https://www.gutenberg.org) | 6.76M | [Gutenberg License] |
315
+ | [nota] | The text only part of the [Nota lyd- og tekstdata](https://sprogteknologi.dk/dataset/nota-lyd-og-tekstdata) dataset | 7.30M | [CC-0] |
316
+ | [danske-taler] | Danish Speeches from [dansketaler.dk](https://www.dansketaler.dk) | 8.23M | [CC-0] |
317
+ | [memo] | The MeMo corpus comprising almost all Danish novels from the period 1870-1899, known as the Modern Breakthrough | 9.28M | [CC-BY-SA 4.0] |
318
+ | [tv2r] | Contemporary Danish newswire articles published between 2010 and 2019 | 21.67M | [CC-BY-SA 4.0] |
319
+ | [eur-lex-sum-da] | The Danish subsection of EUR-lex SUM consisting of EU legislation paired with professionally written summaries | 31.37M | [CC-BY-SA 4.0] |
320
+ | [nordjyllandnews] | Articles from the Danish Newspaper [TV2 Nord](https://www.tv2nord.dk) | 37.91M | [CC-0] |
321
+ | [fm-udgivelser] | The official publication series of the Danish Ministry of Finance containing economic analyses, budget proposals, and fiscal policy documents | 50.34M | [CC-BY-SA 4.0] |
322
+ | [retspraksis] | Case law or judicial practice in Denmark derived from [Retspraksis](https://da.wikipedia.org/wiki/Retspraksis) | 57.08M | [CC-0] |
323
+ | [adl] | Danish literature from 1700-2023 from the Archive for Danish Literature (ADL) | 58.49M | [CC-0] |
324
+ | [ep] | The Danish subsection of [Europarl](https://aclanthology.org/2005.mtsummit-papers.11/) | 100.89M | [CC-0] |
325
+ | [ft] | Records from all meetings of The Danish parliament (Folketinget) in the parliament hall | 114.09M | [CC-0] |
326
+ | [wiki] | The Danish subsection of [wikipedia](https://en.wikipedia.org/wiki/Main_Page) | 122.00M | [CC-0] |
327
+ | [skat] | Skat is the Danish tax authority. This dataset contains content from its website skat.dk | 122.12M | [CC-0] |
328
+ | [miljoeportalen] | Data from [Danmarks Miljøportalen](https://www.miljoeportal.dk/om-danmarks-miljoeportal/) (Denmark's Environment Portal) | 128.48M | [CC-0] |
329
+ | [ai-aktindsigt] | Multiple web scrapes from municipality websites collected as a part of the [AI-aktindsigt](https://ai-aktindsigt.dk) project | 139.23M | [Apache License 2.0] |
330
+ | [opensubtitles] | Danish subsection of [OpenSubtitles](https://opus.nlpl.eu/OpenSubtitles/corpus/version/OpenSubtitles) | 271.60M | [CC-0] |
331
+ | [hest] | Samples from the Danish debate forum www.heste-nettet.dk | 389.33M | [CC-0] |
332
+ | [retsinformationdk] | [retsinformation.dk](https://www.retsinformation.dk) (legal-information.dk) the official legal information system of Denmark | 516.54M | [Danish Copyright Law] |
333
+ | [cellar] | The official digital repository for European Union legal documents and open data | 1.28B | [CC-BY-SA 4.0] |
334
+ | **Total** | | 3.49B | |
335
+
336
+ [ai-aktindsigt]: data/ai-aktindsigt/ai-aktindsigt.md
337
+ [cellar]: data/cellar/cellar.md
338
+ [danske-taler]: data/danske-taler/danske-taler.md
339
+ [eur-lex-sum-da]: data/eur-lex-sum-da/eur-lex-sum-da.md
340
+ [miljoeportalen]: data/miljoeportalen/miljoeportalen.md
341
+ [fm-udgivelser]: data/fm-udgivelser/fm-udgivelser.md
342
  [memo]: data/memo/memo.md
343
  [opensubtitles]: data/opensubtitles/opensubtitles.md
344
  [retsinformationdk]: data/retsinformationdk/retsinformationdk.md
 
367
 
368
  [CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en
369
  [CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en
370
+ [Apache License 2.0]: https://www.apache.org/licenses/LICENSE-2.0
371
  [Danish Copyright Law]: ./data/retsinformationdk/retsinformationdk.md#license-information
372
  [DanNet 1.0 License]: ./data/dannet/dannet.md#license-information
373
  [Gutenberg License]: ./data/gutenberg/gutenberg.md#license-information
374
  <!-- END-MAIN TABLE -->
375
 
376
 
377
+ You can learn more about each dataset by following the links in the table above.
378
 
379
  <!-- ### Quality Control
380
 
data/ai-aktindsigt/ai-aktindsigt.md ADDED
@@ -0,0 +1,90 @@
1
+ ---
2
+ pretty_name: AI Aktindsigt
3
+ language:
4
+ - da
5
+ license: apache-2.0
6
+ license_name: Apache License 2.0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ source_datasets:
13
+ - AI-aktindsigt/Skrabet_kommunale_hjemmesider
14
+ ---
15
+
16
+ # Dataset Card for AI Aktindsigt
17
+
18
+ <!-- START-SHORT DESCRIPTION -->
19
+ Multiple web scrapes from municipality websites collected as a part of the [AI-aktindsigt](https://ai-aktindsigt.dk) project.
20
+ <!-- END-SHORT DESCRIPTION -->
21
+
22
+ The dataset consists of multiple scrapes of municipal websites compiled in connection with the work on the [AI-aktindsigt](https://ai-aktindsigt.dk) project. The scrapes cover different domains from several municipalities.
23
+
24
+ ## Dataset Description
25
+
26
+
27
+ <!-- START-DESC-STATS -->
28
+ - **Language**: dan, dansk, Danish
29
+ - **Number of samples**: 200.91K
30
+ - **Number of tokens (Llama 3)**: 139.23M
31
+ - **Average document length (characters)**: 2030.75
32
+ <!-- END-DESC-STATS -->
33
+
34
+
35
+ ## Dataset Structure
36
+ An example from the dataset looks as follows.
37
+
38
+
39
+ <!-- START-SAMPLE -->
40
+ ```py
41
+ {
42
+ "text": "Vallensbæk Stationstorv 100 2665 Vallensbæk Strand Telefon: +45 4797 4000",
43
+ "source": "ai-aktindsigt",
44
+ "id": "ai-aktindsigt_0",
45
+ "added": "2025-03-24",
46
+ "created": "2010-01-01, 2024-03-18",
47
+ "license": "Apache-2.0",
48
+ "domain": "Web",
49
+ "metadata": {
50
+ "source-pretty": "AI Aktindsigt"
51
+ }
52
+ }
53
+ ```
54
+
55
+ ### Data Fields
56
+
57
+ An entry in the dataset consists of the following fields:
58
+
59
+ - `text` (`str`): The content of the document.
60
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
61
+ - `id` (`str`): A unique identifier for each document.
62
+ - `added` (`str`): The date the document was added to this collection.
63
+ - `created` (`str`): The date range in which the document was originally created.
64
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
65
+ - `domain` (`str`): The domain of the source.
66
+ - `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
67
+ - `metadata/*`: Potentially additional metadata.
68
+ <!-- END-SAMPLE -->
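Because this field layout is shared across the corpus, the subset can also be recovered from the combined corpus by filtering on `source`. A minimal sketch, assuming the combined corpus has already been loaded as `ds` (see the main README):

```py
# Keep only documents originating from this source.
ai_aktindsigt = ds.filter(lambda example: example["source"] == "ai-aktindsigt")

# Inspect the documented fields of the first record.
example = ai_aktindsigt[0]
print(example["id"], example["domain"], example["license"], example["metadata"]["source-pretty"])
```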
69
+
70
+
71
+ ### Dataset Statistics
72
+
73
+ <!-- START-DATASET PLOTS -->
74
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
75
+ <img>
76
+ <!-- END-DATASET PLOTS -->
77
+
78
+
79
+
80
+ ## Additional Information
81
+
82
+
83
+
84
+ ### Source Data
85
+ This dataset is derived from [`AI-aktindsigt/Skrabet_kommunale_hjemmesider`](https://huggingface.co/datasets/AI-aktindsigt/Skrabet_kommunale_hjemmesider/tree/main).
86
+
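A minimal sketch of pulling the upstream source, assuming the repository can be loaded directly with `load_dataset` (the raw columns differ from the processed subset; see `create.py` for the mapping):

```py
from datasets import load_dataset

# Load the raw municipal web scrape that this subset was derived from.
raw = load_dataset("AI-aktindsigt/Skrabet_kommunale_hjemmesider", split="train")
print(raw)
```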
87
+
88
+ ### Citation Information
89
+
90
+ No citation is applicable for this work. We recommend citing the huggingface repository.
data/ai-aktindsigt/ai-aktindsigt.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b395bdf2fe3c9f7beb8b7073a1aea72952237e2a63965302ebd199ca46af632a
3
+ size 213799195
data/ai-aktindsigt/create.py ADDED
@@ -0,0 +1,63 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "datasets>=3.2.0",
5
+ # ]
6
+ # ///
7
+ """
8
+ This script is used to create the data for the AI-aktindsigt project.
9
+
10
+ It derives the data from a .jsonl.gz file.
11
+ """
12
+
13
+ from pathlib import Path
14
+ from typing import cast
15
+
16
+ from datasets import Dataset, load_dataset
17
+
18
+ source = "ai-aktindsigt"
19
+
20
+ def convert_sample(example):
21
+ # {'text': 'Vallensbæk Stationstorv 100 2665 Vallensbæk Strand Telefon: +45 4797 4000',
22
+ # 'id': '0_03fe7662f6d37df0ffbf5013907414f935350db9931043891a95ed830965a507a7bcb4df93741429bdfa4958cf25f6c273aa73146f2be80948f767eb5fa04645',
23
+ # 'source': 'AI-aktindsigt',
24
+ # 'added': '2024-04-16T12:35:52.000Z',
25
+ # 'metadata': {'url': 'https://vallensbaek.dk/', 'kommune': 'vallensbaek', 'sentence': 1,
26
+ # 'ppl_score': [634.6341],
27
+ # 'sha512': '03fe7662f6d37df0ffbf5013907414f935350db9931043891a95ed830965a507a7bcb4df93741429bdfa4958cf25f6c273aa73146f2be80948f767eb5fa04645'}
28
+ # }
29
+
30
+ new_example = dict(
31
+ text_new=example["text"],
32
+ source=source,
33
+ domain="Web",
34
+ license="Apache-2.0",
35
+ added="2025-03-24",
36
+ created="2010-01-01, 2024-03-18", # Start date is approximate guess end date is the date of the last update
37
+ metadata={"source-pretty": "AI Aktindsigt"},
38
+ )
39
+
40
+ return new_example
41
+
42
+
43
+ def main():
44
+ data_path = Path(
45
+ "/work/dfm-data/pre-training/ai_aktindsigt/documents/ai_aktindsigt.jsonl.gz"
46
+ )
47
+ ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
48
+
49
+ ds = cast(Dataset, ds)
50
+
51
+ ds = ds.map(convert_sample, remove_columns=ds.column_names)
52
+ ds = ds.rename_columns({"text_new": "text"})
53
+ ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))]) # type: ignore
54
+ ds = ds.select_columns(
55
+ ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
56
+ )
57
+
58
+ save_path = Path(__file__).parent / f"{source}.parquet"
59
+ ds.to_parquet(save_path)
60
+
61
+
62
+ if __name__ == "__main__":
63
+ main()
data/ai-aktindsigt/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "number_of_samples": 200914,
3
+ "average_document_length": 2030.7490916511542,
4
+ "number_of_tokens": 139234696,
5
+ "language": "dan, dansk, Danish",
6
+ "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
7
+ }
data/ai-aktindsigt/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: ffd9fd1eec77175d6957fe7efff104e37a2343f24ce521300c479b8e448e023e
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
data/cellar/cellar.md ADDED
@@ -0,0 +1,82 @@
1
+ ---
2
+ pretty_name: Cellar
3
+ language:
4
+ - da
5
+ license: cc-by-sa-4.0
6
+ license_name: CC-BY-SA 4.0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ ---
13
+
14
+ # Dataset Card for Cellar
15
+
16
+ <!-- START-SHORT DESCRIPTION -->
17
+ The official digital repository for European Union legal documents and open data.
18
+ <!-- END-SHORT DESCRIPTION -->
19
+
20
+ The EU Dataset Cellar serves as the central access point for all official EU publications, legislation, and open data resources. Maintained by the Publications Office of the European Union, this comprehensive digital archive contains millions of documents in multiple languages, including regulations, directives, decisions, treaties, case law, and preparatory acts dating back decades. The repository employs standardized metadata and unique identifiers to organize its vast collection, making it an essential resource for researchers, legal professionals, policymakers, and citizens seeking authoritative information on EU law and policy. The Cellar's linked data architecture also enables sophisticated search capabilities and integration with other information systems across the European Union's digital landscape.
21
+
22
+
23
+ ## Dataset Description
24
+
25
+ <!-- START-DESC-STATS -->
26
+ - **Language**: dan, dansk, Danish
27
+ - **Number of samples**: 65.74K
28
+ - **Number of tokens (Llama 3)**: 1.28B
29
+ - **Average document length (characters)**: 64221.30
30
+ <!-- END-DESC-STATS -->
31
+
32
+
33
+ ## Dataset Structure
34
+ An example from the dataset looks as follows.
35
+
36
+
37
+ <!-- START-SAMPLE -->
38
+ ```py
39
+ {
40
+ "text": "\n\n\n\n© Европейски съюз, 2017 г.\n\nВъзпроизвеждането е разрешено при позоваване на оригинала.\n\n© Unión [...]",
41
+ "source": "cellar",
42
+ "id": "cellar_0",
43
+ "added": "2025-03-25",
44
+ "created": "2024-01-01, 2026-01-01",
45
+ "license": "cc-by-sa-4.0",
46
+ "domain": "Legal",
47
+ "metadata": {
48
+ "source-pretty": "Cellar"
49
+ }
50
+ }
51
+ ```
52
+
53
+ ### Data Fields
54
+
55
+ An entry in the dataset consists of the following fields:
56
+
57
+ - `text` (`str`): The content of the document.
58
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
59
+ - `id` (`str`): A unique identifier for each document.
60
+ - `added` (`str`): The date the document was added to this collection.
61
+ - `created` (`str`): The date range in which the document was originally created.
62
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
63
+ - `domain` (`str`): The domain of the source.
64
+ - `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
65
+ - `metadata/*`: Potentially additional metadata.
66
+ <!-- END-SAMPLE -->
67
+
68
+
69
+ ### Dataset Statistics
70
+
71
+ <!-- START-DATASET PLOTS -->
72
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
73
+ <img>
74
+ <!-- END-DATASET PLOTS -->
75
+
76
+
77
+
78
+ ## Additional Information
79
+
80
+ ### Citation Information
81
+
82
+ No citation is applicable for this work.
data/cellar/cellar.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90fb4cadd0d6ab84c7e1c6e9029210e0bfadaa1d9a503240067d4069d38b9bcd
3
+ size 1433372916
data/cellar/create.py ADDED
@@ -0,0 +1,60 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "datasets>=3.2.0",
5
+ # ]
6
+ # ///
7
+
8
+ from pathlib import Path
9
+ from typing import cast
10
+ from datasets import Dataset, load_dataset, concatenate_datasets
11
+
12
+ source = "cellar"
13
+
14
+
15
+ def convert_sample(example):
16
+ new_example = dict(
17
+ text_new=example["text"],
18
+ source=source,
19
+ domain="Legal",
20
+ license="cc-by-sa-4.0",
21
+ added="2025-03-25",
22
+ created="2024-01-01, 2026-01-01", # Scrape happened within these years - data likely written earlier
23
+ metadata={"source-pretty": "Cellar"},
24
+ )
25
+
26
+ return new_example
27
+
28
+
29
+ def main():
30
+ data_path = Path("/work/dfm-data/pre-training/cellar/documents")
31
+ data_paths = [p.as_posix() for p in data_path.glob("DAN*.jsonl.gz")]
32
+ dfs = []
33
+ for i, path in enumerate(data_paths):
34
+ print(i, path.split("/")[-1])
35
+ try:
36
+ ds = load_dataset(
37
+ "json", data_files=path, split="train"
38
+ ) # a few datasets fail to load
39
+ dfs.append(ds)
40
+ print("\tSuccess")
41
+ except Exception:
42
+ print("\tFail")
43
+
44
+ ds = concatenate_datasets(dsets=dfs)
45
+
46
+ ds = cast(Dataset, ds)
47
+
48
+ ds = ds.map(convert_sample, remove_columns=ds.column_names)
49
+ ds = ds.rename_columns({"text_new": "text"})
50
+ ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))]) # type: ignore
51
+ ds = ds.select_columns(
52
+ ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
53
+ )
54
+
55
+ save_path = Path(__file__).parent / f"{source}.parquet"
56
+ ds.to_parquet(save_path)
57
+
58
+
59
+ if __name__ == "__main__":
60
+ main()
data/cellar/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "number_of_samples": 65736,
3
+ "average_document_length": 64221.30202628697,
4
+ "number_of_tokens": 1280909738,
5
+ "language": "dan, dansk, Danish",
6
+ "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
7
+ }
data/cellar/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: d55c527928475485bf7f4b8b3050f5acd773e684546afd842c59f2f57a36c4d8
  • Pointer size: 131 Bytes
  • Size of remote file: 181 kB
data/danske-taler/create.py ADDED
@@ -0,0 +1,219 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "beautifulsoup4==4.13.3",
5
+ # "datasets>=3.0.0",
6
+ # ]
7
+ # ///
8
+ """
9
+ Danske Taler API Downloader
10
+ This script downloads speeches/articles from the Danske Taler API: https://www.dansketaler.dk/api/v1
11
+
12
+ It saves it into the following structure:
13
+
14
+ ```
15
+ {
16
+ "text": "Lav et referat af nedenstående tekst:\n\nTekst:\nOpdatering: Manden er nu fundet af Nordjyllands Politi[...]",
17
+ "source": "nordjyllandnews",
18
+ "id": "nordjyllandnews_0",
19
+ "added": "2024-12-16",
20
+ "created": "2000-01-01, 2024-01-01",
21
+ "license": "Creative Commons Legal Code\n\nCC0 1.0 Universal",
22
+ "domain": "News",
23
+ "metadata": {
24
+ "source-pretty": "Nordjylland News"
25
+ }
26
+ }
27
+ ```
28
+
29
+ """
30
+
31
+ import logging
32
+ import time
33
+ from datetime import date
34
+ from pathlib import Path
35
+ from typing import Any
36
+
37
+ import datasets
38
+ import pandas as pd
39
+ import requests
40
+ from bs4 import BeautifulSoup
41
+ from tqdm import tqdm
42
+
43
+ logger = logging.getLogger(__name__)
44
+
45
+ # Configuration
46
+ API_BASE_URL = "https://www.dansketaler.dk/api/v1"
47
+
48
+
49
+ def get_all_speeches() -> list[dict[str, Any]]:
50
+ # fetch first page, notably the total number of pages
51
+ url = f"{API_BASE_URL}/speeches"
52
+ response = requests.get(url)
53
+ response.raise_for_status()
54
+ speeches = response.json()
55
+ meta = speeches["meta"]
56
+ total_pages = meta["total_pages"]
57
+
58
+ # fetch all pages
59
+ all_speeches = []
60
+ for page in range(1, total_pages + 1):
61
+ url = f"{API_BASE_URL}/speeches?page={page}"
62
+ response = requests.get(url)
63
+ response.raise_for_status()
64
+ speeches = response.json()
65
+ all_speeches.extend(speeches["speeches"])
66
+
67
+ return all_speeches
68
+
69
+
70
+ def fetch_license_div(
71
+ url: str, max_retries: int = 3, backoff_factor: float = 0.5
72
+ ) -> str | None:
73
+ """
74
+ Fetches the license div from the page with retry logic.
75
+
76
+ Args:
77
+ url: The URL to fetch the license div from
78
+ max_retries: Maximum number of retry attempts
79
+ backoff_factor: Factor to determine exponential backoff time between retries
80
+
81
+ Returns:
82
+ The text content of the license div if found, None otherwise
83
+ """
84
+ retries = 0
85
+
86
+ while retries <= max_retries:
87
+ try:
88
+ response = requests.get(url, timeout=10)
89
+ response.raise_for_status()
90
+
91
+ soup = BeautifulSoup(response.text, "html.parser")
92
+ license_div = soup.find("div", class_="speech-copyright")
93
+
94
+ return license_div.text if license_div else None
95
+
96
+ except (requests.RequestException, AttributeError) as e:
97
+ retries += 1
98
+
99
+ if retries > max_retries:
100
+ print(f"Failed to fetch license after {max_retries} attempts: {str(e)}")
101
+ return None
102
+
103
+ # Calculate backoff time using exponential backoff
104
+ wait_time = backoff_factor * (2 ** (retries - 1))
105
+ print(f"Attempt {retries} failed. Retrying in {wait_time:.2f} seconds...")
106
+ time.sleep(wait_time)
107
+
108
+ return None
109
+
110
+
111
+ def convert_to_license(license_information: str | None) -> str | None:
112
+ """checks if "Materialet er fri af ophavsret" is in the page"""
113
+
114
+ if license_information and (
115
+ ("Materialet er fri af ophavsret" in license_information)
116
+ or ("Materialet er fri af ophvasret" in license_information)
117
+ or ("Ophavsretten er bortfaldet" in license_information)
118
+ or ("Manuskriptet er fri af ophavsret" in license_information)
119
+ or ("Offentlig " == license_information)
120
+ ):
121
+ return "cc0"
122
+
123
+ return license_information
124
+
125
+
126
+ def convert_to_row(speech_meta: dict[str, Any]) -> dict[str, Any]:
127
+ speech_id = speech_meta["id"]
128
+
129
+ date_of_speech = speech_meta["date"]["iso_date"]
130
+ date_of_speech_start = f"{date_of_speech}"
131
+ date_of_speech_end = f"{date_of_speech}"
132
+
133
+ license_information = fetch_license_div(speech_meta["url"])
134
+
135
+ row = {
136
+ "text": speech_meta["transcription"],
137
+ "source": "danske-taler",
138
+ "id": f"danske-taler_{speech_id}",
139
+ # current date
140
+ "added": date.today().isoformat(),
141
+ "created": f"{date_of_speech_start}, {date_of_speech_end}",
142
+ "license_information": license_information,
143
+ "domain": "Spoken",
144
+ "metadata": {"source-pretty": "Danske Taler"},
145
+ }
146
+
147
+ return row
148
+
149
+
150
+ def download_speeches() -> pd.DataFrame:
151
+ logger.info("Fetching all speeches from Danske Taler API")
152
+ speeches = get_all_speeches()
153
+ logger.info(f"Found {len(speeches)} speeches")
154
+
155
+ rows = []
156
+ for speech in tqdm(speeches):
157
+ row = convert_to_row(speech)
158
+ rows.append(row)
159
+
160
+ logger.info(f"Saving {len(rows)} speeches to dataset")
161
+ df = pd.DataFrame(rows)
162
+ return df
163
+
164
+
165
+ def main():
166
+ save_path = Path(__file__).parent / "danske-taler.parquet"
167
+ save_path_all = Path(__file__).parent / "tmp" / "danske-taler-all.parquet"
168
+ save_path_all.parent.mkdir(parents=False, exist_ok=True)
169
+
170
+ if save_path_all.exists():
171
+ logger.info(f"Loading dataset from {save_path_all}")
172
+ df = pd.read_parquet(save_path_all)
173
+ else:
174
+ logger.info(f"Downloading speeches and saving to {save_path_all}")
175
+ df = download_speeches()
176
+ df.to_parquet(save_path_all)
177
+
178
+ licenses = [convert_to_license(license) for license in df["license_information"]]
179
+ df["license"] = licenses
180
+
181
+ uniques_licenses = set(df["license"].tolist())
182
+ logger.info("Unique licenses:")
183
+ for license in uniques_licenses:
184
+ logger.info(f"\t{license}")
185
+
186
+ # remove documents without a cc0 license
187
+ len_df = len(df)
188
+ df = df[df["license"] == "cc0"]
189
+ logger.info(f"Removed {len_df - len(df)} documents without a cc0 license")
190
+
191
+ # remove duplicate ids
192
+ len_df = len(df)
193
+ df = df.drop_duplicates(subset=["id"])
194
+ logger.info(f"Removed {len_df - len(df)} duplicate ids")
195
+
196
+ # remove rows with empty text
197
+ len_df = len(df)
198
+ df = df[df["text"].str.strip() != ""]
199
+ logger.info(f"Removed {len_df - len(df)} rows with empty text")
200
+
201
+ # remove rows with duplicate text
202
+ len_df = len(df)
203
+ df = df.drop_duplicates(subset=["text"])
204
+ logger.info(f"Removed {len_df - len(df)} rows with duplicate text")
205
+
206
+ dataset = datasets.Dataset.from_pandas(df)
207
+ assert len(set(dataset["id"])) == len(dataset), "IDs are not unique"
208
+ assert len(set(dataset["text"])) == len(dataset), "Texts are not unique"
209
+ assert len(set(dataset["license"])) == 1, "Multiple licenses found"
210
+
211
+ # check for html tags in text
212
+ assert not df["text"].str.contains("<[^>]*>").any(), "HTML tags found in text"
213
+
214
+ dataset.to_parquet(save_path)
215
+
216
+
217
+ if __name__ == "__main__":
218
+ logging.basicConfig(level=logging.INFO)
219
+ main()
data/danske-taler/danske-taler.md ADDED
@@ -0,0 +1,138 @@
1
+ ---
2
+ pretty_name: Danske Taler
3
+ language:
4
+ - da
5
+ license: cc0-1.0
6
+ license_name: CC-0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ ---
13
+
14
+ # Dataset Card for Danske Taler
15
+
16
+ <!-- START-SHORT DESCRIPTION -->
17
+ Danish Speeches from [dansketaler.dk](https://www.dansketaler.dk)
18
+ <!-- END-SHORT DESCRIPTION -->
19
+
20
+
21
+ The database dansketaler.dk is managed by Danske Taler, an independent institution that, in addition to maintaining the database, carries out cultural
22
+ and democratic projects based on speeches.
23
+ Danske Taler states that its goal is to preserve our cultural heritage and to promote active citizenship and democratic confidence through its work.
24
+ Additionally, Danske Taler provides data to a number of online resources, including: lex.dk, sprogteknologi.dk, and ordnet.dk.
25
+
26
+ The goal of the dataset is to collect historical and timely speeches and make them available for the public.
27
+
28
+ Learn more about Danske Taler by reading their [about us](https://www.dansketaler.dk/om-os) page.
29
+
30
+ ## Dataset Description
31
+
32
+
33
+ <!-- START-DESC-STATS -->
34
+ - **Language**: dan, dansk, Danish
35
+ - **Number of samples**: 2.66K
36
+ - **Number of tokens (Llama 3)**: 8.23M
37
+ - **Average document length (characters)**: 9446.88
38
+ <!-- END-DESC-STATS -->
39
+
40
+
41
+ ## Dataset Structure
42
+ An example from the dataset looks as follows.
43
+
44
+
45
+ <!-- START-SAMPLE -->
46
+ ```py
47
+ {
48
+ "text": "Den 1. august i år var der forløbet 25 år siden den sidste verdenskrigs udbrud. En måned senere - de[...]",
49
+ "source": "danske-taler",
50
+ "id": "danske-taler_278",
51
+ "added": "2025-03-28",
52
+ "created": "1939-09-20T00:00:00Z, 1939-09-20T23:59:59Z",
53
+ "license": "cc0",
54
+ "license_information": "Materialet er fri af ophavsret",
55
+ "domain": "Spoken",
56
+ "metadata": {
57
+ "source-pretty": "Danske Taler"
58
+ },
59
+ "__index_level_0__": 20
60
+ }
61
+ ```
62
+
63
+ ### Data Fields
64
+
65
+ An entry in the dataset consists of the following fields:
66
+
67
+ - `text` (`str`): The content of the document.
68
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
69
+ - `id` (`str`): A unique identifier for each document.
70
+ - `added` (`str`): The date the document was added to this collection.
71
+ - `created` (`str`): The date range in which the document was originally created.
72
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
73
+ - `domain` (`str`): The domain of the source.
74
+ - `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
75
+ - `metadata/*`: Potentially additional metadata.
76
+ <!-- END-SAMPLE -->
77
+
78
+
79
+ ### Dataset Statistics
80
+
81
+ <!-- START-DATASET PLOTS -->
82
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
83
+ <img>
84
+ <!-- END-DATASET PLOTS -->
85
+
86
+
87
+
88
+ ## Additional Information
89
+
90
+
91
+ ### Dataset Collection Process
92
+
93
+ This dataset was collected using the publicly available [API](https://www.dansketaler.dk/api/v1).
94
+
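A minimal sketch of querying that API, using the same endpoint and pagination fields (`meta.total_pages` and the `speeches` list) as `create.py`; it is illustrative rather than the exact collection code:

```py
import requests

API_BASE_URL = "https://www.dansketaler.dk/api/v1"

# Fetch the first page of speeches and read the pagination metadata.
response = requests.get(f"{API_BASE_URL}/speeches", params={"page": 1}, timeout=10)
response.raise_for_status()
payload = response.json()

print(payload["meta"]["total_pages"], len(payload["speeches"]))
```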
95
+ ### Quality Assurance
96
+ We check for and remove exact duplicates, empty texts, and duplicate ids after the initial download. We additionally check whether the articles contain any HTML.
97
+
98
+ ## Opportunities for Improvement
99
+
100
+ This dataset can be updated to include the latest available speeches.
101
+
102
+ We consider the quality of the current collection high with a low chance of
103
+ incorrect formatting,
104
+ spelling errors,
105
+ empty documents or
106
+ misformatted segments.
107
+ This stems from the quality assurance process, the source of the documents, and subjective inspection.
108
+
109
+ ### License Information
110
+ Since the license information isn't available through the API, we collect this data directly from the webpage of each article under the header
111
+ "Ophavsret".
112
+
113
+ For speeches where it is noted that *"Materialet er fri af ophavsret"* (the material is in the public domain) or similar, we assign a `cc0` license.
114
+
115
+ Such an example can be seen here:
116
+
117
+ > **Ophavsret**
118
+ >
119
+ > Materialet er fri af ophavsret. Taler, som er holdt i offentligheden, er ikke omfattet af ophavsret (Jf. ophavsretslovens § 26 og 32).
120
+ > Det betyder, at når en tale er indgået i Danske Talers database, kan den bruges af tredjeparter, fx til undervisning eller forskning.
121
+ >
122
+ > *source: [Ursula von der Leyens tale om europæisk forsvar og sikkerhed på Hærens Officersskole](https://www.dansketaler.dk/tale/tale-om-europaeisk-forsvar-og-sikkerhed-pa-haerens-officersskole)*
123
+
124
+ Speeches without this mention are removed. One such example:
125
+
126
+ > **Ophavsret**
127
+ >
128
+ > Materialet er beskyttet af ophavsret
129
+ >
130
+ > *Source: [Christina Egelunds tale ved Aarhus Universitets årsfest](https://www.dansketaler.dk/tale/christina-egelunds-tale-ved-aarhus-universitets-arsfest)*
131
+
132
+ We manually checked the unique set of license descriptions to see if any were open licenses that weren't included in the current criteria.
133
+
134
+ For specific filtering criteria see the `create.py` script.
135
+
136
+ ### Citation Information
137
+
138
+ No citation is applicable for this work. We recommend citing the huggingface repository.
data/danske-taler/danske-taler.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:654fbad07d68e4ec79dfa09aca5e6f9e96a889d336872b7b6d562fcbb83cbd94
3
+ size 15091764
data/danske-taler/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "number_of_samples": 2657,
3
+ "average_document_length": 9446.875799774181,
4
+ "number_of_tokens": 8225350,
5
+ "language": "dan, dansk, Danish",
6
+ "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
7
+ }
data/danske-taler/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: 8abb94950063fe4e069fc28e9e4e03d5db685166b9696ccf83247c855320a53e
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
data/eur-lex-sum-da/create.py ADDED
@@ -0,0 +1,50 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "datasets>=3.2.0",
5
+ # ]
6
+ # ///
7
+
8
+ from pathlib import Path
9
+ from typing import cast
10
+
11
+ from datasets import Dataset, load_dataset
12
+
13
+ source = "eur-lex-sum-da"
14
+
15
+
16
+ def convert_sample(example):
17
+ new_example = dict(
18
+ text_new=example["text"],
19
+ source=source,
20
+ domain="Legal",
21
+ license="cc-by-sa-4.0",
22
+ added="2025-03-24",
23
+ created="2024-01-01, 2025-01-01", # Scrape happen within the year - data likely written earlier
24
+ metadata={"source-pretty": "Eur-lex-sum-da"},
25
+ )
26
+
27
+ return new_example
28
+
29
+
30
+ def main():
31
+ data_path = Path(
32
+ "/work/dfm-data/pre-training/eur-lex-sum-da/documents/eur-lex-sum-da.jsonl.gz"
33
+ )
34
+ ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
35
+
36
+ ds = cast(Dataset, ds)
37
+
38
+ ds = ds.map(convert_sample, remove_columns=ds.column_names)
39
+ ds = ds.rename_columns({"text_new": "text"})
40
+ ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))]) # type: ignore
41
+ ds = ds.select_columns(
42
+ ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
43
+ )
44
+
45
+ save_path = Path(__file__).parent / f"{source}.parquet"
46
+ ds.to_parquet(save_path)
47
+
48
+
49
+ if __name__ == "__main__":
50
+ main()
data/eur-lex-sum-da/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "number_of_samples": 1002,
3
+ "average_document_length": 87627.37025948103,
4
+ "number_of_tokens": 31367665,
5
+ "language": "dan, dansk, Danish",
6
+ "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
7
+ }
data/eur-lex-sum-da/eur-lex-sum-da.md ADDED
@@ -0,0 +1,86 @@
1
+ ---
2
+ pretty_name: EUR-Lex SUM
3
+ language:
4
+ - da
5
+ license: cc-by-sa-4.0
6
+ license_name: CC-BY-SA 4.0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ ---
13
+
14
+ # Dataset Card for EUR-Lex SUM
15
+
16
+ <!-- START-SHORT DESCRIPTION -->
17
+ The Danish subsection of EUR-lex SUM consisting of EU legislation paired with professionally written summaries.
18
+ <!-- END-SHORT DESCRIPTION -->
19
+
20
+ EUR-Lex SUM is a dataset containing summaries of EU legislation from the EUR-Lex database. It consists of pairs of full legal texts and their corresponding professionally written summaries, covering European Union legal documents.
21
+ The dataset is designed for training and evaluating automatic text summarization systems, particularly for legal documents. It's valuable for natural language processing (NLP) research since it provides high-quality, human-written summaries of complex legal texts in a specialized domain.
22
+
23
+
24
+ ## Dataset Description
25
+
26
+ <!-- START-DESC-STATS -->
27
+ - **Language**: dan, dansk, Danish
28
+ - **Number of samples**: 1.00K
29
+ - **Number of tokens (Llama 3)**: 31.37M
30
+ - **Average document length (characters)**: 87627.37
31
+ <!-- END-DESC-STATS -->
32
+
33
+
34
+ ## Dataset Structure
35
+ An example from the dataset looks as follows.
36
+
37
+
38
+ <!-- START-SAMPLE -->
39
+ ```py
40
+ {
41
+ "text": "21.6.2019\nDA\nDen Europæiske Unions Tidende\nL 166/26\nKOMMISSIONENS DELEGEREDE FORORDNING (EU) 2019/98[...]",
42
+ "source": "eur-lex-sum-da",
43
+ "id": "eur-lex-sum-da_0",
44
+ "added": "2025-03-24 00:00:00",
45
+ "created": "2024-01-01, 2025-01-01",
46
+ "license": "cc-by-sa-4.0",
47
+ "domain": "Legal",
48
+ "metadata": {
49
+ "source-pretty": "Eur-lex-sum-da"
50
+ }
51
+ }
52
+ ```
53
+
54
+ ### Data Fields
55
+
56
+ An entry in the dataset consists of the following fields:
57
+
58
+ - `text` (`str`): The content of the document.
59
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
60
+ - `id` (`str`): A unique identifier for each document.
61
+ - `added` (`str`): The date the document was added to this collection.
62
+ - `created` (`str`): The date range in which the document was originally created.
63
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
64
+ - `domain` (`str`): The domain of the source.
65
+ - `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
66
+ - `metadata/*`: Potentially additional metadata.
67
+ <!-- END-SAMPLE -->
68
+
69
+
70
+ ### Dataset Statistics
71
+
72
+ <!-- START-DATASET PLOTS -->
73
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
74
+ <img>
75
+ <!-- END-DATASET PLOTS -->
76
+
77
+
78
+
79
+ ## Additional Information
80
+
81
+
82
+
83
+
84
+ ### Citation Information
85
+
86
+ No citation is applicable for this work.
data/eur-lex-sum-da/eur-lex-sum-da.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:960737a58b29e80b12f5150dacd7e0559c2ec7d3f2878a626e264b92595d9c02
3
+ size 35849965
data/eur-lex-sum-da/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: a351dbe01d1d663d848ea5ceaee24903b8814cbd914818c3487a0542efb0f872
  • Pointer size: 131 Bytes
  • Size of remote file: 185 kB
data/fm-udgivelser/create.py ADDED
@@ -0,0 +1,50 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "datasets>=3.2.0",
5
+ # ]
6
+ # ///
7
+
8
+ from pathlib import Path
9
+ from typing import cast
10
+
11
+ from datasets import Dataset, load_dataset
12
+
13
+ source = "fm-udgivelser"
14
+
15
+
16
+ def convert_sample(example):
17
+ new_example = dict(
18
+ text_new=example["text"],
19
+ source=source,
20
+ domain="Legal",
21
+ license="cc-by-sa-4.0",
22
+ added="2025-03-24",
23
+ created="2024-01-01, 2026-01-01", # Scrape happen within these years - data likely written earlier
24
+ metadata={"source-pretty": "Finansministeriets Udgivelser"},
25
+ )
26
+
27
+ return new_example
28
+
29
+
30
+ def main():
31
+ data_path = Path(
32
+ "/work/dfm-data/pre-training/fm-udgivelser/documents/finans-ministeriet.jsonl.gz"
33
+ )
34
+ ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
35
+
36
+ ds = cast(Dataset, ds)
37
+
38
+ ds = ds.map(convert_sample, remove_columns=ds.column_names)
39
+ ds = ds.rename_columns({"text_new": "text"})
40
+ ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))]) # type: ignore
41
+ ds = ds.select_columns(
42
+ ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
43
+ )
44
+
45
+ save_path = Path(__file__).parent / f"{source}.parquet"
46
+ ds.to_parquet(save_path)
47
+
48
+
49
+ if __name__ == "__main__":
50
+ main()
data/fm-udgivelser/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "number_of_samples": 443,
3
+ "average_document_length": 490101.9300225734,
4
+ "number_of_tokens": 50335291,
5
+ "language": "dan, dansk, Danish",
6
+ "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
7
+ }
data/fm-udgivelser/fm-udgivelser.md ADDED
@@ -0,0 +1,92 @@
1
+ ---
2
+ pretty_name: Finansministeriets Udgivelser
3
+ language:
4
+ - da
5
+ license: cc-by-sa-4.0
6
+ license_name: CC-BY-SA 4.0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ ---
13
+
14
+ # Dataset Card for Finansministeriets Udgivelser
15
+
16
+ <!-- START-SHORT DESCRIPTION -->
17
+ The official publication series of the Danish Ministry of Finance containing economic analyses, budget proposals, and fiscal policy documents.
18
+ <!-- END-SHORT DESCRIPTION -->
19
+
20
+ Finansministeriets Udgivelser (translated as "Publications of the Ministry of Finance") is the publishing arm or publication series of the Danish Ministry of Finance. It includes official reports, economic analyses, budget proposals, fiscal policy documents, and various other publications related to Denmark's public finances, economic policy, and financial governance.
21
+
22
+ These publications typically provide insights into Denmark's economic outlook, public spending plans, tax policies, and financial reforms. They serve as important reference materials for economists, policy makers, researchers, and citizens interested in understanding Denmark's financial policies and economic direction.
23
+
24
+ The publications are authoritative sources of information on Danish fiscal policy and are often used by various stakeholders to track and analyze the country's economic performance and public finance management.
25
+
26
+
27
+ ## Dataset Description
28
+
29
+ <!-- START-DESC-STATS -->
30
+ - **Language**: dan, dansk, Danish
31
+ - **Number of samples**: 443
32
+ - **Number of tokens (Llama 3)**: 50.34M
33
+ - **Average document length (characters)**: 490101.93
34
+ <!-- END-DESC-STATS -->
35
+
36
+
37
+ ## Dataset Structure
38
+ An example from the dataset looks as follows.
39
+
40
+
41
+ <!-- START-SAMPLE -->
42
+ ```py
43
+ {
44
+ "text": "\n\nFinanslov for\n\nfinansåret 2023 Tekst og anmærkninger\n\n§ 1. Dronningen\n\n\n\n\n\n§ 1.\n\nDronningen\n\nTekst[...]",
45
+ "source": "fm-udgivelser",
46
+ "id": "fm-udgivelser_0",
47
+ "added": "2025-03-24",
48
+ "created": "2024-01-01, 2026-01-01",
49
+ "license": "cc-by-sa-4.0",
50
+ "domain": "Legal",
51
+ "metadata": {
52
+ "source-pretty": "Finansministeriets Udgivelser"
53
+ }
54
+ }
55
+ ```
56
+
57
+ ### Data Fields
58
+
59
+ An entry in the dataset consists of the following fields:
60
+
61
+ - `text` (`str`): The content of the document.
62
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
63
+ - `id` (`str`): A unique identifier for each document.
64
+ - `added` (`str`): The date the document was added to this collection.
65
+ - `created` (`str`): The date range in which the document was originally created.
66
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
67
+ - `domain` (`str`): The domain of the source.
68
+ - `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
69
+ - `metadata/*`: Potentially additional metadata.
70
+ <!-- END-SAMPLE -->
71
+
72
+
73
+ ### Dataset Statistics
74
+
75
+ <!-- START-DATASET PLOTS -->
76
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
77
+ <img>
78
+ <!-- END-DATASET PLOTS -->
79
+
80
+
81
+
82
+ ## Additional Information
83
+
84
+ <!--
85
+ ### How was the data collected
86
+
87
+ TODO: KRISTIAN
88
+ -->
89
+
90
+ ### Citation Information
91
+
92
+ No citation is applicable for this work.
data/fm-udgivelser/fm-udgivelser.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:402b9281913aba87938e0600a994cb0387331efd7cf73829699b5103989d8747
3
+ size 59885539
data/fm-udgivelser/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: 1c15a9bbebc18ac4b07b46c8838414e9624c2b4cc5ffbfdcef4d1d97cad3162e
  • Pointer size: 131 Bytes
  • Size of remote file: 182 kB
data/miljoeportalen/create.py ADDED
@@ -0,0 +1,50 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "datasets>=3.2.0",
5
+ # ]
6
+ # ///
7
+
8
+ from pathlib import Path
9
+ from typing import cast
10
+
11
+ from datasets import Dataset, load_dataset
12
+
13
+ source = "miljoeportalen"
14
+
15
+
16
+ def convert_sample(example):
17
+ new_example = dict(
18
+ text_new=example["text"],
19
+ source=source,
20
+ domain="Web",
21
+ license="cc0",
22
+ added="2025-03-24",
23
+ created="2024-01-01, 2025-01-01", # Scrape happen within the year - data likely written earlier
24
+ metadata={"source-pretty": "Miljøportalen"},
25
+ )
26
+
27
+ return new_example
28
+
29
+
30
+ def main():
31
+ data_path = Path(
32
+ "/work/dfm-data/pre-training/miljoeportal/documents/miljoeportal.jsonl.gz"
33
+ )
34
+ ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
35
+
36
+ ds = cast(Dataset, ds)
37
+
38
+ ds = ds.map(convert_sample, remove_columns=ds.column_names)
39
+ ds = ds.rename_columns({"text_new": "text"})
40
+ ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))]) # type: ignore
41
+ ds = ds.select_columns(
42
+ ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
43
+ )
44
+
45
+ save_path = Path(__file__).parent / f"{source}.parquet"
46
+ ds.to_parquet(save_path)
47
+
48
+
49
+ if __name__ == "__main__":
50
+ main()
data/miljoeportalen/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "number_of_samples": 2169,
3
+ "average_document_length": 224704.25034578147,
4
+ "number_of_tokens": 128477101,
5
+ "language": "dan, dansk, Danish",
6
+ "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
7
+ }
data/miljoeportalen/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: c404cebb1c94e0ce0210074ba18c1b48356f69328990b8eac52673ad2ef47a14
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
data/miljoeportalen/miljoeportalen.md ADDED
@@ -0,0 +1,103 @@
1
+ ---
2
+ pretty_name: Miljøportalen
3
+ language:
4
+ - da
5
+ license: cc0
6
+ license_name: CC-0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ ---
13
+
14
+ # Dataset Card for Miljøportalen
15
+
16
+ <!-- START-SHORT DESCRIPTION -->
17
+ Data from [Danmarks Miljøportalen](https://www.miljoeportal.dk/om-danmarks-miljoeportal/) (Denmark's Environment Portal)
18
+ <!-- END-SHORT DESCRIPTION -->
19
+
20
+ Denmark's Environment Portal (Danmarks Miljøportal) is a joint public partnership owned by the state, municipalities, and regions, which aims to support digital environmental management in Denmark.
21
+
22
+ Danmarks Miljøportal's goal is for environmental data to be included early in all decisions that have an environmental impact. They do this by creating easy and open access to environmental data, making it possible for authorities and businesses to integrate the environment into their decisions.
23
+
24
+ This can be decisions specifically targeted at the environment such as water plans, Green Tripartite Agreement, biodiversity and nature restoration, but also decisions about, for example, renewable energy, climate adaptation, new roads, residential areas, and industrial enterprises, where environmental aspects need to be considered.
25
+
26
+
27
+ ## Dataset Description
28
+
29
+ <!-- START-DESC-STATS -->
30
+ - **Language**: dan, dansk, Danish
31
+ - **Number of samples**: 2.17K
32
+ - **Number of tokens (Llama 3)**: 128.48M
33
+ - **Average document length (characters)**: 224704.25
34
+ <!-- END-DESC-STATS -->
35
+
36
+
37
+ ## Dataset Structure
38
+ An example from the dataset looks as follows.
39
+
40
+
41
+ <!-- START-SAMPLE -->
42
+ ```py
43
+ {
44
+ "text": "Bila110 g 1 101 10 - miljTIL R lj TIL RTIL RøraÆTSHUSKO pp ÆTSHUS KOÆTSHUS Kort\n\nLOKALPLAN NR[...]",
45
+ "source": "miljoeportalen",
46
+ "id": "miljoeportalen_0",
47
+ "added": "2025-03-24",
48
+ "created": "2024-01-01, 2025-01-01",
49
+ "license": "cc0",
50
+ "domain": "Web",
51
+ "metadata": {
52
+ "source-pretty": "Miljøportalen"
53
+ }
54
+ }
55
+ ```
56
+
57
+ ### Data Fields
58
+
59
+ An entry in the dataset consists of the following fields:
60
+
61
+ - `text` (`str`): The content of the document.
62
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
63
+ - `id` (`str`): A unique identifier for each document.
64
+ - `added` (`str`): The date the document was added to this collection.
65
+ - `created` (`str`): The date range in which the document was originally created.
66
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
67
+ - `domain` (`str`): The domain of the source.
68
+ - `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
69
+ - `metadata/*`: Potentially additional metadata.
70
+ <!-- END-SAMPLE -->
71
+
72
+
73
+ ### Dataset Statistics
74
+
75
+ <!-- START-DATASET PLOTS -->
76
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
77
+ <img>
78
+ <!-- END-DATASET PLOTS -->
79
+
80
+
81
+
82
+ ## Additional Information
83
+
84
+
85
+ <!-- ### Data includes
86
+
87
+ TODO: KRISTIAN I assume this is just the website or is it also reports?
88
+ -->
89
+
90
+ ### License information
91
+ This dataset is licensed under CC0. The license was clarified by [email protected]:
92
+
93
+ > The data is subject to Creative Commons CC0, see:
94
+ > https://creativecommons.org/publicdomain/zero/1.0/deed.da.
95
+ >
96
+ > Let me know if you have any further questions.
97
+ > If you have questions about your case or further comments, please reply to this email.
98
+
99
+
100
+
101
+ ### Citation Information
102
+
103
+ No citation is applicable for this work.
data/miljoeportalen/miljoeportalen.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5342a7dbeaf5fa93b48b78f16308867d07e0c0ec1053830f0df7acb8f774eacf
3
+ size 169639678
src/tests/test_dataset_schema.py CHANGED
@@ -48,7 +48,7 @@ def test_sample_schema(repo_path: Path, dataset_name: str):
48
  class FrontmatterSchema(BaseModel):
49
  pretty_name: str
50
  language: list[Literal["da"]]
51
- license: Literal["cc0-1.0", "other", "cc-by-sa-4.0"]
52
 
53
 
54
  @pytest.mark.parametrize("dataset_name", DATASET_NAMES)
 
48
  class FrontmatterSchema(BaseModel):
49
  pretty_name: str
50
  language: list[Literal["da"]]
51
+ license: Literal["cc0-1.0", "other", "cc-by-sa-4.0", "apache-2.0"]
52
 
53
 
54
  @pytest.mark.parametrize("dataset_name", DATASET_NAMES)
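The widened `license` literal above simply adds `apache-2.0` to the accepted frontmatter licenses. As a minimal sketch of what this validation amounts to, the schema can be applied to a card's YAML frontmatter as below; the PyYAML parsing and the example card are assumptions for illustration, not how the repository's tests read the files:

```py
from typing import Literal

import yaml  # PyYAML, assumed available for this sketch
from pydantic import BaseModel


class FrontmatterSchema(BaseModel):
    pretty_name: str
    language: list[Literal["da"]]
    license: Literal["cc0-1.0", "other", "cc-by-sa-4.0", "apache-2.0"]


card = """\
---
pretty_name: Miljøportalen
language:
  - da
license: cc0-1.0
---
"""
# The frontmatter is the block between the first pair of "---" markers.
frontmatter = yaml.safe_load(card.split("---")[1])
print(FrontmatterSchema(**frontmatter))  # a value such as `cc0` would be rejected
```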
src/update_descriptive_statistics.py CHANGED
@@ -14,6 +14,7 @@ import multiprocessing
14
  from dataclasses import dataclass
15
  from pathlib import Path
16
  from textwrap import dedent
 
17
  from typing import Self, cast
18
 
19
  import pandas as pd
@@ -31,7 +32,6 @@ repo_path = Path(__file__).parent.parent
31
  tokenizer_name = "AI-Sweden-Models/Llama-3-8B-instruct"
32
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=True)
33
 
34
-
35
  def human_readable_large_int(value: int) -> str:
36
  thresholds = [
37
  (1_000_000_000, "B"),
@@ -64,12 +64,13 @@ def _count_tokens(batch):
64
  def calculate_number_of_tokens(
65
  dataset: Dataset,
66
  text_column: str = "text",
 
67
  ) -> int:
68
  token_counts = dataset.map(
69
  _count_tokens,
70
  batched=True,
71
  batch_size=1000,
72
- num_proc=multiprocessing.cpu_count(),
73
  )
74
  return sum(token_counts["token_count"])
75
 
@@ -82,11 +83,11 @@ class DescriptiveStatsOverview:
82
  language: str = "dan, dansk, Danish"
83
 
84
  @classmethod
85
- def from_dataset(cls, dataset: Dataset) -> Self:
86
  return cls(
87
  number_of_samples=len(dataset),
88
  average_document_length=calculate_average_document_length(dataset),
89
- number_of_tokens=calculate_number_of_tokens(dataset),
90
  )
91
 
92
  def to_markdown(self) -> str:
@@ -149,8 +150,10 @@ def add_sample(markdown_path: Path, dataset: Dataset, max_str_len: int = 100):
149
  logger.info("Adding dataset sample to readme")
150
  sample = dataset[0]
151
  for k in sample:
152
- if isinstance(k, str) and len(sample[k]) > max_str_len:
153
  sample[k] = sample[k][:max_str_len] + "[...]"
 
 
154
 
155
  json_sample = json.dumps(sample, indent=2, ensure_ascii=False)
156
  sample_str = sample_template.format(sample=json_sample)
@@ -205,9 +208,10 @@ def add_desc_statitics(
205
  markdown_path: Path,
206
  dataset: Dataset,
207
  desc_stats_path: Path,
 
208
  ) -> None:
209
  logger.info("Adding descriptive statistics to readme.")
210
- desc_stats = DescriptiveStatsOverview.from_dataset(dataset)
211
  desc_stats.to_disk(desc_stats_path)
212
  desc_stats.add_to_markdown(markdown_path)
213
 
@@ -217,6 +221,7 @@ def update_dataset(
217
  name: str,
218
  readme_name: None | str = None,
219
  force: bool = False,
 
220
  ) -> None:
221
  rev = get_latest_revision(dataset_path)
222
  desc_stats_path = dataset_path / "descriptive_stats.json"
@@ -241,10 +246,10 @@ def update_dataset(
241
  ds = load_dataset(str(repo_path), name, split="train")
242
  ds = cast(Dataset, ds)
243
 
244
- add_desc_statitics(markdown_path, ds, desc_stats_path)
245
  add_sample(markdown_path, ds)
246
  add_descriptive_statistics_plots(markdown_path, ds)
247
-
248
 
249
  def create_parser():
250
  parser = argparse.ArgumentParser(
@@ -275,6 +280,12 @@ def create_parser():
275
  type=str,
276
  help="The repository where to calculate the descriptive statistics from",
277
  )
 
 
 
 
 
 
278
  return parser
279
 
280
 
@@ -297,6 +308,7 @@ def create_main_table(repo_path: Path = repo_path) -> tuple[pd.DataFrame, str, s
297
  license_references = (
298
  "[CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en\n"
299
  + "[CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en\n"
 
300
  )
301
 
302
  for dataset in datasets:
@@ -331,7 +343,9 @@ def create_main_table(repo_path: Path = repo_path) -> tuple[pd.DataFrame, str, s
331
  table["N. Tokens"] += [sum(table["N. Tokens"])]
332
 
333
  df = pd.DataFrame.from_dict(table)
 
334
  df["N. Tokens"] = df["N. Tokens"].apply(human_readable_large_int)
 
335
  return df, readme_references, license_references
336
 
337
 
@@ -352,21 +366,28 @@ def main(
352
  logging_level: int = 20,
353
  force: bool = False,
354
  repo_path: Path = repo_path,
 
355
  ) -> None:
356
  logging.basicConfig(level=logging_level)
357
 
 
 
358
  if dataset and dataset != "default":
359
  dataset_path = repo_path / "data" / dataset
360
- update_dataset(dataset_path, dataset_path.name, force=force)
361
  return
362
 
363
  if dataset is None:
364
  datasets = (repo_path / "data").glob("*")
365
  for dataset_path in datasets:
366
- update_dataset(dataset_path, dataset_path.name, force=force)
 
 
367
 
368
  if dataset is None or dataset == "default":
369
- update_dataset(repo_path, "default", "README.md", force=force)
 
 
370
  update_main_table(repo_path)
371
 
372
 
@@ -379,4 +400,5 @@ if __name__ == "__main__":
379
  logging_level=args.logging_level,
380
  force=args.force,
381
  repo_path=Path(args.repo_path),
 
382
  )
 
14
  from dataclasses import dataclass
15
  from pathlib import Path
16
  from textwrap import dedent
17
+ from datetime import datetime
18
  from typing import Self, cast
19
 
20
  import pandas as pd
 
32
  tokenizer_name = "AI-Sweden-Models/Llama-3-8B-instruct"
33
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=True)
34
 
 
35
  def human_readable_large_int(value: int) -> str:
36
  thresholds = [
37
  (1_000_000_000, "B"),
 
64
  def calculate_number_of_tokens(
65
  dataset: Dataset,
66
  text_column: str = "text",
67
+ num_proc: int = 1,
68
  ) -> int:
69
  token_counts = dataset.map(
70
  _count_tokens,
71
  batched=True,
72
  batch_size=1000,
73
+ num_proc=num_proc,
74
  )
75
  return sum(token_counts["token_count"])
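The `_count_tokens` helper mapped over the dataset here is unchanged by this commit and its body is not shown; a plausible sketch (an assumption, not the actual implementation) is:

```py
def _count_tokens(batch: dict) -> dict:
    # Tokenize a batch of documents with the module-level Llama-3 tokenizer and
    # return one count per document; calculate_number_of_tokens sums these.
    encodings = tokenizer(batch["text"])
    return {"token_count": [len(ids) for ids in encodings["input_ids"]]}
```

Turning `num_proc` into a parameter that defaults to 1 means library callers no longer get an implicit `cpu_count()`-sized process pool; the CLI flag added further down restores the parallel default for command-line runs.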
76
 
 
83
  language: str = "dan, dansk, Danish"
84
 
85
  @classmethod
86
+ def from_dataset(cls, dataset: Dataset, num_proc: int = 1) -> Self:
87
  return cls(
88
  number_of_samples=len(dataset),
89
  average_document_length=calculate_average_document_length(dataset),
90
+ number_of_tokens=calculate_number_of_tokens(dataset, num_proc=num_proc),
91
  )
92
 
93
  def to_markdown(self) -> str:
 
150
  logger.info("Adding dataset sample to readme")
151
  sample = dataset[0]
152
  for k in sample:
153
+ if isinstance(sample[k], str) and len(sample[k]) > max_str_len:
154
  sample[k] = sample[k][:max_str_len] + "[...]"
155
+ if isinstance(sample[k], datetime):
156
+ sample[k] = str(sample[k])
157
 
158
  json_sample = json.dumps(sample, indent=2, ensure_ascii=False)
159
  sample_str = sample_template.format(sample=json_sample)
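The new `datetime` branch is needed because `json.dumps` has no default encoder for `datetime` objects, so a sample containing a date-typed column (which at least one of the newly added sub-datasets presumably has) would otherwise fail to serialise:

```py
import json
from datetime import datetime

sample = {"created": datetime(2025, 3, 24)}
try:
    json.dumps(sample)
except TypeError as err:
    print(err)  # Object of type datetime is not JSON serializable
sample["created"] = str(sample["created"])
print(json.dumps(sample))  # {"created": "2025-03-24 00:00:00"}
```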
 
208
  markdown_path: Path,
209
  dataset: Dataset,
210
  desc_stats_path: Path,
211
+ num_proc: int = 1,
212
  ) -> None:
213
  logger.info("Adding descriptive statistics to readme.")
214
+ desc_stats = DescriptiveStatsOverview.from_dataset(dataset, num_proc=num_proc)
215
  desc_stats.to_disk(desc_stats_path)
216
  desc_stats.add_to_markdown(markdown_path)
217
 
 
221
  name: str,
222
  readme_name: None | str = None,
223
  force: bool = False,
224
+ num_proc: int = 1,
225
  ) -> None:
226
  rev = get_latest_revision(dataset_path)
227
  desc_stats_path = dataset_path / "descriptive_stats.json"
 
246
  ds = load_dataset(str(repo_path), name, split="train")
247
  ds = cast(Dataset, ds)
248
 
249
+ add_desc_statitics(markdown_path, ds, desc_stats_path, num_proc=num_proc)
250
  add_sample(markdown_path, ds)
251
  add_descriptive_statistics_plots(markdown_path, ds)
252
+
253
 
254
  def create_parser():
255
  parser = argparse.ArgumentParser(
 
280
  type=str,
281
  help="The repository where to calculate the descriptive statistics from",
282
  )
283
+ parser.add_argument(
284
+ "--num_proc",
285
+ default=multiprocessing.cpu_count(),
286
+ type=int,
287
+ help="The number of processes to use.",
288
+ )
289
  return parser
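With the new flag, a run can bound its parallelism explicitly, for example `python src/update_descriptive_statistics.py --num_proc 4`; the default remains `multiprocessing.cpu_count()`, matching the previous hard-coded behaviour.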
290
 
291
 
 
308
  license_references = (
309
  "[CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en\n"
310
  + "[CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en\n"
311
+ + "[Apache 2.0]: https://www.apache.org/licenses/LICENSE-2.0\n"
312
  )
313
 
314
  for dataset in datasets:
 
343
  table["N. Tokens"] += [sum(table["N. Tokens"])]
344
 
345
  df = pd.DataFrame.from_dict(table)
346
+ df = df.sort_values("N. Tokens")
347
  df["N. Tokens"] = df["N. Tokens"].apply(human_readable_large_int)
348
+
349
  return df, readme_references, license_references
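Sorting is done while `N. Tokens` is still numeric: sorting after `human_readable_large_int` has formatted the column would compare strings and place, say, "9M" after "100M". Because the total row is appended before the `DataFrame` is built, the ascending sort also leaves that total at the bottom of the table.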
350
 
351
 
 
366
  logging_level: int = 20,
367
  force: bool = False,
368
  repo_path: Path = repo_path,
369
+ num_proc: int | None = None,
370
  ) -> None:
371
  logging.basicConfig(level=logging_level)
372
 
373
+ num_proc = multiprocessing.cpu_count() if num_proc is None else num_proc
374
+
375
  if dataset and dataset != "default":
376
  dataset_path = repo_path / "data" / dataset
377
+ update_dataset(dataset_path, dataset_path.name, force=force, num_proc=num_proc)
378
  return
379
 
380
  if dataset is None:
381
  datasets = (repo_path / "data").glob("*")
382
  for dataset_path in datasets:
383
+ update_dataset(
384
+ dataset_path, dataset_path.name, force=force, num_proc=num_proc
385
+ )
386
 
387
  if dataset is None or dataset == "default":
388
+ update_dataset(
389
+ repo_path, "default", "README.md", force=force, num_proc=num_proc
390
+ )
391
  update_main_table(repo_path)
392
 
393
 
 
400
  logging_level=args.logging_level,
401
  force=args.force,
402
  repo_path=Path(args.repo_path),
403
+ num_proc=args.num_proc,
404
  )
uv.lock CHANGED
@@ -202,7 +202,7 @@ wheels = [
202
 
203
  [[package]]
204
  name = "danish-dynaword"
205
- version = "1.0.9"
206
  source = { virtual = "." }
207
  dependencies = [
208
  { name = "datasets" },
 
202
 
203
  [[package]]
204
  name = "danish-dynaword"
205
+ version = "1.0.10"
206
  source = { virtual = "." }
207
  dependencies = [
208
  { name = "datasets" },