Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 1M - 10M
License: Openly Licensed (see the respective dataset)

Commit: NCC tests now pass
Files changed:
- README.md (+13, -19)
- data/ncc_books/ncc_books.md (+2, -0)
- data/ncc_maalfrid/ncc_maalfrid.md (+2, -0)
- data/ncc_newspaper/ncc_newspaper.parquet (+2, -2)
- data/ncc_parliament/ncc_parliament.md (+2, -0)
- src/dynaword/dataset_structure.py (+0, -1)
- src/dynaword/plots.py (+0, -24)
- src/dynaword/typings.py (+0, -1)
- src/dynaword/update_descriptive_statistics.py (+29, -1)
- src/tests/test_unique_ids.py (+4, -1)
README.md (CHANGED)

@@ -21,10 +21,10 @@ configs:
   data_files:
   - split: train
     path: data/ncc_books/*.parquet
-- config_name: ncc_newspapers
+- config_name: ncc_newspaper
   data_files:
   - split: train
-    path: data/ncc_newspapers/*.parquet
+    path: data/ncc_newspaper/*.parquet
 - config_name: ncc_maalfrid
   data_files:
   - split: train
@@ -141,22 +141,6 @@ configs:
   data_files:
   - split: train
     path: data/nota/*.parquet
-- config_name: ncc_newspapers
-  data_files:
-  - split: train
-    path: data/ncc_newspapers/*.parquet
-- config_name: ncc_books
-  data_files:
-  - split: train
-    path: data/ncc_books/*.parquet
-- config_name: ncc_maalfrid
-  data_files:
-  - split: train
-    path: data/ncc_maalfrid/*.parquet
-- config_name: ncc_parliament
-  data_files:
-  - split: train
-    path: data/ncc_parliament/*.parquet
 annotations_creators:
 - no-annotation
 language_creators:
@@ -190,7 +174,7 @@ https://github.com/huggingface/datasets/blob/main/templates/README_guide.md
 <!-- START README TABLE -->
 |              |                                                                                                                   |
 | ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **Version**  | …                                                                                                                 |
+| **Version**  | 1.1.0 ([Changelog](/CHANGELOG.md))                                                                                |
 | **Language** | dan, dansk, Danish                                                                                                |
 | **License**  | Openly Licensed, See the respective dataset                                                                       |
 | **Models**   | For model trained used this data see [danish-foundation-models](https://huggingface.co/danish-foundation-models) |
@@ -417,12 +401,22 @@ TODO:
 
 
 ### Dataset Statistics
+The following plot show the domains distribution of the following within the dynaword:
+
+<p align="center">
+  <img src="./images/domain_distribution.png" width="600" style="margin-right: 10px;" />
+</p>
+
+
 
+<details>
+<summary>Per dataset histograms</summary>
 <!-- START-DATASET PLOTS -->
 <p align="center">
   <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
 </p>
 <!-- END-DATASET PLOTS -->
+</details>
 
 ## Additional Information
 
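Each `config_name` entry above maps one subset to a glob of parquet files, which is presumably why the duplicated `ncc_newspapers` configs broke the tests this commit fixes. A minimal sketch of pulling a single subset with the `datasets` library; the repo id below is an assumption based on the organization linked in the README table, not something this commit states:

```python
# Sketch: load one subset by config name. The repo id is assumed
# (only the danish-foundation-models org is linked in the README).
from datasets import load_dataset

ds = load_dataset(
    "danish-foundation-models/danish-dynaword",  # assumed repo id
    "ncc_newspaper",  # one config = data/ncc_newspaper/*.parquet
    split="train",
)
print(len(ds), ds.column_names)
```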
data/ncc_books/ncc_books.md (CHANGED)

@@ -9,6 +9,8 @@ task_categories:
 - fill-mask
 task_ids:
 - language-modeling
+domains:
+- Books
 ---
 
 # Dataset Card for Norwegian Colossal Corpus (books)
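The same two-line `domains:` addition appears in the maalfrid and parliament cards below. The value lives in the card's YAML frontmatter, the block between the two `---` markers; the repo's own `DataSheet` loader presumably handles this, so treat this as an illustrative stand-in:

```python
# Hypothetical sketch: read the `domains` key this commit adds to each
# dataset card's YAML frontmatter.
from pathlib import Path

import yaml  # pyyaml


def read_frontmatter(card_path: Path) -> dict:
    text = card_path.read_text(encoding="utf-8")
    # Frontmatter sits between the first two `---` delimiters.
    _, frontmatter, _ = text.split("---", 2)
    return yaml.safe_load(frontmatter)


meta = read_frontmatter(Path("data/ncc_books/ncc_books.md"))
print(meta["domains"])  # ['Books']
```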
data/ncc_maalfrid/ncc_maalfrid.md (CHANGED)

@@ -9,6 +9,8 @@ task_categories:
 - fill-mask
 task_ids:
 - language-modeling
+domains:
+- Web
 ---
 
 # Dataset Card for Norwegian Colossal Corpus (maalfrid)
data/ncc_newspaper/ncc_newspaper.parquet (CHANGED)

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:6876f892b2ce4e0ca59fc19d6742547fb909edfe28e33466ad172420770311d2
+size 2419112
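The parquet file itself is stored in git-lfs, so the diff only touches the pointer: per the LFS spec, the `oid` line records the sha256 of the real file contents. A quick way to verify a downloaded file against the pointer, sketched in Python:

```python
# Check a downloaded file against the sha256 oid in its git-lfs pointer.
import hashlib
from pathlib import Path

EXPECTED_OID = "6876f892b2ce4e0ca59fc19d6742547fb909edfe28e33466ad172420770311d2"


def sha256_of(path: Path) -> str:
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()


assert sha256_of(Path("data/ncc_newspaper/ncc_newspaper.parquet")) == EXPECTED_OID
```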
data/ncc_parliament/ncc_parliament.md (CHANGED)

@@ -9,6 +9,8 @@ task_categories:
 - fill-mask
 task_ids:
 - language-modeling
+domains:
+- Other
 ---
 
 # Dataset Card for Norwegian Colossal Corpus (parliament)
src/dynaword/dataset_structure.py (CHANGED)

@@ -18,7 +18,6 @@ class SampleSchema(BaseModel):
     id: str
     text: str
     source: str
-    domain: str  # TODO: convert to literal
     added: date
     created: Annotated[tuple[date, date], BeforeValidator(ensure_tuple)]
     token_count: int
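After this change a sample no longer carries a `domain` column; domains are instead declared per dataset in the card frontmatter above. A self-contained sketch of the resulting schema for validating one sample; `ensure_tuple` is re-sketched here under an assumed behavior, and the repo's own version may differ:

```python
# Sketch of the post-commit SampleSchema (pydantic v2), with a stand-in
# ensure_tuple. Assumption: it coerces lists/single dates to a date pair.
from datetime import date
from typing import Annotated

from pydantic import BaseModel, BeforeValidator


def ensure_tuple(value):
    if isinstance(value, (list, tuple)):
        return tuple(value)
    return (value, value)


class SampleSchema(BaseModel):
    id: str
    text: str
    source: str
    added: date
    created: Annotated[tuple[date, date], BeforeValidator(ensure_tuple)]
    token_count: int


sample = SampleSchema(
    id="ncc_books_0",
    text="Et eksempel.",
    source="ncc_books",
    added=date(2025, 1, 1),
    created=[date(1900, 1, 1), date(2000, 1, 1)],
    token_count=3,
)
```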
src/dynaword/plots.py (CHANGED)

@@ -2,36 +2,12 @@ import logging
 from pathlib import Path
 
 import pandas as pd
-import plotly.express as px
 import plotnine as pn
 from datasets import Dataset
 
-from dynaword.tables import create_overview_table
-
 logger = logging.getLogger(__name__)
 
 
-def create_domain_distribution_plot(
-    save_dir: Path,
-):
-    df = create_overview_table(
-        add_readable_tokens=False, add_total_row=False, add_readme_references=False
-    )
-    fig = px.sunburst(df, path=["Domain", "Source"], values="N. Tokens")
-
-    fig.update_traces(textinfo="label+percent entry")
-    fig.update_layout(title="Dataset Distribution by Domain and Source")
-
-    img_path = save_dir / "images"
-    img_path.mkdir(parents=False, exist_ok=True)
-    save_path = img_path / "domain_distribution.png"
-    fig.write_image(
-        save_path,
-        width=800,
-        height=800,
-        scale=2,
-    )
-
 def create_descriptive_statistics_plots(
     dataset: Dataset,
     save_dir: Path,
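The removed function is re-added almost verbatim in update_descriptive_statistics.py below, so this is a move rather than a deletion. For reference, the core of what it draws is a two-level sunburst (domains on the inner ring, sources on the outer, sized by token count); a toy, self-contained version with made-up numbers:

```python
# Toy illustration of the sunburst used by create_domain_distribution_plot.
# The data frame values are invented; writing the image requires kaleido.
import pandas as pd
import plotly.express as px

df = pd.DataFrame(
    {
        "Domain": ["Books", "Books", "Web", "Other"],
        "Source": ["ncc_books", "nota", "ncc_maalfrid", "ncc_parliament"],
        "N. Tokens": [500_000, 120_000, 300_000, 80_000],
    }
)
fig = px.sunburst(df, path=["Domain", "Source"], values="N. Tokens")
fig.update_traces(textinfo="label+percent entry")
fig.write_image("domain_distribution.png", width=800, height=800, scale=2)
```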
src/dynaword/typings.py (CHANGED)

@@ -3,7 +3,6 @@ from typing import Literal
 DOMAIN = Literal[
     "Books",
     "Conversation",
-    "Conversations",
     "Dialect",
     "Encyclopedic",
     "Legal",
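Dropping the plural "Conversations" leaves one canonical spelling per domain. Since `DOMAIN` is a `typing.Literal`, its members double as a runtime whitelist via `get_args`; a short sketch with a deliberately truncated member list:

```python
# The real DOMAIN Literal has more members; this sketch keeps only the
# ones visible in the diff.
from typing import Literal, get_args

DOMAIN = Literal["Books", "Conversation", "Dialect", "Encyclopedic", "Legal"]
VALID_DOMAINS: tuple[str, ...] = get_args(DOMAIN)

assert "Books" in VALID_DOMAINS
assert "Conversations" not in VALID_DOMAINS  # the plural variant is gone
```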
src/dynaword/update_descriptive_statistics.py (CHANGED)

@@ -13,6 +13,7 @@ import logging
 from pathlib import Path
 from typing import cast
 
+import plotly.express as px
 from datasets import Dataset, load_dataset
 
 from dynaword.datasheet import DataSheet
@@ -22,7 +23,7 @@ from dynaword.git_utilities import (
     get_latest_revision,
 )
 from dynaword.paths import repo_path
-from dynaword.tables import create_overview_table_str
+from dynaword.tables import create_overview_table, create_overview_table_str
 
 logger = logging.getLogger(__name__)
 
@@ -34,6 +35,31 @@ _datasets = [
 ]
 
 
+logger = logging.getLogger(__name__)
+
+
+def create_domain_distribution_plot(
+    save_dir: Path = repo_path,
+):
+    df = create_overview_table(
+        add_readable_tokens=False, add_total_row=False, add_readme_references=False
+    )
+    fig = px.sunburst(df, path=["Domain", "Source"], values="N. Tokens")
+
+    fig.update_traces(textinfo="label+percent entry")
+    fig.update_layout(title="Dataset Distribution by Domain and Source")
+
+    img_path = save_dir / "images"
+    img_path.mkdir(parents=False, exist_ok=True)
+    save_path = img_path / "domain_distribution.png"
+    fig.write_image(
+        save_path,
+        width=800,
+        height=800,
+        scale=2,
+    )
+
+
 def update_dataset(
     dataset_name: str,
     force: bool = False,
@@ -79,6 +105,8 @@ def update_dataset(
     logger.info("Updating Overview table")
     package = create_overview_table_str()
     sheet.body = sheet.replace_tag(package=package, tag="MAIN TABLE")
+    package = create_domain_distribution_plot()
+    sheet.body = sheet.replace_tag(package=package, tag="DOMAIN PLOT")
 
     sheet.write_to_path()
 
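`replace_tag` itself is not shown in this diff. Judging from the marker comments visible in the README (`<!-- START-DATASET PLOTS -->` / `<!-- END-DATASET PLOTS -->`), it plausibly swaps the text between a START/END comment pair; the following is a hypothetical sketch of that idea, not the repo's actual implementation, and the exact marker format is an assumption:

```python
# Assumed semantics of DataSheet.replace_tag: replace whatever sits
# between "<!-- START <tag> -->" and "<!-- END <tag> -->" markers.
import re


def replace_tag(body: str, package: str, tag: str) -> str:
    start, end = f"<!-- START {tag} -->", f"<!-- END {tag} -->"
    pattern = re.compile(re.escape(start) + r".*?" + re.escape(end), flags=re.DOTALL)
    return pattern.sub(f"{start}\n{package}\n{end}", body)
```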
src/tests/test_unique_ids.py (CHANGED)

@@ -1,3 +1,4 @@
+from collections import Counter
 from typing import cast
 
 from datasets import Dataset, load_dataset
@@ -9,4 +10,6 @@ def test_ensure_ids_are_unique():
     name = str(repo_path.resolve())
     ds = load_dataset(name, split="train")
     ds = cast(Dataset, ds)
-
+    counter = Counter(ds["id"])
+    duplicates = [item for item, count in counter.items() if count > 1]
+    assert len(duplicates) == 0, f"Duplicate IDs found: {duplicates}"
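The new assertion reports which ids are duplicated rather than just failing. The same pattern on a small in-memory `Dataset`, runnable without the repo:

```python
# Demonstrate the duplicate-id check on a toy Dataset with a repeated id.
from collections import Counter

from datasets import Dataset

ds = Dataset.from_dict({"id": ["a", "b", "b"], "text": ["x", "y", "z"]})
counter = Counter(ds["id"])
duplicates = [item for item, count in counter.items() if count > 1]
assert duplicates == ["b"]
```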