Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 1M - 10M
License: cc0-1.0

# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets>=3.2.0"
# ]
# ///

# setup
import logging
import re
import inspect
from pathlib import Path
from datetime import datetime
from collections import defaultdict
from collections.abc import Callable

import pandas as pd
from datasets import Dataset, load_dataset

logger = logging.getLogger(__name__)

########## edit manually for each source
hf_path = "NbAiLab/NCC"
source = "ncc_newspaper"
license = "cc0-1.0"
domain = "News"
num_proc = 8
##########

today = datetime.now().strftime("%Y-%m-%d")

# stop words taken from spaCy
# https://github.com/explosion/spaCy/blob/master/spacy/lang/da/stop_words.py
# Source: Handpicked by Jens Dahl Møllerhøj.
spacy_sw = set(
    """
af aldrig alene alle allerede alligevel alt altid anden andet andre at
bag begge blandt blev blive bliver burde bør
da de dem den denne dens der derefter deres derfor derfra deri dermed derpå derved det dette dig din dine disse dog du
efter egen eller ellers en end endnu ene eneste enhver ens enten er et
flere flest fleste for foran fordi forrige fra få før først
gennem gjorde gjort god gør gøre gørende
ham han hans har havde have hel heller hen hende hendes henover her herefter heri hermed herpå hun hvad hvem hver hvilke hvilken hvilkes hvis hvor hvordan hvorefter hvorfor hvorfra hvorhen hvori hvorimod hvornår hvorved
i igen igennem ikke imellem imens imod ind indtil ingen intet
jeg jer jeres jo
kan kom kommer kun kunne
lad langs lav lave lavet lidt lige ligesom lille længere
man mange med meget mellem men mens mere mest mig min mindre mindst mine mit må måske
ned nemlig nogen nogensinde noget nogle nok nu ny nyt nær næste næsten
og også om omkring op os over overalt
på
samme sammen selv selvom senere ses siden sig sige skal skulle som stadig synes syntes så sådan således
temmelig tidligere til tilbage tit
ud uden udover under undtagen
var ved vi via vil ville vore vores vær være været
øvrigt
""".split()
)

# functions
def word_tokenize(text: str) -> list[str]:
    """
    Tokenizes a string into words, splitting on whitespace and punctuation.

    Example:
        >>> word_tokenize("Hello, world!")
        ['Hello', ',', 'world', '!']
        >>> word_tokenize("This is a test.")
        ['This', 'is', 'a', 'test', '.']
        >>> word_tokenize("Many   spaces   between   words.")
        ['Many', 'spaces', 'between', 'words', '.']
    """
    punkt = [",", ".", "!", "?", ":", ";", "(", ")", "[", "]", "{", "}", '"', "'"]
    for p in punkt:
        text = text.replace(p, f" {p} ")
    return text.split()

def alpha_ratio(text: str | list[str]) -> float:
    """
    If not already split into words, splits the text with word_tokenize().
    Returns the ratio of words consisting only of alphabetical characters.
    """
    if isinstance(text, str):
        text = word_tokenize(text)
    if not text:
        return 0.0
    return 1 - sum(not word.isalpha() for word in text) / len(text)

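# Worked example (illustrative): alpha_ratio("Hej, verden") tokenizes to
# ["Hej", ",", "verden"]; one of the three tokens is non-alphabetical, so the
# ratio is 1 - 1/3 ≈ 0.67, which falls just below the 0.7 threshold used by
# alpha_filter below.
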
def count_min_target(given_list: list, target_list: list, min_count: int) -> bool:
    """
    Iterates through given_list until at least min_count items match any item
    in target_list; stops early once the target count is reached.
    (Parameter renamed to min_count to avoid shadowing the builtin min.)
    """
    c_item = 0
    for current_item in given_list:
        if current_item in target_list:
            c_item += 1
            if c_item == min_count:
                return True
    return False

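# Worked example (illustrative): count_min_target(["og", "hus", "med"], spacy_sw, 2)
# returns True as soon as the second stop word ("med") is seen, without
# scanning any remaining items.
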
def dynaword_format(
    meta_document: dict[str, str | int],
) -> dict[str, str | dict[str, str]]:
    """Reformats data to fit dynaword standards"""
    text = meta_document.get("text")
    id = meta_document.get("id")
    date = meta_document.get("publish_year")
    doc_type = meta_document.get("doc_type")
    newdata = {
        "text": text,
        "source": source,
        "id": id,
        "added": today,
        "created": f"{date}-01-01, {date}-12-31",
        "license": license,
        "domain": domain,
        "metadata": {
            "source-pretty": f"Norwegian Colossal Corpus ({re.sub('ncc_', '', source)})",
            "source-type": doc_type,
        },
    }
    return newdata

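# A reformatted document then looks like this (field values illustrative):
# {
#     "text": "...",
#     "source": "ncc_newspaper",
#     "id": "...",
#     "added": "2025-01-01",
#     "created": "1984-01-01, 1984-12-31",
#     "license": "cc0-1.0",
#     "domain": "News",
#     "metadata": {"source-pretty": "Norwegian Colossal Corpus (newspaper)", "source-type": "..."},
# }
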
def log_pre_filter_lang_data(
    lang_metadata: dict[str, dict[str, int]], filtered_ds: Dataset
):
    """
    Logs changes in a large dataset, based on the language metadata collected
    before filtering and on the filtered dataset; used for language filtering.
    """
    all_docs = sum(lang_metadata[source].values())
    no_docs = lang_metadata[source].get("no", 0)
    da_docs = lang_metadata[source].get("da", 0)
    no_perc = round(no_docs / all_docs * 100, 4)
    da_perc = round(da_docs / all_docs * 100, 4)
    f_length = len(filtered_ds)
    f_perc = round(f_length / da_docs * 100, 4)
    f_total_perc = round(f_length / all_docs * 100, 4)
    logger.info(f"Documents of {source}:")
    logger.info(f"NO: {no_docs}, {no_perc}% ; DA: {da_docs}, {da_perc}%")
    logger.info("After language confidence filtering:")
    logger.info(f"DA: {f_length}, lost: {100 - f_perc}%")
    logger.info("Total document change:")
    logger.info(f"{all_docs} -> {f_length}, loss: {100 - f_total_perc}%")

def get_var_name(var):
    """
    Outputs the variable name(s) bound to var in an outer scope.
    Assumes the call chain filter_with_changelog() -> main() -> module level,
    i.e. three frames up is the module scope where the filter lambdas live.
    """
    callers_local_vars = inspect.currentframe().f_back.f_back.f_back.f_locals.items()
    return [var_name for var_name, var_val in callers_local_vars if var_val is var]

def filter_with_changelog(
    filter_func: Callable[[Dataset], Dataset], dataset: Dataset
) -> Dataset:
    """
    Takes a filter function and a dataset;
    counts documents and tokens before and after filtering,
    and saves the changes to the log.
    """
    filter_name = get_var_name(filter_func)
    pre_filter_docs = len(dataset)
    pre_filter_tokens = sum(len(word_tokenize(i["text"])) for i in dataset)
    dataset = dataset.filter(filter_func, num_proc=num_proc)
    post_filter_docs = len(dataset)
    post_filter_tokens = sum(len(word_tokenize(i["text"])) for i in dataset)
    tokens_removed = round((1 - (post_filter_tokens / pre_filter_tokens)) * 100, 2)
    docs_removed = round((1 - (post_filter_docs / pre_filter_docs)) * 100, 2)
    logger.info(f"FILTER: {filter_name}")
    logger.info(
        f"TOKENS: pre: {pre_filter_tokens}, post: {post_filter_tokens}, loss: {tokens_removed}%"
    )
    logger.info(
        f"DOCUMENTS: pre: {pre_filter_docs}, post: {post_filter_docs}, loss: {docs_removed}%"
    )
    return dataset

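# Example log output (numbers illustrative; get_var_name returns a list, so
# the filter name is logged in brackets):
#   FILTER: ['length_filter']
#   TOKENS: pre: 1000000, post: 990000, loss: 1.0%
#   DOCUMENTS: pre: 5000, post: 4900, loss: 2.0%
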
# filters
source_filter = lambda ds: re.sub("ncc_", "", source) in ds["doc_type"]  # noqa
length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10  # noqa
too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5  # noqa
alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7  # noqa
stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)  # noqa

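# Example (illustrative row): length_filter({"text": "for kort"}) is False,
# since the text tokenizes to only two words; the same row would also fail
# stop_word_filter, which needs at least two stop words ("for" is the only one here).
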
samples_pr_source: dict = defaultdict(lambda: defaultdict(int))


def language_filter_with_desc_stats(ds: dict) -> bool:
    """
    Language filter for a streamed dataset that also logs all observed languages.
    Note: datasets passes a single example (row) to filter functions,
    hence the dict annotation rather than Dataset.
    """
    s = source
    language = ds["lang_fasttext"]
    samples_pr_source[s][language] += 1
    language_filter = (
        ds["lang_fasttext"] == "da" and float(ds["lang_fasttext_conf"]) >= 0.5
    )
    return language_filter

# quality checks
def quality_checks(ds: Dataset) -> Dataset:
    """
    Quality checks for:
    - no duplicate ids
    - no duplicate texts
    - logs texts > 1e5 tokens
    """
    # convert to pandas for drop_duplicates()
    df = pd.DataFrame(ds)
    # remove duplicate ids
    len_df = len(df)
    df = df.drop_duplicates(subset=["id"])
    logger.info(f"Removed {len_df - len(df)} duplicate ids")
    # remove rows with duplicate text
    len_df = len(df)
    df = df.drop_duplicates(subset=["text"])
    logger.info(f"Removed {len_df - len(df)} rows with duplicate text")
    # reconvert, and drop the pandas index column if it survived
    ds_f = Dataset.from_pandas(df, preserve_index=False)
    if "__index_level_0__" in ds_f.column_names:
        ds_f = ds_f.remove_columns("__index_level_0__")
    assert len(set(ds_f["id"])) == len(ds_f), "IDs are not unique"
    assert len(set(ds_f["text"])) == len(ds_f), "Texts are not unique"
    long_texts = ds_f.filter(too_long_filter, num_proc=num_proc)
    if len(long_texts) > 0:
        logger.info(f"{len(long_texts)} long texts (>1e5 tokens) found")
        for id in long_texts["id"]:
            logger.info(f"id: {id}")
    else:
        logger.info("No long texts (>1e5 tokens) found")
    return ds_f

# main
def main():
    # load all splits
    logger.info(f"Loading data from: {hf_path}")
    data = load_dataset(hf_path, streaming=True)
    data_list = []
    for split in data:
        # filter by metadata
        logger.info(f"Processing source: {source}, split: {split}")
        s_data = data[split].filter(source_filter)
        logger.info(f"Processing language, split: {split}")
        s_data = s_data.filter(language_filter_with_desc_stats)
        # materialize the streamed (iterable) dataset
        data_list.extend(s_data)
    danish_data = Dataset.from_list(data_list)
    del data_list
    # log language changes
    log_pre_filter_lang_data(samples_pr_source, danish_data)
    # convert to dynaword format
    logger.info("Assembling whole dataset for filtering")
    danish_data = danish_data.map(dynaword_format)
    danish_data = danish_data.select_columns(
        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
    )
    # filter and log changes
    danish_data = filter_with_changelog(length_filter, danish_data)
    danish_data = filter_with_changelog(alpha_filter, danish_data)
    danish_data = filter_with_changelog(stop_word_filter, danish_data)
    # quality checks
    danish_data = quality_checks(danish_data)
    # saving
    save_path = Path(__file__).parent / f"{source}.parquet"
    danish_data.to_parquet(save_path)

if __name__ == "__main__":
    log_path = Path(__file__).parent / f"{source}.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()
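
# To sanity-check the output afterwards (illustrative snippet; assumes the
# script has already produced ncc_newspaper.parquet in this directory):
#   from datasets import Dataset
#   ds = Dataset.from_parquet("ncc_newspaper.parquet")
#   print(len(ds), ds[0]["metadata"]["source-pretty"])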