Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 1M - 10M
License: cc-by-sa-4.0
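The corpus is distributed as a single Parquet file produced by the conversion script below. A minimal sketch of reading it back with the datasets library; the file name "memo.parquet" is an assumption, since the script names its output after the directory it lives in:

from datasets import load_dataset

# "memo.parquet" is a placeholder: the conversion script below writes
# "<directory name>.parquet" next to itself.
ds = load_dataset("parquet", data_files="memo.parquet", split="train")
print(ds.column_names)  # text, source, id, added, created, license, domain, metadata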
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets==3.2.0",
# ]
# ///
from datetime import datetime, timedelta
from pathlib import Path
from typing import cast

from datasets import Dataset, load_dataset

column_order = [
    "text",
    "source",
    "id",
    "added",
    "created",
    "license",
    "domain",
    "metadata",
]


def convert_sample(example: dict) -> dict:
    # from sample:
    # {
    #     "filename": "1894_Aagaard_UnderligeFyre",
    #     "full_firstnames": "Oscar",
    #     "auth_first": "Oscar",
    #     "auth_last_modern": "Aagaard",
    #     "pseudonym": None,
    #     "publ_date": 1894,
    #     "title_modern": "Underlige Fyre",
    #     "published_under_gender": "male",
    #     "real_gender": "male",
    #     "nationality": "no",
    #     "auth_id": 1,
    #     "auth_last": "Aagaard",
    #     "title": "Underlige Fyre",
    #     "surname": "Aagaard",
    #     "title.1": "Underlige Fyre",
    #     "subtitle": "Fortælling",
    #     "volume": None,
    #     "year": 1894,
    #     "pages": 263.0,
    #     "illustrations": "n",
    #     "typeface": "roman",
    #     "publisher": "Gyldendal",
    #     "price": 3.0,
    #     "source": "KB",
    #     "notes": None,
    #     "filepath": None,
    #     "historical": None,
    #     "period": "nan",
    #     "period_notes": "nan",
    #     "novel_start": 13.0,
    #     "novel_end": 275.0,
    #     "serialno": 854.0,
    #     "category": "O",
    #     "e_canon": 0,
    #     "ce_canon": 0,
    #     "lex_canon": 0,
    #     "text": "Første kapitel. Argus & co. Waterclerker — hvormange er der vel, ...",
    # }

    # Only the publication year is known, so "created" is stored as a
    # year-long date range.
    min_date = datetime.fromisoformat(f"{example['year']}-01-01")
    max_date = datetime.fromisoformat(f"{example['year']}-12-31")

    # Prepend title, author, publication year, and publisher to the novel text.
    text = f"{example['title_modern']}\n\nSkrevet af {example['full_firstnames']} {example['auth_last_modern']}\nPubliceret {example['year']} af {example['publisher']}\n\n{example['text']}"

    new_example = dict(
        text_new=text,
        id=example["filename"],
        source="memo",
        domain="Wiki & Books",
        license="cc-by-sa-4.0",
        added="2025-03-08",
        created=f"{min_date.date()}, {max_date.date()}",
        metadata={"source-pretty": "MeMo Canonical Novels"},
    )

    return new_example


def main():
    ds = load_dataset("chcaa/memo-canonical-novels", split="train")
    ds = cast(Dataset, ds)

    # Report the time span covered by the publication years.
    dates = [datetime.fromisoformat(f"{year}-01-01").date() for year in ds["year"]]
    max_date = max(dates) + timedelta(days=364)
    print(str(min(dates)), ",", str(max_date))  # 1870-01-01 , 1899-12-31

    assert len(set(ds["filename"])) == len(ds), "IDs are not unique"
    assert len(set(ds["text"])) == len(ds), "Texts are not unique"

    ds = ds.map(convert_sample, num_proc=4)
    ds = ds.select_columns(column_order[1:] + ["text_new"])
    ds = ds.rename_columns({"text_new": "text"})
    # ensure order
    ds = ds.select_columns(column_order)

    save_dir = Path(__file__).parent
    save_path = save_dir / f"{save_dir.name}.parquet"
    ds.to_parquet(save_path)


if __name__ == "__main__":
    main()
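Because the header at the top of the script is an inline script metadata block (PEP 723), it can be run directly with uv, e.g. "uv run create.py" (the file name is only an example); uv resolves the pinned datasets dependency in an isolated environment, and the script writes "<directory name>.parquet" next to itself.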