Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 1M - 10M
License: cc-by-sa-4.0
File size: 1,309 Bytes
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets>=3.2.0",
# ]
# ///
from pathlib import Path
from typing import cast

from datasets import Dataset, load_dataset

source = "fm-udgivelser"


def convert_sample(example):
    # Map a raw JSONL record onto the shared dataset schema.
    new_example = dict(
        text_new=example["text"],
        source=source,
        domain="Legal",
        license="cc-by-sa-4.0",
        added="2025-03-24",
        created="2024-01-01, 2026-01-01",  # the scrape happened within these years; the data was likely written earlier
        metadata={"source-pretty": "Finansministeriets Udgivelser"},
    )
    return new_example


def main():
    data_path = Path(
        "/work/dfm-data/pre-training/fm-udgivelser/documents/finans-ministeriet.jsonl.gz"
    )
    ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
    ds = cast(Dataset, ds)
    # Replace the raw columns with the converted schema, then restore the
    # canonical "text" column name and add a stable per-row id.
    ds = ds.map(convert_sample, remove_columns=ds.column_names)
    ds = ds.rename_columns({"text_new": "text"})
    ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))])  # type: ignore
    ds = ds.select_columns(
        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
    )
    save_path = Path(__file__).parent / f"{source}.parquet"
    ds.to_parquet(save_path)


if __name__ == "__main__":
    main()
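Because the script carries PEP 723 inline metadata (the "# /// script" header), it can be run directly with a tool that understands it, such as uv, without setting up an environment by hand. As a minimal sketch, assuming the script was run in the current directory so that the output file is fm-udgivelser.parquet (the name the script derives from source), the result can be loaded back for a quick sanity check:

from datasets import load_dataset

# Load the parquet file written by the script above. The path is an
# assumption: adjust it if the file was saved elsewhere.
ds = load_dataset("parquet", data_files="fm-udgivelser.parquet", split="train")

# Verify that the columns selected by the script survived the conversion.
expected = {"text", "source", "id", "added", "created", "license", "domain", "metadata"}
assert expected.issubset(set(ds.column_names))
print(ds[0]["id"], ds[0]["source"], ds[0]["text"][:100])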