Datasets:
Tasks:
Text Generation
Formats:
parquet
Sub-tasks:
language-modeling
Languages:
Danish
Size:
10M - 100M
ArXiv:
DOI:
License:
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets>=3.2.0",
# ]
# ///
from pathlib import Path
from typing import cast

from datasets import Dataset, load_dataset
source = "miljoeportalen"


def convert_sample(example):
    """Map one raw JSONL record onto the dynaword column schema.

    The original text is kept under a temporary ``text_new`` key (renamed
    to ``text`` once the raw columns are dropped) and fixed provenance
    fields for this source are attached to every record.
    """
    return {
        "text_new": example["text"],
        "source": source,
        "domain": "Web",
        "license": "cc0",
        "added": "2025-03-24",
        # Scrape happened within this window — the content was likely written earlier.
        "created": "2024-01-01, 2025-01-01",
        "metadata": {"source-pretty": "Miljøportalen"},
    }
def main():
    """Convert the Miljøportalen JSONL dump into a dynaword-format parquet file."""
    raw_file = Path(
        "/work/dfm-data/pre-training/miljoeportal/documents/miljoeportal.jsonl.gz"
    )
    dataset = load_dataset("json", data_files=raw_file.as_posix(), split="train")
    dataset = cast(Dataset, dataset)

    # Replace every raw column with the dynaword schema fields.
    dataset = dataset.map(convert_sample, remove_columns=dataset.column_names)
    dataset = dataset.rename_columns({"text_new": "text"})

    # Assign sequential, source-prefixed document ids.
    doc_ids = [f"{source}_{i}" for i in range(len(dataset))]
    dataset = dataset.add_column("id", doc_ids)  # type: ignore

    # Fix the column order expected downstream.
    dataset = dataset.select_columns(
        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
    )

    out_path = Path(__file__).parent / f"{source}.parquet"
    dataset.to_parquet(out_path)


if __name__ == "__main__":
    main()