Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 1M - 10M
License: cc-by-sa-4.0
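The conversion script below produces a single parquet file in this format. As a minimal sketch of reading the result back (assuming the file sits in the working directory under the name the script writes):

from datasets import load_dataset

# Load the converted file; "cellar.parquet" is the output name used by the script below.
ds = load_dataset("parquet", data_files="cellar.parquet", split="train")
print(ds.column_names)  # ['text', 'source', 'id', 'added', 'created', 'license', 'domain', 'metadata']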
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets>=3.2.0",
# ]
# ///
from pathlib import Path
from typing import cast

from datasets import Dataset, concatenate_datasets, load_dataset

source = "cellar"


def convert_sample(example: dict) -> dict:
    # Map a raw Cellar record onto the shared dataset schema.
    new_example = dict(
        text_new=example["text"],
        source=source,
        domain="Legal",
        license="cc-by-sa-4.0",
        added="2025-03-25",
        created="2024-01-01, 2026-01-01",  # the scrape happened within these years; the data was likely written earlier
        metadata={"source-pretty": "Cellar"},
    )
    return new_example


def main():
    data_path = Path("/work/dfm-data/pre-training/cellar/documents")
    data_paths = [p.as_posix() for p in data_path.glob("DAN*.jsonl.gz")]

    # Load each Danish shard separately; a few files are malformed and fail to load.
    dsets = []
    for i, path in enumerate(data_paths):
        print(i, path.split("/")[-1])
        try:
            ds = load_dataset("json", data_files=path, split="train")
            dsets.append(ds)
            print("\tSuccess")
        except Exception:
            print("\tFail")

    ds = concatenate_datasets(dsets)
    ds = cast(Dataset, ds)

    # Convert every sample to the shared schema, dropping all original columns.
    ds = ds.map(convert_sample, remove_columns=ds.column_names)
    ds = ds.rename_columns({"text_new": "text"})
    ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))])  # type: ignore
    ds = ds.select_columns(
        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
    )

    save_path = Path(__file__).parent / f"{source}.parquet"
    ds.to_parquet(save_path)


if __name__ == "__main__":
    main()
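Since the script carries inline PEP 723 metadata, it can be run directly with uv (e.g. `uv run create.py`; the file name is assumed here). A small sanity-check sketch for the output, verifying that the ids assigned by add_column are unique:

from datasets import load_dataset

ds = load_dataset("parquet", data_files="cellar.parquet", split="train")
assert len(set(ds["id"])) == ds.num_rows  # one unique id per row, by construction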