# /// script
# requires-python = ">=3.12"
# dependencies = [
# "datasets>=3.2.0",
# ]
# ///
"""
This script is used to create the data for the AI-aktindsigt project.
This derived the data from a .json.gz file.
"""
from pathlib import Path
from typing import cast

from datasets import Dataset, load_dataset

source = "ai-aktindsigt"


def convert_sample(example):
# {'text': 'Vallensbæk Stationstorv 100 2665 Vallensbæk Strand Telefon: +45 4797 4000',
# 'id': '0_03fe7662f6d37df0ffbf5013907414f935350db9931043891a95ed830965a507a7bcb4df93741429bdfa4958cf25f6c273aa73146f2be80948f767eb5fa04645',
# 'source': 'AI-aktindsigt',
# 'added': '2024-04-16T12:35:52.000Z',
# 'metadata': {'url': 'https://vallensbaek.dk/', 'kommune': 'vallensbaek', 'sentence': 1,
# 'ppl_score': [634.6341],
# 'sha512': '03fe7662f6d37df0ffbf5013907414f935350db9931043891a95ed830965a507a7bcb4df93741429bdfa4958cf25f6c273aa73146f2be80948f767eb5fa04645'}
# }
new_example = dict(
text_new=example["text"],
source=source,
domain="Web",
license="Apache-2.0",
added="2025-03-24",
created="2010-01-01, 2024-03-18", # Start date is approximate guess end date is the date of the last update
metadata={"source-pretty": "AI Aktindsigt"},
)
return new_example
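

# A converted record is expected to look roughly like this (values are illustrative,
# pieced together from convert_sample above, not read from the actual dataset):
# {
#     "text": "Vallensbæk Stationstorv 100 2665 Vallensbæk Strand ...",
#     "source": "ai-aktindsigt",
#     "domain": "Web",
#     "license": "Apache-2.0",
#     "added": "2025-03-24",
#     "created": "2010-01-01, 2024-03-18",
#     "metadata": {"source-pretty": "AI Aktindsigt"},
# }
# The "id" column (f"{source}_{i}") is added afterwards in main().
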
def main():
data_path = Path(
"/work/dfm-data/pre-training/ai_aktindsigt/documents/ai_aktindsigt.jsonl.gz"
)
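    # Load the raw .jsonl.gz dump as a single "train" split via the generic json loader.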
ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
ds = cast(Dataset, ds)
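    # Convert every record to the shared schema and drop the original columns.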
ds = ds.map(convert_sample, remove_columns=ds.column_names)
ds = ds.rename_columns({"text_new": "text"})
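    # Assign sequential, source-prefixed ids of the form "ai-aktindsigt_<row index>".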
ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))]) # type: ignore
ds = ds.select_columns(
["text", "source", "id", "added", "created", "license", "domain", "metadata"]
)
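    # Write the converted dataset next to this script as "<source>.parquet".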
save_path = Path(__file__).parent / f"{source}.parquet"
ds.to_parquet(save_path)


if __name__ == "__main__":
main()
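
# Sketch of a quick sanity check after running the script (not part of the original
# pipeline; assumes the parquet file was written next to this script):
#
#     from datasets import Dataset
#     ds = Dataset.from_parquet("ai-aktindsigt.parquet")
#     assert set(ds.column_names) == {
#         "text", "source", "id", "added", "created", "license", "domain", "metadata"
#     }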