# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets==3.2.0",
#     "dynaword",
# ]
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword", rev = "00e7f2aee7f7ad2da423419f77ecbb9c0536de0d" }
# ///
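# (the block above is PEP 723 inline script metadata, consumed by `uv run`)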
"""
Script for downloading and processing the Scrape Hovedstaden texts.
Note: To run this script, you need to set `GIT_LFS_SKIP_SMUDGE=1` to be able to install dynaword:
```bash
GIT_LFS_SKIP_SMUDGE=1 uv run data/scrape_hovedstaden/create.py
```
"""

import logging
from datetime import datetime
from pathlib import Path
from typing import cast

from datasets import Dataset, load_dataset

from dynaword.process_dataset import (
add_token_count,
ensure_column_order,
remove_duplicate_text,
remove_empty_texts,
)
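
# The dynaword helpers above implement the shared post-processing steps: as the
# names suggest, they drop empty and duplicated documents, attach a per-document
# token count, and normalize the column order to the schema dynaword expects.
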
logger = logging.getLogger(__name__)


def main():
save_path = Path(__file__).parent / "scrape_hovedstaden.parquet"
# Download data from repo: Den-Intelligente-Patientjournal/region_hovedstaden_text
ds = load_dataset(
"Den-Intelligente-Patientjournal/region_hovedstaden_text", split="train"
)
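    # load_dataset may return a DatasetDict or Dataset; narrow the type for the steps below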
dataset: Dataset = cast(Dataset, ds)

    # Rename the cleaned-text column to the "text" column expected downstream
    dataset = dataset.rename_column("cleaned", "text")

    # Add created column: a single date-range string covering the collection period, 2015-2020
dataset = dataset.add_column("created", ["2015-01-01, 2020-12-31"] * len(dataset)) # type: ignore

    # Add added column: today
dataset = dataset.add_column(
"added", [datetime.today().date().strftime("%Y-%m-%d")] * len(dataset)
) # type: ignore

    # Add source column: scrape_hovedstaden
dataset = dataset.add_column("source", ["scrape_hovedstaden"] * len(dataset)) # type: ignore

    # Add id column: scrape_hovedstaden_{idx}
dataset = dataset.add_column(
"id", [f"scrape_hovedstaden_{i}" for i in range(len(dataset))]
) # type: ignore
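    # note: ids are assigned before filtering, so the sequence may contain gaps after deduplication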

    # quality checks and processing
dataset = remove_empty_texts(dataset)
dataset = remove_duplicate_text(dataset)
dataset = add_token_count(dataset)
dataset = ensure_column_order(dataset)

    # save to parquet
dataset.to_parquet(save_path)
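    logger.info("Saved %d documents to %s", len(dataset), save_path)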


if __name__ == "__main__":
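    logging.basicConfig(level=logging.INFO)  # make the module logger's output visible when run as a script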
main()