Datasets:
  Tasks: Text Generation
  Formats: parquet
  Sub-tasks: language-modeling
  Languages: Danish
  Size: 1M - 10M
  License:
# /// script
# requires-python = "==3.12"
# dependencies = [
#     "datasets==3.2.0",
#     "dynaword"
# ]
# [tool.uv.sources]
# dynaword = { git = "https://huggingface.co/datasets/danish-foundation-models/danish-dynaword", rev = "00e7f2aee7f7ad2da423419f77ecbb9c0536de0d" }
# ///
"""
Script for downloading and processing the Danish Memo repository.

Note: To run this script, you need to set `GIT_LFS_SKIP_SMUDGE=1` to be able to install dynaword:

```bash
GIT_LFS_SKIP_SMUDGE=1 uv run data/memo/create.py
```

This second version fixed previous issues with the download and processing of the Danish Memo repository:
https://huggingface.co/datasets/danish-foundation-models/danish-dynaword/discussions/67
"""
import logging
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Any

import pandas as pd
from datasets import Dataset

from dynaword.process_dataset import (
    add_token_count,
    ensure_column_order,
    remove_duplicate_text,
    remove_empty_texts,
)
logger = logging.getLogger(__name__)

# Working directory into which the corpus repository is cloned.
download_path = Path(__file__).parent / "tmp"


def download_repo(
    download_path: Path = download_path,
    repo_url: str = "https://huggingface.co/datasets/MiMe-MeMo/Corpus-v1.1",
    revision: str = "7205897f1f3ee65e296072f3e96d49488e54e8ce",
) -> Path:
    """Clone the MeMo corpus repository and pin it to a fixed revision.

    If the repository directory already exists, the clone is skipped and the
    existing checkout is returned as-is.

    Args:
        download_path: Directory the repository is cloned into.
        repo_url: URL of the Hugging Face dataset repository.
        revision: Commit hash checked out after cloning, for reproducibility.

    Returns:
        Path to the local repository checkout.
    """
    logger.info(f"Downloading repository to {download_path}")
    # mkdir(exist_ok=True) already tolerates an existing directory, so no
    # separate exists() check is needed.
    download_path.mkdir(parents=True, exist_ok=True)

    repo_path = download_path / repo_url.split("/")[-1]
    if repo_path.exists():
        logger.info(f"Repository already exists at {repo_path}, skipping download.")
        return repo_path

    # Clone from the download directory so the checkout lands under it.
    subprocess.run(["git", "clone", repo_url], check=True, cwd=download_path)
    # Pin to the exact revision so the produced dataset is reproducible.
    subprocess.run(["git", "checkout", revision], check=True, cwd=repo_path)
    logger.info("Download complete.")
    return repo_path
def load_texts(repo_path: Path) -> list[dict[str, str]]:
    """Read every ``*.txt`` file under ``<repo_path>/texts``.

    Args:
        repo_path: Local checkout of the MeMo corpus repository.

    Returns:
        One dict per file with keys ``name`` (the file stem) and ``text``
        (the full file contents).
    """
    text_files_path = repo_path / "texts"

    texts = []
    for file in text_files_path.glob("*.txt"):
        # Decode explicitly as UTF-8 so results do not depend on the locale
        # of the machine running the script.
        texts.append({"name": file.stem, "text": file.read_text(encoding="utf-8")})

    logger.info(f"Loaded {len(texts)} texts from the repository.")
    return texts
def load_memo(repo_path: Path) -> pd.DataFrame:
    """Join the corpus text files with their CSV metadata.

    Args:
        repo_path: Local checkout of the MeMo corpus repository.

    Returns:
        DataFrame with one row per text, combining the file contents with
        the metadata columns from the corpus CSV.

    Raises:
        ValueError: If any text file has no matching metadata row.
    """
    texts = load_texts(repo_path)

    metadata_csv = repo_path / "MeMo-corpus-metadata-v1.1-2023-06-20.csv"
    metadata = pd.read_csv(metadata_csv)
    # The metadata "filename" column refers to the PDFs; strip the extension
    # so it matches the stems of the extracted .txt files.
    metadata["filename"] = metadata["filename"].str.replace(".pdf", "", regex=False)

    texts_df = pd.DataFrame(texts)
    # Every text must have metadata; raise (not assert, which is stripped
    # under -O) if any are orphaned.
    texts_without_metadata = set(texts_df["name"]) - set(metadata["filename"])
    if texts_without_metadata:
        raise ValueError(
            f"Some texts in the repository do not have metadata: {sorted(texts_without_metadata)}"
        )

    # Inner merge: keeps exactly the texts that have a metadata row.
    merged_df = pd.merge(
        texts_df, metadata, left_on="name", right_on="filename", how="inner"
    )
    logger.info(f"Loaded {len(merged_df)} rows from the MeMo dataset.")
    return merged_df
def convert_to_dynaword_format(memo_df: pd.DataFrame) -> Dataset:
    """Convert the merged MeMo DataFrame into dynaword-format samples.

    Each sample's text is prefixed with a synthetic header holding the title,
    author, and publication details taken from the metadata columns.

    Args:
        memo_df: Output of ``load_memo`` — one row per text with metadata
            columns (title, subtitle, firstname, surname, pseudonym, year,
            publisher, filename).

    Returns:
        Dataset with columns ``id``, ``text``, ``source``, ``added``,
        ``created``.
    """
    # The "added" date is the same for every sample; compute it once instead
    # of per row.
    today = datetime.now().date().isoformat()

    samples: list[dict[str, Any]] = []
    for _, row in memo_df.iterrows():
        text = row["text"]
        assert isinstance(text, str), f"Text is not a string: {text}"

        # Fall back to Danish placeholders ("unknown title" / "unknown
        # author") when the metadata fields are missing.
        title = row["title"] if pd.notna(row["title"]) else "Ukendt titel"
        subtitle = row["subtitle"] if pd.notna(row["subtitle"]) else ""
        title = f"{title} {subtitle}".strip()

        # NOTE(review): NaN first/surnames render as the string "nan" here,
        # matching the original behavior — confirm against the metadata CSV.
        full_name = f"{row['firstname']} {row['surname']}".strip()
        pseudonym = row["pseudonym"]
        if not full_name:
            full_name = pseudonym if pd.notna(pseudonym) else "Ukendt forfatter"
        elif pd.notna(pseudonym) and pseudonym != full_name:
            # Append the pseudonym when it differs from the real name.
            full_name += f" (Pseudonym: {pseudonym})"

        # Prepend title/author/publication header to the raw text.
        text_new = f"{title}\n\nSkrevet af {full_name}\nPubliceret {row['year']} af {row['publisher']}\n ------- \n\n{text}"

        samples.append(
            {
                "id": row["filename"],
                "text": text_new,
                "source": "memo",
                "added": today,
                # Only the publication year is known, so the creation window
                # spans the whole year.
                "created": f"{row['year']}-01-01, {row['year']}-12-31",
            }
        )

    ds = Dataset.from_list(samples)
    logger.info(f"Converted to dynaword format with {len(ds)} samples.")
    return ds
def main():
    """Download the MeMo corpus, process it, and save it as parquet."""
    save_path = Path(__file__).parent / "memo.parquet"

    repo_path = download_repo(download_path)
    memo_df = load_memo(repo_path)
    ds = convert_to_dynaword_format(memo_df)

    # Shared dynaword quality checks and processing steps.
    ds = remove_empty_texts(ds)
    ds = remove_duplicate_text(ds)
    ds = add_token_count(ds)
    ds = ensure_column_order(ds)

    ds.to_parquet(save_path)
if __name__ == "__main__":
    # Log to both stderr and a file next to this script.
    log_path = Path(__file__).parent / "memo.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(log_path),
        ],
    )
    main()