Datasets:
Tasks:
Text Generation
Formats:
parquet
Sub-tasks:
language-modeling
Languages:
Danish
Size:
1M - 10M
License:
import logging | |
from pathlib import Path | |
from typing import cast | |
from datasets import Dataset, load_dataset | |
from dynaword.process_dataset import ( | |
add_token_count, | |
ensure_column_order, | |
remove_duplicate_text, | |
remove_empty_texts, | |
) | |
from src.tests.readme_parsing import read_frontmatter_and_body | |
# Module-level logger plus a basic root-logger configuration for script runs.
logger = logging.getLogger(__name__)

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
root = Path(__file__).parent
data_path = root / "data"

# Top-level dataset card; its "configs" list drives which sub-datasets we process.
frontmatter, _ = read_frontmatter_and_body(root / "README.md")

# Skip the first entry of `configs` (presumably the combined/default config —
# confirm against README.md) and rebuild each remaining sub-dataset in place.
for dataset_cfg in frontmatter["configs"][1:]:
    config_name = dataset_cfg["config_name"]
    logger.info(f"Processing {config_name}")

    dataset_dir = data_path / config_name

    # Parse the per-dataset README so a malformed/missing card fails early.
    # Bind to a throwaway name: the original clobbered the outer `frontmatter`
    # here, which only worked because `[1:]` had already materialized the list.
    dataset_readme = dataset_dir / f"{config_name}.md"
    _dataset_frontmatter, _ = read_frontmatter_and_body(dataset_readme)

    ds = load_dataset(dataset_dir.as_posix(), split="train")
    ds = cast(Dataset, ds)  # narrow DatasetDict | Dataset for the type checker

    # Normalize: drop per-sample license/metadata columns, add a token count,
    # strip empty and duplicate texts, and enforce the canonical column order.
    ds = ds.remove_columns(["license", "metadata"])
    ds = add_token_count(ds)
    ds = remove_empty_texts(ds)
    ds = remove_duplicate_text(ds)
    ds = ensure_column_order(ds)

    # save dataset next to its README
    ds.to_parquet(dataset_dir / f"{config_name}.parquet")
    logger.info(f"Saved {config_name}.parquet")

    # Release the dataset before loading the next one to limit peak memory.
    del ds