""" | |
A simple CLI to updates descriptive statistics on all datasets. | |
Example use: | |
uv run src/dynaword/update_descriptive_statistics.py --dataset wikisource | |
""" | |
import argparse
import json
import logging
from pathlib import Path
from typing import cast

import plotly.express as px
from datasets import Dataset, load_dataset

from dynaword.datasheet import DataSheet
from dynaword.descriptive_stats import DescriptiveStatsOverview
from dynaword.git_utilities import (
    check_is_ancestor,
    get_latest_revision,
)
from dynaword.paths import repo_path
from dynaword.tables import create_overview_table, create_overview_table_str
logger = logging.getLogger(__name__)

main_sheet = DataSheet.load_from_path(repo_path / "README.md")
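
# Dataset configs declared in the main datasheet, excluding the aggregate "default" config.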
_datasets = [
    cfg["config_name"]  # type: ignore
    for cfg in main_sheet.frontmatter["configs"]  # type: ignore
    if cfg["config_name"] != "default"  # type: ignore
]


def create_domain_distribution_plot(
    save_dir: Path = repo_path,
):
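    """Create a sunburst plot of token counts by domain and source.

    The plot is saved to `save_dir / "images" / "domain_distribution.png"`.
    """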
    df = create_overview_table(
        add_readable_tokens=False, add_total_row=False, add_readme_references=False
    )

    fig = px.sunburst(df, path=["Domain", "Source"], values="N. Tokens")
    fig.update_traces(textinfo="label+percent entry")
    fig.update_layout(title="Dataset Distribution by Domain and Source")

    img_path = save_dir / "images"
    img_path.mkdir(parents=False, exist_ok=True)
    save_path = img_path / "domain_distribution.png"

    fig.write_image(
        save_path,
        width=800,
        height=800,
        scale=2,
    )


def update_dataset(
    dataset_name: str,
    force: bool = False,
) -> None:
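    """Recompute descriptive statistics for `dataset_name` and update its datasheet.

    If `descriptive_stats.json` already exists and its recorded revision is an
    ancestor of the latest revision, the computation is skipped unless `force` is set.
    """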
    dataset_path = (
        repo_path / "data" / dataset_name if dataset_name != "default" else repo_path
    )
    if dataset_name == "default":
        readme_name = "README.md"
    else:
        readme_name = f"{dataset_name}.md"

    rev = get_latest_revision(dataset_path)
    desc_stats_path = dataset_path / "descriptive_stats.json"
    markdown_path = dataset_path / readme_name
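
    # Skip recomputation if the stored stats were computed at a revision that is
    # already an ancestor of the latest revision (unless force is set).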
    if desc_stats_path.exists() and force is False:
        with desc_stats_path.open("r") as f:
            last_update = json.load(f).get("revision", None)

        if last_update is None:
            logger.warning(f"revision is not defined in {desc_stats_path}.")
        elif check_is_ancestor(ancestor_rev=last_update, rev=rev):
            logger.info(
                f"descriptive statistics for '{dataset_name}' are already up to date, skipping."
            )
            return
    logger.info(f"Computing descriptive stats for: {dataset_name}")
    ds = load_dataset(str(repo_path), dataset_name, split="train")
    ds = cast(Dataset, ds)
    desc_stats = DescriptiveStatsOverview.from_dataset(ds)
    desc_stats.to_disk(desc_stats_path)

    logger.info(f"Updating datasheet for: {dataset_name}")
    sheet = DataSheet.load_from_path(markdown_path)
    sheet.body = sheet.add_descriptive_stats(descriptive_stats=desc_stats)
    sheet.body = sheet.add_sample_and_description(ds)
    sheet.body = sheet.add_dataset_plots(ds, create_plot=True)
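
    # For the main datasheet ("default"), also refresh the overview table and the
    # domain distribution plot.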
    if dataset_name == "default":
        logger.info("Updating Overview table")
        package = create_overview_table_str()
        sheet.body = sheet.replace_tag(package=package, tag="MAIN TABLE")
        create_domain_distribution_plot()

    sheet.write_to_path()


def create_parser():
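    """Build the command-line argument parser."""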
    parser = argparse.ArgumentParser(
        description="Calculate descriptive statistics for the datasets in the data folder."
    )
    parser.add_argument(
        "--dataset",
        default=None,
        type=str,
        help="Compute the statistics for a single dataset only.",
    )
    parser.add_argument(
        "--logging_level",
        default=20,
        type=int,
        help="Sets the logging level. Defaults to 20 (INFO); other reasonable levels are 10 (DEBUG) and 30 (WARNING).",
    )
    parser.add_argument(
        "--force",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Forcefully recompute the statistics. By default they are only recomputed when the stored revision is not an ancestor of the latest commit.",
    )
    parser.add_argument(
        "--repo_path",
        default=str(repo_path),
        type=str,
        help="The repository to calculate the descriptive statistics for.",
    )
    return parser


def main(
    dataset: str | None = None,
    logging_level: int = 20,
    force: bool = False,
    repo_path: Path = repo_path,
) -> None:
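    """Update descriptive statistics for `dataset`, or for all datasets plus the
    main ("default") datasheet when no dataset is given.
    """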
    logging.basicConfig(level=logging_level)

    if dataset:
        update_dataset(dataset, force=force)
    else:
        for dataset_name in _datasets:
            update_dataset(dataset_name, force=force)
        update_dataset("default", force=force)


if __name__ == "__main__":
    parser = create_parser()
    args = parser.parse_args()

    main(
        args.dataset,
        logging_level=args.logging_level,
        force=args.force,
        repo_path=Path(args.repo_path),
    )