# mc4-es-sampled / mc4-es-sampled.py
"""Perplexity Sampled mC4 dataset based on Common Crawl."""
import gzip
import json
import datasets
import numpy as np
from numpy.random import default_rng
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
A 50M-document Spanish subset of mC4, AllenAI's processed version of Google's
colossal, cleaned Common Crawl web crawl corpus ("https://commoncrawl.org"),
selected by perplexity-based sampling.
"""

_CITATION = """
@article{2019t5,
    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
    journal = {arXiv e-prints},
    year = {2019},
    archivePrefix = {arXiv},
    eprint = {1910.10683},
}
"""
_URL = "https://github.com/allenai/allennlp/discussions/5056"
_DATA_URL_VALIDATION = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-es-validation.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
_DATA_URL_TRAIN = "https://huggingface.co/datasets/bertin-project/mc4-es-sampled/resolve/main/mc4-es-train-50M-{config}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
_CONFIGS = [
    "random",
    "stepwise",
    "gaussian",
]

_N_SHARDS_PER_SPLIT = {
    "random": {"train": 1024, "validation": 16},
    "stepwise": {"train": 1024, "validation": 16},
    "gaussian": {"train": 1024, "validation": 16},
}
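
# Usage sketch (illustrative, assuming this script is hosted on the Hugging
# Face Hub as "bertin-project/mc4-es-sampled", the repo named in
# _DATA_URL_TRAIN above):
#
#     from datasets import load_dataset
#
#     # Load a single sampling strategy by config name.
#     ds = load_dataset("bertin-project/mc4-es-sampled", "random", split="train")
#
#     # Or combine strategies by passing `configs`; the resulting config is
#     # named by joining with "+", e.g. "random+gaussian".
#     ds = load_dataset("bertin-project/mc4-es-sampled", configs=["random", "gaussian"])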

class Mc4EsSampledConfig(datasets.BuilderConfig):
    """BuilderConfig for mC4-es-sampled."""

    def __init__(self, *args, configs, **kwargs):
        """BuilderConfig for mC4-es-sampled.

        Args:
            configs (:obj:`List[str]`): list of sampling configs to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join(configs),
            **kwargs,
        )
        self.configs = configs
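
# For illustration (normally `load_dataset` constructs the config for you):
# combining configs yields a "+"-joined builder config name.
#
#     cfg = Mc4EsSampledConfig(configs=["random", "gaussian"])
#     assert cfg.name == "random+gaussian"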

class Mc4EsSampled(datasets.GeneratorBasedBuilder):
    """mC4-es-sampled, a perplexity-sampled subset of the Spanish portion of mC4."""

    BUILDER_CONFIGS = [Mc4EsSampledConfig(configs=[config]) for config in _CONFIGS]
    BUILDER_CONFIG_CLASS = Mc4EsSampledConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        data_urls["train"] = [
            _DATA_URL_TRAIN.format(
                config=config,
                index=index,
                n_shards=_N_SHARDS_PER_SPLIT[config]["train"],
            )
            for config in self.config.configs
            for index in range(_N_SHARDS_PER_SPLIT[config]["train"])
        ]
        # The validation split is the original mC4-es validation set, shared by
        # all sampling configs, so shard URLs are deduplicated when several
        # configs are combined. Shards are 0-indexed (00000 .. n_shards - 1),
        # following the TFRecord naming convention.
        data_urls["validation"] = sorted(
            {
                _DATA_URL_VALIDATION.format(
                    index=index,
                    n_shards=_N_SHARDS_PER_SPLIT[config]["validation"],
                )
                for config in self.config.configs
                for index in range(_N_SHARDS_PER_SPLIT[config]["validation"])
            }
        )
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": validation_downloaded_files},
            ),
        ]
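
    # For reference, the first shard of each split expands to the following
    # (derived from the URL templates above; not fetched or verified here):
    #
    #   train (random): .../mc4-es-sampled/resolve/main/mc4-es-train-50M-random-shard-0000-of-1024.json.gz
    #   validation:     .../allenai/c4/resolve/.../multilingual/c4-es-validation.tfrecord-00000-of-00016.json.gz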

    def _generate_examples(self, filepaths):
        """Yields examples in raw (text) form, iterating over all shard files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("Generating examples from = %s", filepath)
            # The inner `open` (which the `datasets` library patches in
            # streaming mode) is wrapped in `gzip.open` so shards are
            # decompressed on the fly.
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    # Shards are JSON Lines; skip blank lines.
                    if line.strip():
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
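
# Each yielded item is an (id, example) pair whose example dict matches the
# features declared in `_info`, e.g. (illustrative values only, not taken from
# the corpus):
#
#     (0, {
#         "text": "Texto de ejemplo extraído de la web...",
#         "timestamp": "2019-04-25T12:57:54Z",
#         "url": "https://example.com/articulo",
#     })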