Problem Loading the Dataset

#2
by MauroVazquez

I'm encountering two separate issues when trying to load the dataset projecte-aina/corts_valencianes_asr_a from Hugging Face using the datasets library.
Attempt 1:

corts_val = load_dataset(
    "projecte-aina/corts_valencianes_asr_a",
    None,
    split="clean_train_short", 
    token=HF_TOKEN,
)

As soon as it finishes downloading the files, it starts printing:

Extraction of ../d/08de2aad9b79cfcbbe1d83c925199c08.wav is blocked (illegal path)
Extraction of ../d/08d2ee0dc5546d8648016fdbf9ae905.wav is blocked (illegal path)
Extraction of ../d/08da668f1a0627951ed1711cbe26c36a.wav is blocked (illegal path)
Extraction of ../d/08d003e7881da129f1b7a93ae7634a3b.wav is blocked (illegal path)
....

I looked into the hub folder inside huggingface_cache and its contents appear to be laid out correctly. It takes up 80 GB.
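
Since all the blocked paths start with ../, a quick way to check whether a cached archive really contains such members is something like the sketch below (the archive path is a placeholder, not an actual file from my cache):

# Placeholder sketch: inspect one cached .tar.gz shard for members whose
# paths start with "../". The archive path below is a placeholder.
import tarfile

archive = "/opt/huggingface_cache/hub/datasets--projecte-aina--corts_valencianes_asr_a/blobs/<blob>"  # placeholder

with tarfile.open(archive, "r:gz") as tar:
    unsafe = [m.name for m in tar.getmembers() if m.name.startswith("../")]

print(len(unsafe), unsafe[:3])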
Attempt 2:
Just in case, I deleted the second argument (name) and retried loading:

corts_val = load_dataset(
    "projecte-aina/corts_valencianes_asr_a",
    split="clean_train_short", #[:20%] Not available on streaming dataset
    token=HF_TOKEN,
)

and it raised the following error:

---------------------------------------------------------------------------
IsADirectoryError                         Traceback (most recent call last)
Cell In[6], line 1
----> 1 corts_val = load_dataset(
      2     "projecte-aina/corts_valencianes_asr_a",
      3     split="clean_train_short",
      4     token=HF_TOKEN,
      5 )

File ~/.local/lib/python3.10/site-packages/datasets/load.py:2083, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, keep_in_memory, save_infos, revision, token, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
   2080     return builder_instance.as_streaming_dataset(split=split)
   2082 # Download and prepare data
-> 2083 builder_instance.download_and_prepare(
   2084     download_config=download_config,
   2085     download_mode=download_mode,
   2086     verification_mode=verification_mode,
   2087     num_proc=num_proc,
   2088     storage_options=storage_options,
   2089 )
   2091 # Build dataset for splits
   2092 keep_in_memory = (
   2093     keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
   2094 )

File ~/.local/lib/python3.10/site-packages/datasets/builder.py:925, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, dl_manager, base_path, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
    923 if num_proc is not None:
    924     prepare_split_kwargs["num_proc"] = num_proc
--> 925 self._download_and_prepare(
    926     dl_manager=dl_manager,
    927     verification_mode=verification_mode,
    928     **prepare_split_kwargs,
    929     **download_and_prepare_kwargs,
    930 )
    931 # Sync info
    932 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())

File ~/.local/lib/python3.10/site-packages/datasets/builder.py:1649, in GeneratorBasedBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs)
   1648 def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
-> 1649     super()._download_and_prepare(
   1650         dl_manager,
   1651         verification_mode,
   1652         check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
   1653         or verification_mode == VerificationMode.ALL_CHECKS,
   1654         **prepare_splits_kwargs,
   1655     )

File ~/.local/lib/python3.10/site-packages/datasets/builder.py:979, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
    977 split_dict = SplitDict(dataset_name=self.dataset_name)
    978 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
--> 979 split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
    981 # Checksums verification
    982 if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:

File /opt/huggingface_cache/modules/datasets_modules/datasets/projecte-aina--corts_valencianes_asr_a/88e5386f4b8a43e5aeac7fb2008ab02bb8468c55c7299b58eb5f1d54c378c9c3/corts_valencianes_asr_a.py:183, in CortsValencianesASR._split_generators(self, dl_manager)
    179 audio_paths = dl_manager.download(hash_tar_files)
    181 splits=["clean_train_short","clean_test_short","clean_dev_short","other_train_short","other_test_short","other_dev_short","clean_train_long","clean_test_long","clean_dev_long","other_train_long","other_test_long","other_dev_long"]
    182 local_extracted_audio_paths = (
--> 183     dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
    184     {
    185         split:[None] * len(audio_paths[split]) for split in splits
    186     }
    187 )
    189 return [
    190     datasets.SplitGenerator(
    191         name="clean_train_short",
   (...)
    285     ),
    286 ]

File ~/.local/lib/python3.10/site-packages/datasets/download/download_manager.py:299, in DownloadManager.extract(self, path_or_paths)
    297 download_config.extract_compressed_file = True
    298 extract_func = partial(self._download_single, download_config=download_config)
--> 299 extracted_paths = map_nested(
    300     extract_func,
    301     path_or_paths,
    302     num_proc=download_config.num_proc,
    303     desc="Extracting data files",
    304 )
    305 path_or_paths = NestedDataStructure(path_or_paths)
    306 extracted_paths = NestedDataStructure(extracted_paths)

File ~/.local/lib/python3.10/site-packages/datasets/utils/py_utils.py:496, in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, parallel_min_length, batched, batch_size, types, disable_tqdm, desc)
    494     num_proc = 1
    495 if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable):
--> 496     mapped = [
    497         map_nested(
    498             function=function,
    499             data_struct=obj,
    500             num_proc=num_proc,
    501             parallel_min_length=parallel_min_length,
    502             batched=batched,
    503             batch_size=batch_size,
    504             types=types,
    505         )
    506         for obj in iterable
    507     ]
    508 elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:
    509     if batched:

File ~/.local/lib/python3.10/site-packages/datasets/utils/py_utils.py:497, in <listcomp>(.0)
    494     num_proc = 1
    495 if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable):
    496     mapped = [
--> 497         map_nested(
    498             function=function,
    499             data_struct=obj,
    500             num_proc=num_proc,
    501             parallel_min_length=parallel_min_length,
    502             batched=batched,
    503             batch_size=batch_size,
    504             types=types,
    505         )
    506         for obj in iterable
    507     ]
    508 elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:
    509     if batched:

File ~/.local/lib/python3.10/site-packages/datasets/utils/py_utils.py:513, in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, parallel_min_length, batched, batch_size, types, disable_tqdm, desc)
    511         batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1)
    512     iterable = list(iter_batched(iterable, batch_size))
--> 513 mapped = [
    514     _single_map_nested((function, obj, batched, batch_size, types, None, True, None))
    515     for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
    516 ]
    517 if batched:
    518     mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]

File ~/.local/lib/python3.10/site-packages/datasets/utils/py_utils.py:514, in <listcomp>(.0)
    511         batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1)
    512     iterable = list(iter_batched(iterable, batch_size))
    513 mapped = [
--> 514     _single_map_nested((function, obj, batched, batch_size, types, None, True, None))
    515     for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
    516 ]
    517 if batched:
    518     mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]

File ~/.local/lib/python3.10/site-packages/datasets/utils/py_utils.py:375, in _single_map_nested(args)
    373         return function([data_struct])[0]
    374     else:
--> 375         return function(data_struct)
    376 if (
    377     batched
    378     and not isinstance(data_struct, dict)
    379     and isinstance(data_struct, types)
    380     and all(not isinstance(v, (dict, types)) for v in data_struct)
    381 ):
    382     return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)]

File ~/.local/lib/python3.10/site-packages/datasets/download/download_manager.py:229, in DownloadManager._download_single(self, url_or_filename, download_config)
    226 if is_relative_path(url_or_filename):
    227     # append the relative path to the base_path
    228     url_or_filename = url_or_path_join(self._base_path, url_or_filename)
--> 229 out = cached_path(url_or_filename, download_config=download_config)
    230 out = tracked_str(out)
    231 out.set_origin(url_or_filename)

File ~/.local/lib/python3.10/site-packages/datasets/utils/file_utils.py:252, in cached_path(url_or_filename, download_config, **download_kwargs)
    249             return output_path
    251     # Eager extraction
--> 252     output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
    253         output_path, force_extract=download_config.force_extract
    254     )
    255 return relative_to_absolute_path(output_path)

File ~/.local/lib/python3.10/site-packages/datasets/utils/extract.py:43, in ExtractManager.extract(self, input_path, force_extract)
     42 def extract(self, input_path: str, force_extract: bool = False) -> str:
---> 43     extractor_format = self.extractor.infer_extractor_format(input_path)
     44     if not extractor_format:
     45         return input_path

File ~/.local/lib/python3.10/site-packages/datasets/utils/extract.py:316, in Extractor.infer_extractor_format(cls, path)
    314 magic_number = cls._read_magic_number(path, magic_number_max_length)
    315 for extractor_format, extractor in cls.extractors.items():
--> 316     if extractor.is_extractable(path, magic_number=magic_number):
    317         return extractor_format

File ~/.local/lib/python3.10/site-packages/datasets/utils/extract.py:84, in TarExtractor.is_extractable(cls, path, **kwargs)
     82 @classmethod
     83 def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
---> 84     return tarfile.is_tarfile(path)

File /home/.conda/envs/whisperx/lib/python3.10/tarfile.py:2816, in is_tarfile(name)
   2814     t = open(fileobj=name)
   2815 else:
-> 2816     t = open(name)
   2817 t.close()
   2818 return True

File /home/.conda/envs/whisperx/lib/python3.10/tarfile.py:1833, in TarFile.open(cls, name, mode, fileobj, bufsize, **kwargs)
   1831     saved_pos = fileobj.tell()
   1832 try:
-> 1833     return func(name, "r", fileobj, **kwargs)
   1834 except (ReadError, CompressionError) as e:
   1835     error_msgs.append(f'- method {comptype}: {e!r}')

File /home/.conda/envs/whisperx/lib/python3.10/tarfile.py:1899, in TarFile.gzopen(cls, name, mode, fileobj, compresslevel, **kwargs)
   1896     raise CompressionError("gzip module is not available") from None
   1898 try:
-> 1899     fileobj = GzipFile(name, mode + "b", compresslevel, fileobj)
   1900 except OSError as e:
   1901     if fileobj is not None and mode == 'r':

File /home/.conda/envs/whisperx/lib/python3.10/gzip.py:174, in GzipFile.__init__(self, filename, mode, compresslevel, fileobj, mtime)
    172     mode += 'b'
    173 if fileobj is None:
--> 174     fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
    175 if filename is None:
    176     filename = getattr(fileobj, 'name', '')

IsADirectoryError: [Errno 21] Is a directory: '/opt/huggingface_cache/hub/datasets--projecte-aina--corts_valencianes_asr_a/snapshots/7e8f3355bec3ed1bd926ba26fe97e4bf134c4ed9'

I do not understand how this changed the error, given that None is already the default value of the name parameter.
I'd appreciate any insights into what is causing these errors or how I might work around them, whether I'm missing something in the expected way of loading the dataset, or whether a specific version of the datasets library would solve it.

Projecte Aina org

Hi Mauro,

Thanks for reporting this. We identified that some .tar.gz files in the dataset contained unsafe member paths (paths starting with ../), which is what caused the extraction errors you saw (Extraction of ... is blocked (illegal path)) and, in turn, the failure when loading the dataset.

We have now sanitized all the tar files by cleaning their internal paths to remove these components. After this fix, the extraction should work correctly without those errors.
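
For reference, the sanitization was roughly along the lines of the sketch below (illustrative only; the file names are placeholders, not the exact script we ran):

# Illustrative sketch: rewrite a tar.gz so no member path escapes the
# archive root. "dirty.tar.gz" and "clean.tar.gz" are placeholder names.
import tarfile

with tarfile.open("dirty.tar.gz", "r:gz") as src, \
     tarfile.open("clean.tar.gz", "w:gz") as dst:
    for member in src.getmembers():
        # Strip any leading "../" components from the member name.
        name = member.name
        while name.startswith("../"):
            name = name[len("../"):]
        member.name = name
        # Copy the (possibly renamed) member into the sanitized archive.
        fileobj = src.extractfile(member) if member.isfile() else None
        dst.addfile(member, fileobj)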

Please try loading the dataset again with the updated files and the instructions from the README, and let us know if you run into any further issues.
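
If the old archives are still in your local cache, forcing a fresh download should ensure you pick up the fixed files. download_mode="force_redownload" is a standard load_dataset option; the snippet below is only a suggestion based on your original call:

# Suggested re-load that ignores the previously cached (unsanitized) archives.
from datasets import load_dataset

corts_val = load_dataset(
    "projecte-aina/corts_valencianes_asr_a",
    split="clean_train_short",
    token=HF_TOKEN,
    download_mode="force_redownload",
)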
