fixed FileNotFoundError
I got this error when using this dataset:
FileNotFoundError: Couldn't find file at https://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/Bacteroides_acidifaciens/latest_assembly_versions/GCF_000944395.1_1e8A_assembly/GCF_000944395.1_1e8A_assembly_genomic.fna.gz
so I added two functions to validate the URLs before downloading them.
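For context, the failing URL can be probed directly before running the builder. Here is a minimal sketch of that check, assuming only that the `requests` package is installed; it is the same probe the fix below automates:

```python
import requests

# URL copied verbatim from the traceback above. Stale entries in urls.txt
# presumably point at assembly paths that NCBI has since moved or removed,
# so the HEAD request is expected to come back with a non-200 status.
url = (
    "https://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/Bacteroides_acidifaciens/"
    "latest_assembly_versions/GCF_000944395.1_1e8A_assembly/"
    "GCF_000944395.1_1e8A_assembly_genomic.fna.gz"
)
response = requests.head(url, allow_redirects=True)
print(response.status_code)  # anything other than 200 means the download would fail
```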
multi_species_genomes.py (+29, -1)
```diff
@@ -15,10 +15,13 @@
 """Script for the multi-species genomes dataset. This dataset contains the genomes
 from 850 different species."""
 
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from typing import List
 import datasets
 import pandas as pd
 from Bio import SeqIO
+import requests
+
 
 
 # Find for instance the citation on arxiv or on the dataset repo/website
```
```diff
@@ -71,6 +74,28 @@ def clean_sequence(seq: str) -> str:
     seq = ''.join(list(seq))
     return seq
 
+def validate_url(url: str) -> bool:
+    """Check if the URL is valid by sending a HEAD request."""
+    try:
+        response = requests.head(url, allow_redirects=True)
+        return response.status_code == 200
+    except requests.RequestException as e:
+        print(f"Error validating URL {url}: {e}")
+        return False
+
+def validate_urls_concurrently(urls: List[str], max_workers: int = 10) -> List[str]:
+    """Validate URLs concurrently and return a list of valid URLs."""
+    valid_urls = []
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        future_to_url = {executor.submit(validate_url, url): url for url in urls}
+        for future in as_completed(future_to_url):
+            url = future_to_url[future]
+            try:
+                if future.result():
+                    valid_urls.append(url)
+            except Exception as e:
+                print(f"Error processing URL {url}: {e}")
+    return valid_urls
 
 class MultiSpeciesGenomesConfig(datasets.BuilderConfig):
     """BuilderConfig for The Human Reference Genome."""
```
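The two helpers can be exercised on their own as a quick sanity check. A sketch, where both URLs are illustrative placeholders rather than real entries from urls.txt:

```python
# Standalone use of the helpers added above. The URLs are placeholders;
# in practice they would be lines read from urls.txt.
candidate_urls = [
    "https://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/example_a_genomic.fna.gz",
    "https://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/example_b_genomic.fna.gz",
]
valid = validate_urls_concurrently(candidate_urls, max_workers=4)
print(f"{len(valid)} of {len(candidate_urls)} URLs passed validation")
```

One caveat: some servers answer GET requests but reject HEAD, so a strict 200-only check can occasionally discard a URL that would download fine.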
```diff
@@ -131,7 +156,10 @@ class MultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
         urls_filepath = dl_manager.download_and_extract('urls.txt')
         with open(urls_filepath) as urls_file:
             urls = [line.rstrip() for line in urls_file]
-
+
+        # Validate URLs
+        urls=validate_urls_concurrently(urls)
+
         test_urls = urls[-50:] # 50 genomes for test set
         validation_urls = urls[-100:-50] # 50 genomes for validation set
         train_urls = urls[:-100] # 800 genomes for training
```
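Two side effects of the new call are worth flagging. First, `as_completed` yields futures in completion order, so `valid_urls` does not preserve the order of urls.txt and the membership of each split can vary from run to run. Second, every URL that fails validation shrinks the training split, since the last 100 entries are always reserved for test and validation. A hypothetical order-preserving variant with a warning, sketched here rather than taken from the commit (it reuses `validate_url` from above):

```python
# Hypothetical alternative (not in the commit): executor.map preserves input
# order, unlike collecting results via as_completed, so the splits stay
# deterministic; the warning surfaces silently shrinking training data.
with ThreadPoolExecutor(max_workers=10) as executor:
    reachable = list(executor.map(validate_url, urls))
urls = [url for url, ok in zip(urls, reachable) if ok]
if len(reachable) > len(urls):
    print(f"Warning: dropped {len(reachable) - len(urls)} unreachable URLs; "
          f"the training split will hold fewer than 800 genomes.")
```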
|