import os
import pickle
import random

import datasets
from datasets import DatasetInfo, DownloadManager, Image
from tqdm import tqdm


_URLS = {
    "train": "train/filenames.pickle",
    "test": "test/filenames.pickle",
    "images": "images.zip",
    "text": "text.zip",
}
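# Note: images.zip and text.zip are expected to extract to folders containing
# an "images/" and a "text/" subdirectory, respectively. For every image
# <name>.jpg there is a caption file <name>.txt with one caption per line,
# and the train/test pickles list the <name> entries belonging to each split.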
|
|
|
class CUB200Dataset(datasets.GeneratorBasedBuilder):
    """Image-caption pairs for the CUB-200 (Caltech-UCSD Birds) dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full"),
    ]
|
    def _info(self) -> DatasetInfo:
        features = {
            "image": datasets.Image(),
            "text": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            features=datasets.Features(features),
            supervised_keys=None,
            citation="",
        )
|
    def _split_generators(self, dl_manager: DownloadManager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        images_path = os.path.join(downloaded_files["images"], "images")
        text_path = os.path.join(downloaded_files["text"], "text")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": downloaded_files["train"],
                            "images_path": images_path, "text_path": text_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": downloaded_files["test"],
                            "images_path": images_path, "text_path": text_path},
            ),
        ]
|
    def _generate_examples(self, file_path, images_path, text_path):
        # The pickle file holds the list of image base names for this split.
        with open(file_path, "rb") as f:
            name_list = pickle.load(f, encoding="bytes")
        for index, name in enumerate(tqdm(name_list)):
            # Each caption file holds several candidate captions; sample one.
            this_text_path = os.path.join(text_path, name + ".txt")
            with open(this_text_path, "r") as f:
                caption = random.choice(f.readlines()).replace("\n", "").lower()
            image = Image().encode_example(os.path.join(images_path, name + ".jpg"))
            yield index, {
                "image": image,
                "text": caption,
            }
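

# Usage sketch (not part of the loader): how this script could be loaded with
# the datasets library. This assumes a datasets version that still supports
# Python loading scripts and that the files in _URLS sit next to the script;
# "path/to/this_script.py" is a placeholder, not a real path.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/this_script.py", name="full")
#     print(ds["train"][0]["text"])   # a randomly sampled caption
#     ds["train"][0]["image"]         # decoded as a PIL image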