#!/usr/bin/env python

from __future__ import annotations

import io
import tarfile

import gradio as gr
import numpy as np
import PIL.Image
from huggingface_hub import hf_hub_download

TITLE = "TADNE (This Anime Does Not Exist) Image Selector"
DESCRIPTION = """The original TADNE site is https://thisanimedoesnotexist.ai/.

You can view images generated by the TADNE model for seeds 0-99999 and
filter them based on predictions by the [DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) model.
The output images in this app are 128x128, but you can view the original
512x512 images at URLs like
https://thisanimedoesnotexist.ai/slider.html?seed=10000 using the output seeds.

Expected execution time on Hugging Face Spaces: 4s

Related Apps:
- [TADNE](https://huggingface.co/spaces/hysts/TADNE)
- [TADNE Image Viewer](https://huggingface.co/spaces/hysts/TADNE-image-viewer)
- [TADNE Interpolation](https://huggingface.co/spaces/hysts/TADNE-interpolation)
- [TADNE Image Search with DeepDanbooru](https://huggingface.co/spaces/hysts/TADNE-image-search-with-DeepDanbooru)
- [DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru)
"""

def load_deepdanbooru_tag_dict() -> dict[str, int]:
    """Map each DeepDanbooru tag name to its column index in the prediction array."""
    path = hf_hub_download("public-data/DeepDanbooru", "tags.txt")
    with open(path) as f:
        tags = [line.strip() for line in f]
    return {tag: i for i, tag in enumerate(tags)}
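
# A minimal sketch of the mapping returned above (tag names and positions are
# illustrative, not the actual contents of tags.txt):
#   {"1girl": 0, "1boy": 1, ..., "smile": 42}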

def load_deepdanbooru_predictions(dirname: str) -> np.ndarray:
    """Load precomputed DeepDanbooru scores for all sample images in `dirname`."""
    path = hf_hub_download(
        "hysts/TADNE-sample-images",
        f"prediction_results/deepdanbooru/{dirname}.npy",
        repo_type="dataset",
    )
    return np.load(path)
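
# The loaded array is assumed to have shape (num_images, num_tags): one row of
# DeepDanbooru scores per sample image, with columns ordered as in tags.txt.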

image_size = 128
min_seed = 0
max_seed = 99999
dirname = "0-99999"

tarball_path = hf_hub_download("hysts/TADNE-sample-images", f"{image_size}/{dirname}.tar", repo_type="dataset")
deepdanbooru_tag_dict = load_deepdanbooru_tag_dict()
deepdanbooru_predictions = load_deepdanbooru_predictions(dirname)
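
# Quick shape sanity check (a sketch; assumes one prediction row per seed in
# the 0-99999 range and one score column per tag in tags.txt):
#   assert deepdanbooru_predictions.shape == (max_seed - min_seed + 1, len(deepdanbooru_tag_dict))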

def run(
    general_tags: list[str],
    hair_color_tags: list[str],
    hair_style_tags: list[str],
    eye_color_tags: list[str],
    image_color_tags: list[str],
    other_tags: list[str],
    additional_tags_str: str,
    score_threshold: float,
    start_index: int,
    nrows: int,
    ncols: int,
) -> tuple[int, np.ndarray, np.ndarray, str]:
    hair_color_tags = [f"{color}_hair" for color in hair_color_tags]
    eye_color_tags = [f"{color}_eyes" for color in eye_color_tags]
    # Ignore empty entries so an empty textbox does not produce a bogus "" tag.
    additional_tags = [tag.strip() for tag in additional_tags_str.split(",") if tag.strip()]
    tags = (
        general_tags
        + hair_color_tags
        + hair_style_tags
        + eye_color_tags
        + image_color_tags
        + other_tags
        + additional_tags
    )
    missing_tags = [tag for tag in tags if tag not in deepdanbooru_tag_dict]
    tag_indices = [deepdanbooru_tag_dict[tag] for tag in tags if tag in deepdanbooru_tag_dict]
    # Keep only images whose score exceeds the threshold for every selected tag.
    conditions = deepdanbooru_predictions[:, tag_indices] > score_threshold
    image_indices = np.arange(len(deepdanbooru_predictions))
    image_indices = image_indices[conditions.all(axis=1)]
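    # For example, with tag_indices = [3, 7] (hypothetical columns) and a
    # threshold of 0.5, `conditions` is a (num_images, 2) boolean mask and
    # `.all(axis=1)` keeps only the images that pass both tags.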
    start_index = int(start_index)
    # Gradio sliders may deliver floats, so cast before using them in reshape.
    nrows = int(nrows)
    ncols = int(ncols)
    num = nrows * ncols
    seeds = []
    images = []
    # White placeholder tile used when there are fewer matches than grid cells.
    dummy = np.ones((image_size, image_size, 3), dtype=np.uint8) * 255
    with tarfile.TarFile(tarball_path) as tar_file:
        for index in range(start_index, start_index + num):
            if index >= len(image_indices):
                seeds.append(np.nan)
                images.append(dummy)
                continue
            image_index = image_indices[index]
            seeds.append(image_index)
            member = tar_file.getmember(f"{dirname}/{image_index:07d}.jpg")
            with tar_file.extractfile(member) as f:  # type: ignore
                data = io.BytesIO(f.read())
            image = PIL.Image.open(data)
            image = np.asarray(image)
            images.append(image)
    # Tile the images into an (nrows * image_size, ncols * image_size, 3) grid.
    res = (
        np.asarray(images)
        .reshape(nrows, ncols, image_size, image_size, 3)
        .transpose(0, 2, 1, 3, 4)
        .reshape(nrows * image_size, ncols * image_size, 3)
    )
    seeds = np.asarray(seeds).reshape(nrows, ncols)
    return len(image_indices), res, seeds, ",".join(missing_tags)
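
# A hypothetical direct call (the tag values here are illustrative; they must
# be valid DeepDanbooru tags for the filter to match anything):
#   num_found, grid, seed_grid, missing = run(
#       ["1girl"], ["blue"], ["long_hair"], ["red"], [], ["smile"], "", 0.5, 0, 2, 5
#   )
#   grid.shape == (2 * 128, 5 * 128, 3)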

demo = gr.Interface(
    fn=run,
    inputs=[
        gr.CheckboxGroup(
            label="General",
            choices=[
                "1girl",
                "1boy",
                "multiple_girls",
                "multiple_boys",
                "looking_at_viewer",
            ],
        ),
        gr.CheckboxGroup(
            label="Hair Color",
            choices=[
                "aqua",
                "black",
                "blonde",
                "blue",
                "brown",
                "green",
                "grey",
                "orange",
                "pink",
                "purple",
                "red",
                "silver",
                "white",
            ],
        ),
        gr.CheckboxGroup(
            label="Hair Style",
            choices=[
                "bangs",
                "curly_hair",
                "long_hair",
                "medium_hair",
                "messy_hair",
                "ponytail",
                "short_hair",
                "straight_hair",
                "twintails",
            ],
        ),
        gr.CheckboxGroup(
            label="Eye Color",
            choices=[
                "aqua",
                "black",
                "blue",
                "brown",
                "green",
                "grey",
                "orange",
                "pink",
                "purple",
                "red",
                "white",
                "yellow",
            ],
        ),
        gr.CheckboxGroup(
            label="Image Color",
            choices=[
                "greyscale",
                "monochrome",
            ],
        ),
        gr.CheckboxGroup(
            label="Others",
            choices=[
                "animal_ears",
                "closed_eyes",
                "full_body",
                "hat",
                "smile",
            ],
        ),
        gr.Textbox(label="Additional Tags"),
        gr.Slider(label="DeepDanbooru Score Threshold", minimum=0, maximum=1, step=0.1, value=0.5),
        gr.Number(label="Start Index", value=0),
        gr.Slider(label="Number of Rows", minimum=0, maximum=10, step=1, value=2),
        gr.Slider(label="Number of Columns", minimum=0, maximum=10, step=1, value=5),
    ],
    outputs=[
        gr.Textbox(label="Number of Found Images"),
        gr.Image(label="Output"),
        gr.Dataframe(label="Seed"),
        gr.Textbox(label="Missing Tags"),
    ],
    title=TITLE,
    description=DESCRIPTION,
)

if __name__ == "__main__":
    demo.queue().launch()