import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch
import os
from huggingface_hub import login
import logging
from typing import List, Tuple
import gc
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Authenticate with Hugging Face
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN:
    login(HF_TOKEN)
    logger.info("Authenticated with Hugging Face using HF_TOKEN.")
else:
    logger.warning("HF_TOKEN not found in Space secrets. Relying on user login or public model access.")
# Device setup
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {device}")
# Model setup (10 NSFW-compatible models)
model_configs = [
    {"id": "runwayml/stable-diffusion-v1-5", "name": "Stable Diffusion v1.5"},  # Public model for testing
    {"id": "nsfw-model-2", "name": "NSFW Model 2"},  # Replace with actual model IDs
    {"id": "nsfw-model-3", "name": "NSFW Model 3"},
    {"id": "nsfw-model-4", "name": "NSFW Model 4"},
    {"id": "nsfw-model-5", "name": "NSFW Model 5"},
    {"id": "nsfw-model-6", "name": "NSFW Model 6"},
    {"id": "nsfw-model-7", "name": "NSFW Model 7"},
    {"id": "nsfw-model-8", "name": "NSFW Model 8"},
    {"id": "nsfw-model-9", "name": "NSFW Model 9"},
    {"id": "nsfw-model-10", "name": "NSFW Model 10"},
]
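# NOTE: the runwayml/stable-diffusion-v1-5 repo has since been removed from the Hub;
# a community mirror is available at stable-diffusion-v1-5/stable-diffusion-v1-5.
# The "nsfw-model-*" entries are placeholders and will fail to load as-is.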
# Initialize pipelines
pipelines = []
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
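# All pipelines stay resident on the GPU. An SD-1.5-class model is roughly 2 GB in
# fp16, so ten of them need on the order of 20 GB of VRAM; on smaller GPUs, consider
# pipe.enable_model_cpu_offload() or loading each model on demand instead.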
for config in model_configs:
    try:
        pipe = DiffusionPipeline.from_pretrained(
            config["id"],
            torch_dtype=torch_dtype,
            token=HF_TOKEN,  # `use_auth_token` is deprecated; token=None falls back to public access
        )
        pipe = pipe.to(device)
        # Disable the safety checker for NSFW models (only where the model license permits it)
        if hasattr(pipe, "safety_checker"):
            pipe.safety_checker = None
        pipelines.append({"pipe": pipe, "name": config["name"]})
        logger.info(f"Loaded model: {config['name']}")
    except Exception as e:
        logger.error(f"Failed to load model {config['id']}: {e}")
        # Keep a placeholder so infer() can report the failure and skip this slot
        pipelines.append({"pipe": None, "name": config["name"]})
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
# Inference function
def infer(
    prompt: str,
    negative_prompt: str,
    explicitness: float,
    seed: int,
    randomize_seed: bool,
    width: int,
    height: int,
    guidance_scale: float,
    num_inference_steps: int,
    progress=gr.Progress(track_tqdm=True),
) -> Tuple[List[Tuple[object, str]], int]:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Adjust the prompt based on the explicitness level
    if explicitness > 7:
        explicitness_prompt = ", highly detailed, explicit"
    elif explicitness > 4:
        explicitness_prompt = ", suggestive, detailed"
    else:
        explicitness_prompt = ", tasteful, subtle"
    full_prompt = f"{prompt}{explicitness_prompt}"
    results = []
    for pipeline in progress.tqdm(pipelines, desc="Generating images"):
        if pipeline["pipe"] is None:
            logger.warning(f"Skipping {pipeline['name']}: model failed to load")
            continue
        try:
            # Re-seed per model so every pipeline starts from the same noise
            generator = torch.Generator(device=device).manual_seed(seed)
            image = pipeline["pipe"](
                prompt=full_prompt,
                negative_prompt=negative_prompt,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,
                width=width,
                height=height,
                generator=generator,
            ).images[0]
            # gr.Gallery expects (image, caption) tuples, image first
            results.append((image, pipeline["name"]))
        except Exception as e:
            logger.error(f"Error generating image with {pipeline['name']}: {e}")
        # Clear memory after each model to prevent VRAM overflow
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()
    # Return the seed as well so the UI can show the value actually used
    return results, seed
# Example prompts (generic for testing)
examples = [
    "Fantasy character in a mystical forest, cinematic lighting",
    "Sci-fi figure in a neon-lit city, vibrant colors",
    "Elegant figure in a luxurious setting, ultra-realistic",
]
# Custom CSS for dark-themed UI
css = """
#col-container {
margin: 0 auto;
max-width: 1200px;
background: #1a1a1a;
padding: 20px;
border-radius: 10px;
}
body {
background: #0d0d0d;
color: #e0e0e0;
}
.gr-button {
background: #ff4d4d;
color: #fff;
border: none;
border-radius: 5px;
}
.gr-button:hover {
background: #cc0000;
}
.gr-slider input {
accent-color: #ff4d4d;
}
.gr-accordion {
background: #2a2a2a;
border-radius: 5px;
}
.warning {
color: #ff4d4d;
font-weight: bold;
text-align: center;
margin-bottom: 20px;
}
.gr-gallery img {
border: 2px solid #ff4d4d;
border-radius: 5px;
}
.gr-sidebar {
background: #2a2a2a;
border-right: 1px solid #ff4d4d;
}
"""
# Gradio UI
with gr.Blocks(css=css, theme="dark", fill_height=True) as demo:
    with gr.Sidebar():
        gr.Markdown("# NSFW Multi-Model Generator")
        gr.Markdown("This Space generates NSFW images using 10 diffusion models. Sign in with your Hugging Face account to access private models or authenticate API requests.")
        button = gr.LoginButton("Sign in to Hugging Face")
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            # Multi-Model NSFW Text-to-Image Generator
            <div class='warning'>⚠️ Warning: This tool generates explicit content from multiple models. Use responsibly and ensure compliance with local laws. For users 18+ only.</div>
            """
        )
        with gr.Row():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe your scene (e.g., 'fantasy character in a provocative setting')",
                lines=2,
                max_lines=5,
                show_label=False,
            )
            run_button = gr.Button("Generate", variant="primary")
        result = gr.Gallery(
            label="Generated Images",
            show_label=True,
            columns=5,
            height=800,
            object_fit="contain",
            allow_preview=True,
        )
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="e.g., blurry, low quality, unrealistic",
                lines=1,
            )
            explicitness = gr.Slider(
                label="Explicitness Level",
                minimum=1,
                maximum=10,
                step=1,
                value=5,
                info="Controls the intensity of explicit content (1 = subtle, 10 = very explicit).",
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=0.0,
                    maximum=15.0,
                    step=0.1,
                    value=7.5,
                )
                num_inference_steps = gr.Slider(
                    label="Inference Steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=30,
                )
        gr.Examples(examples=examples, inputs=[prompt])
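    # Run inference on both the Generate button click and Enter in the prompt box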
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            explicitness,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )
if __name__ == "__main__":
    demo.launch()