import gradio as gr
import numpy as np
import random
import torch
import spaces
from PIL import Image
from diffusers import FlowMatchEulerDiscreteScheduler
from optimization import optimize_pipeline_
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
from huggingface_hub import InferenceClient
import math
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import os
import base64
from io import BytesIO
import json
import time # Added for history update delay
from gradio_client import Client, handle_file
import tempfile
def encode_image(pil_image):
    """Encode a PIL image as a base64 PNG string."""
    buffered = BytesIO()
    pil_image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")
# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder="transformer", torch_dtype=dtype, device_map="cuda",
    ),
    torch_dtype=dtype,
).to(device)
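# Fuse the "Next Scene" LoRA directly into the transformer weights, then unload the
# adapter bookkeeping so inference runs on the fused weights without LoRA overhead.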
pipe.load_lora_weights(
"lovis93/next-scene-qwen-image-lora-2509",
weight_name="next-scene_lora-v2-3000.safetensors", adapter_name="next-scene"
)
pipe.set_adapters(["next-scene"], adapter_weights=[1.])
pipe.fuse_lora(adapter_names=["next-scene"], lora_scale=1.)
pipe.unload_lora_weights()
# Ensure the transformer uses the local QwenImageTransformer2DModel class and install
# the FlashAttention-3 double-stream attention processor.
pipe.transformer.__class__ = QwenImageTransformer2DModel
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
# --- Ahead-of-time compilation ---
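# Warm up and compile the pipeline once with dummy 1024x1024 inputs so the first real
# request does not pay the compilation cost.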
optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
# --- UI Constants and Helpers ---
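# MAX_SEED is the largest 32-bit signed integer; it bounds the seed slider and random seeds.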
MAX_SEED = np.iinfo(np.int32).max
def use_output_as_input(output_images):
"""Convert output images to input format for the gallery"""
if output_images is None or len(output_images) == 0:
return []
return output_images
# --- Main Inference Function (with hardcoded negative prompt) ---
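# spaces.GPU requests a ZeroGPU worker for this function; duration=120 allows up to
# 120 seconds of GPU time per invocation.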
@spaces.GPU(duration=120)
def infer(
image,
prompt,
seed=120,
randomize_seed=False,
true_guidance_scale=1.0,
num_inference_steps=4,
progress=gr.Progress(track_tqdm=True),
):
"""
Generates an image using the local Qwen-Image diffusers pipeline.
"""
# Hardcode the negative prompt as requested
negative_prompt = " "
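    # A single space acts as a blank negative prompt; with the default true_cfg_scale of
    # 1.0 the negative branch has little to no effect on the output.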
if randomize_seed:
seed = random.randint(0, MAX_SEED)
# Set up the generator for reproducibility
generator = torch.Generator(device=device).manual_seed(seed)
print(f"Calling pipeline with prompt: '{prompt}'")
print(f"Negative Prompt: '{negative_prompt}'")
print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}")
# Generate the image
images = pipe(
image,
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=num_inference_steps,
generator=generator,
true_cfg_scale=true_guidance_scale,
num_images_per_prompt=1
).images
return images[0], seed
# --- Examples and UI Layout ---
examples = []
css = """
#col-container {
margin: 0 auto;
max-width: 1024px;
}
#edit_text{
margin-top: -62px !important
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.HTML('<img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Logo" width="400" style="display: block; margin: 0 auto;">')
gr.Markdown("[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series. Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image-Edit) to run locally with ComfyUI or diffusers.")
with gr.Row():
with gr.Column():
input_image = gr.Image(label="Input Image", show_label=False, type="pil")
result = gr.Image(label="Result", show_label=False, type="pil")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
placeholder="describe the edit instruction",
container=False,
)
run_button = gr.Button("Edit!", variant="primary")
with gr.Accordion("Advanced Settings", open=False):
# Negative prompt UI element is removed here
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
true_guidance_scale = gr.Slider(
label="True guidance scale",
minimum=1.0,
maximum=10.0,
step=0.1,
value=1.0
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=6,
)
gr.Examples(examples=[
["neon_sign.png", "change the text to read 'Qwen Image Edit is here'"],
["cat_sitting.jpg", "make the cat floating in the air and holding a sign that reads 'this is fun' written with a blue crayon"],
["pie.png", "turn the style of the photo to vintage comic book"]],
inputs=[input_image, prompt],
outputs=[result, seed],
fn=infer,
cache_examples="lazy")
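        # Run inference when the user clicks "Edit!" or presses Enter in the prompt box.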
gr.on(
triggers=[run_button.click, prompt.submit],
fn=infer,
inputs=[
input_image,
prompt,
seed,
randomize_seed,
true_guidance_scale,
num_inference_steps
],
outputs=[result, seed],
)
if __name__ == "__main__":
demo.launch()