# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import gradio as gr import huggingface_hub import pillow_avif import spaces import torch import gc from huggingface_hub import snapshot_download from pillow_heif import register_heif_opener from pipelines.pipeline_infu_flux import InfUFluxPipeline # Register HEIF support for Pillow register_heif_opener() class ModelVersion: STAGE_1 = "sim_stage1" STAGE_2 = "aes_stage2" DEFAULT_VERSION = STAGE_2 ENABLE_ANTI_BLUR_DEFAULT = False ENABLE_REALISM_DEFAULT = False loaded_pipeline_config = { "model_version": "aes_stage2", "enable_realism": False, "enable_anti_blur": False, 'pipeline': None } def download_models(): snapshot_download(repo_id='ByteDance/InfiniteYou', local_dir='./models/InfiniteYou', local_dir_use_symlinks=False) try: snapshot_download(repo_id='black-forest-labs/FLUX.1-dev', local_dir='./models/FLUX.1-dev', local_dir_use_symlinks=False) except Exception as e: print(e) print('\nYou are downloading `black-forest-labs/FLUX.1-dev` to `./models/FLUX.1-dev` but failed. ' 'Please accept the agreement and obtain access at https://huggingface.co/black-forest-labs/FLUX.1-dev. ' 'Then, use `huggingface-cli login` and your access tokens at https://huggingface.co/settings/tokens to authenticate. ' 'After that, run the code again.') print('\nYou can also download it manually from HuggingFace and put it in `./models/InfiniteYou`, ' 'or you can modify `base_model_path` in `app.py` to specify the correct path.') exit() def init_pipeline(model_version, enable_realism, enable_anti_blur): loaded_pipeline_config["enable_realism"] = enable_realism loaded_pipeline_config["enable_anti_blur"] = enable_anti_blur loaded_pipeline_config["model_version"] = model_version pipeline = loaded_pipeline_config['pipeline'] gc.collect() torch.cuda.empty_cache() model_path = f'./models/InfiniteYou/infu_flux_v1.0/{model_version}' print(f'loading model from {model_path}') pipeline = InfUFluxPipeline( base_model_path='./models/FLUX.1-dev', infu_model_path=model_path, insightface_root_path='./models/InfiniteYou/supports/insightface', image_proj_num_tokens=8, infu_flux_version='v1.0', model_version=model_version, ) loaded_pipeline_config['pipeline'] = pipeline pipeline.pipe.delete_adapters(['realism', 'anti_blur']) loras = [] if enable_realism: loras.append(['realism', 1.0]) if enable_anti_blur: loras.append(['anti_blur', 1.0]) pipeline.load_loras_state_dict(loras) return pipeline def prepare_pipeline(model_version, enable_realism, enable_anti_blur): if ( loaded_pipeline_config['pipeline'] is not None and loaded_pipeline_config["enable_realism"] == enable_realism and loaded_pipeline_config["enable_anti_blur"] == enable_anti_blur and model_version == loaded_pipeline_config["model_version"] ): return loaded_pipeline_config['pipeline'] loaded_pipeline_config["enable_realism"] = enable_realism loaded_pipeline_config["enable_anti_blur"] = enable_anti_blur loaded_pipeline_config["model_version"] = model_version pipeline = loaded_pipeline_config['pipeline'] if pipeline is None or pipeline.model_version != model_version: print(f'Switching model to {model_version}') pipeline.model_version = model_version if model_version == 'aes_stage2': pipeline.infusenet_sim.cpu() pipeline.image_proj_model_sim.cpu() torch.cuda.empty_cache() pipeline.infusenet_aes.to('cuda') pipeline.pipe.controlnet = pipeline.infusenet_aes pipeline.image_proj_model_aes.to('cuda') pipeline.image_proj_model = pipeline.image_proj_model_aes else: pipeline.infusenet_aes.cpu() pipeline.image_proj_model_aes.cpu() torch.cuda.empty_cache() pipeline.infusenet_sim.to('cuda') pipeline.pipe.controlnet = pipeline.infusenet_sim pipeline.image_proj_model_sim.to('cuda') pipeline.image_proj_model = pipeline.image_proj_model_sim loaded_pipeline_config['pipeline'] = pipeline pipeline.pipe.delete_adapters(['realism', 'anti_blur']) loras = [] if enable_realism: loras.append(['realism', 1.0]) if enable_anti_blur: loras.append(['anti_blur', 1.0]) pipeline.load_loras_state_dict(loras) return pipeline @spaces.GPU(duration=120) def generate_image( input_image, control_image, prompt, seed, width, height, guidance_scale, num_steps, infusenet_conditioning_scale, infusenet_guidance_start, infusenet_guidance_end, enable_realism, enable_anti_blur, model_version ): try: pipeline = prepare_pipeline(model_version=model_version, enable_realism=enable_realism, enable_anti_blur=enable_anti_blur) if seed == 0: seed = torch.seed() & 0xFFFFFFFF image = pipeline( id_image=input_image, prompt=prompt, control_image=control_image, seed=seed, width=width, height=height, guidance_scale=guidance_scale, num_steps=num_steps, infusenet_conditioning_scale=infusenet_conditioning_scale, infusenet_guidance_start=infusenet_guidance_start, infusenet_guidance_end=infusenet_guidance_end, ) except Exception as e: print(e) gr.Error(f"An error occurred: {e}") return gr.update() return gr.update(value=image, label=f"Generated Image, seed = {seed}") def generate_examples(id_image, control_image, prompt_text, seed, enable_realism, enable_anti_blur, model_version): return generate_image(id_image, control_image, prompt_text, seed, 864, 1152, 3.5, 30, 1.0, 0.0, 1.0, enable_realism, enable_anti_blur, model_version) sample_list = [ ['./assets/examples/yann-lecun_resize.jpg', None, 'A sophisticated gentleman exuding confidence. He is dressed in a 1990s brown plaid jacket with a high collar, paired with a dark grey turtleneck. His trousers are tailored and charcoal in color, complemented by a sleek leather belt. The background showcases an elegant library with bookshelves, a marble fireplace, and warm lighting, creating a refined and cozy atmosphere. His relaxed posture and casual hand-in-pocket stance add to his composed and stylish demeanor', 666, False, False, 'aes_stage2'], ['./assets/examples/yann-lecun_resize.jpg', './assets/examples/man_pose.jpg', 'A man, portrait, cinematic', 42, True, False, 'aes_stage2'], ['./assets/examples/yann-lecun_resize.jpg', './assets/examples/yann-lecun_resize.jpg', 'A man, portrait, cinematic', 12345, False, False, 'sim_stage1'], ['./assets/examples/yangmi.jpg', None, 'A woman, portrait, cinematic', 1621695706, False, False, 'sim_stage1'], ['./assets/examples/yangmi.jpg', None, 'A young woman holding a sign with the text "InfiniteYou", "Infinite" in black and "You" in red, pure background', 3724009366, False, False, 'aes_stage2'], ['./assets/examples/yangmi.jpg', None, 'A photo of an elegant Javanese bride in traditional attire, with long hair styled into intricate a braid made of many fresh flowers, wearing a delicate headdress made from sequins and beads. She\'s holding flowers, light smiling at the camera, against a backdrop adorned with orchid blooms. The scene captures her grace as she stands amidst soft pastel colors, adding to its dreamy atmosphere', 42, True, False, 'aes_stage2'], ['./assets/examples/yangmi.jpg', None, 'A photo of an elegant Javanese bride in traditional attire, with long hair styled into intricate a braid made of many fresh flowers, wearing a delicate headdress made from sequins and beads. She\'s holding flowers, light smiling at the camera, against a backdrop adorned with orchid blooms. The scene captures her grace as she stands amidst soft pastel colors, adding to its dreamy atmosphere', 42, False, False, 'sim_stage1'], ] with gr.Blocks() as demo: session_state = gr.State({}) default_model_version = "v1.0" gr.HTML("""