import torch
from diffusers import AutoPipelineForText2Image, DDPMScheduler
import gradio as gr
import gc


def main(prompt):
    model_id = "stabilityai/stable-diffusion-xl-base-1.0"
    adapter_id = "ksyint/teu_lora"

    # Load the SDXL base pipeline in half precision to match the fp16 weight variant.
    pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16")
    pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")

    # Load the LoRA adapter and fuse it into the base weights.
    pipe.load_lora_weights(adapter_id)
    pipe.fuse_lora()

    # `strength` only applies to image-to-image pipelines, so it is not passed here.
    image = pipe(prompt=prompt, num_inference_steps=60, guidance_scale=7.0).images[0]
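
    # Optional cleanup (a sketch, not required): drop the fused pipeline and
    # release cached GPU memory so repeated Gradio requests do not accumulate.
    del pipe
    gc.collect()
    torch.cuda.empty_cache()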

    return image


iface = gr.Interface(
    fn=main,
    inputs="text",
    outputs="image",
    title="Text to Image Generation",
    description="Generate images based on textual prompts.",
)

if __name__ == "__main__":
    iface.launch()
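    # Alternatives (not used here): iface.launch(share=True) creates a temporary
    # public link, and iface.launch(server_name="0.0.0.0") listens on all interfaces.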