File size: 8,589 Bytes
19a3549
 
3379d17
856cda8
6975d0e
5f0c032
19a3549
685d2fb
c392564
0cddddf
4729eb8
21a6411
19a3549
 
 
 
 
 
 
 
 
44c8d3f
21a6411
a73349b
21a6411
b673336
 
21a6411
 
 
 
652265d
21a6411
 
19a3549
 
 
 
 
 
e9d694a
19a3549
 
 
 
e9d694a
0802715
 
19a3549
158689f
 
efd4b2a
 
158689f
 
 
 
 
 
 
 
19a3549
158689f
1fadef0
4d9c9bc
24d1f39
4d9c9bc
 
 
 
 
 
 
 
 
 
 
ccc12ba
26984b9
ccc12ba
 
 
 
 
 
 
 
 
 
7ac4e01
19a3549
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
685d2fb
 
 
 
 
 
 
 
19a3549
 
 
 
 
 
 
 
 
 
 
e9d694a
06d3080
 
21a6411
 
 
19a3549
 
016f9e3
19a3549
e9d694a
6975d0e
19a3549
9824f78
 
 
016f9e3
19a3549
ab9b507
19a3549
 
6f7ff2e
7ac4e01
685d2fb
 
 
 
19a3549
 
7ac4e01
 
685d2fb
19a3549
 
1497ae3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
import gradio as gr
import torch
import numpy as np
from PIL import Image,ImageFilter
from diffusers.models import AutoencoderKL
from diffusers import AutoPipelineForInpainting, UNet2DConditionModel, DiffusionPipeline, StableDiffusionInpaintPipeline
import diffusers
from share_btn import community_icon_html, loading_icon_html, share_js


# Shared fine-tuned VAE (ft-MSE) reused by every inpainting checkpoint below.
# NOTE(review): downloaded from the Hugging Face hub at import time — first
# launch requires network access.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")


def read_content(file_path: str) -> str:
    """Return the full text of *file_path*, decoded as UTF-8."""
    with open(file_path, encoding='utf-8') as handle:
        return handle.read()

# UI model label -> Hugging Face repo id.  "Epic_Realism" is the label the
# dropdown actually sends; "EpicRealism" is kept for backward compatibility
# (the old code only checked "EpicRealism" and so silently fell back to the
# default model when the dropdown choice arrived).
_MODEL_REPOS = {
    "Realistic_V5.0": "SG161222/Realistic_Vision_V5.0_noVAE",
    "Realistic_V5.1": "SG161222/Realistic_Vision_V5.1_noVAE",
    "Realistic_V6.0": "SG161222/Realistic_Vision_V6.0_B1_noVAE",
    "EpicRealism": "emilianJR/epiCRealism",
    "Epic_Realism": "emilianJR/epiCRealism",
}


def predict(dict, prompt="", negative_prompt="", guidance_scale=7.5, steps=30, strength=0.8, model="Realistic_V5.0", scheduler="DPMSolverMultistepScheduler-Karras"):
    """Inpaint the masked region of the uploaded image.

    Parameters
    ----------
    dict : gradio sketch payload with PIL images under keys "image" and "mask".
           (Name shadows the builtin but is kept for interface compatibility.)
    prompt, negative_prompt : text conditioning; empty negative prompt is
        treated as "no negative prompt".
    guidance_scale, steps, strength : standard diffusion inpainting knobs.
    model : one of the dropdown labels in _MODEL_REPOS; unknown values fall
        back to the Realistic_V5.0 checkpoint.
    scheduler : scheduler class name, optionally suffixed with "-Karras"
        and "-SDE" to enable the corresponding options.

    Returns (inpainted_image, composited_image, gr.update(visible=True)).
    """
    # Load the requested checkpoint exactly once (the previous version always
    # loaded the default pipeline to CUDA first, then reloaded on top of it).
    repo_id = _MODEL_REPOS.get(model, _MODEL_REPOS["Realistic_V5.0"])
    pipe = AutoPipelineForInpainting.from_pretrained(repo_id, vae=vae).to("cuda")
    if model == "Realistic_V6.0":
        # V6.0 ships a safety checker that flags benign outputs; disable it.
        pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))

    if negative_prompt == "":
        negative_prompt = None

    # Scheduler names encode options after "-": "<Class>[-Karras[-SDE]]".
    parts = scheduler.split("-")
    add_kwargs = {}
    if len(parts) > 1:
        add_kwargs["use_karras_sigmas"] = True
    if len(parts) > 2:
        add_kwargs["algorithm_type"] = "sde-dpmsolver++"
    scheduler_cls = getattr(diffusers, parts[0])
    pipe.scheduler = scheduler_cls.from_pretrained("emilianJR/epiCRealism", subfolder="scheduler", **add_kwargs)

    init_image = dict["image"]
    mask_image = dict["mask"]

    # Make black mask pixels fully transparent so the alpha channel can later
    # drive Image.composite (inpainted pixels only where the user painted).
    mask_image = mask_image.convert("RGBA")
    new_data = [
        (0, 0, 0, 0) if item[:3] == (0, 0, 0) else item
        for item in mask_image.getdata()
    ]
    mask_image.putdata(new_data)
    mask_image = mask_image.resize(init_image.size, resample=Image.LANCZOS)
    # Feather the mask edge so the inpainted region blends smoothly.
    mask_image = mask_image.filter(ImageFilter.GaussianBlur(5))

    with torch.cuda.amp.autocast():
        output = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=guidance_scale,
            num_inference_steps=int(steps),
            strength=strength,
            clip_skip=1,
        )
    inpainted_image = output.images[0].resize(init_image.size, resample=Image.LANCZOS)
    # Combine the original and inpainted images using the mask's alpha channel.
    combined_image = Image.composite(inpainted_image, init_image, mask_image.split()[3])

    print("Positive:", prompt)
    print("Negative:", negative_prompt)
    print("Guidance_scale:", guidance_scale)
    print("Steps:", steps)
    print("Strength:", strength)
    print("Scheduler:", scheduler_cls)

    return inpainted_image, combined_image, gr.update(visible=True)


# Custom CSS for the Gradio UI: container sizing, upload-widget min-height,
# footer styling (light/dark), a spinner keyframe, the community share-button
# pill, and prompt-row layout tweaks.
css = '''
.gradio-container{max-width: 1100px !important}
#image_upload{min-height:400px}
#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 400px}
#mask_radio .gr-form{background:transparent; border: none}
#word_mask{margin-top: .75em !important}
#word_mask textarea:disabled{opacity: 0.3}
.footer {margin-bottom: 45px;margin-top: 35px;text-align: center;border-bottom: 1px solid #e5e5e5}
.footer>p {font-size: .8rem; display: inline-block; padding: 0 10px;transform: translateY(10px);background: white}
.dark .footer {border-color: #303030}
.dark .footer>p {background: #0b0f19}
.acknowledgments h4{margin: 1.25em 0 .25em 0;font-weight: bold;font-size: 115%}
#image_upload .touch-none{display: flex}
@keyframes spin {
    from {
        transform: rotate(0deg);
    }
    to {
        transform: rotate(360deg);
    }
}
#share-btn-container {padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; max-width: 13rem; margin-left: auto;}
div#share-btn-container > div {flex-direction: row;background: black;align-items: center}
#share-btn-container:hover {background-color: #060606}
#share-btn {all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.5rem !important; padding-bottom: 0.5rem !important;right:0;}
#share-btn * {all: unset}
#share-btn-container div:nth-child(-n+2){width: auto !important;min-height: 0px !important;}
#share-btn-container .wrap {display: none !important}
#share-btn-container.hidden {display: none!important}
#prompt input{width: calc(100% - 160px);border-top-right-radius: 0px;border-bottom-right-radius: 0px;}
#prompt-container{margin-top:-18px;}
#prompt-container .form{border-top-left-radius: 0;border-top-right-radius: 0}
#image_upload{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px}
'''

# Build the Gradio UI (Gradio 3.x API: `source=`/`tool=` on gr.Image and
# `_js=` on event handlers) and wire the inference callback.
image_blocks = gr.Blocks(css=css, elem_id="total-container")
with image_blocks as demo:
    gr.HTML(read_content("header.html"))
    with gr.Row():
        with gr.Column():
            # The sketch tool lets the user paint the inpainting mask on the
            # uploaded image; predict() receives it as {"image", "mask"}.
            image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Upload", height=512)
            with gr.Row(elem_id="prompt-container", equal_height=True):
                with gr.Row():
                    prompt = gr.Textbox(placeholder="Your prompt", show_label=False, elem_id="prompt", lines=5)
            with gr.Row(equal_height=True):
                btn = gr.Button("Inpaint!", elem_id="run_button")

            with gr.Accordion(label="Advanced Settings", open=False):
                with gr.Row(equal_height=True):
                    guidance_scale = gr.Number(value=7.5, minimum=1.0, maximum=20.0, step=0.1, label="guidance_scale")
                    steps = gr.Number(value=40, minimum=10, maximum=100, step=1, label="steps")
                    strength = gr.Number(value=0.8, minimum=0.01, maximum=1.0, step=0.01, label="strength")
                    negative_prompt = gr.Textbox(label="negative_prompt", placeholder="Your negative prompt", info="what you don't want to see in the image")
                with gr.Row(equal_height=True):
                    # BUG FIX: the dropdown previously offered "Epic_Realism",
                    # which predict() never matched (it checks "EpicRealism"),
                    # silently falling back to the default model.
                    models = ["Realistic_V5.0", "Realistic_V5.1", "Realistic_V6.0", "EpicRealism"]
                    model = gr.Dropdown(label="Models", choices=models, value="Realistic_V5.0")
                with gr.Row(equal_height=True):
                    schedulers = ["DEISMultistepScheduler", "HeunDiscreteScheduler", "EulerDiscreteScheduler", "DPMSolverMultistepScheduler", "DPMSolverMultistepScheduler-Karras", "DPMSolverMultistepScheduler-Karras-SDE"]
                    scheduler = gr.Dropdown(label="Schedulers", choices=schedulers, value="DPMSolverMultistepScheduler-Karras")

        with gr.Column():
            # Raw inpainted output and the mask-composited result.
            # (elem_ids deduplicated: both outputs previously shared
            # "output-img", producing duplicate DOM ids.)
            image_out = gr.Image(label="Output", elem_id="output-img", height=512, width=512)
            image_out1 = gr.Image(label="Output", elem_id="output-img1", height=512, width=512)
            with gr.Group(elem_id="share-btn-container", visible=False) as share_btn_container:
                community_icon = gr.HTML(community_icon_html)
                loading_icon = gr.HTML(loading_icon_html)
                share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)

    # Both the button and Enter-in-prompt run the same inference call.
    btn.click(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, strength, model, scheduler], outputs=[image_out, image_out1, share_btn_container], api_name='run')
    prompt.submit(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, strength, model, scheduler], outputs=[image_out, image_out1, share_btn_container])
    share_button.click(None, [], [], _js=share_js)


image_blocks.queue(max_size=25, api_open=True).launch(show_api=True)