import gradio as gr
import numpy as np
import time
import math
import random
import torch
import spaces
from diffusers import StableDiffusionXLInpaintPipeline
from PIL import Image, ImageFilter
from pillow_heif import register_heif_opener
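# Allow Pillow to open HEIC/HEIF images (e.g. photos taken on iPhone)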
register_heif_opener()
# Upper bound for random seeds (maximum 32-bit signed integer)
max_32_bit_int = np.iinfo(np.int32).max
if torch.cuda.is_available():
device = "cuda"
floatType = torch.float16
else:
device = "cpu"
floatType = torch.float32
pipe = StableDiffusionXLInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype = floatType)
pipe = pipe.to(device)
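# Disable the internal diffusers progress bar (the former "show_progress_bar" call argument
# is not a pipeline parameter); the Gradio progress indicator is used instead
pipe.set_progress_bar_config(disable = True)
# Optional (an assumption, not part of the original Space): attention slicing can lower
# peak VRAM usage on memory-constrained GPUs, at some speed cost
# pipe.enable_attention_slicing()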
def update_seed(is_randomize_seed, seed):
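    """Return a new random seed when randomization is enabled, otherwise keep the current seed."""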
if is_randomize_seed:
        return random.randint(0, max_32_bit_int)
return seed
def toggle_debug(
is_debug_mode,
repeating_horizontally,
repeating_vertically
):
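    """Show or hide the seven debug output components according to the selected looping directions."""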
if is_debug_mode:
return [
gr.update(visible = True),
gr.update(visible = repeating_horizontally),
gr.update(visible = repeating_horizontally),
gr.update(visible = repeating_vertically),
gr.update(visible = repeating_vertically),
gr.update(visible = (repeating_horizontally and repeating_vertically)),
gr.update(visible = (repeating_horizontally and repeating_vertically))
]
return [gr.update(visible = False)] * 7
def flip(input_image, horizontally_flipped, vertically_flipped):
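    """Shift the image by half its size, wrapping around, so the outer edges meet at the center.

    Despite its name, this does not mirror the image: it moves the seams to the middle,
    where the inpainting pipeline can repaint them.
    """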
image_height, image_width, dummy_channel = np.array(input_image).shape
    flipped_image = Image.new(mode = input_image.mode, size = (image_width, image_height), color = "black")
    middle_width = image_width // 2
    middle_height = image_height // 2
    if horizontally_flipped and vertically_flipped:
        flipped_image.paste(input_image, (middle_width, middle_height))
        flipped_image.paste(input_image, (middle_width - image_width, middle_height))
        flipped_image.paste(input_image, (middle_width, middle_height - image_height))
        flipped_image.paste(input_image, (middle_width - image_width, middle_height - image_height))
    elif horizontally_flipped:
        flipped_image.paste(input_image, (middle_width, 0))
        flipped_image.paste(input_image, (middle_width - image_width, 0))
    elif vertically_flipped:
        flipped_image.paste(input_image, (0, middle_height))
        flipped_image.paste(input_image, (0, middle_height - image_height))
    return flipped_image
def blur(input_image, radius):
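    """Apply a Gaussian blur that wraps around the edges, by blurring a 3x3 tiling of the image and keeping the central tile."""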
image_height, image_width, dummy_channel = np.array(input_image).shape
duplicated_image = Image.new(mode = input_image.mode, size = (image_width * 3, image_height * 3), color = "black")
for i in range(3):
for j in range(3):
duplicated_image.paste(input_image, (image_width * i, image_height * j))
duplicated_image = duplicated_image.filter(ImageFilter.GaussianBlur(radius))
blurred_image = Image.new(mode = input_image.mode, size = (image_width, image_height), color = "black")
blurred_image.paste(duplicated_image, (-image_width, -image_height))
return blurred_image
def mask(input_image, enlarge_left, enlarge_right, enlarge_top, enlarge_bottom, horizontally_flipped, vertically_flipped, smooth_border):
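    """Build the inpainting mask for the enlarged image.

    White areas may be repainted, dark gray areas preserve the original content;
    smooth_border widens the repaintable band along the seams, and the wrap-around
    blur makes the transition gradual.
    """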
image_height, image_width, dummy_channel = np.array(input_image).shape
if horizontally_flipped and vertically_flipped:
mask_image = Image.new(mode = input_image.mode, size = (enlarge_left + image_width + enlarge_right, enlarge_top + image_height + enlarge_bottom), color = (255, 255, 255, 0))
black_mask = Image.new(mode = input_image.mode, size = (image_width - smooth_border, enlarge_top + image_height + enlarge_bottom), color = (127, 127, 127, 0))
mask_image.paste(black_mask, (enlarge_left + (smooth_border // 2), 0))
black_mask = Image.new(mode = input_image.mode, size = (enlarge_left + image_width + enlarge_right, image_height - smooth_border), color = (127, 127, 127, 0))
mask_image.paste(black_mask, (0, enlarge_top + (smooth_border // 2)))
elif horizontally_flipped:
mask_image = Image.new(mode = input_image.mode, size = (enlarge_left + image_width + enlarge_right, image_height), color = (255, 255, 255, 0))
black_mask = Image.new(mode = input_image.mode, size = (image_width - smooth_border, image_height), color = (127, 127, 127, 0))
mask_image.paste(black_mask, (enlarge_left + (smooth_border // 2), 0))
elif vertically_flipped:
mask_image = Image.new(mode = input_image.mode, size = (image_width, enlarge_top + image_height + enlarge_bottom), color = (255, 255, 255, 0))
black_mask = Image.new(mode = input_image.mode, size = (image_width, image_height - smooth_border), color = (127, 127, 127, 0))
mask_image.paste(black_mask, (0, enlarge_top + (smooth_border // 2)))
mask_image = blur(mask_image, 10)
return mask_image
def canva(input_image, enlarge_left, enlarge_right, enlarge_top, enlarge_bottom):
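    """Build the enlarged canvas used as the inpainting starting point: a blurred stretch of the original as background, the original in the center, and mirrored or rotated copies suggesting plausible content in the margins."""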
image_height, image_width, dummy_channel = np.array(input_image).shape
output_width = enlarge_left + image_width + enlarge_right
output_height = enlarge_top + image_height + enlarge_bottom
    canva_image = input_image.copy()
canva_image = canva_image.resize((output_width, output_height), Image.LANCZOS)
canva_image = blur(canva_image, 20)
canva_image.paste(input_image, (enlarge_left, enlarge_top))
horizontally_mirrored_input_image = input_image.transpose(Image.FLIP_LEFT_RIGHT).resize((image_width * 2, image_height), Image.LANCZOS)
canva_image.paste(horizontally_mirrored_input_image, (enlarge_left - (image_width * 2), enlarge_top))
canva_image.paste(horizontally_mirrored_input_image, (enlarge_left + image_width, enlarge_top))
vertically_mirrored_input_image = input_image.transpose(Image.FLIP_TOP_BOTTOM).resize((image_width, image_height * 2), Image.LANCZOS)
canva_image.paste(vertically_mirrored_input_image, (enlarge_left, enlarge_top - (image_height * 2)))
canva_image.paste(vertically_mirrored_input_image, (enlarge_left, enlarge_top + image_height))
    rotated_input_image = input_image.transpose(Image.ROTATE_180).resize((image_width * 2, image_height * 2), Image.LANCZOS)
    canva_image.paste(rotated_input_image, (enlarge_left - (image_width * 2), enlarge_top - (image_height * 2)))
    canva_image.paste(rotated_input_image, (enlarge_left - (image_width * 2), enlarge_top + image_height))
    canva_image.paste(rotated_input_image, (enlarge_left + image_width, enlarge_top - (image_height * 2)))
    canva_image.paste(rotated_input_image, (enlarge_left + image_width, enlarge_top + image_height))
canva_image = blur(canva_image, 20)
canva_image.paste(input_image, (enlarge_left, enlarge_top))
return canva_image
def noise_color(color, noise):
    # Clamp to the valid 0-255 byte range so out-of-range values do not wrap around
    return min(max(color + random.randint(- noise, noise), 0), 255)
def add_noise(
input_image,
canva_image,
enlarge_left,
enlarge_right,
enlarge_top,
enlarge_bottom
):
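    """Add per-pixel random noise to the enlarged margins; the amplitude grows with the distance from the original image, giving the inpainting texture to refine rather than flat blur."""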
input_height, input_width, dummy_channel = np.array(input_image).shape
canva_height, canva_width, dummy_channel_2 = np.array(canva_image).shape
noise_image = Image.new(mode = input_image.mode, size = (canva_width, canva_height), color = "black")
canva_pixels = canva_image.load()
for x in range(canva_width):
for y in range(canva_height):
canva_pixel = canva_pixels[x, y]
noise = min(max(enlarge_left - x, x - (enlarge_left + input_width), enlarge_top - y, y - (enlarge_top + input_height), 0), 255)
noise_image.putpixel((x, y), (noise_color(canva_pixel[0], noise), noise_color(canva_pixel[1], noise), noise_color(canva_pixel[2], noise), 255))
canva_image.paste(noise_image, (0, 0))
return canva_image
def resizing(output_width, output_height, limitation):
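    """Cap the working area at 1 megapixel (the result is upscaled back afterwards) and round both dimensions down to multiples of 8, as the pipeline requires."""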
resized_width = output_width
resized_height = output_height
if 1024 * 1024 < output_width * output_height:
factor = ((1024 * 1024) / (output_width * output_height))**0.5
resized_width = math.floor(output_width * factor)
resized_height = math.floor(output_height * factor)
        limitation = " Due to a technical limitation, the image has been downscaled and then upscaled."
# Width and height must be multiple of 8
resized_width = resized_width - (resized_width % 8)
resized_height = resized_height - (resized_height % 8)
return resized_width, resized_height, limitation
def join_edge(
input_image,
enlarge_left,
enlarge_right,
enlarge_top,
enlarge_bottom,
horizontally_flipped,
vertically_flipped,
limitation,
prompt,
negative_prompt,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
seed,
debug_mode,
progress
):
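    """Make one pair of opposite edges loop: enlarge the canvas, add noise, recenter so the seam lies in the middle, inpaint the seam and margins with SDXL, then resize and shift the result back."""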
original_height, original_width, dummy_channel = np.array(input_image).shape
output_width = (0 if vertically_flipped else enlarge_left) + original_width + (0 if vertically_flipped else enlarge_right)
output_height = (0 if horizontally_flipped else enlarge_top) + original_height + (0 if horizontally_flipped else enlarge_bottom)
current_image = canva(
input_image,
(0 if vertically_flipped else enlarge_left),
(0 if vertically_flipped else enlarge_right),
(0 if horizontally_flipped else enlarge_top),
(0 if horizontally_flipped else enlarge_bottom)
)
no_noise_image = current_image
current_image = add_noise(
input_image,
current_image,
(0 if vertically_flipped else enlarge_left),
(0 if vertically_flipped else enlarge_right),
(0 if horizontally_flipped else enlarge_top),
(0 if horizontally_flipped else enlarge_bottom)
)
current_image = flip(current_image, horizontally_flipped, vertically_flipped)
mask_image = mask(input_image, enlarge_left, enlarge_right, enlarge_top, enlarge_bottom, horizontally_flipped, vertically_flipped, smooth_border)
mask_image = flip(mask_image, horizontally_flipped, vertically_flipped)
resized_width, resized_height, limitation = resizing(output_width, output_height, limitation)
if horizontally_flipped and vertically_flipped:
progress(.85, desc = "Processing (3/3)...")
elif horizontally_flipped:
progress(.16, desc = "Processing (1/3)...")
elif vertically_flipped:
progress(.51, desc = "Processing (2/3)...")
# Artificial Intelligence computation
    # Seed via a generator; "seeds", "denoising_steps" and "show_progress_bar" are not
    # StableDiffusionXLInpaintPipeline parameters, so they are not passed to the call
    output_image = pipe(
        generator = torch.Generator(device = device).manual_seed(seed),
        width = resized_width,
        height = resized_height,
        prompt = prompt,
        negative_prompt = negative_prompt,
        image = current_image,
        mask_image = mask_image,
        num_inference_steps = num_inference_steps,
        guidance_scale = guidance_scale
        #image_guidance_scale = image_guidance_scale, # only exists in InstructPix2Pix pipelines
    ).images[0]
output_image = output_image.resize((output_width, output_height), Image.LANCZOS)
output_image = flip(output_image, horizontally_flipped, vertically_flipped)
return output_image, limitation, input_image, no_noise_image, current_image, mask_image
def additional_information(processed_image, repeating_horizontally, repeating_vertically):
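    """Return the output dimensions and a demonstration image showing the result repeated in each looping direction, or zeros and None when no result is available."""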
try:
output_height, output_width, dummy_channel = np.array(processed_image).shape
horizontal_loops = 2 if repeating_horizontally else 1
vertical_loops = 2 if repeating_vertically else 1
demonstration = Image.new(mode = processed_image.mode, size = (output_width * horizontal_loops, output_height * vertical_loops), color = "black")
for x in range(horizontal_loops):
for y in range(vertical_loops):
demonstration.paste(processed_image, (output_width * x, output_height * y))
    except Exception:
output_height = 0
output_width = 0
demonstration = None
return output_height, output_width, demonstration
def check(
processed_image,
enlarge_top,
enlarge_right,
enlarge_bottom,
enlarge_left,
prompt,
negative_prompt,
repeating_horizontally,
repeating_vertically,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
is_randomize_seed,
seed,
debug_mode,
progress = gr.Progress()
):
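    """Validate the user inputs before generation, raising a user-facing error on the first inconsistency."""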
if processed_image is None:
raise gr.Error("Please provide an image.")
if prompt is None or prompt == "":
raise gr.Error("Please provide a prompt input.")
    if not repeating_horizontally and not repeating_vertically:
        raise gr.Error("The image must loop in at least one direction.")
    for margin in (enlarge_top, enlarge_right, enlarge_bottom, enlarge_left):
        if margin is not None and margin < 0:
            raise gr.Error("Please only provide positive margins.")
    if all((value is None or value == 0) for value in (smooth_border, enlarge_top, enlarge_right, enlarge_bottom, enlarge_left)):
        raise gr.Error("At least one border must be enlarged or smoothed.")
@spaces.GPU(duration=420)
def image_to_tile(
processed_image,
enlarge_top,
enlarge_right,
enlarge_bottom,
enlarge_left,
prompt,
negative_prompt,
repeating_horizontally,
repeating_vertically,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
is_randomize_seed,
seed,
debug_mode,
progress = gr.Progress()
):
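    """Generate the tiling image: fill in defaults, seed the generators, then run up to three inpainting passes (horizontal seam, vertical seam, and the central cross where both seams meet)."""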
start = time.time()
progress(0, desc = "Preparing data...")
    limitation = ""
if negative_prompt is None:
negative_prompt = ""
if smooth_border is None:
smooth_border = 64
if guidance_scale is None:
guidance_scale = 7
if image_guidance_scale is None:
image_guidance_scale = 1.5
if enlarge_top is None or enlarge_top == "":
enlarge_top = 0
if enlarge_right is None or enlarge_right == "":
enlarge_right = 0
if enlarge_bottom is None or enlarge_bottom == "":
enlarge_bottom = 0
if enlarge_left is None or enlarge_left == "":
enlarge_left = 0
if denoising_steps is None:
denoising_steps = 1000
if seed is None:
        seed = random.randint(0, max_32_bit_int)
random.seed(seed)
torch.manual_seed(seed)
original_image = processed_image
horizontally_mirrored_image = None
horizontally_mirrored_mask = None
vertically_mirrored_image = None
vertically_mirrored_mask = None
last_image = None
last_mask = None
if repeating_horizontally:
processed_image, limitation, start_image, no_noise_image, horizontally_mirrored_image, horizontally_mirrored_mask = join_edge(
processed_image,
enlarge_left,
enlarge_right,
enlarge_top,
enlarge_bottom,
True,
False,
limitation,
prompt,
negative_prompt,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
seed,
debug_mode,
progress
)
if repeating_vertically:
processed_image, limitation, start_image, no_noise_image, vertically_mirrored_image, vertically_mirrored_mask = join_edge(
processed_image,
enlarge_left,
enlarge_right,
enlarge_top,
enlarge_bottom,
False,
True,
limitation,
prompt,
negative_prompt,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
seed,
debug_mode,
progress
)
if repeating_horizontally and repeating_vertically:
processed_image, limitation, start_image, no_noise_image, last_image, last_mask = join_edge(
processed_image,
enlarge_left,
enlarge_right,
enlarge_top,
enlarge_bottom,
True,
True,
limitation,
prompt,
negative_prompt,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
seed,
debug_mode,
progress
)
progress(.99, desc = "Finishing...")
output_height, output_width, demonstration = additional_information(processed_image, repeating_horizontally, repeating_vertically)
end = time.time()
    seconds = int(end - start)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return [
        processed_image,
        ("Start again to get a different result. " if is_randomize_seed else "") + "The new image is " + str(output_width) + " pixels wide and " + str(output_height) + " pixels high, for a total of " + f'{output_width * output_height:,}' + " pixels. The image has been generated in " + ((str(hours) + " h, ") if hours != 0 else "") + ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + str(seconds) + " sec." + limitation,
demonstration,
original_image,
horizontally_mirrored_image,
horizontally_mirrored_mask,
vertically_mirrored_image,
vertically_mirrored_mask,
last_image,
last_mask
]
with gr.Blocks() as interface:
gr.HTML(
"""
<h1 style="text-align: center;">Make my image tile</h1>
        <p style="text-align: center;">Modify the edges of your image so that it loops horizontally and vertically, seamlessly, up to 1 million pixels, for free, without an account and without a watermark; the result can be downloaded</p>
<br/>
<br/>
        ✨ Powered by <i>SDXL 1.0</i> artificial intelligence
<br/>
<ul>
            <li>If you need to change the <b>view angle</b> of your image, I recommend using <i>Zero123</i>,</li>
            <li>If you need to enlarge the <b>viewpoint</b> of your image, I recommend using <i>Uncrop</i>,</li>
            <li>If you need to <b>upscale</b> your image, I recommend using <i><a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR">SUPIR</a></i>,</li>
            <li>If you need to <b>slightly change</b> your image, I recommend using <i>Image-to-Image SDXL</i>,</li>
            <li>If you need to change <b>one detail</b> in your image, I recommend using <i>Inpaint SDXL</i>,</li>
            <li>To modify <b>anything else</b> in your image, I recommend using <i>Instruct Pix2Pix</i>.</li>
</ul>
        <br/>
        """ + ("🏃‍♀️ Estimated time: a few minutes." if torch.cuda.is_available() else "🐌 Slow process... ~1 hour.") + """
        Your computer must <u>not</u> enter standby mode.<br/>You can duplicate this Space on a free account; it is designed to work on CPU, GPU and ZeroGPU.<br/>
<a href='https://huggingface.co/spaces/Fabrice-TIERCELIN/Make-my-image-tiling?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14'></a>
<br/>
βš–οΈ You can use, modify and share the generated images but not for commercial uses.
"""
)
processed_image = gr.Image(label = "Your image", sources = ["upload", "webcam", "clipboard"], type = "pil")
repeating_horizontally = gr.Checkbox(label = "Repeating horizontally", value = True, info = "Alter the left and right edges of the image")
repeating_vertically = gr.Checkbox(label = "Repeating vertically", value = True, info = "Alter the top and bottom edges of the image")
prompt = gr.Textbox(label = "Prompt", info = "Describe the subject, the background and the style of image; 77 token limit", placeholder = "Describe what you want to see in the entire image", lines = 2)
with gr.Accordion("Advanced options", open = False):
with gr.Row():
with gr.Column():
dummy_1 = gr.Label(visible = False)
with gr.Column():
                enlarge_top = gr.Number(minimum = 0, value = 0, precision = 0, label = "Enlarge on top ⬆️", info = "in pixels")
with gr.Column():
dummy_2 = gr.Label(visible = False)
with gr.Row():
with gr.Column():
                enlarge_left = gr.Number(minimum = 0, value = 0, precision = 0, label = "Enlarge on left ⬅️", info = "in pixels")
with gr.Column():
smooth_border = gr.Slider(minimum = 0, maximum = 1024, value = 128, step = 2, label = "Smooth border", info = "in pixels; lower=preserve original, higher=seamless")
with gr.Column():
                enlarge_right = gr.Number(minimum = 0, value = 0, precision = 0, label = "Enlarge on right ➡️", info = "in pixels")
with gr.Row():
with gr.Column():
dummy_3 = gr.Label(visible = False)
with gr.Column():
                enlarge_bottom = gr.Number(minimum = 0, value = 0, precision = 0, label = "Enlarge on bottom ⬇️", info = "in pixels")
with gr.Column():
dummy_4 = gr.Label(visible = False)
with gr.Row():
with gr.Column():
negative_prompt = gr.Textbox(label = 'Negative prompt', placeholder = 'Describe what you do NOT want to see in the entire image', value = 'Border, frame, painting, scribbling, smear, noise, blur, watermark')
num_inference_steps = gr.Slider(minimum = 10, maximum = 25, value = 20, step = 1, label = "Number of inference steps", info = "lower=faster, higher=image quality")
guidance_scale = gr.Slider(minimum = 1, maximum = 13, value = 7, step = 0.1, label = "Guidance Scale", info = "lower=image quality, higher=follow the prompt")
image_guidance_scale = gr.Slider(minimum = 1, value = 1.5, step = 0.1, label = "Image Guidance Scale (disabled)", info = "lower=image quality, higher=follow the image")
                denoising_steps = gr.Number(minimum = 0, value = 1000, step = 1, label = "Denoising (disabled)", info = "lower=irrelevant result, higher=relevant result")
randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
                seed = gr.Slider(minimum = 0, maximum = max_32_bit_int, step = 1, randomize = True, label = "Seed")
debug_mode = gr.Checkbox(label = "Debug mode", value = False, info = "Show intermediate results")
    submit = gr.Button("🚀 Generate my tile", variant = "primary")
tiled_image_component = gr.Image(label = "Tiled image")
information = gr.HTML()
demonstration_component = gr.Image(label = "Demonstration")
original_image_component = gr.Image(label = "Original image", visible = False)
horizontally_mirrored_image_component = gr.Image(label = "Horizontally mirrored image", visible = False)
horizontally_mirrored_mask_component = gr.Image(label = "Horizontal mask", visible = False)
vertically_mirrored_image_component = gr.Image(label = "Vertically mirrored image", visible = False)
vertically_mirrored_mask_component = gr.Image(label = "Vertical mask", visible = False)
last_image_component = gr.Image(label = "Last image", visible = False)
last_mask_component = gr.Image(label = "Last mask", visible = False)
submit.click(fn = update_seed, inputs = [
randomize_seed,
seed
], outputs = [
seed
], queue = False, show_progress = False).then(fn = toggle_debug, inputs = [
debug_mode,
repeating_horizontally,
repeating_vertically
], outputs = [
original_image_component,
horizontally_mirrored_image_component,
horizontally_mirrored_mask_component,
vertically_mirrored_image_component,
vertically_mirrored_mask_component,
last_image_component,
last_mask_component
], queue = False, show_progress = False).then(fn = check, inputs = [
processed_image,
enlarge_top,
enlarge_right,
enlarge_bottom,
enlarge_left,
prompt,
negative_prompt,
repeating_horizontally,
repeating_vertically,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
randomize_seed,
seed,
debug_mode
], outputs = [], queue = False, show_progress = False).success(image_to_tile, inputs = [
processed_image,
enlarge_top,
enlarge_right,
enlarge_bottom,
enlarge_left,
prompt,
negative_prompt,
repeating_horizontally,
repeating_vertically,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
randomize_seed,
seed,
debug_mode
], outputs = [
tiled_image_component,
information,
demonstration_component,
original_image_component,
horizontally_mirrored_image_component,
horizontally_mirrored_mask_component,
vertically_mirrored_image_component,
vertically_mirrored_mask_component,
last_image_component,
last_mask_component
], scroll_to_output = True)
gr.Examples(
run_on_click = True,
fn = image_to_tile,
inputs = [
processed_image,
enlarge_top,
enlarge_right,
enlarge_bottom,
enlarge_left,
prompt,
negative_prompt,
repeating_horizontally,
repeating_vertically,
smooth_border,
num_inference_steps,
guidance_scale,
image_guidance_scale,
denoising_steps,
randomize_seed,
seed,
debug_mode
],
outputs = [
tiled_image_component,
information,
demonstration_component,
original_image_component,
horizontally_mirrored_image_component,
horizontally_mirrored_mask_component,
vertically_mirrored_image_component,
vertically_mirrored_mask_component,
last_image_component,
last_mask_component
],
examples = [
[
"Example1.png",
0,
0,
0,
                0,
                "Stone wall, front view, homogeneous light, ultrarealistic, realistic, photorealistic, photo, 8k",
"Border, frame, painting, drawing, cartoon, 3d, scribbling, smear, noise, blur, watermark",
True,
True,
256,
20,
7,
1.5,
1000,
False,
42,
False
],
],
cache_examples = False,
)
gr.Markdown(
"""
## How to prompt your image
        To keep your prompt easy to read, start with the subject, then describe the pose or action, then the secondary elements, then the background, then the graphical style, then the image quality:
        ```
        A Vietnamese woman, red clothes, walking, smiling, in the street, a car on the left, in a modern city, photorealistic, 8k
        ```
        You can use round brackets to increase the importance of a part:
        ```
        A Vietnamese woman, (red clothes), walking, smiling, in the street, a car on the left, in a modern city, photorealistic, 8k
        ```
        You can use several levels of round brackets to increase the importance of a part even more:
        ```
        A Vietnamese woman, ((red clothes)), (walking), smiling, in the street, a car on the left, in a modern city, photorealistic, 8k
        ```
        You can use a weight instead of several round brackets:
        ```
        A Vietnamese woman, (red clothes:1.5), (walking), smiling, in the street, a car on the left, in a modern city, photorealistic, 8k
        ```
        You can do the same thing with square brackets to decrease the importance of a part:
        ```
        A [Vietnamese] woman, (red clothes:1.5), (walking), smiling, in the street, a car on the left, in a modern city, photorealistic, 8k
        ```
        To keep your negative prompt easy to read, organize it the same way as your prompt (the order does not matter to the AI):
        ```
        man, boy, hat, running, tree, bicycle, forest, drawing, painting, cartoon, 3d, monochrome, blurry, noisy, bokeh
```
"""
)
interface.queue().launch()