import os

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "False"
os.environ["TOKENIZERS_PARALLELISM"] = "true"

import numpy as np
import gradio as gr
import spaces
import torch
import torch.nn.functional as F
from PIL import Image
from omegaconf import OmegaConf
from transformers import AutoTokenizer

from prompting_utils import UniversalPrompting, create_attention_mask_predict_next, create_attention_mask_for_mmu
from training_utils import image_transform
from models import Showo, MAGVITv2, get_mask_chedule
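
# Note: `prompting_utils`, `training_utils`, and `models` are local modules from the
# Show-o repository; `get_mask_chedule` is the function name as spelled there.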

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

config = OmegaConf.load("configs/showo_demo.yaml")

tokenizer = AutoTokenizer.from_pretrained(config.model.showo.llm_model_path, padding_side="left")
uni_prompting = UniversalPrompting(tokenizer, max_text_len=config.dataset.preprocessing.max_seq_length,
                                   special_tokens=("<|soi|>", "<|eoi|>", "<|sov|>", "<|eov|>", "<|t2i|>", "<|mmu|>",
                                                   "<|t2v|>", "<|v2v|>", "<|lvg|>"),
                                   ignore_id=-100, cond_dropout_prob=config.training.cond_dropout_prob)

vq_model = MAGVITv2()
vq_model = vq_model.from_pretrained(config.model.vq_model.vq_model_name).to(device)
vq_model.requires_grad_(False)
vq_model.eval()

model = Showo.from_pretrained(config.model.showo.pretrained_model_path).to(device)
model.eval()

mask_token_id = model.config.mask_token_id
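

# Show-o generates images by iterative mask-token prediction over discrete MAGVIT-v2
# VQ codes: positions holding `mask_token_id` are predicted by the transformer, while
# all other tokens act as conditioning.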
def text_to_image_generation(input_text, guidance_scale=1.75, generation_timesteps=18):
    prompts = [input_text]
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    image_tokens = torch.ones((len(prompts), config.model.showo.num_vq_tokens),
                              dtype=torch.long, device=device) * mask_token_id
    input_ids, _ = uni_prompting((prompts, image_tokens), 't2i_gen')
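
    # Classifier-free guidance: when guidance_scale > 0, an unconditional copy of the
    # sequence (empty text prompt) is batched alongside the conditional one.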
    if config.training.guidance_scale > 0:
        uncond_input_ids, _ = uni_prompting(([''] * len(prompts), image_tokens), 't2i_gen')
        attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
                                                            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                            rm_pad_in_image=True)
    else:
        attention_mask = create_attention_mask_predict_next(input_ids,
                                                            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                            rm_pad_in_image=True)
        uncond_input_ids = None

    if config.get("mask_schedule", None) is not None:
        schedule = config.mask_schedule.schedule
        args = config.mask_schedule.get("params", {})
        mask_schedule = get_mask_chedule(schedule, **args)
    else:
        mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))
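
    # The mask schedule controls how many image tokens remain masked at each of the
    # `generation_timesteps` refinement steps (cosine schedule by default).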
    with torch.no_grad():
        gen_token_ids = model.t2i_generate(
            input_ids=input_ids,
            uncond_input_ids=uncond_input_ids,
            attention_mask=attention_mask,
            guidance_scale=config.training.guidance_scale,
            temperature=config.training.get("generation_temperature", 1.0),
            timesteps=config.training.generation_timesteps,
            noise_schedule=mask_schedule,
            noise_type=config.training.get("noise_type", "mask"),
            seq_len=config.model.showo.num_vq_tokens,
            uni_prompting=uni_prompting,
            config=config,
        )
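
    # Map the generated VQ token ids back to pixels with the MAGVIT-v2 decoder,
    # then rescale from [-1, 1] to uint8 RGB.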
    gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
    images = vq_model.decode_code(gen_token_ids)
    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().detach().numpy().astype(np.uint8)
    return images[0]


def text_guided_inpainting(input_text, inpainting_image, inpainting_mask_input, guidance_scale=1.75, generation_timesteps=16):
    # pre-process inpainting mask
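    # Gradio's ImageMask returns an editor dict; `layers[0]` holds the painted strokes
    # (alpha > 0 where the user drew). If nothing was drawn, the background image itself
    # is used as the mask.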
    alpha_channel = inpainting_mask_input["layers"][0][:, :, 3]
    mask = np.where(alpha_channel == 0, 0, 255).astype(np.uint8)
    if np.sum(mask) == 0:
        inpainting_mask = Image.fromarray(inpainting_mask_input['background']).convert('L')
    else:
        inpainting_mask = Image.fromarray(mask).convert('L')

    prompt = [input_text]
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    inpainting_image = image_transform(inpainting_image, resolution=config.dataset.params.resolution).to(device)
    inpainting_mask = image_transform(inpainting_mask, resolution=config.dataset.params.resolution, normalize=False)

    inpainting_image = inpainting_image.unsqueeze(0).repeat(config.training.batch_size, 1, 1, 1)
    inpainting_mask = inpainting_mask.unsqueeze(0).to(device)
    inpainting_mask = F.interpolate(inpainting_mask, size=config.dataset.params.resolution // 16, mode='bicubic')
    inpainting_mask = inpainting_mask.repeat(config.training.batch_size, 1, 1, 1)
    inpainting_mask[inpainting_mask < 0.5] = 0
    inpainting_mask[inpainting_mask >= 0.5] = 1
    inpainting_mask = inpainting_mask.reshape(config.training.batch_size, -1)
    inpainting_mask = inpainting_mask.to(torch.bool)

    inpainting_image_tokens = vq_model.get_code(inpainting_image) + len(uni_prompting.text_tokenizer)
    inpainting_image_tokens[inpainting_mask] = mask_token_id
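
    # Only the masked token positions are re-generated; the remaining tokens stay fixed
    # and serve as context.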
    input_ids, _ = uni_prompting((prompt, inpainting_image_tokens), 't2i_gen')

    if config.training.guidance_scale > 0:
        uncond_input_ids, _ = uni_prompting(([''] * len(prompt), inpainting_image_tokens), 't2i_gen')
        attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
                                                            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                            rm_pad_in_image=True)
    else:
        attention_mask = create_attention_mask_predict_next(input_ids,
                                                            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                            rm_pad_in_image=True)
        uncond_input_ids = None

    if config.get("mask_schedule", None) is not None:
        schedule = config.mask_schedule.schedule
        args = config.mask_schedule.get("params", {})
        mask_schedule = get_mask_chedule(schedule, **args)
    else:
        mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))

    with torch.no_grad():
        gen_token_ids = model.t2i_generate(
            input_ids=input_ids,
            uncond_input_ids=uncond_input_ids,
            attention_mask=attention_mask,
            guidance_scale=config.training.guidance_scale,
            temperature=config.training.get("generation_temperature", 1.0),
            timesteps=config.training.generation_timesteps,
            noise_schedule=mask_schedule,
            noise_type=config.training.get("noise_type", "mask"),
            seq_len=config.model.showo.num_vq_tokens,
            uni_prompting=uni_prompting,
            config=config,
        )

    gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
    images = vq_model.decode_code(gen_token_ids)
    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().detach().numpy().astype(np.uint8)
    return images[0]


def text_guided_extrapolation(input_img, input_text, left_ext, right_ext, guidance_scale=1.75, generation_timesteps=16):
    config.offset = 0
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    extra_direction = ['right'] * int(right_ext) + ['left'] * int(left_ext)
    prompt = [input_text] * len(extra_direction)
    W = config.dataset.params.resolution // 16
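
    # Each pass keeps half of the current token grid and fills the other half with mask
    # tokens on the requested side, so every extrapolation step extends the image by
    # W // 2 token columns.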
    for id, (prt, direction) in enumerate(zip(prompt, extra_direction)):
        prt = [prt] * config.training.batch_size
        if id == 0:
            # extrapolation_image = Image.open(config.image_path).convert("RGB")
            extrapolation_image = input_img
            extrapolation_image = image_transform(extrapolation_image,
                                                  resolution=config.dataset.params.resolution).to(device)
            B, _, _ = extrapolation_image.shape
            extrapolation_image = extrapolation_image.unsqueeze(0)
            extrapolation_image_tokens = vq_model.get_code(extrapolation_image) + len(uni_prompting.text_tokenizer)
            extrapolation_image_tokens = extrapolation_image_tokens.reshape(1,
                                                                            config.dataset.params.resolution // 16,
                                                                            config.dataset.params.resolution // 16)
            extrapolation_image_tokens = extrapolation_image_tokens.repeat(config.training.batch_size, 1, 1)
        else:
            extrapolation_image_tokens = gen_token_ids + len(uni_prompting.text_tokenizer)

        image_left_part = extrapolation_image_tokens[:, :, :-(W // 2 - config.offset)] - len(uni_prompting.text_tokenizer)
        image_right_part = extrapolation_image_tokens[:, :, W // 2 - config.offset:] - len(uni_prompting.text_tokenizer)
        image_up_part = extrapolation_image_tokens[:, :-(W // 2 - config.offset), :] - len(uni_prompting.text_tokenizer)
        image_down_part = extrapolation_image_tokens[:, W // 2 - config.offset:, :] - len(uni_prompting.text_tokenizer)

        if direction in ['left', 'right']:
            extrapolation_mask = torch.zeros((config.training.batch_size,
                                              config.dataset.params.resolution // 16,
                                              config.dataset.params.resolution // 16 // 2 + config.offset),
                                             dtype=torch.int64, device=device) + mask_token_id
        else:
            extrapolation_mask = torch.zeros((config.training.batch_size,
                                              config.dataset.params.resolution // 16 // 2 + config.offset,
                                              config.dataset.params.resolution // 16),
                                             dtype=torch.int64, device=device) + mask_token_id

        if direction == 'left':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_mask, extrapolation_image_tokens[:, :, :W // 2 - config.offset]], dim=-1)
        elif direction == 'right':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_image_tokens[:, :, -(W // 2 - config.offset):], extrapolation_mask], dim=-1)
        elif direction == 'up':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_mask, extrapolation_image_tokens[:, :W // 2 - config.offset, :]], dim=-2)
        else:
            extrapolation_image_tokens = torch.cat(
                [extrapolation_image_tokens[:, -(W // 2 - config.offset):, :], extrapolation_mask], dim=-2)

        extrapolation_image_tokens = extrapolation_image_tokens.reshape(config.training.batch_size, -1)
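
        # Fill in the masked half of the flattened token grid, conditioned on the text prompt.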
        input_ids, _ = uni_prompting((prt, extrapolation_image_tokens), 't2i_gen')

        if config.training.guidance_scale > 0:
            uncond_input_ids, _ = uni_prompting(([''] * len(prt), extrapolation_image_tokens), 't2i_gen')
            attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
                                                                pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                                soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                                eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                                rm_pad_in_image=True)
        else:
            attention_mask = create_attention_mask_predict_next(input_ids,
                                                                pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                                soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                                eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                                rm_pad_in_image=True)
            uncond_input_ids = None

        if config.get("mask_schedule", None) is not None:
            schedule = config.mask_schedule.schedule
            args = config.mask_schedule.get("params", {})
            mask_schedule = get_mask_chedule(schedule, **args)
        else:
            mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))

        with torch.no_grad():
            gen_token_ids = model.t2i_generate(
                input_ids=input_ids,
                uncond_input_ids=uncond_input_ids,
                attention_mask=attention_mask,
                guidance_scale=config.training.guidance_scale,
                temperature=config.training.get("generation_temperature", 1.0),
                timesteps=config.training.generation_timesteps,
                noise_schedule=mask_schedule,
                noise_type=config.training.get("noise_type", "mask"),
                seq_len=config.model.showo.num_vq_tokens,
                uni_prompting=uni_prompting,
                config=config,
            )

        gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
        gen_token_ids = gen_token_ids.reshape(config.training.batch_size,
                                              config.dataset.params.resolution // 16,
                                              config.dataset.params.resolution // 16)

        if direction == 'left':
            gen_token_ids = torch.cat([gen_token_ids, image_right_part], dim=-1)
        elif direction == 'right':
            gen_token_ids = torch.cat([image_left_part, gen_token_ids], dim=-1)
        elif direction == 'up':
            gen_token_ids = torch.cat([gen_token_ids, image_down_part], dim=-2)
        else:
            gen_token_ids = torch.cat([image_up_part, gen_token_ids], dim=-2)

    _, h, w = gen_token_ids.shape
    gen_token_ids = gen_token_ids.reshape(config.training.batch_size, -1)
    with torch.no_grad():
        images = vq_model.decode_code(gen_token_ids, shape=(h, w))

    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().detach().numpy().astype(np.uint8)
    return images[0]


def multimodal_understanding(input_img, input_text, chat_history):
    top_k = 1  # retain only the top_k most likely tokens, clamp others to have 0 probability

    image_ori = input_img
    image = image_transform(image_ori, resolution=config.dataset.params.resolution).to(device)
    image = image.unsqueeze(0)
    image_tokens = vq_model.get_code(image) + len(uni_prompting.text_tokenizer)

    question = input_text
    input_ids = uni_prompting.text_tokenizer(['USER: \n' + question + ' ASSISTANT:'])['input_ids']
    input_ids = torch.tensor(input_ids).to(device)
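
    # Build the multimodal prompt: <|mmu|> <|soi|> image tokens <|eoi|> <|sot|> question tokens.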
    input_ids = torch.cat([
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|mmu|>']).to(device),
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|soi|>']).to(device),
        image_tokens,
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|eoi|>']).to(device),
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|sot|>']).to(device),
        input_ids
    ], dim=1).long()

    attention_mask = create_attention_mask_for_mmu(input_ids.to(device),
                                                   eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']))

    cont_toks_list = model.mmu_generate(input_ids, attention_mask=attention_mask,
                                        max_new_tokens=100, top_k=top_k,
                                        eot_token=uni_prompting.sptids_dict['<|eot|>'])
    cont_toks_list = torch.stack(cont_toks_list).squeeze()[None]

    output_text = uni_prompting.text_tokenizer.batch_decode(cont_toks_list, skip_special_tokens=True)
    output_text = output_text[0].strip()

    chat_history.append((input_text, output_text))
    return "", chat_history
with gr.Blocks() as demo:
    gr.HTML("""
        <h1 class="display-2 fw-bold title">
            <a style="color: #70a8dc;">S</a><a style="color: #6fb051;">h</a><a style="color: #e06766;">o</a><a style="color: #f7b26b;">w</a>-o
        </h1>
        <p>This is the official Gradio demo for Show-o, a unified transformer for multimodal understanding and generation.</p>
        <strong>Paper:</strong> <a href="https://arxiv.org/abs/2408.12528" target="_blank">Show-o: One Single Transformer To Unify Multimodal Understanding and Generation</a>
        <br/>
        <strong>Project Website:</strong> <a href="https://showlab.github.io/Show-o/" target="_blank">Show-o Website</a>
        <br/>
        <strong>Code and Models:</strong> <a href="https://github.com/showlab/Show-o" target="_blank">GitHub</a>
        <br/>
        <br/>
    """)

    banner_1 = gr.Markdown(value="# Text-to-image Generation")
    with gr.Row():
        with gr.Column():
            text_prompt_t2i = gr.Textbox(
                label="Text prompt",
                lines=2,
                placeholder="Input the text prompt here for image generation."
            )
            guidance_scale_t2i = gr.Slider(
                label="guidance scale",
                minimum=0,
                maximum=5,
                step=0.05,
                value=1.75
            )
            generation_timesteps_t2i = gr.Slider(
                label="timesteps",
                minimum=1,
                maximum=30,
                step=1,
                value=18
            )
        generated_img_t2i = gr.Image(
            label="Output image"
        )
    examples_t2i = gr.Examples(
        label="Text to image generation examples",
        examples=[
            "A dynamic scene of a rally car race.",
            "Paper artwork, layered paper, colorful Chinese dragon surrounded by clouds.",
            "Pixel art character riding a dragon through the clouds.",
        ],
        inputs=text_prompt_t2i,
    )
    submit_btn_t2i = gr.Button("Generate: Text-to-image")
    submit_btn_t2i.click(text_to_image_generation,
                         [text_prompt_t2i, guidance_scale_t2i, generation_timesteps_t2i],
                         [generated_img_t2i])

    banner_2 = gr.Markdown(value="# Text-guided inpainting")
    with gr.Row():
        inpainting_input_img = gr.Image(
            label="Input image",
            type="pil",
            # height=256,
            # width=256,
        )
        # inpainting_input_mask = gr.Image(
        #     label="Inpainting mask",
        #     image_mode="L",
        #     type="pil",
        #     height=256,
        #     width=256,
        # )
        inpainting_input_mask = gr.ImageMask(
            sources=["upload"],
            layers=False,
            transforms=[],
            format="png",
            label="Inpainting mask",
            show_label=True
        )
        with gr.Column():
            text_prompt_inpainting = gr.Textbox(
                label="Text prompt",
                lines=2,
                placeholder="Input the text prompt here for image inpainting."
            )
            guidance_scale_inpainting = gr.Slider(
                label="guidance scale",
                minimum=0,
                maximum=5,
                step=0.05,
                value=1.75
            )
            generation_timesteps_inpainting = gr.Slider(
                label="timesteps",
                minimum=1,
                maximum=30,
                step=1,
                value=16
            )
        generated_img_inpainting = gr.Image(
            label="Output image"
        )
    examples_inpainting = gr.Examples(
        label="Text-guided inpainting examples",
        examples=[
            [
                "a blue sports car with sleek curves and tinted windows, parked on a bustling city street.",
                Image.open("./inpainting_validation/bus.jpg").convert("RGB"),
                Image.open("./inpainting_validation/bus_mask.webp").convert("L"),
            ],
            [
                "a clear, shallow river with some vibrant flowers in it.",
                Image.open("./inpainting_validation/train.jpg").convert("RGB"),
                Image.open("./inpainting_validation/train_mask.webp").convert("L"),
            ],
        ],
        inputs=[text_prompt_inpainting, inpainting_input_img, inpainting_input_mask],
    )
    submit_btn_inpainting = gr.Button("Generate: Text-guided Inpainting")
    submit_btn_inpainting.click(text_guided_inpainting,
                                [text_prompt_inpainting, inpainting_input_img, inpainting_input_mask,
                                 guidance_scale_inpainting, generation_timesteps_inpainting],
                                [generated_img_inpainting])

    banner_3 = gr.Markdown(value="# Text-guided extrapolation")
    with gr.Row():
        extra_input_img = gr.Image(
            label="Input image",
            type="pil",
            image_mode="RGB",
        )
        with gr.Column():
            text_prompt_extrapolation = gr.Textbox(
                label="Text prompt",
                lines=1,
                placeholder="Input the text prompt here for image extrapolation."
            )
            guidance_scale_extrapolation = gr.Slider(
                label="guidance scale",
                minimum=0,
                maximum=5,
                step=0.05,
                value=1.75
            )
            generation_timesteps_extrapolation = gr.Slider(
                label="timesteps",
                minimum=1,
                maximum=30,
                step=1,
                value=16
            )
            left_extrapolation = gr.Slider(
                label="left extrapolation",
                minimum=0,
                maximum=5,
                step=1,
                value=1
            )
            right_extrapolation = gr.Slider(
                label="right extrapolation",
                minimum=0,
                maximum=5,
                step=1,
                value=1
            )
        generated_img_extrapolation = gr.Image(
            label="Output image"
        )
    examples_extra = gr.Examples(
        label="Text-guided extrapolation examples",
        examples=[
            [
                Image.open("./inpainting_validation/wukong2.jpg").convert("RGB"),
                "the continuous mountain ranges and jungles, with meandering rivers occasionally appearing.",
                2,
                2,
            ],
            [
                Image.open("./inpainting_validation/alpine_lake.jpg").convert("RGB"),
                "a serene natural landscape featuring a clear, blue lake surrounded by lush green trees.",
                2,
                2,
            ],
        ],
        inputs=[extra_input_img, text_prompt_extrapolation, left_extrapolation, right_extrapolation],
    )
    submit_btn_extrapolation = gr.Button("Generate: Text-guided Extrapolation")
    submit_btn_extrapolation.click(text_guided_extrapolation,
                                   [extra_input_img, text_prompt_extrapolation, left_extrapolation, right_extrapolation,
                                    guidance_scale_extrapolation, generation_timesteps_extrapolation],
                                   [generated_img_extrapolation])

    banner_4 = gr.Markdown(value="# Multimodal understanding")
    with gr.Row():
        with gr.Row():
            chat_input_img = gr.Image(
                label="Input image",
                type="pil",
                image_mode="RGB",
            )
        with gr.Column():
            chatbot = gr.Chatbot()
            msg = gr.Textbox(label="Press Enter to send a message for chat")
            clear = gr.ClearButton([msg, chatbot])
            msg.submit(multimodal_understanding, [chat_input_img, msg, chatbot], [msg, chatbot])

demo.launch()