import os

# Ask TensorFlow Hub to fetch models in compressed format; this has to be set
# before the first hub.load call below.
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'

import numpy as np
import PIL.Image
import gradio as gr
import tensorflow as tf
import tensorflow_hub as hub

from real_esrgan_app import *

# real_esrgan_app (imported above) provides inference(img, mode), the
# Real-ESRGAN super resolution entry point used after style transfer.

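# Load the Magenta arbitrary image stylization model from TensorFlow Hub.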
hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')

def tensor_to_image(tensor):
    """Convert a float tensor in [0, 1] (optionally batched) into a PIL image."""
    tensor = tensor * 255
    tensor = np.array(tensor, dtype=np.uint8)
    if np.ndim(tensor) > 3:
        assert tensor.shape[0] == 1
        tensor = tensor[0]
    return PIL.Image.fromarray(tensor)


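# Display name -> local file name of each bundled style image.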
style_urls = {
    'Kanagawa great wave': 'The_Great_Wave_off_Kanagawa.jpg',
    'Kandinsky composition 7': 'Kandinsky_Composition_7.jpg',
    'Hubble pillars of creation': 'Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg',
    'Van gogh starry night': 'Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg',
    'Turner nantes': 'JMW_Turner_-_Nantes_from_the_Ile_Feydeau.jpg',
    'Munch scream': 'Edvard_Munch.jpg',
    'Picasso demoiselles avignon': 'Les_Demoiselles.jpg',
    'Picasso violin': 'picaso_violin.jpg',
    'Picasso bottle of rum': 'picaso_rum.jpg',
    'Fire': 'Large_bonfire.jpg',
    'Derkovits woman head': 'Derkovits_Gyula_Woman_head_1922.jpg',
    'Amadeo style life': 'Amadeo_Souza_Cardoso.jpg',
    'Derkovits talig': 'Derkovits_Gyula_Talig.jpg',
    'Kadishman': 'kadishman.jpeg'
}


style_images = list(style_urls.keys())

def image_click(images, evt: gr.SelectData):
    """Return the file path of the gallery image that was clicked."""
    img_selected = images[evt.index]["name"]
    return img_selected



def perform_neural_transfer(content_image_input, style_image_input, super_resolution_type, hub_module=hub_module):
    """Stylize the content image with the style image, then optionally upscale the result with Real-ESRGAN."""
    # The stylization model expects batched float images in [0, 1].
    content_image = content_image_input.astype(np.float32)[np.newaxis, ...] / 255.
    content_image = tf.image.resize(content_image, (400, 600))

    style_image = style_image_input.astype(np.float32)[np.newaxis, ...] / 255.
    # Resize the style image to 256x256, the size used to train this model (hence the "v1-256" handle).
    style_image = tf.image.resize(style_image, (256, 256))

    outputs = hub_module(tf.constant(content_image), tf.constant(style_image))
    stylized_image = tensor_to_image(outputs[0])

    # Scale the stylized result back to the original content image dimensions.
    stylized_image = stylized_image.resize((content_image_input.shape[1], content_image_input.shape[0]))

    print("super_resolution_type:", super_resolution_type)

    if super_resolution_type in ("base", "anime"):
        # Upscale with the selected Real-ESRGAN model; "none" skips this step.
        stylized_image = inference(stylized_image, super_resolution_type)
    return stylized_image

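# Gradio UI: a clickable style gallery on the left; the style/content inputs,
# Real-ESRGAN option, transformed output, and transform button on the right.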
with gr.Blocks() as demo:
    gr.HTML("<h1><center> 🐑 Art Generation with Neural Style Transfer, Enhanced by Real-ESRGAN </center></h1>")

    with gr.Row():
        style_reference_input_gallery = gr.Gallery(
            list(style_urls.values()),
            height=768 + 128,
            label="Style image gallery (click to use)",
        )
        with gr.Column():
            super_resolution_type = gr.Radio(
                choices=["base", "anime", "none"],
                value="base",
                label="Real-ESRGAN model used to upscale the transformed image ('none' skips super resolution)",
                interactive=True,
            )
            style_reference_input_image = gr.Image(
                label="Style image (upload your own or click one in the gallery on the left)",
                interactive=True,
                value=style_urls["Kanagawa great wave"],
            )
            content_image_input = gr.Image(label="Content image", interactive=True)
            trans_image_output = gr.Image(label="Transformed image", interactive=True)
            trans_button = gr.Button("Transform the content image with the selected style")


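    # Clicking a gallery thumbnail copies that image into the style image input.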
    style_reference_input_gallery.select(
        image_click, style_reference_input_gallery, style_reference_input_image
    )

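    # Run style transfer (plus optional Real-ESRGAN upscaling) on button click.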
    trans_button.click(
        perform_neural_transfer,
        [content_image_input, style_reference_input_image, super_resolution_type],
        trans_image_output,
    )

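    # Ready-made example combinations of style image, content image, and Real-ESRGAN mode.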
    gr.Examples(
        [
            [style_urls["Kanagawa great wave"], style_urls["Kadishman"], "none"],
            [style_urls["Derkovits woman head"], style_urls["Kadishman"], "base"],
            [style_urls["Kadishman"], style_urls["Kadishman"], "anime"],
        ],
        inputs=[style_reference_input_image, content_image_input, super_resolution_type],
        label="Transform examples",
    )

demo.launch()