Create app.py
app.py ADDED
@@ -0,0 +1,116 @@
import os
import tensorflow as tf
# Download TF Hub modules as compressed archives and cache them locally.
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
import numpy as np
import PIL.Image
import gradio as gr
import tensorflow_hub as hub
import matplotlib.pyplot as plt
from real_esrgan_app import *

'''
real_esrgan_app provides inference(img, mode), used below to optionally
super-resolve the stylized output with Real-ESRGAN ("base" or "anime").
'''

# Magenta arbitrary-image-stylization module: maps (content image, style image) to a stylized image.
hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')

def tensor_to_image(tensor):
    # Convert a float tensor with values in [0, 1] (optionally batched) to a PIL image.
    tensor = tensor * 255
    tensor = np.array(tensor, dtype=np.uint8)
    if np.ndim(tensor) > 3:
        assert tensor.shape[0] == 1
        tensor = tensor[0]
    return PIL.Image.fromarray(tensor)
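
# Usage sketch: the hub module above returns a batched float tensor in [0, 1], e.g.
#   stylized = hub_module(tf.constant(content), tf.constant(style))[0]
#   pil_img = tensor_to_image(stylized)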


# Style images referenced by file name (expected to live next to app.py): display name -> file name.
style_urls = {
    'Kanagawa great wave': 'The_Great_Wave_off_Kanagawa.jpg',
    'Kandinsky composition 7': 'Kandinsky_Composition_7.jpg',
    'Hubble pillars of creation': 'Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg',
    'Van gogh starry night': 'Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg',
    'Turner nantes': 'JMW_Turner_-_Nantes_from_the_Ile_Feydeau.jpg',
    'Munch scream': 'Edvard_Munch.jpg',
    'Picasso demoiselles avignon': 'Les_Demoiselles.jpg',
    'Picasso violin': 'picaso_violin.jpg',
    'Picasso bottle of rum': 'picaso_rum.jpg',
    'Fire': 'Large_bonfire.jpg',
    'Derkovits woman head': 'Derkovits_Gyula_Woman_head_1922.jpg',
    'Amadeo style life': 'Amadeo_Souza_Cardoso.jpg',
    'Derkovtis talig': 'Derkovits_Gyula_Talig.jpg',
    'Kadishman': 'kadishman.jpeg'
}


style_images = list(style_urls)

def image_click(images, evt: gr.SelectData):
    # Return the file path of the gallery item the user clicked on.
    img_selected = images[evt.index]["name"]
    return img_selected
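
# Note (assumption): the ["name"] lookup relies on the Gradio version this Space targets,
# where each gallery item arrives as a dict holding a local file path under "name";
# other Gradio releases may pass gallery items in a different shape.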


#radio_style = gr.Radio(style_images, label="Choose Style")

def perform_neural_transfer(content_image_input, style_image_input, super_resolution_type, hub_module=hub_module):
    # Scale both images to float32 in [0, 1] and add a batch dimension, as the hub module expects.
    content_image = content_image_input.astype(np.float32)[np.newaxis, ...] / 255.
    content_image = tf.image.resize(content_image, (400, 600))

    #style_image_input = style_urls[style_image_input]
    #style_image_input = plt.imread(style_image_input)
    style_image = style_image_input.astype(np.float32)[np.newaxis, ...] / 255.

    style_image = tf.image.resize(style_image, (256, 256))

    outputs = hub_module(tf.constant(content_image), tf.constant(style_image))
    stylized_image = outputs[0]

    stylized_image = tensor_to_image(stylized_image)
    # Only the original content size is needed; build the PIL image straight from the uint8 input.
    content_image_input = PIL.Image.fromarray(content_image_input)
    stylized_image = stylized_image.resize(content_image_input.size)
    if super_resolution_type == "none":
        return stylized_image
    else:
        stylized_image = inference(stylized_image, super_resolution_type)
        return stylized_image
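
# Standalone usage sketch (file names taken from style_urls above; assumes they exist in the repo):
#   content = plt.imread('kadishman.jpeg')                      # uint8 H x W x 3 array
#   style = plt.imread('The_Great_Wave_off_Kanagawa.jpg')
#   result = perform_neural_transfer(content, style, "none")    # PIL.Image, no super resolution
#   result = perform_neural_transfer(content, style, "base")    # additionally run Real-ESRGAN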

with gr.Blocks() as demo:
    gr.HTML("<h1><center> 🐑 Art Generation with Neural Style Transfer </center></h1>")

    with gr.Row():
        style_reference_input_gallery = gr.Gallery(list(style_urls.values()),
                                                   #width = 512,
                                                   height = 768 + 32,
                                                   label = "Style Image gallery (click to use)")
        with gr.Column():
            super_resolution_type = gr.Radio(["none", "base", "anime"], type="value", value="none",
                                             label="Model used to super-resolve the transformed image")
            style_reference_input_image = gr.Image(
                label = "Style Image (upload your own or click one in the gallery on the left)",
                #width = 512,
                interactive = True, value = style_urls["Kanagawa great wave"]
            )
            content_image_input = gr.Image(label="Content Image", interactive = True,
                                           #width = 512
                                           )
            trans_image_output = gr.Image(label="Image Transformed", interactive = True,
                                          #width = 512
                                          )
            trans_button = gr.Button("Transform the Content Image with the Style Image")

    # Clicking a gallery thumbnail copies that image into the Style Image input.
    style_reference_input_gallery.select(
        image_click, style_reference_input_gallery, style_reference_input_image
    )

    # super_resolution_type is included in the inputs so the radio choice reaches perform_neural_transfer.
    trans_button.click(perform_neural_transfer, [content_image_input, style_reference_input_image, super_resolution_type], trans_image_output)

    gr.Examples(
        [
            [style_urls["Kanagawa great wave"], style_urls["Kadishman"]],
            [style_urls["Derkovits woman head"], style_urls["Kadishman"]],
        ],
        inputs = [style_reference_input_image, content_image_input],
        label = "Transform Examples"
    )

demo.launch()
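
# Dependency note (sketch, inferred from the imports above): requirements.txt should list at least
# tensorflow, tensorflow_hub, gradio, numpy, Pillow and matplotlib, and the repository must also
# contain the real_esrgan_app module plus the style image files named in style_urls.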