try fix serverside not finding files
Files changed:
- .DS_Store  +0 -0
- .gitignore +2 -0
- app.py     +23 -2
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
.gitignore ADDED
@@ -0,0 +1,2 @@
+
+.DS_Store
app.py CHANGED

@@ -197,6 +197,9 @@ def on_embeddings_changed_update_plot(embeddings_b64):
         # height=300,
         width=embeddings.shape[0])
 
+def on_example_image_click_set_image(input_image, image_url):
+    input_image.value = image_url
+
 device = torch.device("mps" if torch.backends.mps.is_available() else "cuda:0" if torch.cuda.is_available() else "cpu")
 torch_size = torch.float16 if device == ('cuda') else torch.float32
 # torch_size = torch.float32
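
A note on the unchanged context around the new helper: torch_size = torch.float16 if device == ('cuda') else torch.float32 compares a torch.device object against a string, and the device built above is "cuda:0", so the float16 branch appears never to be taken; the new on_example_image_click_set_image helper is also not wired to any event in the hunks shown. A minimal sketch of the presumably intended dtype check, keyed off device.type (illustrative only, not part of this commit):

import torch

# Same device probe as app.py.
device = torch.device(
    "mps" if torch.backends.mps.is_available()
    else "cuda:0" if torch.cuda.is_available()
    else "cpu")

# device == 'cuda' is always False for torch.device("cuda:0");
# device.type is the plain string "cuda", "mps" or "cpu".
torch_dtype = torch.float16 if device.type == "cuda" else torch.float32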

@@ -222,6 +225,15 @@ examples = [
     # ["SohoJoeEth.jpeg", "Snoop Dogg.jpg", "SohoJoeEth + Snoop Dogg.jpeg"],
 ]
 tile_size = 100
+# image_folder = os.path.join("file", "images")
+image_folder ="images"
+
+image_examples = [
+    "SohoJoeEth.jpeg",
+    "Ray-Liotta-Goodfellas.jpg", "Donkey.jpg", "Snoop Dogg.jpg",
+    "pup1.jpg", "pup2.jpg", "pup3.jpg", "pup4.jpeg", "pup5.jpg",
+]
+image_examples_tile_size = 50
 
 
 
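
This hunk swaps the commented-out os.path.join("file", "images") for a bare relative folder name. A relative image_folder is resolved against the server process's working directory, which is one common reason files are found locally but not server-side. A hedged sketch (not in the commit; APP_DIR is an illustrative name) that pins the folder to app.py's own location and checks the example files up front:

import os

# Resolve the images folder relative to this script rather than the CWD,
# so the server finds the files regardless of where it was launched from.
APP_DIR = os.path.dirname(os.path.abspath(__file__))
image_folder = os.path.join(APP_DIR, "images")

image_examples = [
    "SohoJoeEth.jpeg",
    "Ray-Liotta-Goodfellas.jpg", "Donkey.jpg", "Snoop Dogg.jpg",
    "pup1.jpg", "pup2.jpg", "pup3.jpg", "pup4.jpeg", "pup5.jpg",
]

# Fail loudly at startup if an expected example image is missing.
missing = [f for f in image_examples
           if not os.path.isfile(os.path.join(image_folder, f))]
if missing:
    raise FileNotFoundError(f"example images not found in {image_folder}: {missing}")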

@@ -248,8 +260,11 @@ Try uploading a few images and/or add some text prompts and click generate image
     with gr.Row():
         for image in example:
             with gr.Column(scale=1, min_width=tile_size):
-
-                gr.Image(
+                local_path = os.path.join(image_folder, image)
+                gr.Image(
+                    value = local_path, shape=(tile_size,tile_size),
+                    show_label=False, interactive=False) \
+                    .style(height=tile_size, width=tile_size)
 
     with gr.Row():
         for i in range(max_tabs):
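
The rewritten tile loop joins image_folder with each file name and hands the result to gr.Image. Below is a defensive variant of the same idea, a self-contained sketch that assumes the Gradio 3.x API the diff already uses (shape= and .style()) and skips any tile whose file is missing instead of failing while the page is built; the example row contents here are placeholders.

import os
import gradio as gr

tile_size = 100                                   # as in app.py
image_folder = "images"                           # as in this commit
example = ["SohoJoeEth.jpeg", "Snoop Dogg.jpg"]   # illustrative row

with gr.Blocks() as demo:
    with gr.Row():
        for image in example:
            with gr.Column(scale=1, min_width=tile_size):
                local_path = os.path.join(image_folder, image)
                if not os.path.isfile(local_path):
                    continue  # skip missing files rather than erroring server-side
                gr.Image(
                    value=local_path, shape=(tile_size, tile_size),
                    show_label=False, interactive=False) \
                    .style(height=tile_size, width=tile_size)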

@@ -260,6 +275,12 @@ Try uploading a few images and/or add some text prompts and click generate image
             with gr.Column(scale=3, min_width=600):
                 embedding_plots[i] = gr.LinePlot(show_label=False).style(container=False)
                 # input_image.change(on_image_load, inputs= [input_image, plot])
+            # with gr.Row():
+            #     examples_with_path = [os.path.join(image_folder, image) for image in image_examples]
+            #     gr.Examples(
+            #         examples=examples_with_path,
+            #         inputs=input_images[i],
+            #     )
             with gr.Row():
                 with gr.Column(scale=2, min_width=240):
                     input_prompts[i] = gr.Textbox()
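
The gr.Examples wiring stays commented out in this commit. Enabled, and with the paths joined up front, it would presumably look like the sketch below; input_images[i] is the per-tab image input defined elsewhere in app.py, and the other names come from this commit.

# Sketch of the commented-out block, enabled (not what this commit ships).
examples_with_path = [os.path.join(image_folder, image) for image in image_examples]
with gr.Row():
    gr.Examples(
        examples=examples_with_path,  # local paths to the example images
        inputs=input_images[i],       # the tab's image input component
    )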