0x90e committed
Commit 0d7de2e · 1 Parent(s): d570bef

UI and memory improvements.

Files changed (3):
  1. app.py +14 -46
  2. inference_manga_v2.py +40 -17
  3. process_image.py +43 -0
app.py CHANGED
@@ -1,50 +1,12 @@
-import os
 import gradio as gr
 import util
+import process_image
 from run_cmd import run_cmd
-from random import randint
-from PIL import Image
-import tempfile
 
-temp_path = tempfile.gettempdir()
+run_cmd("pip install split-image")
 
 is_colab = util.is_google_colab()
 
-run_cmd("pip install pngquant")
-
-def inference(img, size, type):
-    _id = randint(1, 10000)
-    INPUT_DIR = os.path.join(temp_path, f"input_image{str(_id)}")
-    OUTPUT_DIR = os.path.join(temp_path, f"output_image{str(_id)}")
-    img_in_path = os.path.join(INPUT_DIR, "1.jpg")
-    img_out_path = os.path.join(OUTPUT_DIR, f"1_{size}.png")
-    run_cmd(f"rm -rf {INPUT_DIR}")
-    run_cmd(f"rm -rf {OUTPUT_DIR}")
-    run_cmd(f"mkdir {INPUT_DIR}")
-    run_cmd(f"mkdir {OUTPUT_DIR}")
-    img.save(img_in_path, "PNG")
-
-    if type == "Manga":
-        run_cmd(f"python inference_manga_v2.py {img_in_path} {img_out_path}")
-    else:
-        run_cmd(f"python inference.py {img_in_path} {img_out_path} {type}")
-
-    img_out = Image.open(img_out_path)
-
-    if size == "x2":
-        img_out = img_out.resize((img_out.width // 2, img_out.height // 2), resample=Image.BICUBIC)
-
-    #img_out.save(img_out_path, optimize=True) # Add more optimizations
-    #img_out = Image.open(img_out_path)
-
-    # Remove input and output image
-    run_cmd(f"rm -f {img_in_path}")
-    #run_cmd(f"rm -f {img_out_path}")
-
-    out_file.update(value=img_out_path, visible=True)
-
-    return img_out, gr.File.update(value=img_out_path, visible=True)
-
 css = '''
 .file-preview {
     overflow: hidden !important;
@@ -59,17 +21,23 @@ css = '''
 .file-preview div div:nth-child(3) {
     text-align: right !important;
 }
+
+#preview_img {
+    user-select: none !important;
+    touch-action: none !important;
+    pointer-events: none !important;
+}
 '''
 
 title = "ESRGAN Upscaling With Custom Models"
-description = "This space uses old ESRGAN architecture to upscale images, using models made by the community."
-article = "<p><a href='https://upscale.wiki/wiki/Model_Database'>Model Database</a></p>"
 
 with gr.Blocks(title=title, css=css) as demo:
     gr.Markdown(
         f"""
         # {title}
-        {description}
+        This space uses old ESRGAN architecture to upscale images, using models made by the community.
+
+        Once upscaled, click or tap the download button under the image to download it.
         """)
 
     with gr.Box():
@@ -83,14 +51,14 @@ with gr.Blocks(title=title, css=css) as demo:
             upscale_btn = gr.Button(value="Upscale", variant="primary")
 
         with gr.Column():
-            output_image = gr.Image(type="filepath", interactive=False, label="Upscaled image", )
+            output_image = gr.Image(type="filepath", interactive=False, label="Upscaled image", elem_id="preview_img")
 
             with gr.Row():
                 out_file = gr.File(interactive=False, show_label=False, visible=False)
 
-    gr.HTML(value=article)
+    gr.HTML(value="<p><a href='https://upscale.wiki/wiki/Model_Database'>Model Database</a></p>")
 
-    upscale_btn.click(inference, inputs=[input_image, upscale_size, upscale_type], outputs=[output_image, out_file])
+    upscale_btn.click(process_image.inference, inputs=[input_image, upscale_size, upscale_type], outputs=[output_image, out_file])
 
 demo.queue()
 demo.launch(debug=is_colab, share=is_colab, inline=is_colab)
inference_manga_v2.py CHANGED
@@ -4,6 +4,10 @@ import cv2
 import numpy as np
 import torch
 import architecture as arch
+from split_image import split_image
+from run_cmd import run_cmd
+import re
+from pathlib import Path
 
 def is_cuda():
     if torch.cuda.is_available():
@@ -31,20 +35,39 @@ for k, v in model.named_parameters():
     v.requires_grad = False
 model = model.to(device)
 
-base = os.path.splitext(os.path.basename(img_path))[0]
-
-# Read image
-img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
-img = img * 1.0 / 255
-img = torch.from_numpy(img[np.newaxis, :, :]).float()
-img_LR = img.unsqueeze(0)
-img_LR = img_LR.to(device)
-
-print('Start upscaling...')
-with torch.no_grad():
-    output = model(img_LR).squeeze(dim=0).float().cpu().clamp_(0, 1).numpy()
-    output = np.transpose(output, (1, 2, 0))
-    output = (output * 255.0).round()
-print('Finished upscaling, saving image.')
-print(output_dir)
-cv2.imwrite(output_dir, output, [int(cv2.IMWRITE_PNG_COMPRESSION), 9])
+base = os.path.dirname(img_path)
+
+# Split the input image into a 7x7 grid of tiles
+run_cmd(f"split-image {img_path} 7 7 --output-dir {base} --quiet")
+
+for root, dirs, files in os.walk(base, topdown=True):
+    for x, name in enumerate(files):
+        file_path = os.path.join(root, name)
+
+        # Skip the original input image, only process the tiles
+        if file_path == img_path:
+            print(file_path)
+            continue
+
+        # Read tile
+        img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
+        img = img * 1.0 / 255
+        img = torch.from_numpy(img[np.newaxis, :, :]).float()
+        img_LR = img.unsqueeze(0)
+        img_LR = img_LR.to(device)
+
+        #print(f"Start upscaling tile {x}...")
+        with torch.no_grad():
+            output = model(img_LR).squeeze(dim=0).float().cpu().clamp_(0, 1).numpy()
+            output = np.transpose(output, (1, 2, 0))
+            output = (output * 255.0).round()
+        #print(f"Finished upscaling tile {x}, saving tile.")
+        #print(output_dir)
+        cv2.imwrite(file_path, output)
+
+# Join all tiles back together
+run_cmd(f"cd {base} && split-image input.jpg 7 7 -r --quiet")
+
+# Open the joined image and save it as PNG under the output name
+img_out = cv2.imread(img_path)
+cv2.imwrite(output_dir, img_out, [int(cv2.IMWRITE_PNG_COMPRESSION), 9])
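For context, the memory improvement in inference_manga_v2.py comes from upscaling the page as 49 small tiles rather than one full-resolution tensor. Below is a minimal Pillow-only sketch of the same tile-then-reassemble idea; it is not the code used in this commit (which shells out to the split-image CLI), and upscale_tile, upscale_in_tiles, and the bicubic stand-in for the ESRGAN model are assumptions made for the example.

# Minimal sketch of tile-based upscaling (assumed helper names, not part of this repo).
from PIL import Image

def upscale_tile(tile: Image.Image, scale: int = 4) -> Image.Image:
    # Stand-in for the ESRGAN model call; a bicubic resize keeps the sketch self-contained.
    return tile.resize((tile.width * scale, tile.height * scale), Image.BICUBIC)

def upscale_in_tiles(path_in: str, path_out: str, rows: int = 7, cols: int = 7, scale: int = 4) -> None:
    img = Image.open(path_in)
    tile_w, tile_h = img.width // cols, img.height // rows  # edge remainders are dropped in this sketch
    out = Image.new(img.mode, (tile_w * cols * scale, tile_h * rows * scale))
    for r in range(rows):
        for c in range(cols):
            box = (c * tile_w, r * tile_h, (c + 1) * tile_w, (r + 1) * tile_h)
            tile = upscale_tile(img.crop(box), scale)  # only one tile is held in memory at a time
            out.paste(tile, (c * tile_w * scale, r * tile_h * scale))
    out.save(path_out, "PNG")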
process_image.py ADDED
@@ -0,0 +1,43 @@
+import os
+import gradio as gr
+from run_cmd import run_cmd
+from random import randint
+from PIL import Image
+import tempfile
+
+temp_path = tempfile.gettempdir()
+
+def inference(img, size, type):
+    if not img:
+        raise Exception("No image!")
+
+    _id = randint(1, 10000)
+    INPUT_DIR = os.path.join(temp_path, f"input_image{str(_id)}")
+    OUTPUT_DIR = os.path.join(temp_path, f"output_image{str(_id)}")
+    img_in_path = os.path.join(INPUT_DIR, "input.jpg")
+    img_out_path = os.path.join(OUTPUT_DIR, f"output_{size}.png")
+    run_cmd(f"rm -rf {INPUT_DIR}")
+    run_cmd(f"rm -rf {OUTPUT_DIR}")
+    run_cmd(f"mkdir {INPUT_DIR}")
+    run_cmd(f"mkdir {OUTPUT_DIR}")
+    img.save(img_in_path, "PNG")
+
+    if type == "Manga":
+        run_cmd(f"python inference_manga_v2.py {img_in_path} {img_out_path}")
+    else:
+        run_cmd(f"python inference.py {img_in_path} {img_out_path} {type}")
+
+    img_out = Image.open(img_out_path)
+
+    if size == "x2":
+        img_out = img_out.resize((img_out.width // 2, img_out.height // 2), resample=Image.BICUBIC)
+
+    #img_out.save(img_out_path, optimize=True) # Add more optimizations
+    #img_out = Image.open(img_out_path)
+
+    # Remove input and output image
+    #run_cmd(f"rm -f {img_in_path}")
+
+    img_out.thumbnail((600, 600), Image.ANTIALIAS)
+
+    return img_out, gr.File.update(value=img_out_path, visible=True)
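A small compatibility note on the preview step in process_image.py: Image.ANTIALIAS was deprecated in Pillow 9.1 and removed in Pillow 10, where the equivalent filter is Image.Resampling.LANCZOS. A minimal sketch of the same thumbnail step written against the newer API (make_preview is a hypothetical helper, not part of this commit):

# Downscaled preview for the UI, written for Pillow >= 9.1 where ANTIALIAS is deprecated/removed.
from PIL import Image

def make_preview(path: str, max_side: int = 600) -> Image.Image:
    # LANCZOS is the filter that ANTIALIAS used to alias.
    preview = Image.open(path)
    preview.thumbnail((max_side, max_side), Image.Resampling.LANCZOS)
    return preview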