hysts (HF staff) committed
Commit f521e88 • 1 Parent(s): 89efab8

Migrate from yapf to black

.pre-commit-config.yaml CHANGED
@@ -1,36 +1,55 @@
  repos:
- - repo: https://github.com/pre-commit/pre-commit-hooks
-   rev: v4.2.0
-   hooks:
-   - id: check-executables-have-shebangs
-   - id: check-json
-   - id: check-merge-conflict
-   - id: check-shebang-scripts-are-executable
-   - id: check-toml
-   - id: check-yaml
-   - id: double-quote-string-fixer
-   - id: end-of-file-fixer
-   - id: mixed-line-ending
-     args: ['--fix=lf']
-   - id: requirements-txt-fixer
-   - id: trailing-whitespace
- - repo: https://github.com/myint/docformatter
-   rev: v1.4
-   hooks:
-   - id: docformatter
-     args: ['--in-place']
- - repo: https://github.com/pycqa/isort
-   rev: 5.12.0
-   hooks:
-   - id: isort
- - repo: https://github.com/pre-commit/mirrors-mypy
-   rev: v0.991
-   hooks:
-   - id: mypy
-     args: ['--ignore-missing-imports']
-     additional_dependencies: ['types-python-slugify']
- - repo: https://github.com/google/yapf
-   rev: v0.32.0
-   hooks:
-   - id: yapf
-     args: ['--parallel', '--in-place']
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+   rev: v4.4.0
+   hooks:
+   - id: check-executables-have-shebangs
+   - id: check-json
+   - id: check-merge-conflict
+   - id: check-shebang-scripts-are-executable
+   - id: check-toml
+   - id: check-yaml
+   - id: end-of-file-fixer
+   - id: mixed-line-ending
+     args: ["--fix=lf"]
+   - id: requirements-txt-fixer
+   - id: trailing-whitespace
+ - repo: https://github.com/myint/docformatter
+   rev: v1.7.5
+   hooks:
+   - id: docformatter
+     args: ["--in-place"]
+ - repo: https://github.com/pycqa/isort
+   rev: 5.12.0
+   hooks:
+   - id: isort
+     args: ["--profile", "black"]
+ - repo: https://github.com/pre-commit/mirrors-mypy
+   rev: v1.5.1
+   hooks:
+   - id: mypy
+     args: ["--ignore-missing-imports"]
+     additional_dependencies:
+       ["types-python-slugify", "types-requests", "types-PyYAML"]
+ - repo: https://github.com/psf/black
+   rev: 23.9.1
+   hooks:
+   - id: black
+     language_version: python3.10
+     args: ["--line-length", "119"]
+ - repo: https://github.com/kynan/nbstripout
+   rev: 0.6.1
+   hooks:
+   - id: nbstripout
+     args:
+       [
+         "--extra-keys",
+         "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
+       ]
+ - repo: https://github.com/nbQA-dev/nbQA
+   rev: 1.7.0
+   hooks:
+   - id: nbqa-black
+   - id: nbqa-pyupgrade
+     args: ["--py37-plus"]
+   - id: nbqa-isort
+     args: ["--float-to-top"]
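
To pick up the migrated hooks locally, the standard pre-commit workflow applies (a sketch, assuming pre-commit is available in the active environment):

    pip install pre-commit
    pre-commit install
    pre-commit run --all-files

The last command also reproduces this commit's reformatting: the black hook above is pinned to line length 119, so running black directly with the same flag gives the same result.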
.style.yapf DELETED
@@ -1,5 +0,0 @@
- [style]
- based_on_style = pep8
- blank_line_before_nested_class_or_def = false
- spaces_before_comment = 2
- split_before_logical_operator = true
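
The deleted yapf options have no one-to-one black counterparts: black is deliberately non-configurable beyond line length. If a tool-agnostic record of the formatting choice were wanted, a hypothetical pyproject.toml stanza matching the new hook arguments would be (illustrative only; this commit pins the settings in .pre-commit-config.yaml instead):

    [tool.black]
    line-length = 119

    [tool.isort]
    profile = "black"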
.vscode/settings.json CHANGED
@@ -1,18 +1,21 @@
  {
-     "python.linting.enabled": true,
-     "python.linting.flake8Enabled": true,
-     "python.linting.pylintEnabled": false,
-     "python.linting.lintOnSave": true,
-     "python.formatting.provider": "yapf",
-     "python.formatting.yapfArgs": [
-         "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
-     ],
      "[python]": {
+         "editor.defaultFormatter": "ms-python.black-formatter",
          "editor.formatOnType": true,
          "editor.codeActionsOnSave": {
              "source.organizeImports": true
          }
      },
+     "black-formatter.args": [
+         "--line-length=119"
+     ],
+     "isort.args": ["--profile", "black"],
+     "flake8.args": [
+         "--max-line-length=119"
+     ],
+     "ruff.args": [
+         "--line-length=119"
+     ],
      "editor.formatOnSave": true,
      "files.insertFinalNewline": true
  }
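
Outside VS Code, the same formatter and linter behavior can be reproduced from the command line with the tools' standard flags, mirroring the arguments above (a sketch, not part of this commit):

    black --line-length 119 .
    isort --profile black .
    flake8 --max-line-length 119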
app.py CHANGED
@@ -13,83 +13,79 @@ from app_mlsd import create_demo as create_demo_mlsd
  from app_normal import create_demo as create_demo_normal
  from app_openpose import create_demo as create_demo_openpose
  from app_scribble import create_demo as create_demo_scribble
- from app_scribble_interactive import \
-     create_demo as create_demo_scribble_interactive
+ from app_scribble_interactive import create_demo as create_demo_scribble_interactive
  from app_segmentation import create_demo as create_demo_segmentation
  from app_shuffle import create_demo as create_demo_shuffle
  from app_softedge import create_demo as create_demo_softedge
  from model import Model
- from settings import (ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID,
-                       SHOW_DUPLICATE_BUTTON)
+ from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON

- DESCRIPTION = '# ControlNet v1.1'
+ DESCRIPTION = "# ControlNet v1.1"

  if not torch.cuda.is_available():
-     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

- model = Model(base_model_id=DEFAULT_MODEL_ID, task_name='Canny')
+ model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="Canny")

- with gr.Blocks(css='style.css') as demo:
+ with gr.Blocks(css="style.css") as demo:
      gr.Markdown(DESCRIPTION)
-     gr.DuplicateButton(value='Duplicate Space for private use',
-                        elem_id='duplicate-button',
-                        visible=SHOW_DUPLICATE_BUTTON)
+     gr.DuplicateButton(
+         value="Duplicate Space for private use", elem_id="duplicate-button", visible=SHOW_DUPLICATE_BUTTON
+     )

      with gr.Tabs():
-         with gr.TabItem('Canny'):
+         with gr.TabItem("Canny"):
              create_demo_canny(model.process_canny)
-         with gr.TabItem('MLSD'):
+         with gr.TabItem("MLSD"):
              create_demo_mlsd(model.process_mlsd)
-         with gr.TabItem('Scribble'):
+         with gr.TabItem("Scribble"):
              create_demo_scribble(model.process_scribble)
-         with gr.TabItem('Scribble Interactive'):
-             create_demo_scribble_interactive(
-                 model.process_scribble_interactive)
-         with gr.TabItem('SoftEdge'):
+         with gr.TabItem("Scribble Interactive"):
+             create_demo_scribble_interactive(model.process_scribble_interactive)
+         with gr.TabItem("SoftEdge"):
              create_demo_softedge(model.process_softedge)
-         with gr.TabItem('OpenPose'):
+         with gr.TabItem("OpenPose"):
              create_demo_openpose(model.process_openpose)
-         with gr.TabItem('Segmentation'):
+         with gr.TabItem("Segmentation"):
              create_demo_segmentation(model.process_segmentation)
-         with gr.TabItem('Depth'):
+         with gr.TabItem("Depth"):
              create_demo_depth(model.process_depth)
-         with gr.TabItem('Normal map'):
+         with gr.TabItem("Normal map"):
              create_demo_normal(model.process_normal)
-         with gr.TabItem('Lineart'):
+         with gr.TabItem("Lineart"):
              create_demo_lineart(model.process_lineart)
-         with gr.TabItem('Content Shuffle'):
+         with gr.TabItem("Content Shuffle"):
              create_demo_shuffle(model.process_shuffle)
-         with gr.TabItem('Instruct Pix2Pix'):
+         with gr.TabItem("Instruct Pix2Pix"):
              create_demo_ip2p(model.process_ip2p)

-     with gr.Accordion(label='Base model', open=False):
+     with gr.Accordion(label="Base model", open=False):
          with gr.Row():
              with gr.Column(scale=5):
-                 current_base_model = gr.Text(label='Current base model')
+                 current_base_model = gr.Text(label="Current base model")
              with gr.Column(scale=1):
-                 check_base_model_button = gr.Button('Check current base model')
+                 check_base_model_button = gr.Button("Check current base model")
          with gr.Row():
              with gr.Column(scale=5):
                  new_base_model_id = gr.Text(
-                     label='New base model',
+                     label="New base model",
                      max_lines=1,
-                     placeholder='runwayml/stable-diffusion-v1-5',
-                     info=
-                     'The base model must be compatible with Stable Diffusion v1.5.',
-                     interactive=ALLOW_CHANGING_BASE_MODEL)
+                     placeholder="runwayml/stable-diffusion-v1-5",
+                     info="The base model must be compatible with Stable Diffusion v1.5.",
+                     interactive=ALLOW_CHANGING_BASE_MODEL,
+                 )
              with gr.Column(scale=1):
-                 change_base_model_button = gr.Button(
-                     'Change base model', interactive=ALLOW_CHANGING_BASE_MODEL)
+                 change_base_model_button = gr.Button("Change base model", interactive=ALLOW_CHANGING_BASE_MODEL)
          if not ALLOW_CHANGING_BASE_MODEL:
              gr.Markdown(
-                 '''The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space.'''
+                 """The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space."""
              )

          check_base_model_button.click(
              fn=lambda: model.base_model_id,
              outputs=current_base_model,
              queue=False,
-             api_name='check_base_model',
+             api_name="check_base_model",
          )
          new_base_model_id.submit(
              fn=model.set_base_model,
app_canny.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,62 +17,36 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button('Run')
-             with gr.Accordion('Advanced options', open=False):
-                 num_samples = gr.Slider(label='Number of images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
+                 num_samples = gr.Slider(
+                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
+                     step=256,
+                 )
                  canny_low_threshold = gr.Slider(
-                     label='Canny low threshold',
-                     minimum=1,
-                     maximum=255,
-                     value=100,
-                     step=1)
+                     label="Canny low threshold", minimum=1, maximum=255, value=100, step=1
+                 )
                  canny_high_threshold = gr.Slider(
-                     label='Canny high threshold',
-                     minimum=1,
-                     maximum=255,
-                     value=200,
-                     step=1)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     label="Canny high threshold", minimum=1, maximum=255, value=200, step=1
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -103,13 +82,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='canny',
+         api_name="canny",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='Canny')
+
+     model = Model(task_name="Canny")
      demo = create_demo(model.process_canny)
      demo.queue().launch()
app_depth.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,61 +17,36 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button('Run')
-             with gr.Accordion('Advanced options', open=False):
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
                  preprocessor_name = gr.Radio(
-                     label='Preprocessor',
-                     choices=['Midas', 'DPT', 'None'],
-                     type='value',
-                     value='DPT')
-                 num_samples = gr.Slider(label='Number of images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+                     label="Preprocessor", choices=["Midas", "DPT", "None"], type="value", value="DPT"
+                 )
+                 num_samples = gr.Slider(
+                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
+                     step=256,
+                 )
                  preprocess_resolution = gr.Slider(
-                     label='Preprocess resolution',
-                     minimum=128,
-                     maximum=512,
-                     value=384,
-                     step=1)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     label="Preprocess resolution", minimum=128, maximum=512, value=384, step=1
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -102,13 +82,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='depth',
+         api_name="depth",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='depth')
+
+     model = Model(task_name="depth")
      demo = create_demo(model.process_depth)
      demo.queue().launch()
app_ip2p.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,50 +17,30 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button('Run')
-             with gr.Accordion('Advanced options', open=False):
-                 num_samples = gr.Slider(label='Number of images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
+                 num_samples = gr.Slider(
+                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     step=256,
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -89,13 +74,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='ip2p',
+         api_name="ip2p",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='ip2p')
+
+     model = Model(task_name="ip2p")
      demo = create_demo(model.process_ip2p)
      demo.queue().launch()
app_lineart.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,70 +17,46 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button('Run')
-             with gr.Accordion('Advanced options', open=False):
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
                  preprocessor_name = gr.Radio(
-                     label='Preprocessor',
+                     label="Preprocessor",
                      choices=[
-                         'Lineart',
-                         'Lineart coarse',
-                         'None',
-                         'Lineart (anime)',
-                         'None (anime)',
+                         "Lineart",
+                         "Lineart coarse",
+                         "None",
+                         "Lineart (anime)",
+                         "None (anime)",
                      ],
-                     type='value',
-                     value='Lineart',
-                     info=
-                     'Note that "Lineart (anime)" and "None (anime)" are for anime base models like Anything-v3.'
+                     type="value",
+                     value="Lineart",
+                     info='Note that "Lineart (anime)" and "None (anime)" are for anime base models like Anything-v3.',
                  )
-                 num_samples = gr.Slider(label='Number of images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+                 num_samples = gr.Slider(
+                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
+                     step=256,
+                 )
                  preprocess_resolution = gr.Slider(
-                     label='Preprocess resolution',
-                     minimum=128,
-                     maximum=512,
-                     value=512,
-                     step=1)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -111,13 +92,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='lineart',
+         api_name="lineart",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='lineart')
+
+     model = Model(task_name="lineart")
      demo = create_demo(model.process_lineart)
      demo.queue().launch()
app_mlsd.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,68 +17,39 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button('Run')
-             with gr.Accordion('Advanced options', open=False):
-                 num_samples = gr.Slider(label='Number of images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
+                 num_samples = gr.Slider(
+                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
+                     step=256,
+                 )
                  preprocess_resolution = gr.Slider(
-                     label='Preprocess resolution',
-                     minimum=128,
-                     maximum=512,
-                     value=512,
-                     step=1)
+                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
+                 )
                  mlsd_value_threshold = gr.Slider(
-                     label='Hough value threshold (MLSD)',
-                     minimum=0.01,
-                     maximum=2.0,
-                     value=0.1,
-                     step=0.01)
+                     label="Hough value threshold (MLSD)", minimum=0.01, maximum=2.0, value=0.1, step=0.01
+                 )
                  mlsd_distance_threshold = gr.Slider(
-                     label='Hough distance threshold (MLSD)',
-                     minimum=0.01,
-                     maximum=20.0,
-                     value=0.1,
-                     step=0.01)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     label="Hough distance threshold (MLSD)", minimum=0.01, maximum=20.0, value=0.1, step=0.01
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -110,13 +86,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='mlsd',
+         api_name="mlsd",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='MLSD')
+
+     model = Model(task_name="MLSD")
      demo = create_demo(model.process_mlsd)
      demo.queue().launch()
app_normal.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,60 +17,36 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button('Run')
-             with gr.Accordion('Advanced options', open=False):
-                 preprocessor_name = gr.Radio(label='Preprocessor',
-                                              choices=['NormalBae', 'None'],
-                                              type='value',
-                                              value='NormalBae')
-                 num_samples = gr.Slider(label='Images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
+                 preprocessor_name = gr.Radio(
+                     label="Preprocessor", choices=["NormalBae", "None"], type="value", value="NormalBae"
+                 )
+                 num_samples = gr.Slider(
+                     label="Images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
+                     step=256,
+                 )
                  preprocess_resolution = gr.Slider(
-                     label='Preprocess resolution',
-                     minimum=128,
-                     maximum=512,
-                     value=384,
-                     step=1)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     label="Preprocess resolution", minimum=128, maximum=512, value=384, step=1
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -101,13 +82,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='normal',
+         api_name="normal",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='NormalBae')
+
+     model = Model(task_name="NormalBae")
      demo = create_demo(model.process_normal)
      demo.queue().launch()
app_openpose.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,60 +17,36 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button(label='Run')
-             with gr.Accordion('Advanced options', open=False):
-                 preprocessor_name = gr.Radio(label='Preprocessor',
-                                              choices=['Openpose', 'None'],
-                                              type='value',
-                                              value='Openpose')
-                 num_samples = gr.Slider(label='Number of images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button(label="Run")
+             with gr.Accordion("Advanced options", open=False):
+                 preprocessor_name = gr.Radio(
+                     label="Preprocessor", choices=["Openpose", "None"], type="value", value="Openpose"
+                 )
+                 num_samples = gr.Slider(
+                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
+                     step=256,
+                 )
                  preprocess_resolution = gr.Slider(
-                     label='Preprocess resolution',
-                     minimum=128,
-                     maximum=512,
-                     value=512,
-                     step=1)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -101,13 +82,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='openpose',
+         api_name="openpose",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='Openpose')
+
+     model = Model(task_name="Openpose")
      demo = create_demo(model.process_openpose)
      demo.queue().launch()
app_scribble.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,61 +17,36 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button('Run')
-             with gr.Accordion('Advanced options', open=False):
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
                  preprocessor_name = gr.Radio(
-                     label='Preprocessor',
-                     choices=['HED', 'PidiNet', 'None'],
-                     type='value',
-                     value='HED')
-                 num_samples = gr.Slider(label='Number of images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+                     label="Preprocessor", choices=["HED", "PidiNet", "None"], type="value", value="HED"
+                 )
+                 num_samples = gr.Slider(
+                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
+                     step=256,
+                 )
                  preprocess_resolution = gr.Slider(
-                     label='Preprocess resolution',
-                     minimum=128,
-                     maximum=512,
-                     value=512,
-                     step=1)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -102,13 +82,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='scribble',
+         api_name="scribble",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='scribble')
+
+     model = Model(task_name="scribble")
      demo = create_demo(model.process_scribble)
      demo.queue().launch()
app_scribble_interactive.py CHANGED
@@ -3,8 +3,13 @@
  import gradio as gr
  import numpy as np

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -16,62 +21,46 @@ def create_demo(process):
      with gr.Blocks() as demo:
          with gr.Row():
              with gr.Column():
-                 canvas_width = gr.Slider(label='Canvas width',
-                                          minimum=256,
-                                          maximum=MAX_IMAGE_RESOLUTION,
-                                          value=DEFAULT_IMAGE_RESOLUTION,
-                                          step=1)
-                 canvas_height = gr.Slider(label='Canvas height',
-                                           minimum=256,
-                                           maximum=MAX_IMAGE_RESOLUTION,
-                                           value=DEFAULT_IMAGE_RESOLUTION,
-                                           step=1)
-                 create_button = gr.Button('Open drawing canvas!')
-                 image = gr.Image(tool='sketch', brush_radius=10)
-                 prompt = gr.Textbox(label='Prompt')
-                 run_button = gr.Button('Run')
-                 with gr.Accordion('Advanced options', open=False):
-                     num_samples = gr.Slider(label='Number of images',
-                                             minimum=1,
-                                             maximum=MAX_NUM_IMAGES,
-                                             value=DEFAULT_NUM_IMAGES,
-                                             step=1)
+                 canvas_width = gr.Slider(
+                     label="Canvas width",
+                     minimum=256,
+                     maximum=MAX_IMAGE_RESOLUTION,
+                     value=DEFAULT_IMAGE_RESOLUTION,
+                     step=1,
+                 )
+                 canvas_height = gr.Slider(
+                     label="Canvas height",
+                     minimum=256,
+                     maximum=MAX_IMAGE_RESOLUTION,
+                     value=DEFAULT_IMAGE_RESOLUTION,
+                     step=1,
+                 )
+                 create_button = gr.Button("Open drawing canvas!")
+                 image = gr.Image(tool="sketch", brush_radius=10)
+                 prompt = gr.Textbox(label="Prompt")
+                 run_button = gr.Button("Run")
+                 with gr.Accordion("Advanced options", open=False):
+                     num_samples = gr.Slider(
+                         label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                     )
                      image_resolution = gr.Slider(
-                         label='Image resolution',
+                         label="Image resolution",
                          minimum=256,
                          maximum=MAX_IMAGE_RESOLUTION,
                          value=DEFAULT_IMAGE_RESOLUTION,
-                         step=256)
-                     num_steps = gr.Slider(label='Number of steps',
-                                           minimum=1,
-                                           maximum=100,
-                                           value=20,
-                                           step=1)
-                     guidance_scale = gr.Slider(label='Guidance scale',
-                                                minimum=0.1,
-                                                maximum=30.0,
-                                                value=9.0,
-                                                step=0.1)
-                     seed = gr.Slider(label='Seed',
-                                      minimum=0,
-                                      maximum=MAX_SEED,
-                                      step=1,
-                                      value=0)
-                     randomize_seed = gr.Checkbox(label='Randomize seed',
-                                                  value=True)
-                     a_prompt = gr.Textbox(
-                         label='Additional prompt',
-                         value='best quality, extremely detailed')
+                         step=256,
+                     )
+                     num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                     a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                      n_prompt = gr.Textbox(
-                         label='Negative prompt',
-                         value=
-                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                         label="Negative prompt",
+                         value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                      )
              with gr.Column():
-                 result = gr.Gallery(label='Output',
-                                     show_label=False,
-                                     columns=2,
-                                     object_fit='scale-down')
+                 result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")

          create_button.click(
              fn=create_canvas,
@@ -118,8 +107,9 @@ def create_demo(process):
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='scribble')
+
+     model = Model(task_name="scribble")
      demo = create_demo(model.process_scribble_interactive)
      demo.queue().launch()
app_segmentation.py CHANGED
@@ -2,8 +2,13 @@

  import gradio as gr

- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
-                       MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
+ from settings import (
+     DEFAULT_IMAGE_RESOLUTION,
+     DEFAULT_NUM_IMAGES,
+     MAX_IMAGE_RESOLUTION,
+     MAX_NUM_IMAGES,
+     MAX_SEED,
+ )
  from utils import randomize_seed_fn


@@ -12,60 +17,36 @@ def create_demo(process):
      with gr.Row():
          with gr.Column():
              image = gr.Image()
-             prompt = gr.Textbox(label='Prompt')
-             run_button = gr.Button('Run')
-             with gr.Accordion('Advanced options', open=False):
-                 preprocessor_name = gr.Radio(label='Preprocessor',
-                                              choices=['UPerNet', 'None'],
-                                              type='value',
-                                              value='UPerNet')
-                 num_samples = gr.Slider(label='Number of images',
-                                         minimum=1,
-                                         maximum=MAX_NUM_IMAGES,
-                                         value=DEFAULT_NUM_IMAGES,
-                                         step=1)
+             prompt = gr.Textbox(label="Prompt")
+             run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
+                 preprocessor_name = gr.Radio(
+                     label="Preprocessor", choices=["UPerNet", "None"], type="value", value="UPerNet"
+                 )
+                 num_samples = gr.Slider(
+                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
+                 )
                  image_resolution = gr.Slider(
-                     label='Image resolution',
+                     label="Image resolution",
                      minimum=256,
                      maximum=MAX_IMAGE_RESOLUTION,
                      value=DEFAULT_IMAGE_RESOLUTION,
-                     step=256)
+                     step=256,
+                 )
                  preprocess_resolution = gr.Slider(
-                     label='Preprocess resolution',
-                     minimum=128,
-                     maximum=512,
-                     value=512,
-                     step=1)
-                 num_steps = gr.Slider(label='Number of steps',
-                                       minimum=1,
-                                       maximum=100,
-                                       value=20,
-                                       step=1)
-                 guidance_scale = gr.Slider(label='Guidance scale',
-                                            minimum=0.1,
-                                            maximum=30.0,
-                                            value=9.0,
-                                            step=0.1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=MAX_SEED,
-                                  step=1,
-                                  value=0)
-                 randomize_seed = gr.Checkbox(label='Randomize seed',
-                                              value=True)
-                 a_prompt = gr.Textbox(
-                     label='Additional prompt',
-                     value='best quality, extremely detailed')
+                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
+                 )
+                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
+                 guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
                  n_prompt = gr.Textbox(
-                     label='Negative prompt',
-                     value=
-                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
+                     label="Negative prompt",
+                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                  )
          with gr.Column():
-             result = gr.Gallery(label='Output',
-                                 show_label=False,
-                                 columns=2,
-                                 object_fit='scale-down')
+             result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
      inputs = [
          image,
          prompt,
@@ -101,13 +82,14 @@ def create_demo(process):
          fn=process,
          inputs=inputs,
          outputs=result,
-         api_name='segmentation',
+         api_name="segmentation",
      )
      return demo


- if __name__ == '__main__':
+ if __name__ == "__main__":
      from model import Model
-     model = Model(task_name='segmentation')
+
+     model = Model(task_name="segmentation")
      demo = create_demo(model.process_segmentation)
      demo.queue().launch()
app_shuffle.py CHANGED
@@ -2,8 +2,13 @@
2
 
3
  import gradio as gr
4
 
5
- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
6
- MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
 
 
 
 
 
7
  from utils import randomize_seed_fn
8
 
9
 
@@ -12,55 +17,33 @@ def create_demo(process):
12
  with gr.Row():
13
  with gr.Column():
14
  image = gr.Image()
15
- prompt = gr.Textbox(label='Prompt')
16
- run_button = gr.Button('Run')
17
- with gr.Accordion('Advanced options', open=False):
18
  preprocessor_name = gr.Radio(
19
- label='Preprocessor',
20
- choices=['ContentShuffle', 'None'],
21
- type='value',
22
- value='ContentShuffle')
23
- num_samples = gr.Slider(label='Number of images',
24
- minimum=1,
25
- maximum=MAX_NUM_IMAGES,
26
- value=DEFAULT_NUM_IMAGES,
27
- step=1)
28
  image_resolution = gr.Slider(
29
- label='Image resolution',
30
  minimum=256,
31
  maximum=MAX_IMAGE_RESOLUTION,
32
  value=DEFAULT_IMAGE_RESOLUTION,
33
- step=256)
34
- num_steps = gr.Slider(label='Number of steps',
35
- minimum=1,
36
- maximum=100,
37
- value=20,
38
- step=1)
39
- guidance_scale = gr.Slider(label='Guidance scale',
40
- minimum=0.1,
41
- maximum=30.0,
42
- value=9.0,
43
- step=0.1)
44
- seed = gr.Slider(label='Seed',
45
- minimum=0,
46
- maximum=MAX_SEED,
47
- step=1,
48
- value=0)
49
- randomize_seed = gr.Checkbox(label='Randomize seed',
50
- value=True)
51
- a_prompt = gr.Textbox(
52
- label='Additional prompt',
53
- value='best quality, extremely detailed')
54
  n_prompt = gr.Textbox(
55
- label='Negative prompt',
56
- value=
57
- 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
58
  )
59
  with gr.Column():
60
- result = gr.Gallery(label='Output',
61
- show_label=False,
62
- columns=2,
63
- object_fit='scale-down')
64
  inputs = [
65
  image,
66
  prompt,
@@ -95,13 +78,14 @@ def create_demo(process):
95
  fn=process,
96
  inputs=inputs,
97
  outputs=result,
98
- api_name='content-shuffle',
99
  )
100
  return demo
101
 
102
 
103
- if __name__ == '__main__':
104
  from model import Model
105
- model = Model(task_name='shuffle')
 
106
  demo = create_demo(model.process_shuffle)
107
  demo.queue().launch()
 
2
 
3
  import gradio as gr
4
 
5
+ from settings import (
6
+ DEFAULT_IMAGE_RESOLUTION,
7
+ DEFAULT_NUM_IMAGES,
8
+ MAX_IMAGE_RESOLUTION,
9
+ MAX_NUM_IMAGES,
10
+ MAX_SEED,
11
+ )
12
  from utils import randomize_seed_fn
13
 
14
 
 
17
  with gr.Row():
18
  with gr.Column():
19
  image = gr.Image()
20
+ prompt = gr.Textbox(label="Prompt")
21
+ run_button = gr.Button("Run")
22
+ with gr.Accordion("Advanced options", open=False):
23
  preprocessor_name = gr.Radio(
24
+ label="Preprocessor", choices=["ContentShuffle", "None"], type="value", value="ContentShuffle"
25
+ )
26
+ num_samples = gr.Slider(
27
+ label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
28
+ )
29
  image_resolution = gr.Slider(
30
+ label="Image resolution",
31
  minimum=256,
32
  maximum=MAX_IMAGE_RESOLUTION,
33
  value=DEFAULT_IMAGE_RESOLUTION,
34
+ step=256,
35
+ )
36
+ num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
37
+ guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
38
+ seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
39
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
40
+ a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
41
  n_prompt = gr.Textbox(
42
+ label="Negative prompt",
43
+ value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
 
44
  )
45
  with gr.Column():
46
+ result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
47
  inputs = [
48
  image,
49
  prompt,
 
78
  fn=process,
79
  inputs=inputs,
80
  outputs=result,
81
+ api_name="content-shuffle",
82
  )
83
  return demo
84
 
85
 
86
+ if __name__ == "__main__":
87
  from model import Model
88
+
89
+ model = Model(task_name="shuffle")
90
  demo = create_demo(model.process_shuffle)
91
  demo.queue().launch()
app_softedge.py CHANGED
@@ -2,8 +2,13 @@
2
 
3
  import gradio as gr
4
 
5
- from settings import (DEFAULT_IMAGE_RESOLUTION, DEFAULT_NUM_IMAGES,
6
- MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES, MAX_SEED)
7
  from utils import randomize_seed_fn
8
 
9
 
@@ -12,66 +17,45 @@ def create_demo(process):
12
  with gr.Row():
13
  with gr.Column():
14
  image = gr.Image()
15
- prompt = gr.Textbox(label='Prompt')
16
- run_button = gr.Button('Run')
17
- with gr.Accordion('Advanced options', open=False):
18
- preprocessor_name = gr.Radio(label='Preprocessor',
19
- choices=[
20
- 'HED',
21
- 'PidiNet',
22
- 'HED safe',
23
- 'PidiNet safe',
24
- 'None',
25
- ],
26
- type='value',
27
- value='PidiNet')
28
- num_samples = gr.Slider(label='Number of images',
29
- minimum=1,
30
- maximum=MAX_NUM_IMAGES,
31
- value=DEFAULT_NUM_IMAGES,
32
- step=1)
33
  image_resolution = gr.Slider(
34
- label='Image resolution',
35
  minimum=256,
36
  maximum=MAX_IMAGE_RESOLUTION,
37
  value=DEFAULT_IMAGE_RESOLUTION,
38
- step=256)
 
39
  preprocess_resolution = gr.Slider(
40
- label='Preprocess resolution',
41
- minimum=128,
42
- maximum=512,
43
- value=512,
44
- step=1)
45
- num_steps = gr.Slider(label='Number of steps',
46
- minimum=1,
47
- maximum=100,
48
- value=20,
49
- step=1)
50
- guidance_scale = gr.Slider(label='Guidance scale',
51
- minimum=0.1,
52
- maximum=30.0,
53
- value=9.0,
54
- step=0.1)
55
- seed = gr.Slider(label='Seed',
56
- minimum=0,
57
- maximum=MAX_SEED,
58
- step=1,
59
- value=0)
60
- randomize_seed = gr.Checkbox(label='Randomize seed',
61
- value=True)
62
- a_prompt = gr.Textbox(
63
- label='Additional prompt',
64
- value='best quality, extremely detailed')
65
  n_prompt = gr.Textbox(
66
- label='Negative prompt',
67
- value=
68
- 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
69
  )
70
  with gr.Column():
71
- result = gr.Gallery(label='Output',
72
- show_label=False,
73
- columns=2,
74
- object_fit='scale-down')
75
  inputs = [
76
  image,
77
  prompt,
@@ -107,13 +91,14 @@ def create_demo(process):
107
  fn=process,
108
  inputs=inputs,
109
  outputs=result,
110
- api_name='softedge',
111
  )
112
  return demo
113
 
114
 
115
- if __name__ == '__main__':
116
  from model import Model
117
- model = Model(task_name='softedge')
 
118
  demo = create_demo(model.process_softedge)
119
  demo.queue().launch()
 
2
 
3
  import gradio as gr
4
 
5
+ from settings import (
6
+ DEFAULT_IMAGE_RESOLUTION,
7
+ DEFAULT_NUM_IMAGES,
8
+ MAX_IMAGE_RESOLUTION,
9
+ MAX_NUM_IMAGES,
10
+ MAX_SEED,
11
+ )
12
  from utils import randomize_seed_fn
13
 
14
 
 
17
  with gr.Row():
18
  with gr.Column():
19
  image = gr.Image()
20
+ prompt = gr.Textbox(label="Prompt")
21
+ run_button = gr.Button("Run")
22
+ with gr.Accordion("Advanced options", open=False):
23
+ preprocessor_name = gr.Radio(
24
+ label="Preprocessor",
25
+ choices=[
26
+ "HED",
27
+ "PidiNet",
28
+ "HED safe",
29
+ "PidiNet safe",
30
+ "None",
31
+ ],
32
+ type="value",
33
+ value="PidiNet",
34
+ )
35
+ num_samples = gr.Slider(
36
+ label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
37
+ )
38
  image_resolution = gr.Slider(
39
+ label="Image resolution",
40
  minimum=256,
41
  maximum=MAX_IMAGE_RESOLUTION,
42
  value=DEFAULT_IMAGE_RESOLUTION,
43
+ step=256,
44
+ )
45
  preprocess_resolution = gr.Slider(
46
+ label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
47
+ )
48
+ num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
49
+ guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
50
+ seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
51
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
52
+ a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
53
  n_prompt = gr.Textbox(
54
+ label="Negative prompt",
55
+ value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
 
56
  )
57
  with gr.Column():
58
+ result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
59
  inputs = [
60
  image,
61
  prompt,
 
91
  fn=process,
92
  inputs=inputs,
93
  outputs=result,
94
+ api_name="softedge",
95
  )
96
  return demo
97
 
98
 
99
+ if __name__ == "__main__":
100
  from model import Model
101
+
102
+ model = Model(task_name="softedge")
103
  demo = create_demo(model.process_softedge)
104
  demo.queue().launch()
depth_estimator.py CHANGED
@@ -8,17 +8,17 @@ from cv_utils import resize_image
8
 
9
  class DepthEstimator:
10
  def __init__(self):
11
- self.model = pipeline('depth-estimation')
12
 
13
  def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
14
- detect_resolution = kwargs.pop('detect_resolution', 512)
15
- image_resolution = kwargs.pop('image_resolution', 512)
16
  image = np.array(image)
17
  image = HWC3(image)
18
  image = resize_image(image, resolution=detect_resolution)
19
  image = PIL.Image.fromarray(image)
20
  image = self.model(image)
21
- image = image['depth']
22
  image = np.array(image)
23
  image = HWC3(image)
24
  image = resize_image(image, resolution=image_resolution)
 
8
 
9
  class DepthEstimator:
10
  def __init__(self):
11
+ self.model = pipeline("depth-estimation")
12
 
13
  def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
14
+ detect_resolution = kwargs.pop("detect_resolution", 512)
15
+ image_resolution = kwargs.pop("image_resolution", 512)
16
  image = np.array(image)
17
  image = HWC3(image)
18
  image = resize_image(image, resolution=detect_resolution)
19
  image = PIL.Image.fromarray(image)
20
  image = self.model(image)
21
+ image = image["depth"]
22
  image = np.array(image)
23
  image = HWC3(image)
24
  image = resize_image(image, resolution=image_resolution)
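
For orientation, a minimal usage sketch of the reformatted DepthEstimator; the zero-filled input array and the explicit resolutions are illustrative assumptions, not part of this commit:

import numpy as np

from depth_estimator import DepthEstimator

estimator = DepthEstimator()  # wraps transformers' pipeline("depth-estimation")
rgb = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder RGB image
# the kwargs mirror the defaults popped inside __call__ above
depth = estimator(rgb, detect_resolution=512, image_resolution=512)  # PIL.Image.Image
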
image_segmentor.py CHANGED
@@ -10,30 +10,24 @@ from cv_utils import resize_image
10
 
11
  class ImageSegmentor:
12
  def __init__(self):
13
- self.image_processor = AutoImageProcessor.from_pretrained(
14
- 'openmmlab/upernet-convnext-small')
15
- self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained(
16
- 'openmmlab/upernet-convnext-small')
17
 
18
  @torch.inference_mode()
19
  def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
20
- detect_resolution = kwargs.pop('detect_resolution', 512)
21
- image_resolution = kwargs.pop('image_resolution', 512)
22
  image = HWC3(image)
23
  image = resize_image(image, resolution=detect_resolution)
24
  image = PIL.Image.fromarray(image)
25
 
26
- pixel_values = self.image_processor(image,
27
- return_tensors='pt').pixel_values
28
  outputs = self.image_segmentor(pixel_values)
29
- seg = self.image_processor.post_process_semantic_segmentation(
30
- outputs, target_sizes=[image.size[::-1]])[0]
31
  color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
32
  for label, color in enumerate(ade_palette()):
33
  color_seg[seg == label, :] = color
34
  color_seg = color_seg.astype(np.uint8)
35
 
36
- color_seg = resize_image(color_seg,
37
- resolution=image_resolution,
38
- interpolation=cv2.INTER_NEAREST)
39
  return PIL.Image.fromarray(color_seg)
 
10
 
11
  class ImageSegmentor:
12
  def __init__(self):
13
+ self.image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
14
+ self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
15
 
16
  @torch.inference_mode()
17
  def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
18
+ detect_resolution = kwargs.pop("detect_resolution", 512)
19
+ image_resolution = kwargs.pop("image_resolution", 512)
20
  image = HWC3(image)
21
  image = resize_image(image, resolution=detect_resolution)
22
  image = PIL.Image.fromarray(image)
23
 
24
+ pixel_values = self.image_processor(image, return_tensors="pt").pixel_values
 
25
  outputs = self.image_segmentor(pixel_values)
26
+ seg = self.image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
 
27
  color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
28
  for label, color in enumerate(ade_palette()):
29
  color_seg[seg == label, :] = color
30
  color_seg = color_seg.astype(np.uint8)
31
 
32
+ color_seg = resize_image(color_seg, resolution=image_resolution, interpolation=cv2.INTER_NEAREST)
33
  return PIL.Image.fromarray(color_seg)
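
Analogously, a hedged sketch of driving the reformatted ImageSegmentor (placeholder input, illustrative resolutions):

import numpy as np

from image_segmentor import ImageSegmentor

segmentor = ImageSegmentor()  # UPerNet (ConvNeXt-small) processor and model
rgb = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder RGB image
# returns the ADE-palette-colorized segmentation map as a PIL image
seg = segmentor(rgb, detect_resolution=512, image_resolution=512)
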
model.py CHANGED
@@ -6,28 +6,31 @@ import numpy as np
6
  import PIL.Image
7
  import torch
8
  from controlnet_aux.util import HWC3
9
- from diffusers import (ControlNetModel, DiffusionPipeline,
10
- StableDiffusionControlNetPipeline,
11
- UniPCMultistepScheduler)
12
 
13
  from cv_utils import resize_image
14
  from preprocessor import Preprocessor
15
  from settings import MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES
16
 
17
  CONTROLNET_MODEL_IDS = {
18
- 'Openpose': 'lllyasviel/control_v11p_sd15_openpose',
19
- 'Canny': 'lllyasviel/control_v11p_sd15_canny',
20
- 'MLSD': 'lllyasviel/control_v11p_sd15_mlsd',
21
- 'scribble': 'lllyasviel/control_v11p_sd15_scribble',
22
- 'softedge': 'lllyasviel/control_v11p_sd15_softedge',
23
- 'segmentation': 'lllyasviel/control_v11p_sd15_seg',
24
- 'depth': 'lllyasviel/control_v11f1p_sd15_depth',
25
- 'NormalBae': 'lllyasviel/control_v11p_sd15_normalbae',
26
- 'lineart': 'lllyasviel/control_v11p_sd15_lineart',
27
- 'lineart_anime': 'lllyasviel/control_v11p_sd15s2_lineart_anime',
28
- 'shuffle': 'lllyasviel/control_v11e_sd15_shuffle',
29
- 'ip2p': 'lllyasviel/control_v11e_sd15_ip2p',
30
- 'inpaint': 'lllyasviel/control_v11e_sd15_inpaint',
31
  }
32
 
33
 
@@ -37,31 +40,28 @@ def download_all_controlnet_weights() -> None:
37
 
38
 
39
  class Model:
40
- def __init__(self,
41
- base_model_id: str = 'runwayml/stable-diffusion-v1-5',
42
- task_name: str = 'Canny'):
43
- self.device = torch.device(
44
- 'cuda:0' if torch.cuda.is_available() else 'cpu')
45
- self.base_model_id = ''
46
- self.task_name = ''
47
  self.pipe = self.load_pipe(base_model_id, task_name)
48
  self.preprocessor = Preprocessor()
49
 
50
  def load_pipe(self, base_model_id: str, task_name) -> DiffusionPipeline:
51
- if base_model_id == self.base_model_id and task_name == self.task_name and hasattr(
52
- self, 'pipe') and self.pipe is not None:
53
  return self.pipe
54
  model_id = CONTROLNET_MODEL_IDS[task_name]
55
- controlnet = ControlNetModel.from_pretrained(model_id,
56
- torch_dtype=torch.float16)
57
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
58
- base_model_id,
59
- safety_checker=None,
60
- controlnet=controlnet,
61
- torch_dtype=torch.float16)
62
- pipe.scheduler = UniPCMultistepScheduler.from_config(
63
- pipe.scheduler.config)
64
- if self.device.type == 'cuda':
65
  pipe.enable_xformers_memory_efficient_attention()
66
  pipe.to(self.device)
67
  torch.cuda.empty_cache()
@@ -85,13 +85,12 @@ class Model:
85
  def load_controlnet_weight(self, task_name: str) -> None:
86
  if task_name == self.task_name:
87
  return
88
- if self.pipe is not None and hasattr(self.pipe, 'controlnet'):
89
  del self.pipe.controlnet
90
  torch.cuda.empty_cache()
91
  gc.collect()
92
  model_id = CONTROLNET_MODEL_IDS[task_name]
93
- controlnet = ControlNetModel.from_pretrained(model_id,
94
- torch_dtype=torch.float16)
95
  controlnet.to(self.device)
96
  torch.cuda.empty_cache()
97
  gc.collect()
@@ -102,10 +101,10 @@ class Model:
102
  if not prompt:
103
  prompt = additional_prompt
104
  else:
105
- prompt = f'{prompt}, {additional_prompt}'
106
  return prompt
107
 
108
- @torch.autocast('cuda')
109
  def run_pipe(
110
  self,
111
  prompt: str,
@@ -117,13 +116,15 @@ class Model:
117
  seed: int,
118
  ) -> list[PIL.Image.Image]:
119
  generator = torch.Generator().manual_seed(seed)
120
- return self.pipe(prompt=prompt,
121
- negative_prompt=negative_prompt,
122
- guidance_scale=guidance_scale,
123
- num_images_per_prompt=num_images,
124
- num_inference_steps=num_steps,
125
- generator=generator,
126
- image=control_image).images
127
 
128
  @torch.inference_mode()
129
  def process_canny(
@@ -147,13 +148,12 @@ class Model:
147
  if num_images > MAX_NUM_IMAGES:
148
  raise ValueError
149
 
150
- self.preprocessor.load('Canny')
151
- control_image = self.preprocessor(image=image,
152
- low_threshold=low_threshold,
153
- high_threshold=high_threshold,
154
- detect_resolution=image_resolution)
155
 
156
- self.load_controlnet_weight('Canny')
157
  results = self.run_pipe(
158
  prompt=self.get_prompt(prompt, additional_prompt),
159
  negative_prompt=negative_prompt,
@@ -188,7 +188,7 @@ class Model:
188
  if num_images > MAX_NUM_IMAGES:
189
  raise ValueError
190
 
191
- self.preprocessor.load('MLSD')
192
  control_image = self.preprocessor(
193
  image=image,
194
  image_resolution=image_resolution,
@@ -196,7 +196,7 @@ class Model:
196
  thr_v=value_threshold,
197
  thr_d=distance_threshold,
198
  )
199
- self.load_controlnet_weight('MLSD')
200
  results = self.run_pipe(
201
  prompt=self.get_prompt(prompt, additional_prompt),
202
  negative_prompt=negative_prompt,
@@ -230,11 +230,11 @@ class Model:
230
  if num_images > MAX_NUM_IMAGES:
231
  raise ValueError
232
 
233
- if preprocessor_name == 'None':
234
  image = HWC3(image)
235
  image = resize_image(image, resolution=image_resolution)
236
  control_image = PIL.Image.fromarray(image)
237
- elif preprocessor_name == 'HED':
238
  self.preprocessor.load(preprocessor_name)
239
  control_image = self.preprocessor(
240
  image=image,
@@ -242,7 +242,7 @@ class Model:
242
  detect_resolution=preprocess_resolution,
243
  scribble=False,
244
  )
245
- elif preprocessor_name == 'PidiNet':
246
  self.preprocessor.load(preprocessor_name)
247
  control_image = self.preprocessor(
248
  image=image,
@@ -250,7 +250,7 @@ class Model:
250
  detect_resolution=preprocess_resolution,
251
  safe=False,
252
  )
253
- self.load_controlnet_weight('scribble')
254
  results = self.run_pipe(
255
  prompt=self.get_prompt(prompt, additional_prompt),
256
  negative_prompt=negative_prompt,
@@ -282,12 +282,12 @@ class Model:
282
  if num_images > MAX_NUM_IMAGES:
283
  raise ValueError
284
 
285
- image = image_and_mask['mask']
286
  image = HWC3(image)
287
  image = resize_image(image, resolution=image_resolution)
288
  control_image = PIL.Image.fromarray(image)
289
 
290
- self.load_controlnet_weight('scribble')
291
  results = self.run_pipe(
292
  prompt=self.get_prompt(prompt, additional_prompt),
293
  negative_prompt=negative_prompt,
@@ -321,22 +321,22 @@ class Model:
321
  if num_images > MAX_NUM_IMAGES:
322
  raise ValueError
323
 
324
- if preprocessor_name == 'None':
325
  image = HWC3(image)
326
  image = resize_image(image, resolution=image_resolution)
327
  control_image = PIL.Image.fromarray(image)
328
- elif preprocessor_name in ['HED', 'HED safe']:
329
- safe = 'safe' in preprocessor_name
330
- self.preprocessor.load('HED')
331
  control_image = self.preprocessor(
332
  image=image,
333
  image_resolution=image_resolution,
334
  detect_resolution=preprocess_resolution,
335
  scribble=safe,
336
  )
337
- elif preprocessor_name in ['PidiNet', 'PidiNet safe']:
338
- safe = 'safe' in preprocessor_name
339
- self.preprocessor.load('PidiNet')
340
  control_image = self.preprocessor(
341
  image=image,
342
  image_resolution=image_resolution,
@@ -345,7 +345,7 @@ class Model:
345
  )
346
  else:
347
  raise ValueError
348
- self.load_controlnet_weight('softedge')
349
  results = self.run_pipe(
350
  prompt=self.get_prompt(prompt, additional_prompt),
351
  negative_prompt=negative_prompt,
@@ -379,19 +379,19 @@ class Model:
379
  if num_images > MAX_NUM_IMAGES:
380
  raise ValueError
381
 
382
- if preprocessor_name == 'None':
383
  image = HWC3(image)
384
  image = resize_image(image, resolution=image_resolution)
385
  control_image = PIL.Image.fromarray(image)
386
  else:
387
- self.preprocessor.load('Openpose')
388
  control_image = self.preprocessor(
389
  image=image,
390
  image_resolution=image_resolution,
391
  detect_resolution=preprocess_resolution,
392
  hand_and_face=True,
393
  )
394
- self.load_controlnet_weight('Openpose')
395
  results = self.run_pipe(
396
  prompt=self.get_prompt(prompt, additional_prompt),
397
  negative_prompt=negative_prompt,
@@ -425,7 +425,7 @@ class Model:
425
  if num_images > MAX_NUM_IMAGES:
426
  raise ValueError
427
 
428
- if preprocessor_name == 'None':
429
  image = HWC3(image)
430
  image = resize_image(image, resolution=image_resolution)
431
  control_image = PIL.Image.fromarray(image)
@@ -436,7 +436,7 @@ class Model:
436
  image_resolution=image_resolution,
437
  detect_resolution=preprocess_resolution,
438
  )
439
- self.load_controlnet_weight('segmentation')
440
  results = self.run_pipe(
441
  prompt=self.get_prompt(prompt, additional_prompt),
442
  negative_prompt=negative_prompt,
@@ -470,7 +470,7 @@ class Model:
470
  if num_images > MAX_NUM_IMAGES:
471
  raise ValueError
472
 
473
- if preprocessor_name == 'None':
474
  image = HWC3(image)
475
  image = resize_image(image, resolution=image_resolution)
476
  control_image = PIL.Image.fromarray(image)
@@ -481,7 +481,7 @@ class Model:
481
  image_resolution=image_resolution,
482
  detect_resolution=preprocess_resolution,
483
  )
484
- self.load_controlnet_weight('depth')
485
  results = self.run_pipe(
486
  prompt=self.get_prompt(prompt, additional_prompt),
487
  negative_prompt=negative_prompt,
@@ -515,18 +515,18 @@ class Model:
515
  if num_images > MAX_NUM_IMAGES:
516
  raise ValueError
517
 
518
- if preprocessor_name == 'None':
519
  image = HWC3(image)
520
  image = resize_image(image, resolution=image_resolution)
521
  control_image = PIL.Image.fromarray(image)
522
  else:
523
- self.preprocessor.load('NormalBae')
524
  control_image = self.preprocessor(
525
  image=image,
526
  image_resolution=image_resolution,
527
  detect_resolution=preprocess_resolution,
528
  )
529
- self.load_controlnet_weight('NormalBae')
530
  results = self.run_pipe(
531
  prompt=self.get_prompt(prompt, additional_prompt),
532
  negative_prompt=negative_prompt,
@@ -560,30 +560,30 @@ class Model:
560
  if num_images > MAX_NUM_IMAGES:
561
  raise ValueError
562
 
563
- if preprocessor_name in ['None', 'None (anime)']:
564
  image = HWC3(image)
565
  image = resize_image(image, resolution=image_resolution)
566
  control_image = PIL.Image.fromarray(image)
567
- elif preprocessor_name in ['Lineart', 'Lineart coarse']:
568
- coarse = 'coarse' in preprocessor_name
569
- self.preprocessor.load('Lineart')
570
  control_image = self.preprocessor(
571
  image=image,
572
  image_resolution=image_resolution,
573
  detect_resolution=preprocess_resolution,
574
  coarse=coarse,
575
  )
576
- elif preprocessor_name == 'Lineart (anime)':
577
- self.preprocessor.load('LineartAnime')
578
  control_image = self.preprocessor(
579
  image=image,
580
  image_resolution=image_resolution,
581
  detect_resolution=preprocess_resolution,
582
  )
583
- if 'anime' in preprocessor_name:
584
- self.load_controlnet_weight('lineart_anime')
585
  else:
586
- self.load_controlnet_weight('lineart')
587
  results = self.run_pipe(
588
  prompt=self.get_prompt(prompt, additional_prompt),
589
  negative_prompt=negative_prompt,
@@ -616,7 +616,7 @@ class Model:
616
  if num_images > MAX_NUM_IMAGES:
617
  raise ValueError
618
 
619
- if preprocessor_name == 'None':
620
  image = HWC3(image)
621
  image = resize_image(image, resolution=image_resolution)
622
  control_image = PIL.Image.fromarray(image)
@@ -626,7 +626,7 @@ class Model:
626
  image=image,
627
  image_resolution=image_resolution,
628
  )
629
- self.load_controlnet_weight('shuffle')
630
  results = self.run_pipe(
631
  prompt=self.get_prompt(prompt, additional_prompt),
632
  negative_prompt=negative_prompt,
@@ -661,7 +661,7 @@ class Model:
661
  image = HWC3(image)
662
  image = resize_image(image, resolution=image_resolution)
663
  control_image = PIL.Image.fromarray(image)
664
- self.load_controlnet_weight('ip2p')
665
  results = self.run_pipe(
666
  prompt=self.get_prompt(prompt, additional_prompt),
667
  negative_prompt=negative_prompt,
 
6
  import PIL.Image
7
  import torch
8
  from controlnet_aux.util import HWC3
9
+ from diffusers import (
10
+ ControlNetModel,
11
+ DiffusionPipeline,
12
+ StableDiffusionControlNetPipeline,
13
+ UniPCMultistepScheduler,
14
+ )
15
 
16
  from cv_utils import resize_image
17
  from preprocessor import Preprocessor
18
  from settings import MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES
19
 
20
  CONTROLNET_MODEL_IDS = {
21
+ "Openpose": "lllyasviel/control_v11p_sd15_openpose",
22
+ "Canny": "lllyasviel/control_v11p_sd15_canny",
23
+ "MLSD": "lllyasviel/control_v11p_sd15_mlsd",
24
+ "scribble": "lllyasviel/control_v11p_sd15_scribble",
25
+ "softedge": "lllyasviel/control_v11p_sd15_softedge",
26
+ "segmentation": "lllyasviel/control_v11p_sd15_seg",
27
+ "depth": "lllyasviel/control_v11f1p_sd15_depth",
28
+ "NormalBae": "lllyasviel/control_v11p_sd15_normalbae",
29
+ "lineart": "lllyasviel/control_v11p_sd15_lineart",
30
+ "lineart_anime": "lllyasviel/control_v11p_sd15s2_lineart_anime",
31
+ "shuffle": "lllyasviel/control_v11e_sd15_shuffle",
32
+ "ip2p": "lllyasviel/control_v11e_sd15_ip2p",
33
+ "inpaint": "lllyasviel/control_v11e_sd15_inpaint",
34
  }
35
 
36
 
 
40
 
41
 
42
  class Model:
43
+ def __init__(self, base_model_id: str = "runwayml/stable-diffusion-v1-5", task_name: str = "Canny"):
44
+ self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
45
+ self.base_model_id = ""
46
+ self.task_name = ""
47
  self.pipe = self.load_pipe(base_model_id, task_name)
48
  self.preprocessor = Preprocessor()
49
 
50
  def load_pipe(self, base_model_id: str, task_name) -> DiffusionPipeline:
51
+ if (
52
+ base_model_id == self.base_model_id
53
+ and task_name == self.task_name
54
+ and hasattr(self, "pipe")
55
+ and self.pipe is not None
56
+ ):
57
  return self.pipe
58
  model_id = CONTROLNET_MODEL_IDS[task_name]
59
+ controlnet = ControlNetModel.from_pretrained(model_id, torch_dtype=torch.float16)
 
60
  pipe = StableDiffusionControlNetPipeline.from_pretrained(
61
+ base_model_id, safety_checker=None, controlnet=controlnet, torch_dtype=torch.float16
62
+ )
63
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
64
+ if self.device.type == "cuda":
65
  pipe.enable_xformers_memory_efficient_attention()
66
  pipe.to(self.device)
67
  torch.cuda.empty_cache()
 
85
  def load_controlnet_weight(self, task_name: str) -> None:
86
  if task_name == self.task_name:
87
  return
88
+ if self.pipe is not None and hasattr(self.pipe, "controlnet"):
89
  del self.pipe.controlnet
90
  torch.cuda.empty_cache()
91
  gc.collect()
92
  model_id = CONTROLNET_MODEL_IDS[task_name]
93
+ controlnet = ControlNetModel.from_pretrained(model_id, torch_dtype=torch.float16)
 
94
  controlnet.to(self.device)
95
  torch.cuda.empty_cache()
96
  gc.collect()
 
101
  if not prompt:
102
  prompt = additional_prompt
103
  else:
104
+ prompt = f"{prompt}, {additional_prompt}"
105
  return prompt
106
 
107
+ @torch.autocast("cuda")
108
  def run_pipe(
109
  self,
110
  prompt: str,
 
116
  seed: int,
117
  ) -> list[PIL.Image.Image]:
118
  generator = torch.Generator().manual_seed(seed)
119
+ return self.pipe(
120
+ prompt=prompt,
121
+ negative_prompt=negative_prompt,
122
+ guidance_scale=guidance_scale,
123
+ num_images_per_prompt=num_images,
124
+ num_inference_steps=num_steps,
125
+ generator=generator,
126
+ image=control_image,
127
+ ).images
128
 
129
  @torch.inference_mode()
130
  def process_canny(
 
148
  if num_images > MAX_NUM_IMAGES:
149
  raise ValueError
150
 
151
+ self.preprocessor.load("Canny")
152
+ control_image = self.preprocessor(
153
+ image=image, low_threshold=low_threshold, high_threshold=high_threshold, detect_resolution=image_resolution
154
+ )
 
155
 
156
+ self.load_controlnet_weight("Canny")
157
  results = self.run_pipe(
158
  prompt=self.get_prompt(prompt, additional_prompt),
159
  negative_prompt=negative_prompt,
 
188
  if num_images > MAX_NUM_IMAGES:
189
  raise ValueError
190
 
191
+ self.preprocessor.load("MLSD")
192
  control_image = self.preprocessor(
193
  image=image,
194
  image_resolution=image_resolution,
 
196
  thr_v=value_threshold,
197
  thr_d=distance_threshold,
198
  )
199
+ self.load_controlnet_weight("MLSD")
200
  results = self.run_pipe(
201
  prompt=self.get_prompt(prompt, additional_prompt),
202
  negative_prompt=negative_prompt,
 
230
  if num_images > MAX_NUM_IMAGES:
231
  raise ValueError
232
 
233
+ if preprocessor_name == "None":
234
  image = HWC3(image)
235
  image = resize_image(image, resolution=image_resolution)
236
  control_image = PIL.Image.fromarray(image)
237
+ elif preprocessor_name == "HED":
238
  self.preprocessor.load(preprocessor_name)
239
  control_image = self.preprocessor(
240
  image=image,
 
242
  detect_resolution=preprocess_resolution,
243
  scribble=False,
244
  )
245
+ elif preprocessor_name == "PidiNet":
246
  self.preprocessor.load(preprocessor_name)
247
  control_image = self.preprocessor(
248
  image=image,
 
250
  detect_resolution=preprocess_resolution,
251
  safe=False,
252
  )
253
+ self.load_controlnet_weight("scribble")
254
  results = self.run_pipe(
255
  prompt=self.get_prompt(prompt, additional_prompt),
256
  negative_prompt=negative_prompt,
 
282
  if num_images > MAX_NUM_IMAGES:
283
  raise ValueError
284
 
285
+ image = image_and_mask["mask"]
286
  image = HWC3(image)
287
  image = resize_image(image, resolution=image_resolution)
288
  control_image = PIL.Image.fromarray(image)
289
 
290
+ self.load_controlnet_weight("scribble")
291
  results = self.run_pipe(
292
  prompt=self.get_prompt(prompt, additional_prompt),
293
  negative_prompt=negative_prompt,
 
321
  if num_images > MAX_NUM_IMAGES:
322
  raise ValueError
323
 
324
+ if preprocessor_name == "None":
325
  image = HWC3(image)
326
  image = resize_image(image, resolution=image_resolution)
327
  control_image = PIL.Image.fromarray(image)
328
+ elif preprocessor_name in ["HED", "HED safe"]:
329
+ safe = "safe" in preprocessor_name
330
+ self.preprocessor.load("HED")
331
  control_image = self.preprocessor(
332
  image=image,
333
  image_resolution=image_resolution,
334
  detect_resolution=preprocess_resolution,
335
  scribble=safe,
336
  )
337
+ elif preprocessor_name in ["PidiNet", "PidiNet safe"]:
338
+ safe = "safe" in preprocessor_name
339
+ self.preprocessor.load("PidiNet")
340
  control_image = self.preprocessor(
341
  image=image,
342
  image_resolution=image_resolution,
 
345
  )
346
  else:
347
  raise ValueError
348
+ self.load_controlnet_weight("softedge")
349
  results = self.run_pipe(
350
  prompt=self.get_prompt(prompt, additional_prompt),
351
  negative_prompt=negative_prompt,
 
379
  if num_images > MAX_NUM_IMAGES:
380
  raise ValueError
381
 
382
+ if preprocessor_name == "None":
383
  image = HWC3(image)
384
  image = resize_image(image, resolution=image_resolution)
385
  control_image = PIL.Image.fromarray(image)
386
  else:
387
+ self.preprocessor.load("Openpose")
388
  control_image = self.preprocessor(
389
  image=image,
390
  image_resolution=image_resolution,
391
  detect_resolution=preprocess_resolution,
392
  hand_and_face=True,
393
  )
394
+ self.load_controlnet_weight("Openpose")
395
  results = self.run_pipe(
396
  prompt=self.get_prompt(prompt, additional_prompt),
397
  negative_prompt=negative_prompt,
 
425
  if num_images > MAX_NUM_IMAGES:
426
  raise ValueError
427
 
428
+ if preprocessor_name == "None":
429
  image = HWC3(image)
430
  image = resize_image(image, resolution=image_resolution)
431
  control_image = PIL.Image.fromarray(image)
 
436
  image_resolution=image_resolution,
437
  detect_resolution=preprocess_resolution,
438
  )
439
+ self.load_controlnet_weight("segmentation")
440
  results = self.run_pipe(
441
  prompt=self.get_prompt(prompt, additional_prompt),
442
  negative_prompt=negative_prompt,
 
470
  if num_images > MAX_NUM_IMAGES:
471
  raise ValueError
472
 
473
+ if preprocessor_name == "None":
474
  image = HWC3(image)
475
  image = resize_image(image, resolution=image_resolution)
476
  control_image = PIL.Image.fromarray(image)
 
481
  image_resolution=image_resolution,
482
  detect_resolution=preprocess_resolution,
483
  )
484
+ self.load_controlnet_weight("depth")
485
  results = self.run_pipe(
486
  prompt=self.get_prompt(prompt, additional_prompt),
487
  negative_prompt=negative_prompt,
 
515
  if num_images > MAX_NUM_IMAGES:
516
  raise ValueError
517
 
518
+ if preprocessor_name == "None":
519
  image = HWC3(image)
520
  image = resize_image(image, resolution=image_resolution)
521
  control_image = PIL.Image.fromarray(image)
522
  else:
523
+ self.preprocessor.load("NormalBae")
524
  control_image = self.preprocessor(
525
  image=image,
526
  image_resolution=image_resolution,
527
  detect_resolution=preprocess_resolution,
528
  )
529
+ self.load_controlnet_weight("NormalBae")
530
  results = self.run_pipe(
531
  prompt=self.get_prompt(prompt, additional_prompt),
532
  negative_prompt=negative_prompt,
 
560
  if num_images > MAX_NUM_IMAGES:
561
  raise ValueError
562
 
563
+ if preprocessor_name in ["None", "None (anime)"]:
564
  image = HWC3(image)
565
  image = resize_image(image, resolution=image_resolution)
566
  control_image = PIL.Image.fromarray(image)
567
+ elif preprocessor_name in ["Lineart", "Lineart coarse"]:
568
+ coarse = "coarse" in preprocessor_name
569
+ self.preprocessor.load("Lineart")
570
  control_image = self.preprocessor(
571
  image=image,
572
  image_resolution=image_resolution,
573
  detect_resolution=preprocess_resolution,
574
  coarse=coarse,
575
  )
576
+ elif preprocessor_name == "Lineart (anime)":
577
+ self.preprocessor.load("LineartAnime")
578
  control_image = self.preprocessor(
579
  image=image,
580
  image_resolution=image_resolution,
581
  detect_resolution=preprocess_resolution,
582
  )
583
+ if "anime" in preprocessor_name:
584
+ self.load_controlnet_weight("lineart_anime")
585
  else:
586
+ self.load_controlnet_weight("lineart")
587
  results = self.run_pipe(
588
  prompt=self.get_prompt(prompt, additional_prompt),
589
  negative_prompt=negative_prompt,
 
616
  if num_images > MAX_NUM_IMAGES:
617
  raise ValueError
618
 
619
+ if preprocessor_name == "None":
620
  image = HWC3(image)
621
  image = resize_image(image, resolution=image_resolution)
622
  control_image = PIL.Image.fromarray(image)
 
626
  image=image,
627
  image_resolution=image_resolution,
628
  )
629
+ self.load_controlnet_weight("shuffle")
630
  results = self.run_pipe(
631
  prompt=self.get_prompt(prompt, additional_prompt),
632
  negative_prompt=negative_prompt,
 
661
  image = HWC3(image)
662
  image = resize_image(image, resolution=image_resolution)
663
  control_image = PIL.Image.fromarray(image)
664
+ self.load_controlnet_weight("ip2p")
665
  results = self.run_pipe(
666
  prompt=self.get_prompt(prompt, additional_prompt),
667
  negative_prompt=negative_prompt,
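
Taken together, Model builds one StableDiffusionControlNetPipeline and swaps only the ControlNet weights when the task changes. A minimal sketch of that flow (task names come from CONTROLNET_MODEL_IDS above; the second call is illustrative):

from model import Model

model = Model(task_name="Canny")  # SD 1.5 pipeline with the Canny ControlNet attached
# switching tasks frees the old ControlNet, then loads the new one in fp16
model.load_controlnet_weight("depth")
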
preprocessor.py CHANGED
@@ -3,10 +3,18 @@ import gc
3
  import numpy as np
4
  import PIL.Image
5
  import torch
6
- from controlnet_aux import (CannyDetector, ContentShuffleDetector, HEDdetector,
7
- LineartAnimeDetector, LineartDetector,
8
- MidasDetector, MLSDdetector, NormalBaeDetector,
9
- OpenposeDetector, PidiNetDetector)
10
  from controlnet_aux.util import HWC3
11
 
12
  from cv_utils import resize_image
@@ -15,38 +23,38 @@ from image_segmentor import ImageSegmentor
15
 
16
 
17
  class Preprocessor:
18
- MODEL_ID = 'lllyasviel/Annotators'
19
 
20
  def __init__(self):
21
  self.model = None
22
- self.name = ''
23
 
24
  def load(self, name: str) -> None:
25
  if name == self.name:
26
  return
27
- if name == 'HED':
28
  self.model = HEDdetector.from_pretrained(self.MODEL_ID)
29
- elif name == 'Midas':
30
  self.model = MidasDetector.from_pretrained(self.MODEL_ID)
31
- elif name == 'MLSD':
32
  self.model = MLSDdetector.from_pretrained(self.MODEL_ID)
33
- elif name == 'Openpose':
34
  self.model = OpenposeDetector.from_pretrained(self.MODEL_ID)
35
- elif name == 'PidiNet':
36
  self.model = PidiNetDetector.from_pretrained(self.MODEL_ID)
37
- elif name == 'NormalBae':
38
  self.model = NormalBaeDetector.from_pretrained(self.MODEL_ID)
39
- elif name == 'Lineart':
40
  self.model = LineartDetector.from_pretrained(self.MODEL_ID)
41
- elif name == 'LineartAnime':
42
  self.model = LineartAnimeDetector.from_pretrained(self.MODEL_ID)
43
- elif name == 'Canny':
44
  self.model = CannyDetector()
45
- elif name == 'ContentShuffle':
46
  self.model = ContentShuffleDetector()
47
- elif name == 'DPT':
48
  self.model = DepthEstimator()
49
- elif name == 'UPerNet':
50
  self.model = ImageSegmentor()
51
  else:
52
  raise ValueError
@@ -55,17 +63,17 @@ class Preprocessor:
55
  self.name = name
56
 
57
  def __call__(self, image: PIL.Image.Image, **kwargs) -> PIL.Image.Image:
58
- if self.name == 'Canny':
59
- if 'detect_resolution' in kwargs:
60
- detect_resolution = kwargs.pop('detect_resolution')
61
  image = np.array(image)
62
  image = HWC3(image)
63
  image = resize_image(image, resolution=detect_resolution)
64
  image = self.model(image, **kwargs)
65
  return PIL.Image.fromarray(image)
66
- elif self.name == 'Midas':
67
- detect_resolution = kwargs.pop('detect_resolution', 512)
68
- image_resolution = kwargs.pop('image_resolution', 512)
69
  image = np.array(image)
70
  image = HWC3(image)
71
  image = resize_image(image, resolution=detect_resolution)
 
3
  import numpy as np
4
  import PIL.Image
5
  import torch
6
+ from controlnet_aux import (
7
+ CannyDetector,
8
+ ContentShuffleDetector,
9
+ HEDdetector,
10
+ LineartAnimeDetector,
11
+ LineartDetector,
12
+ MidasDetector,
13
+ MLSDdetector,
14
+ NormalBaeDetector,
15
+ OpenposeDetector,
16
+ PidiNetDetector,
17
+ )
18
  from controlnet_aux.util import HWC3
19
 
20
  from cv_utils import resize_image
 
23
 
24
 
25
  class Preprocessor:
26
+ MODEL_ID = "lllyasviel/Annotators"
27
 
28
  def __init__(self):
29
  self.model = None
30
+ self.name = ""
31
 
32
  def load(self, name: str) -> None:
33
  if name == self.name:
34
  return
35
+ if name == "HED":
36
  self.model = HEDdetector.from_pretrained(self.MODEL_ID)
37
+ elif name == "Midas":
38
  self.model = MidasDetector.from_pretrained(self.MODEL_ID)
39
+ elif name == "MLSD":
40
  self.model = MLSDdetector.from_pretrained(self.MODEL_ID)
41
+ elif name == "Openpose":
42
  self.model = OpenposeDetector.from_pretrained(self.MODEL_ID)
43
+ elif name == "PidiNet":
44
  self.model = PidiNetDetector.from_pretrained(self.MODEL_ID)
45
+ elif name == "NormalBae":
46
  self.model = NormalBaeDetector.from_pretrained(self.MODEL_ID)
47
+ elif name == "Lineart":
48
  self.model = LineartDetector.from_pretrained(self.MODEL_ID)
49
+ elif name == "LineartAnime":
50
  self.model = LineartAnimeDetector.from_pretrained(self.MODEL_ID)
51
+ elif name == "Canny":
52
  self.model = CannyDetector()
53
+ elif name == "ContentShuffle":
54
  self.model = ContentShuffleDetector()
55
+ elif name == "DPT":
56
  self.model = DepthEstimator()
57
+ elif name == "UPerNet":
58
  self.model = ImageSegmentor()
59
  else:
60
  raise ValueError
 
63
  self.name = name
64
 
65
  def __call__(self, image: PIL.Image.Image, **kwargs) -> PIL.Image.Image:
66
+ if self.name == "Canny":
67
+ if "detect_resolution" in kwargs:
68
+ detect_resolution = kwargs.pop("detect_resolution")
69
  image = np.array(image)
70
  image = HWC3(image)
71
  image = resize_image(image, resolution=detect_resolution)
72
  image = self.model(image, **kwargs)
73
  return PIL.Image.fromarray(image)
74
+ elif self.name == "Midas":
75
+ detect_resolution = kwargs.pop("detect_resolution", 512)
76
+ image_resolution = kwargs.pop("image_resolution", 512)
77
  image = np.array(image)
78
  image = HWC3(image)
79
  image = resize_image(image, resolution=detect_resolution)
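
A short sketch of how the demos drive Preprocessor; the Canny thresholds are illustrative values matching the detector's usual defaults:

import PIL.Image

from preprocessor import Preprocessor

preprocessor = Preprocessor()
preprocessor.load("Canny")  # instantiates CannyDetector; cached until a different name is loaded
image = PIL.Image.new("RGB", (512, 512))  # placeholder input
# detect_resolution is popped here; the remaining kwargs go to the detector
edges = preprocessor(image, detect_resolution=512, low_threshold=100, high_threshold=200)
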
settings.py CHANGED
@@ -2,17 +2,14 @@ import os
2
 
3
  import numpy as np
4
 
5
- DEFAULT_MODEL_ID = os.getenv('DEFAULT_MODEL_ID',
6
- 'runwayml/stable-diffusion-v1-5')
7
 
8
- MAX_NUM_IMAGES = int(os.getenv('MAX_NUM_IMAGES', '3'))
9
- DEFAULT_NUM_IMAGES = min(MAX_NUM_IMAGES,
10
- int(os.getenv('DEFAULT_NUM_IMAGES', '3')))
11
- MAX_IMAGE_RESOLUTION = int(os.getenv('MAX_IMAGE_RESOLUTION', '768'))
12
- DEFAULT_IMAGE_RESOLUTION = min(
13
- MAX_IMAGE_RESOLUTION, int(os.getenv('DEFAULT_IMAGE_RESOLUTION', '768')))
14
 
15
- ALLOW_CHANGING_BASE_MODEL = os.getenv('SPACE_ID') != 'hysts/ControlNet-v1-1'
16
- SHOW_DUPLICATE_BUTTON = os.getenv('SHOW_DUPLICATE_BUTTON') == '1'
17
 
18
  MAX_SEED = np.iinfo(np.int32).max
 
2
 
3
  import numpy as np
4
 
5
+ DEFAULT_MODEL_ID = os.getenv("DEFAULT_MODEL_ID", "runwayml/stable-diffusion-v1-5")
 
6
 
7
+ MAX_NUM_IMAGES = int(os.getenv("MAX_NUM_IMAGES", "3"))
8
+ DEFAULT_NUM_IMAGES = min(MAX_NUM_IMAGES, int(os.getenv("DEFAULT_NUM_IMAGES", "3")))
9
+ MAX_IMAGE_RESOLUTION = int(os.getenv("MAX_IMAGE_RESOLUTION", "768"))
10
+ DEFAULT_IMAGE_RESOLUTION = min(MAX_IMAGE_RESOLUTION, int(os.getenv("DEFAULT_IMAGE_RESOLUTION", "768")))
11
 
12
+ ALLOW_CHANGING_BASE_MODEL = os.getenv("SPACE_ID") != "hysts/ControlNet-v1-1"
13
+ SHOW_DUPLICATE_BUTTON = os.getenv("SHOW_DUPLICATE_BUTTON") == "1"
14
 
15
  MAX_SEED = np.iinfo(np.int32).max
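
Because every limit above is read from the environment at import time, a deployment can be tuned without touching the code. A sketch with illustrative values:

import os

# must be set before settings is first imported
os.environ["MAX_NUM_IMAGES"] = "1"
os.environ["DEFAULT_IMAGE_RESOLUTION"] = "512"

import settings

assert settings.DEFAULT_NUM_IMAGES == 1  # clamped by min(MAX_NUM_IMAGES, ...)
assert settings.DEFAULT_IMAGE_RESOLUTION == 512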