Update README.md
Browse files
README.md
CHANGED
@@ -71,6 +71,7 @@ import torch
|
|
71 |
from diffusers import VisualClozePipeline
|
72 |
from diffusers.utils import load_image
|
73 |
|
|
|
74 |
# Load in-context images (make sure the paths are correct and accessible)
|
75 |
image_paths = [
|
76 |
# in-context examples
|
@@ -122,20 +123,20 @@ Example with Virtual Try-On:
|
|
122 |
import torch
|
123 |
from diffusers import VisualClozePipeline
|
124 |
from diffusers.utils import load_image
|
125 |
-
|
126 |
|
127 |
# Load in-context images (make sure the paths are correct and accessible)
|
128 |
image_paths = [
|
129 |
# in-context examples
|
130 |
[
|
131 |
-
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/00700_00.jpg'),
|
132 |
-
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/03673_00.jpg'),
|
133 |
-
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/00700_00_tryon_catvton_0.jpg'),
|
134 |
],
|
135 |
# query with the target image
|
136 |
[
|
137 |
-
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/00555_00.jpg'),
|
138 |
-
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/12265_00.jpg'),
|
139 |
None
|
140 |
],
|
141 |
]
|
@@ -153,11 +154,11 @@ image_result = pipe(
|
|
153 |
task_prompt=task_prompt,
|
154 |
content_prompt=content_prompt,
|
155 |
image=image_paths,
|
156 |
-
|
157 |
-
|
158 |
-
upsampling_strength=0.
|
159 |
guidance_scale=30,
|
160 |
-
num_inference_steps=
|
161 |
max_sequence_length=512,
|
162 |
generator=torch.Generator("cpu").manual_seed(0)
|
163 |
).images[0]
|
|
|
71 |
from diffusers import VisualClozePipeline
|
72 |
from diffusers.utils import load_image
|
73 |
|
74 |
+
|
75 |
# Load in-context images (make sure the paths are correct and accessible)
|
76 |
image_paths = [
|
77 |
# in-context examples
|
|
|
123 |
import torch
|
124 |
from diffusers import VisualClozePipeline
|
125 |
from diffusers.utils import load_image
|
126 |
+
|
127 |
|
128 |
# Load in-context images (make sure the paths are correct and accessible)
|
129 |
image_paths = [
|
130 |
# in-context examples
|
131 |
[
|
132 |
+
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/00700_00.jpg'),
|
133 |
+
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/03673_00.jpg'),
|
134 |
+
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/00700_00_tryon_catvton_0.jpg'),
|
135 |
],
|
136 |
# query with the target image
|
137 |
[
|
138 |
+
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/00555_00.jpg'),
|
139 |
+
load_image('https://github.com/lzyhha/VisualCloze/raw/main/examples/examples/tryon/12265_00.jpg'),
|
140 |
None
|
141 |
],
|
142 |
]
|
|
|
154 |
task_prompt=task_prompt,
|
155 |
content_prompt=content_prompt,
|
156 |
image=image_paths,
|
157 |
+
upsampling_height=1632,
|
158 |
+
upsampling_width=1232,
|
159 |
+
upsampling_strength=0.3,
|
160 |
guidance_scale=30,
|
161 |
+
num_inference_steps=30,
|
162 |
max_sequence_length=512,
|
163 |
generator=torch.Generator("cpu").manual_seed(0)
|
164 |
).images[0]
|