Upload 50 files
Browse files- DrawBridgeAPI/__init__.py +0 -0
- DrawBridgeAPI/api_server.py +440 -0
- DrawBridgeAPI/app.py +105 -0
- DrawBridgeAPI/backend/FLUX_falai.py +100 -0
- DrawBridgeAPI/backend/FLUX_replicate.py +112 -0
- DrawBridgeAPI/backend/SD_A1111_webui.py +88 -0
- DrawBridgeAPI/backend/SD_civitai_API.py +108 -0
- DrawBridgeAPI/backend/__init__.py +909 -0
- DrawBridgeAPI/backend/base.py +984 -0
- DrawBridgeAPI/backend/comfyui.py +423 -0
- DrawBridgeAPI/backend/liblibai.py +205 -0
- DrawBridgeAPI/backend/midjourney.py +175 -0
- DrawBridgeAPI/backend/novelai.py +161 -0
- DrawBridgeAPI/backend/seaart.py +139 -0
- DrawBridgeAPI/backend/tusiart.py +166 -0
- DrawBridgeAPI/backend/yunjie.py +133 -0
- DrawBridgeAPI/base_config.py +334 -0
- DrawBridgeAPI/comfyui_workflows/diaopony-hr.json +213 -0
- DrawBridgeAPI/comfyui_workflows/diaopony-hr_reflex.json +7 -0
- DrawBridgeAPI/comfyui_workflows/diaopony-tipo.json +132 -0
- DrawBridgeAPI/comfyui_workflows/diaopony-tipo_reflex.json +6 -0
- DrawBridgeAPI/comfyui_workflows/flux-dev.json +94 -0
- DrawBridgeAPI/comfyui_workflows/flux-dev_reflex.json +6 -0
- DrawBridgeAPI/comfyui_workflows/flux-schnell.json +94 -0
- DrawBridgeAPI/comfyui_workflows/flux-schnell_reflex.json +6 -0
- DrawBridgeAPI/comfyui_workflows/flux修手.json +254 -0
- DrawBridgeAPI/comfyui_workflows/flux修手_reflex.json +5 -0
- DrawBridgeAPI/comfyui_workflows/sd3.5_txt2img.json +187 -0
- DrawBridgeAPI/comfyui_workflows/sd3.5_txt2img_reflex.json +7 -0
- DrawBridgeAPI/comfyui_workflows/sdbase_img2img.json +122 -0
- DrawBridgeAPI/comfyui_workflows/sdbase_img2img_reflex.json +9 -0
- DrawBridgeAPI/comfyui_workflows/sdbase_txt2img.json +107 -0
- DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_hr_fix.json +266 -0
- DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_hr_fix_reflex.json +13 -0
- DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_reflex.json +8 -0
- DrawBridgeAPI/comfyui_workflows/创意融字 工作流Jianan_创意融字海报.json +1789 -0
- DrawBridgeAPI/config_example.yaml +208 -0
- DrawBridgeAPI/locales/__init__.py +10 -0
- DrawBridgeAPI/locales/zh/LC_MESSAGES/messages.po +122 -0
- DrawBridgeAPI/ui/__init__.py +0 -0
- DrawBridgeAPI/utils/__init__.py +91 -0
- DrawBridgeAPI/utils/custom_class.py +28 -0
- DrawBridgeAPI/utils/exceptions.py +15 -0
- DrawBridgeAPI/utils/llm_caption_requirements.txt +9 -0
- DrawBridgeAPI/utils/llm_captions.py +236 -0
- DrawBridgeAPI/utils/request_model.py +153 -0
- DrawBridgeAPI/utils/shared.py +5 -0
- DrawBridgeAPI/utils/tagger-requirements.txt +5 -0
- DrawBridgeAPI/utils/tagger.py +272 -0
- DrawBridgeAPI/utils/topaz.py +66 -0
DrawBridgeAPI/__init__.py
ADDED
File without changes
|
DrawBridgeAPI/api_server.py
ADDED
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
import os
import httpx
import asyncio
import time
import traceback
import json
import itertools
import argparse
import uvicorn
import logging
import warnings
import uuid
import aiofiles
import gradio
import threading

# Placeholder credentials so backends that *require* these env vars can start
# even when the operator has not supplied real tokens.
os.environ['CIVITAI_API_TOKEN'] = 'kunkun'
os.environ['FAL_KEY'] = 'Daisuki'
path_env = os.getenv("CONF_PATH")

from .utils import request_model, topaz, run_later
from .base_config import setup_logger, init_instance

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, RedirectResponse
from fastapi.exceptions import HTTPException
from pathlib import Path

from .locales import _

app = FastAPI()

# NOTE(review): argparse runs at import time, so importing this module with
# unrelated sys.argv may fail — confirm it is only ever run as an entry point.
parser = argparse.ArgumentParser(description='Run the FastAPI application.')
parser.add_argument('--host', type=str, default='0.0.0.0',
                    help='The host IP address to listen on (default: 0.0.0.0).')
parser.add_argument('--port', type=int, default=8000,
                    help='The port number to listen on (default: 8000).')
parser.add_argument('--conf', '-c', type=str, default='./config.yaml',
                    help='配置文件路径', dest='conf')

args = parser.parse_args()
port = args.port
host = args.host
# CONF_PATH environment variable takes precedence over the CLI flag.
config_file_path = path_env or args.conf

# The config singleton must be initialised *before* importing .backend below,
# which reads it at import time.
init_instance.init(config_file_path)
config = init_instance.config
redis_client = init_instance.redis_client

from .backend import TaskHandler, Backend, StaticHandler

warnings.filterwarnings("ignore", category=DeprecationWarning)

logger = setup_logger("[API]")
# Silence the framework loggers; this project logs through its own logger.
logging.getLogger("uvicorn.access").disabled = True
logging.getLogger("uvicorn.error").disabled = True
logging.getLogger("fastapi").disabled = True
|
59 |
+
|
60 |
+
|
61 |
+
class Api:
    """SD-WebUI-compatible HTTP API.

    Instantiating this class registers every endpoint on the module-level
    FastAPI ``app`` and dispatches requests to the configured drawing
    backends via ``TaskHandler``.
    """

    def __init__(self):
        self.app = app
        # Bare Backend instance used only to format progress/VRAM replies.
        self.backend_instance = Backend()

        self.add_api_route(
            "/sdapi/v1/txt2img",
            self.txt2img_api,
            methods=["POST"],
            # response_model=request_model.Txt2ImgRequest
        )
        self.add_api_route(
            "/sdapi/v1/img2img",
            self.img2img_api,
            methods=["POST"],
            # response_model=request_model.Img2ImgRequest
        )
        self.add_api_route(
            "/sdapi/v1/sd-models",
            self.get_sd_models,
            methods=["GET"]
        )
        self.add_api_route(
            "/sdapi/v1/progress",
            self.get_progress,
            methods=["GET"]
        )
        self.add_api_route(
            "/sdapi/v1/memory",
            self.get_memory,
            methods=["GET"]
        )
        self.add_api_route(
            "/sdapi/v1/options",
            self.get_options,
            methods=["GET"]
        )
        self.add_api_route(
            "/sdapi/v1/options",
            self.set_options,
            methods=["POST"]
        )
        self.add_api_route(
            "/sdapi/v1/prompt-styles",
            self.get_prompt_styles,
            methods=["GET"]
        )

        # Optional feature endpoints, toggled by server settings.
        if config.server_settings['build_in_tagger']:

            from .utils.tagger import wd_tagger_handler, wd_logger
            self.add_api_route(
                "/tagger/v1/interrogate",
                self.tagger,
                methods=["POST"],
                response_model=request_model.TaggerRequest
            )

        if config.server_settings['llm_caption']['enable']:
            from .utils.llm_captions import llm_logger, joy_caption_handler
            self.add_api_route(
                "/llm/caption",
                self.llm_caption,
                methods=["POST"],
                response_model=request_model.TaggerRequest
            )

        if config.server_settings['build_in_photoai']['exec_path']:
            self.add_api_route(
                "/topazai/image",
                self.topaz_ai,
                methods=["POST"]
            )

    def add_api_route(self, path: str, endpoint, **kwargs):
        """Thin wrapper over FastAPI's ``add_api_route``."""
        return self.app.add_api_route(path, endpoint, **kwargs)

    @staticmethod
    async def generate_handle(data) -> TaskHandler:
        """Build a TaskHandler for a txt2img/img2img payload.

        Honours ``override_settings.sd_model_checkpoint`` as a backend
        selector and appends the prompt fragments of any matched styles.
        """
        model_to_backend = None
        if data['override_settings'].get("sd_model_checkpoint", None):
            model_to_backend = data['override_settings'].get("sd_model_checkpoint", None)

        styles = data.get('styles', [])
        selected_style = []
        selected_comfyui_style = []

        # BUG FIX: was logger.error(styles) — requested styles are not an error.
        logger.debug(styles)

        if styles:
            api_styles = StaticHandler.get_prompt_style()

            for index, i in enumerate(api_styles):
                for style in styles:
                    if style in i['name']:
                        if 'comfyui' in i['name']:
                            logger.info(f"{_('Selected ComfyUI style')} - {i['name']}")
                            selected_comfyui_style.append(i['name'])
                        else:
                            # BUG FIX: the original appended i['name'] (a str) and
                            # then indexed the *string* with ['prompt'] below,
                            # raising TypeError whenever a non-comfyui style
                            # matched. Keep the full style dict instead.
                            selected_style.append(i)

        if selected_style:
            for style_dict in selected_style:
                data['prompt'] = data.get('prompt', '') + style_dict['prompt']
                data['negative_prompt'] = data.get('negative_prompt', '') + style_dict['negative_prompt']

        task_handler = TaskHandler(
            data,
            model_to_backend=model_to_backend,
            comfyui_json=selected_comfyui_style[0].replace('comfyui-work-flows-', '') if selected_comfyui_style else None
        )

        return task_handler

    @staticmethod
    async def txt2img_api(request: request_model.Txt2ImgRequest, api: Request):
        """POST /sdapi/v1/txt2img — run a text-to-image task."""
        data = request.model_dump()
        client_host = api.client.host

        task_handler = await Api.generate_handle(data)

        try:
            logger.info(f"{_('Exec TXT2IMG')} - {client_host}")
            result = await task_handler.txt2img()
        except Exception as e:
            logger.error(traceback.format_exc())
            raise HTTPException(status_code=500, detail=str(e))

        if result is None:
            raise HTTPException(500, detail='Result not found')

        return result

    @staticmethod
    async def img2img_api(request: request_model.Img2ImgRequest, api: Request):
        """POST /sdapi/v1/img2img — run an image-to-image task."""
        data = request.model_dump()
        client_host = api.client.host

        if len(data['init_images']) == 0:
            raise HTTPException(status_code=400, detail=_('IMG2IMG Requires image to start'))

        task_handler = await Api.generate_handle(data)

        try:
            logger.info(f"{_('Exec IMG2IMG')} - {client_host}")
            result = await task_handler.img2img()
        except Exception as e:
            logger.error(traceback.format_exc())
            raise HTTPException(status_code=500, detail=str(e))

        if result is None:
            raise HTTPException(500, detail='Result not found')

        return result

    @staticmethod
    async def get_sd_models():
        """GET /sdapi/v1/sd-models — aggregate model lists from every backend."""
        task_list = []
        path = '/sdapi/v1/sd-models'

        # NOTE: 'reutrn_instance' is TaskHandler's (misspelled) keyword — kept as-is.
        task_handler = TaskHandler({}, None, path, reutrn_instance=True, override_model_select=True)
        instance_list: list[Backend] = await task_handler.txt2img()

        for i in instance_list:
            task_list.append(i.get_models())
        resp = await asyncio.gather(*task_list)

        models_dict = {}
        api_respond = []
        for i in resp:
            models_dict = models_dict | i
            api_respond = api_respond + list(i.values())

        api_respond = list(itertools.chain.from_iterable(api_respond))

        # Merge the fresh model map into the cached one in redis.
        redis_resp: bytes = redis_client.get('models')
        redis_resp: dict = json.loads(redis_resp.decode('utf-8'))
        redis_resp.update(models_dict)
        redis_client.set('models', json.dumps(redis_resp))
        return api_respond

    async def tagger(self, request: request_model.TaggerRequest):
        """POST /tagger/v1/interrogate — WD-tagger caption for an image/URL."""
        from .utils.tagger import wd_tagger_handler, wd_logger

        data = request.model_dump()
        base64_image = await self.download_img_from_url(data)
        caption = await wd_tagger_handler.tagger_main(base64_image, data['threshold'], data['exclude_tags'])
        resp = {}

        resp['caption'] = caption
        wd_logger.info(f"{_('Caption Successful')}, {caption}")
        return JSONResponse(resp)

    async def llm_caption(self, request: request_model.TaggerRequest):
        """POST /llm/caption — natural-language caption via the LLM captioner."""
        from .utils.llm_captions import llm_logger, joy_caption_handler

        data = request.model_dump()
        base64_image = await self.download_img_from_url(data)

        try:
            caption = await joy_caption_handler.get_caption(base64_image, data['exclude_tags'])
        except Exception as e:
            traceback.print_exc()
            raise HTTPException(status_code=500, detail=str(e))

        resp = {}
        resp['llm'] = caption
        llm_logger.info(f"{_('Caption Successful')}, {caption}")
        return JSONResponse(resp)

    async def get_progress(self):
        """GET /sdapi/v1/progress — static 0% progress (not tracked per-task)."""
        return JSONResponse(self.backend_instance.format_progress_api_resp(0.0, time.time()))

    async def get_memory(self):
        """GET /sdapi/v1/memory — placeholder VRAM report."""
        return JSONResponse(self.backend_instance.format_vram_api_resp())

    @staticmethod
    async def get_options():
        """GET /sdapi/v1/options — current backend options."""
        return JSONResponse(StaticHandler.get_backend_options())

    @staticmethod
    async def set_options(request: request_model.SetConfigRequest):
        """POST /sdapi/v1/options — lock all requests to a specific checkpoint."""
        data = request.model_dump()
        if data.get('sd_model_checkpoint', None):
            logger.info(_("Lock to backend has configured"))
            StaticHandler.set_lock_to_backend(data.get('sd_model_checkpoint'))

        return

    @staticmethod
    async def topaz_ai(request: request_model.TopazAiRequest):
        """POST /topazai/image — enhance an image via the local Topaz Photo AI CLI."""
        data = request.model_dump()

        unique_id = str(uuid.uuid4())
        save_dir = Path("saved_images") / unique_id
        processed_dir = save_dir / 'processed'
        save_dir.mkdir(parents=True, exist_ok=True)
        del data['output_folder']

        # BUG FIX: give these defaults so the post-processing below cannot hit
        # unbound locals when neither 'image' nor 'input_folder' is supplied.
        output, error, return_code = None, 'No input supplied', -1

        try:

            if data['image']:
                base64_image = data['image']
                input_image_path = save_dir / f"{unique_id}_image.png"
                async with aiofiles.open(input_image_path, "wb") as image_file:
                    await image_file.write(base64.b64decode(base64_image))
                # BUG FIX: run_in_executor takes a *callable*; the original called
                # topaz.run_tpai(...) inline and handed its result tuple to the
                # executor, running the blocking work on the event loop.
                output, error, return_code = await asyncio.get_running_loop().run_in_executor(
                    None,
                    lambda: topaz.run_tpai(
                        input_folder=str(save_dir.resolve()),
                        output_folder=str(processed_dir.resolve()),
                        **data
                    )
                )
            elif data['input_folder']:
                output, error, return_code = await asyncio.get_running_loop().run_in_executor(
                    None,
                    lambda: topaz.run_tpai(
                        output_folder=str(processed_dir.resolve()),
                        **data
                    )
                )
        except Exception:  # was a bare except: — keep KeyboardInterrupt etc. alive
            traceback.print_exc()
            raise HTTPException(status_code=500, detail="Error occurred while processing the image.")

        if return_code == 0:
            files = list(processed_dir.glob("*"))

            processed_image_path = files[0]
            if processed_image_path.exists():
                async with aiofiles.open(processed_image_path, "rb") as img_file:
                    encoded_image = base64.b64encode(await img_file.read()).decode('utf-8')
                # NOTE(review): rmdir fails on a non-empty dir (multiple outputs,
                # or the file just read is still present) — confirm intent.
                processed_dir.rmdir()
                return {"status": "success", "image": encoded_image}
            else:
                raise HTTPException(status_code=500, detail="Processed image not found.")
        else:
            raise HTTPException(status_code=500, detail=f"Error: {error}")

    async def download_img_from_url(self, data):
        """Return base64 image data; if data['image'] is a URL, fetch it first."""
        base64_image = data['image']

        if data['image'].startswith("http"):
            image_url = data['image']
            logger.info(f"{_('URL detected')}: {image_url}")
            response = await self.backend_instance.http_request(
                "GET",
                image_url,
                format=False
            )

            if response.status_code != 200:
                logger.warning(_("Image download failed!"))

            # NOTE(review): returns *bytes* for the URL path but a str otherwise —
            # downstream consumers appear to accept both; confirm.
            base64_image = base64.b64encode(response.read())

        return base64_image

    @staticmethod
    async def get_prompt_styles():
        """GET /sdapi/v1/prompt-styles — aggregate styles from every backend."""
        task_list = []
        path = '/sdapi/v1/prompt-styles'

        task_handler = TaskHandler({}, None, path, reutrn_instance=True, override_model_select=True)
        instance_list: list[Backend] = await task_handler.txt2img()

        for i in instance_list:
            task_list.append(i.get_all_prompt_style())
        resp = await asyncio.gather(*task_list)

        api_respond = []
        for i in resp:
            api_respond += i

        StaticHandler.set_prompt_style(api_respond)

        return api_respond

    async def init_api(self):
        """Warm the model and prompt-style caches once at startup."""
        await self.get_sd_models()
        await self.get_prompt_styles()
|
398 |
+
|
399 |
+
|
400 |
+
# Module-level singleton; constructing it registers every route on `app`.
api_instance = Api()
|
401 |
+
|
402 |
+
|
403 |
+
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
async def proxy(path: str, request: Request):
    """Catch-all route: forward any unmatched SD-WebUI call to a backend."""
    client_host = request.client.host
    handler = TaskHandler({}, request, path)

    try:
        logger.info(f"{_('Exec forwarding')} - {client_host}")
        result = await handler.sd_api()
    except Exception as exc:
        logger.error(traceback.format_exc())
        raise HTTPException(500, detail=str(exc))

    if result is None:
        raise HTTPException(500, detail='Result not found')
    return result
|
420 |
+
|
421 |
+
|
422 |
+
@app.get("/backend-control")
async def get_backend_control(backend: str, key: str, value: bool):
    # Placeholder endpoint: accepts the parameters but performs no action yet.
    pass
|
425 |
+
|
426 |
+
|
427 |
+
@app.on_event("startup")
async def startup_event():
    # Warm-up: query every backend once for models and prompt styles so the
    # caches (redis + StaticHandler) are populated before the first request.
    logger.info(_('Waiting for API initialization'))
    await api_instance.init_api()
    logger.info(_('API initialization completed'))
|
432 |
+
|
433 |
+
|
434 |
+
if __name__ == "__main__":

    # Optional Gradio front-end, currently disabled.
    # if config.server_settings['start_gradio']:
    #     demo = create_gradio_interface(host, port)
    #     app = gradio.mount_gradio_app(api_instance.app, demo, path="/")

    # host/port come from the argparse block evaluated at import time above.
    uvicorn.run(api_instance.app, host=host, port=port)
|
DrawBridgeAPI/app.py
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
import asyncio
import gradio as gr
import os
# Environment must be primed before the project imports below read it.
os.environ['CIVITAI_API_TOKEN'] = 'kunkun'
os.environ['FAL_KEY'] = 'Daisuki'
os.environ['CONF_PATH'] = './config.yaml'
from PIL import Image

import io
import base64
import httpx
from .base_config import init_instance
from .backend import TaskHandler
from .locales import _
|
16 |
+
|
17 |
+
|
18 |
+
class Gradio:
    """Thin HTTP client for the locally running DrawBridge API, used by the UI."""

    def __init__(self, host, port):
        # 0.0.0.0 is a bind address, not a connectable one — use loopback instead.
        self.host = '127.0.0.1' if host == '0.0.0.0' else host
        self.port = port

    def get_caption(self, image):
        """POST the image to the tagger endpoint and return the parsed JSON reply.

        BUG FIX: the original passed ``json=json.loads({"image": image})`` —
        ``json.loads`` raises TypeError on a dict. httpx's ``json=`` kwarg takes
        the dict directly and serializes it itself.
        """
        caption = httpx.post(
            f"http://{self.host}:{self.port}/tagger/v1/interrogate",
            json={"image": image}, timeout=600).json()
        return caption
|
28 |
+
|
29 |
+
|
30 |
+
def format_caption_output(caption_result):
    """Split a caption API response into UI-ready pieces.

    Returns a 3-tuple: the LLM text (empty string if absent), a newline-joined
    "word: score" listing, and a comma-joined keyword string.
    """
    llm_text = caption_result.get("llm", '')
    scores = caption_result["caption"]
    word_scores = "\n".join(f"{word}: {score}" for word, score in scores.items())
    keywords = ",".join(scores)
    return llm_text, word_scores, keywords
|
35 |
+
|
36 |
+
|
37 |
+
async def create_gradio_interface(host, port):
    """Build the Gradio Blocks UI (txt2img + caption tabs) for the running API."""
    gradio_api = Gradio(host, port)
    # Imported lazily to avoid a circular import at module load time.
    from .api_server import api_instance
    all_models = [i['title'] for i in await api_instance.get_sd_models()]
    init_instance.logger.info(f"{_('Server is ready!')} Listen on {host}:{port}")

    async def get_image(model, prompt, negative_prompt, width, height, cfg_scale, steps):
        # Run one txt2img task and decode the first returned base64 image.
        payload = {
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "width": width,
            "height": height,
            "steps": steps,
            "cfg_scale": cfg_scale
        }

        task_handler = TaskHandler(payload, model_to_backend=model)
        result = await task_handler.txt2img()
        image_data = result.get("images")[0]
        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
        return image

    with gr.Blocks() as demo:
        with gr.Tab("txt2img"):
            with gr.Row():
                with gr.Column():
                    model = gr.Dropdown(label="Model", choices=all_models)
                    prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
                    negative_prompt = gr.Textbox(label="Negative Prompt",
                                                 placeholder="Enter your negative prompt here...")
                    width = gr.Slider(label="Width", minimum=64, maximum=2048, step=1, value=512)
                    height = gr.Slider(label="Height", minimum=64, maximum=2048, step=1, value=512)
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=30, step=0.1, value=7.5)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=200, step=1, value=20)
                    generate_button = gr.Button("Generate Image")

                with gr.Column():
                    output_image = gr.Image(label="Generated Image")

            generate_button.click(get_image, [model, prompt, negative_prompt, width, height, cfg_scale, steps],
                                  output_image)

        with gr.Tab("Caption"):
            with gr.Row():
                with gr.Column():
                    input_image = gr.Image(label="Input Image")
                    caption_button = gr.Button("Get Caption")

                with gr.Column():
                    llm_output = gr.Textbox(label="Natural Language Description")
                    word_output_ = gr.Textbox(label="Keywords", lines=6)
                    word_output = gr.Textbox(label="Keywords with Scores", lines=6)

            # Caption flow: POST to the API, then split the reply for the 3 boxes.
            caption_button.click(
                lambda image: format_caption_output(gradio_api.get_caption(image)),
                inputs=[input_image],
                outputs=[llm_output, word_output, word_output_]
            )

    return demo
|
99 |
+
|
100 |
+
|
101 |
+
async def run_gradio(host, port):
    """Build the Gradio UI and serve it on port+1 (the API itself uses `port`)."""
    interface = await create_gradio_interface(host, port)
    interface.launch(server_name=host, server_port=port + 1)


# BUG FIX: the original called asyncio.run(...) unconditionally at module level,
# launching the UI (and blocking) whenever this package module was imported.
# Guard it so the UI only starts when the module is executed directly.
if __name__ == "__main__":
    asyncio.run(run_gradio("127.0.0.1", 5421))
|
DrawBridgeAPI/backend/FLUX_falai.py
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import traceback
|
2 |
+
import piexif
|
3 |
+
import fal_client
|
4 |
+
import os
|
5 |
+
|
6 |
+
from io import BytesIO
|
7 |
+
from .base import Backend
|
8 |
+
|
9 |
+
|
10 |
+
class AIDRAW(Backend):
    """Fal-AI FLUX.1 [schnell] drawing backend."""

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)

        self.model = "Fal-AI - FLUX.1 [schnell]"
        self.model_hash = "c7352c5d2f"
        self.logger = self.setup_logger('[FLUX-FalAI]')

        token = self.config.fal_ai[self.count]
        self.token = token
        self.backend_name = self.config.backend_name_list[2]
        self.workload_name = f"{self.backend_name}-{token}"

    async def get_shape(self):
        """Map width/height to one of Fal-AI's named image_size presets."""
        aspect_ratio = self.width / self.height
        tolerance = 0.05

        def is_close_to_ratio(ratio):
            return abs(aspect_ratio - ratio) < tolerance

        if self.width == self.height:
            return "square"
        # BUG FIX: the original compared width/height only against the landscape
        # ratios (4/3, 16/9), so its "portrait_*" returns were unreachable and
        # portrait inputs always fell through to the default. Test both
        # orientations explicitly.
        elif is_close_to_ratio(4 / 3):
            return "landscape_4_3"
        elif is_close_to_ratio(3 / 4):
            return "portrait_4_3"
        elif is_close_to_ratio(16 / 9):
            return "landscape_16_9"
        elif is_close_to_ratio(9 / 16):
            return "portrait_16_9"
        else:
            return "portrait_4_3"

    async def update_progress(self):
        # Fal-AI offers no progress polling; override the base poller as a no-op.
        pass

    async def get_img_comment(self):
        """Extract the EXIF UserComment of the first downloaded image."""
        image_data = self.img_btyes[0]
        image_file = BytesIO(image_data)
        image_bytes = image_file.getvalue()
        exif_dict = piexif.load(image_bytes)
        try:
            user_comment = exif_dict['Exif'].get(piexif.ExifIFD.UserComment)
        except Exception:
            return 'No Raw Data'

        return user_comment.decode('utf-8', errors='ignore')

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):
        """Download results and shape the reply like an SD-WebUI response."""
        await self.download_img()

        self.format_api_respond()

        self.result = self.build_respond

    async def posting(self):
        """Submit the generation job to Fal-AI and collect the result URLs."""
        os.environ['FAL_KEY'] = self.token
        image_shape = await self.get_shape()
        self.steps = int(self.steps / 3)  # FLUX needs far fewer steps than SD

        handler = await fal_client.submit_async(
            "fal-ai/flux/schnell",
            arguments={
                "prompt": self.tags,
                "image_size": image_shape,
                "seed": self.seed,
                "num_inference_steps": self.steps,
                "num_images": self.total_img_count,
                "enable_safety_checker": True
            },
        )

        response = await handler.get()

        try:
            if response['images']:
                images_list = response['images']
                for i in images_list:
                    self.img_url.append(i['url'])
            else:
                raise ValueError("图片没有被生成,可能是图片没有完成或者结果不可用")
        except Exception as e:
            self.fail_on_requesting = True
            self.logger.error(f"请求API失败: {e}\n{traceback.format_exc()}")

        await self.err_formating_to_sd_style()
|
DrawBridgeAPI/backend/FLUX_replicate.py
ADDED
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import traceback
|
2 |
+
import piexif
|
3 |
+
import os
|
4 |
+
import replicate
|
5 |
+
|
6 |
+
from io import BytesIO
|
7 |
+
|
8 |
+
from .base import Backend
|
9 |
+
|
10 |
+
|
11 |
+
class AIDRAW(Backend):
    """Replicate FLUX.1 [schnell] drawing backend."""

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)

        self.model = "Replicate - FLUX.1 [schnell]"
        self.model_hash = "c7352c5d2f"
        self.logger = self.setup_logger('[FLUX-Replicate]')

        api_token = self.config.replicate[self.count]
        self.token = api_token
        self.backend_name = self.config.backend_name_list[3]
        self.workload_name = f"{self.backend_name}-{api_token}"

    async def get_shape(self):
        """Translate width/height into the nearest Replicate aspect-ratio string."""
        if self.width == self.height:
            return "1:1"

        aspect_ratio = self.width / self.height
        tolerance = 0.05
        # Checked in the same order as the original if/elif chain.
        presets = (
            (16 / 9, "16:9"),
            (21 / 9, "21:9"),
            (2 / 3, "2:3"),
            (3 / 2, "3:2"),
            (4 / 5, "4:5"),
            (5 / 4, "5:4"),
            (9 / 16, "9:16"),
            (9 / 21, "9:21"),
        )
        for target, name in presets:
            if abs(aspect_ratio - target) < tolerance:
                return name
        return "2:3"

    async def update_progress(self):
        # Replicate offers no progress polling; override the base poller as a no-op.
        pass

    async def get_img_comment(self):
        """Extract the EXIF UserComment of the first downloaded image."""
        exif_dict = piexif.load(BytesIO(self.img_btyes[0]).getvalue())
        try:
            user_comment = exif_dict['Exif'].get(piexif.ExifIFD.UserComment)
        except Exception:
            return 'No Raw Data'
        return user_comment.decode('utf-8', errors='ignore')

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):
        """Download results and shape the reply like an SD-WebUI response."""
        await self.download_img()
        self.format_api_respond()
        self.result = self.build_respond

    async def posting(self):
        """Submit the generation job to Replicate and collect the result URLs."""
        os.environ['REPLICATE_API_TOKEN'] = self.token
        aspect = await self.get_shape()

        request_args = {
            "prompt": self.tags,
            "seed": self.seed,
            "num_outputs": self.total_img_count,
            "aspect_ratio": aspect,
            "output_format": 'png',
            "output_quality": 90
        }

        output = await replicate.async_run(
            "black-forest-labs/flux-schnell",
            input=request_args
        )

        try:
            if not output:
                raise ValueError("图片没有被生成,可能是图片没有完成或者结果不可用")
            self.img_url.extend(output)
        except Exception as e:
            self.fail_on_requesting = True
            self.logger.error(f"请求API失败: {e}\n{traceback.format_exc()}")

        await self.err_formating_to_sd_style()
|
112 |
+
|
DrawBridgeAPI/backend/SD_A1111_webui.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from urllib.parse import urlencode
|
2 |
+
|
3 |
+
from .base import Backend
|
4 |
+
|
5 |
+
|
6 |
+
class AIDRAW(Backend):
    """Backend adapter for a Stable Diffusion A1111 WebUI server."""

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)

        self.model = "StableDiffusion"
        self.model_hash = "c7352c5d2f"
        self.logger = self.setup_logger('[SD-A1111]')
        # Per-backend settings block from the global config file.
        self.current_config: dict = self.config.a1111webui_setting

        self.backend_url = self.current_config['backend_url'][self.count]
        name = self.current_config['name'][self.count]
        self.backend_name = self.config.backend_name_list[1]
        self.workload_name = f"{self.backend_name}-{name}"

    async def exec_login(self):
        """Log in to a password-protected WebUI instance.

        :return: tuple ``(success: bool, status_code: int)``.
        """
        login_data = {
            'username': self.current_config['username'][self.count],
            'password': self.current_config['password'][self.count]
        }
        encoded_data = urlencode(login_data)

        response = await self.http_request(
            method="POST",
            target_url=f"{self.backend_url}/login",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "accept": "application/json"
            },
            content=encoded_data,
        )
        if response.get('error') == "error":
            self.logger.warning(f"后端{self.backend_name}登录失败")
            self.fail_on_login = True
            return False, 500
        else:
            self.logger.info(f"后端{self.backend_name}登录成功")
            return True, 200

    async def check_backend_usability(self):
        """Verify the backend is usable, performing a login when one is required."""
        if self.login:
            resp = await self.exec_login()
            # exec_login returns (False, 500) on failure and never None, so the
            # original `resp[0] is None` test could never match; check the flag.
            if not resp[0]:
                self.fail_on_login = True
                self.logger.warning(f"后端{self.backend_name}登陆失败")
                return False, resp

    async def get_backend_working_progress(self):
        """
        Fetch the working progress of the backend (A1111 API layout).

        :return: tuple ``(progress_json, status_code, backend_url, status_code)``.
        """
        self.get_backend_id()
        respond = await self.http_request(
            "GET",
            f"{self.backend_url}/sdapi/v1/options",
            verify=False,
            proxy=False,
            use_aiohttp=False
        )

        self.model = respond['sd_model_checkpoint']
        # NOTE(review): the original stored the entire options dict in
        # model_hash; the checkpoint hash field is what this attribute holds
        # elsewhere — confirm against consumers of model_hash.
        self.model_hash = respond.get('sd_checkpoint_hash', self.model_hash)

        if self.current_config['auth'][self.count]:
            self.login = True
            await self.exec_login()

        api_url = f"{self.backend_url}/sdapi/v1/progress"

        resp = await self.http_request(
            method="GET",
            target_url=api_url,
            format=False
        )

        resp_json = resp.json()
        return resp_json, resp.status_code, self.backend_url, resp.status_code
|
87 |
+
|
88 |
+
|
DrawBridgeAPI/backend/SD_civitai_API.py
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import traceback
|
2 |
+
import piexif
|
3 |
+
import os
|
4 |
+
import civitai
|
5 |
+
|
6 |
+
from io import BytesIO
|
7 |
+
|
8 |
+
from .base import Backend
|
9 |
+
|
10 |
+
class AIDRAW(Backend):
    """Backend adapter for the Civitai image-generation API."""

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)

        self.model = "Civitai - urn:air:sd1:checkpoint:civitai:4201@130072"
        self.model_hash = "c7352c5d2f"
        self.logger = self.setup_logger('[Civitai]')

        # API token for this backend slot.
        token = self.config.civitai[self.count]
        self.token = token
        self.backend_name = self.config.backend_name_list[0]
        self.workload_name = f"{self.backend_name}-{token}"

    async def update_progress(self):
        # Overridden no-op: Civitai offers no incremental progress polling.
        pass

    async def get_img_comment(self):
        """Return the EXIF UserComment of the first downloaded image.

        :return: decoded comment text, or ``'No Raw Data'`` when the image has
                 no parseable EXIF block or no UserComment tag.
        """
        image_bytes = self.img_btyes[0]
        try:
            exif_dict = piexif.load(image_bytes)
            user_comment = exif_dict['Exif'].get(piexif.ExifIFD.UserComment)
        except Exception:
            return 'No Raw Data'
        # dict.get() returns None instead of raising KeyError when the tag is
        # absent; the original `except KeyError` never covered that case and
        # the code crashed on None.decode().
        if user_comment is None:
            return 'No Raw Data'
        return user_comment.decode('utf-8', errors='ignore')

    async def check_backend_usability(self):
        """Probe the Civitai API with the configured token.

        :return: ``False`` on an error response (also sets fail_on_login),
                 otherwise ``(True, (response_json, 200))``.
        """
        self.headers['Authorization'] = f"Bearer {self.token}"
        response = await self.http_request(
            method="GET",
            target_url='https://civitai.com/api/v1/models',
            headers=self.headers,
            params=None,
            format=True
        )

        if isinstance(response, dict) and 'error' in response:
            self.fail_on_login = True
            return False
        else:
            return True, (response, 200)

    async def err_formating_to_sd_style(self):
        """Download generated images and repackage them as an SD-WebUI style response."""
        await self.download_img()
        self.format_api_respond()
        self.result = self.build_respond

    async def posting(self):
        """Request ``self.total_img_count`` images from Civitai, one call per
        image, collecting the blob URLs into ``self.img_url``."""
        self.logger.info(f"开始使用{self.token}获取图片")

        # The civitai SDK and its HTTP stack read these from the environment.
        os.environ['CIVITAI_API_TOKEN'] = self.token
        os.environ['HTTP_PROXY'] = self.config.civitai_setting['proxy'][self.count]
        os.environ['HTTPS_PROXY'] = self.config.civitai_setting['proxy'][self.count]
        await self.check_backend_usability()

        input_ = {
            "model": "urn:air:sd1:checkpoint:civitai:4201@130072",
            "params": {
                "prompt": self.tags,
                "negativePrompt": self.ntags,
                "scheduler": self.sampler,
                "steps": self.steps,
                "cfgScale": self.scale,
                "width": self.width,
                "height": self.height,
                "clipSkip": 2,
                "seed": self.seed
            }
        }

        self.logger.info(f"任务已经发送!本次生图{self.total_img_count}张")

        for i in range(self.total_img_count):
            try:
                response = await civitai.image.create(input_, wait=True)
                job_result = response['jobs'][0]['result']
                if job_result.get('available'):
                    self.img_url.append(job_result.get('blobUrl'))
                else:
                    raise ValueError("图片没有被生成,可能是图片没有完成或者结果不可用")
            except Exception as e:
                self.fail_on_requesting = True
                self.logger.error(f"请求API失败: {e}\n{traceback.format_exc()}")

        await self.err_formating_to_sd_style()
|
105 |
+
|
106 |
+
|
107 |
+
|
108 |
+
|
DrawBridgeAPI/backend/__init__.py
ADDED
@@ -0,0 +1,909 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import random
|
3 |
+
import json
|
4 |
+
import time
|
5 |
+
|
6 |
+
import aiofiles
|
7 |
+
|
8 |
+
from tqdm import tqdm
|
9 |
+
from pathlib import Path
|
10 |
+
from fastapi import Request
|
11 |
+
from fastapi.responses import JSONResponse
|
12 |
+
from typing import Union
|
13 |
+
from colorama import Fore, Style
|
14 |
+
from colorama import init
|
15 |
+
init()
|
16 |
+
|
17 |
+
from ..base_config import setup_logger, init_instance
|
18 |
+
from .SD_civitai_API import AIDRAW
|
19 |
+
from .SD_A1111_webui import AIDRAW as AIDRAW2
|
20 |
+
from .FLUX_falai import AIDRAW as AIDRAW3
|
21 |
+
from .FLUX_replicate import AIDRAW as AIDRAW4
|
22 |
+
from .liblibai import AIDRAW as AIDRAW5
|
23 |
+
from .tusiart import AIDRAW as AIDRAW6
|
24 |
+
from .seaart import AIDRAW as AIDRAW7
|
25 |
+
from .yunjie import AIDRAW as AIDRAW8
|
26 |
+
from .comfyui import AIDRAW as AIDRAW9
|
27 |
+
from .novelai import AIDRAW as AIDRAW10
|
28 |
+
from .midjourney import AIDRAW as AIDRAW11
|
29 |
+
from .base import Backend
|
30 |
+
|
31 |
+
from DrawBridgeAPI.locales import _ as i18n
|
32 |
+
|
33 |
+
class BaseHandler:
    """Builds one backend (AIDRAW) instance per configured drawing service and
    keeps only the instances the caller enabled.

    :param payload: generation request payload (SD-WebUI style dict)
    :param request: originating FastAPI request, forwarded to backends that need it
    :param path: request path, forwarded to backends that need it
    :param comfyui_task: explicit ComfyUI workflow name overriding auto-selection
    """

    def __init__(
        self,
        payload,
        request: Request = None,
        path: str = None,
        comfyui_task=None,
    ):
        self.task_list = []
        self.instance_list: list[Backend] = []
        self.payload = payload
        self.request = request
        self.path = path
        self.config = init_instance.config
        # Indices of every configured backend; used when no explicit selection exists.
        self.all_task_list = list(range(len(list(self.config.name_url[0].keys()))))
        self.enable_backend: dict = {}
        self.comfyui_task: str = comfyui_task

    def _collect_instances(self, entries, factory, skip_none=True, **extra_kwargs):
        """Instantiate one backend object per configuration entry.

        Shared implementation for the repetitive ``get_*_task`` methods below.

        :param entries: iterable of per-backend config values (tokens or names)
        :param factory: AIDRAW class to instantiate
        :param skip_none: skip entries configured as None (token-style configs)
        :param extra_kwargs: extra constructor arguments (e.g. request/path)
        :return: list of constructed backend instances
        """
        instances = []
        counter = 0
        for entry in entries:
            if skip_none and entry is None:
                # NOTE(review): counter does not advance over None entries, so a
                # None gap shifts the `count` index of later instances — this
                # mirrors the original behaviour; confirm it is intended.
                continue
            instances.append(
                factory(count=counter, payload=self.payload, **extra_kwargs)
            )
            counter += 1
        return instances

    async def get_enable_task(
        self,
        enable_task
    ):
        """
        Build every backend instance, then keep only the selected backends.

        :param enable_task: indices (into the configured backend list) to keep
        :return: None; results are stored on self.instance_list / self.enable_backend
        """
        tasks = [
            self.get_civitai_task(),
            self.get_a1111_task(),
            self.get_falai_task(),
            self.get_replicate_task(),
            self.get_liblibai_task(),
            self.get_tusiart_task(),
            self.get_seaart_task(),
            self.get_yunjie_task(),
            self.get_comfyui_task(),
            self.get_novelai_task(),
            self.get_midjourney_task()
        ]

        all_backend_instance = await asyncio.gather(*tasks)
        all_backend_instance_list = [item for sublist in all_backend_instance for item in sublist]

        # Build the dict of enabled backends (name -> url) in config order.
        all_backend_dict: dict = self.config.name_url[0]
        items = list(all_backend_dict.items())
        self.enable_backend = dict([items[i] for i in enable_task])

        self.instance_list = [all_backend_instance_list[i] for i in enable_task]

    async def get_civitai_task(self):
        """Instances for every configured Civitai token."""
        return self._collect_instances(self.config.civitai, AIDRAW)

    async def get_a1111_task(self):
        """Instances for every configured A1111 WebUI server."""
        return self._collect_instances(
            self.config.a1111webui['name'],
            AIDRAW2,
            skip_none=False,
            request=self.request,
            path=self.path
        )

    async def get_falai_task(self):
        """Instances for every configured fal.ai token."""
        return self._collect_instances(self.config.fal_ai, AIDRAW3)

    async def get_replicate_task(self):
        """Instances for every configured Replicate token."""
        return self._collect_instances(self.config.replicate, AIDRAW4)

    async def get_liblibai_task(self):
        """Instances for every configured LiblibAI token."""
        return self._collect_instances(self.config.liblibai, AIDRAW5)

    async def get_tusiart_task(self):
        """Instances for every configured TusiArt token."""
        return self._collect_instances(self.config.tusiart, AIDRAW6)

    async def get_seaart_task(self):
        """Instances for every configured SeaArt token."""
        return self._collect_instances(self.config.seaart, AIDRAW7)

    async def get_yunjie_task(self):
        """Instances for every configured Yunjie token."""
        return self._collect_instances(self.config.yunjie, AIDRAW8)

    async def get_comfyui_task(self):
        """Instances for every configured ComfyUI server, selecting a workflow
        per request (hi-res fix / img2img override the default)."""
        instance_list = []
        counter = 0

        hr_mode = self.payload.get('enable_hr', None)

        for i in self.config.comfyui['name']:

            try:
                selected_task = (
                    "sdbase_txt2img_hr_fix" if hr_mode
                    else self.config.comfyui.get('default_workflows', ['sdbase_txt2img'])[counter]
                )
            except IndexError:
                # Fewer default_workflows entries than servers: fall back.
                selected_task = "sdbase_txt2img"

            # Any provided init image forces the img2img workflow.
            img2img = self.payload.get("init_images", [])
            if img2img:
                selected_task = "sdbase_img2img"

            aidraw_instance = AIDRAW9(
                count=counter,
                payload=self.payload,
                request=self.request,
                path=self.path,
                comfyui_api_json=self.comfyui_task or selected_task
            )
            counter += 1
            instance_list.append(aidraw_instance)

        return instance_list

    async def get_novelai_task(self):
        """Instances for every configured NovelAI entry (no None filtering)."""
        return self._collect_instances(self.config.novelai, AIDRAW10, skip_none=False)

    async def get_midjourney_task(self):
        """Instances for every configured Midjourney server (no None filtering)."""
        return self._collect_instances(
            self.config.midjourney['name'], AIDRAW11, skip_none=False
        )
|
242 |
+
|
243 |
+
|
244 |
+
class TXT2IMGHandler(BaseHandler):
    """Task handler for txt2img requests."""

    def __init__(self, payload=None, comfyui_task: str = None):
        super().__init__(comfyui_task=comfyui_task, payload=payload)

    async def get_all_instance(self) -> tuple[list[Backend], dict]:
        """Return the backend instances enabled for txt2img and their config mapping."""
        # Explicitly configured backends win; otherwise enable all of them.
        # (The original had a no-op `x = x` branch here.)
        man_enable_task = (
            self.config.server_settings['enable_txt2img_backends']
            or self.all_task_list
        )

        await self.get_enable_task(man_enable_task)

        return self.instance_list, self.enable_backend
|
260 |
+
|
261 |
+
|
262 |
+
class IMG2IMGHandler(BaseHandler):
    """Task handler for img2img requests."""

    def __init__(self, payload=None, comfyui_task: str = None):
        super().__init__(comfyui_task=comfyui_task, payload=payload)

    async def get_all_instance(self) -> tuple[list[Backend], dict]:
        """Return the backend instances enabled for img2img and their config mapping."""
        # Explicitly configured backends win; otherwise enable all of them.
        # (The original had a no-op `x = x` branch here.)
        man_enable_task = (
            self.config.server_settings['enable_img2img_backends']
            or self.all_task_list
        )

        await self.get_enable_task(man_enable_task)

        return self.instance_list, self.enable_backend
|
278 |
+
|
279 |
+
|
280 |
+
class A1111WebuiHandler(BaseHandler):
    """Handler pinned to the A1111 WebUI backend (fixed index 1)."""

    async def get_all_instance(self) -> tuple[list[Backend], dict]:
        """Return only the A1111 WebUI backend instance and its config entry."""
        await self.get_enable_task([1])
        return self.instance_list, self.enable_backend
|
287 |
+
|
288 |
+
|
289 |
+
class A1111WebuiHandlerAPI(BaseHandler):
    """Task handler for requests arriving on the A1111-compatible sdapi routes."""

    async def get_all_instance(self) -> tuple[list[Backend], dict]:
        """Return the backend instances enabled for the sdapi and their config mapping."""
        # Explicitly configured backends win; otherwise enable all of them.
        # (The original had a no-op `x = x` branch here.)
        man_enable_task = (
            self.config.server_settings['enable_sdapi_backends']
            or self.all_task_list
        )

        await self.get_enable_task(man_enable_task)

        return self.instance_list, self.enable_backend
|
301 |
+
|
302 |
+
#
|
303 |
+
# class ComfyuiHandler(BaseHandler):
|
304 |
+
#
|
305 |
+
# async def get_all_instance(self) -> tuple[list[Backend], dict]:
|
306 |
+
#
|
307 |
+
# await self.get_enable_task([1])
|
308 |
+
#
|
309 |
+
# return self.instance_list, self.enable_backend
|
310 |
+
|
311 |
+
|
312 |
+
class StaticHandler:
    """Process-wide state shared across handlers: the model a client locked the
    service to, saved prompt styles, and a canned A1111-compatible options blob."""

    # Model name the API is currently locked to (None = auto-select).
    lock_to_backend = None
    # Saved prompt styles served to A1111-compatible clients.
    prompt_style: list = None

    @classmethod
    def set_lock_to_backend(cls, selected_model: str):
        """Lock all subsequent requests to the given model/backend name."""
        cls.lock_to_backend = selected_model

    @classmethod
    def get_lock_to_backend(cls):
        """Return the locked model name, or None when auto-selecting."""
        return cls.lock_to_backend

    @classmethod
    def get_prompt_style(cls):
        """Return the stored prompt styles (may be None before set_prompt_style)."""
        return cls.prompt_style

    @classmethod
    def set_prompt_style(cls, prompt_style: list):
        """Store the prompt styles to serve to clients."""
        cls.prompt_style = prompt_style

    @classmethod
    def get_backend_options(cls):
        """Build a static A1111-WebUI-shaped /sdapi/v1/options response.

        Only ``sd_model_checkpoint`` is dynamic (reflects lock_to_backend);
        every other field is a fixed placeholder so A1111 clients accept it.
        """
        build_resp = {
            "samples_save": True,
            "samples_format": "png",
            "samples_filename_pattern": "",
            "save_images_add_number": True,
            "grid_save": True,
            "grid_format": "png",
            "grid_extended_filename": False,
            "grid_only_if_multiple": True,
            "grid_prevent_empty_spots": False,
            "grid_zip_filename_pattern": "",
            "n_rows": -1.0,
            "font": "",
            "grid_text_active_color": "#000000",
            "grid_text_inactive_color": "#999999",
            "grid_background_color": "#ffffff",
            "enable_pnginfo": True,
            "save_txt": False,
            "save_images_before_face_restoration": False,
            "save_images_before_highres_fix": False,
            "save_images_before_color_correction": False,
            "save_mask": False,
            "save_mask_composite": False,
            "jpeg_quality": 80.0,
            "webp_lossless": False,
            "export_for_4chan": True,
            "img_downscale_threshold": 4.0,
            "target_side_length": 4000.0,
            "img_max_size_mp": 200.0,
            "use_original_name_batch": True,
            "use_upscaler_name_as_suffix": False,
            "save_selected_only": True,
            "save_init_img": False,
            "temp_dir": "",
            "clean_temp_dir_at_start": False,
            "save_incomplete_images": False,
            "outdir_samples": "",
            "outdir_txt2img_samples": "outputs/txt2img-images",
            "outdir_img2img_samples": "outputs/img2img-images",
            "outdir_extras_samples": "outputs/extras-images",
            "outdir_grids": "",
            "outdir_txt2img_grids": "outputs/txt2img-grids",
            "outdir_img2img_grids": "outputs/img2img-grids",
            "outdir_save": "log/images",
            "outdir_init_images": "outputs/init-images",
            "save_to_dirs": True,
            "grid_save_to_dirs": True,
            "use_save_to_dirs_for_ui": False,
            "directories_filename_pattern": "[date]",
            "directories_max_prompt_words": 8.0,
            "ESRGAN_tile": 192.0,
            "ESRGAN_tile_overlap": 8.0,
            "realesrgan_enabled_models": [
                "R-ESRGAN 4x+",
                "R-ESRGAN 4x+ Anime6B"
            ],
            "upscaler_for_img2img": None,
            "face_restoration": False,
            "face_restoration_model": "CodeFormer",
            "code_former_weight": 0.5,
            "face_restoration_unload": False,
            "auto_launch_browser": "Local",
            "show_warnings": False,
            "show_gradio_deprecation_warnings": True,
            "memmon_poll_rate": 8.0,
            "samples_log_stdout": False,
            "multiple_tqdm": True,
            "print_hypernet_extra": False,
            "list_hidden_files": True,
            "disable_mmap_load_safetensors": False,
            "hide_ldm_prints": True,
            "api_enable_requests": True,
            "api_forbid_local_requests": True,
            "api_useragent": "",
            "unload_models_when_training": False,
            "pin_memory": False,
            "save_optimizer_state": False,
            "save_training_settings_to_txt": True,
            "dataset_filename_word_regex": "",
            "dataset_filename_join_string": " ",
            "training_image_repeats_per_epoch": 1.0,
            "training_write_csv_every": 500.0,
            "training_xattention_optimizations": False,
            "training_enable_tensorboard": False,
            "training_tensorboard_save_images": False,
            "training_tensorboard_flush_every": 120.0,
            # The single dynamic field: reflects the model lock, if any.
            "sd_model_checkpoint": cls.lock_to_backend if cls.lock_to_backend else 'DrawBridgeAPI-Auto-Select',
            "sd_checkpoints_limit": 1.0,
            "sd_checkpoints_keep_in_cpu": True,
            "sd_checkpoint_cache": 3,
            "sd_unet": "None",
            "enable_quantization": False,
            "enable_emphasis": True,
            "enable_batch_seeds": True,
            "comma_padding_backtrack": 20.0,
            "CLIP_stop_at_last_layers": 3.0,
            "upcast_attn": False,
            "randn_source": "GPU",
            "tiling": False,
            "hires_fix_refiner_pass": "second pass",
            "sdxl_crop_top": 0.0,
            "sdxl_crop_left": 0.0,
            "sdxl_refiner_low_aesthetic_score": 2.5,
            "sdxl_refiner_high_aesthetic_score": 6.0,
            "sd_vae_explanation": "<abbr title='Variational autoencoder'>VAE</abbr> is a neural network that transforms a standard <abbr title='red/green/blue'>RGB</abbr>\nimage into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling\n(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished.\nFor img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling.",
            "sd_vae_checkpoint_cache": 0,
            "sd_vae": "None",
            "sd_vae_overrides_per_model_preferences": False,
            "auto_vae_precision": True,
            "sd_vae_encode_method": "Full",
            "sd_vae_decode_method": "Full",
            "inpainting_mask_weight": 1.0,
            "initial_noise_multiplier": 1.0,
            "img2img_extra_noise": 0,
            "img2img_color_correction": False,
            "img2img_fix_steps": False,
            "img2img_background_color": "#ffffff",
            "img2img_editor_height": 720.0,
            "img2img_sketch_default_brush_color": "#ffffff",
            "img2img_inpaint_mask_brush_color": "#ffffff",
            "img2img_inpaint_sketch_default_brush_color": "#ffffff",
            "return_mask": False,
            "return_mask_composite": False,
            "cross_attention_optimization": "Automatic",
            "s_min_uncond": 0.0,
            "token_merging_ratio": 0.0,
            "token_merging_ratio_img2img": 0.0,
            "token_merging_ratio_hr": 0.0,
            "pad_cond_uncond": False,
            "persistent_cond_cache": True,
            "batch_cond_uncond": True,
            "use_old_emphasis_implementation": False,
            "use_old_karras_scheduler_sigmas": False,
            "no_dpmpp_sde_batch_determinism": False,
            "use_old_hires_fix_width_height": False,
            "dont_fix_second_order_samplers_schedule": False,
            "hires_fix_use_firstpass_conds": False,
            "use_old_scheduling": False,
            "interrogate_keep_models_in_memory": False,
            "interrogate_return_ranks": False,
            "interrogate_clip_num_beams": 1.0,
            "interrogate_clip_min_length": 24.0,
            "interrogate_clip_max_length": 48.0,
            "interrogate_clip_dict_limit": 1500.0,
            "interrogate_clip_skip_categories": [],
            "interrogate_deepbooru_score_threshold": 0.5,
            "deepbooru_sort_alpha": True,
            "deepbooru_use_spaces": True,
            "deepbooru_escape": True,
            "deepbooru_filter_tags": "",
            "extra_networks_show_hidden_directories": True,
            "extra_networks_hidden_models": "When searched",
            "extra_networks_default_multiplier": 1.0,
            "extra_networks_card_width": 0,
            "extra_networks_card_height": 0,
            "extra_networks_card_text_scale": 1.0,
            "extra_networks_card_show_desc": True,
            "extra_networks_add_text_separator": " ",
            "ui_extra_networks_tab_reorder": "",
            "textual_inversion_print_at_load": False,
            "textual_inversion_add_hashes_to_infotext": True,
            "sd_hypernetwork": "None",
            "localization": "None",
            "gradio_theme": "Default",
            "gradio_themes_cache": True,
            "gallery_height": "",
            "return_grid": True,
            "do_not_show_images": False,
            "send_seed": True,
            "send_size": True,
            "js_modal_lightbox": True,
            "js_modal_lightbox_initially_zoomed": True,
            "js_modal_lightbox_gamepad": False,
            "js_modal_lightbox_gamepad_repeat": 250.0,
            "show_progress_in_title": True,
            "samplers_in_dropdown": True,
            "dimensions_and_batch_together": True,
            "keyedit_precision_attention": 0.1,
            "keyedit_precision_extra": 0.05,
            "keyedit_delimiters": ".,\\/!?%^*;:{}=`~()",
            "keyedit_move": True,
            "quicksettings_list": [
                "sd_model_checkpoint",
                "sd_unet",
                "sd_vae",
                "CLIP_stop_at_last_layers"
            ],
            "ui_tab_order": [],
            "hidden_tabs": [],
            "ui_reorder_list": [],
            "hires_fix_show_sampler": False,
            "hires_fix_show_prompts": False,
            "disable_token_counters": False,
            "add_model_hash_to_info": True,
            "add_model_name_to_info": True,
            "add_user_name_to_info": False,
            "add_version_to_infotext": True,
            "disable_weights_auto_swap": True,
            "infotext_styles": "Apply if any",
            "show_progressbar": True,
            "live_previews_enable": True,
            "live_previews_image_format": "png",
            "show_progress_grid": True,
            "show_progress_every_n_steps": 10.0,
            "show_progress_type": "Approx NN",
            "live_preview_allow_lowvram_full": False,
            "live_preview_content": "Prompt",
            "live_preview_refresh_period": 1000.0,
            "live_preview_fast_interrupt": False,
            "hide_samplers": [],
            "eta_ddim": 0.0,
            "eta_ancestral": 1.0,
            "ddim_discretize": "uniform",
            "s_churn": 0.0,
            "s_tmin": 0.0,
            "s_tmax": 0,
            "s_noise": 1.0,
            "k_sched_type": "Automatic",
            "sigma_min": 0.0,
            "sigma_max": 0.0,
            "rho": 0.0,
            "eta_noise_seed_delta": 0,
            "always_discard_next_to_last_sigma": False,
            "sgm_noise_multiplier": False,
            "uni_pc_variant": "bh1",
            "uni_pc_skip_type": "time_uniform",
            "uni_pc_order": 3.0,
            "uni_pc_lower_order_final": True,
            "postprocessing_enable_in_main_ui": [],
            "postprocessing_operation_order": [],
            "upscaling_max_images_in_cache": 5.0,
            "disabled_extensions": [],
            "disable_all_extensions": "none",
            "restore_config_state_file": "",
            "sd_checkpoint_hash": "91e0f7cbaf70676153810c231e8703bf26b3208c116a3d1f2481cbc666905471"
        }

        return build_resp
|
572 |
+
|
573 |
+
|
574 |
+
class TaskHandler(StaticHandler):
    """Dispatch one generation task to a backend.

    Load balancing uses a Redis-backed moving average of per-image
    generation times plus a live in-flight image counter per backend.
    """

    # site -> recent per-image generation times in seconds, newest last
    backend_avg_dict: dict = {}
    # site -> number of samples recorded since the last Redis flush
    write_count: dict = {}
    # site -> in-flight image count (baseline value is 1, see set_backend_image)
    backend_images: dict = {}

    backend_site_list = None
    load_balance_logger = setup_logger('[AvgTimeCalculator]')
    # size of the moving-average sample window
    load_balance_sample = 10

    redis_client = None
    backend_status = None

    @classmethod
    def update_backend_status(cls):
        """Refresh the cached per-backend workload snapshot from Redis."""
        cls.backend_status = json.loads(cls.redis_client.get("workload"))

    @classmethod
    def get_redis_client(cls):
        """Bind the shared Redis client lazily (it may not exist at import time)."""
        cls.redis_client = init_instance.redis_client

    @classmethod
    async def get_backend_avg_work_time(cls) -> dict:
        """Return ``{site: seconds_per_image_or_None}`` for every known backend.

        Persisted samples are merged from Redis first; a backend with fewer
        than ``load_balance_sample`` samples reports ``None`` (no estimate).
        """
        backend_sites = cls.backend_site_list

        # FIX: this key used to be "" so the initial write went to an empty
        # Redis key and the persisted averages were never actually seeded.
        avg_time_key = "backend_avg_time"

        avg_time_data = cls.redis_client.get(avg_time_key)
        if avg_time_data is None:
            cls.redis_client.set(avg_time_key, json.dumps(cls.backend_avg_dict))
        else:
            new_data = json.loads(avg_time_data)
            for key, values in new_data.items():
                recent = (values[-cls.load_balance_sample:]
                          if len(values) >= cls.load_balance_sample else values)
                if key in cls.backend_avg_dict:
                    cls.backend_avg_dict[key].extend(recent)
                else:
                    cls.backend_avg_dict[key] = recent

                # keep only the newest sample window
                cls.backend_avg_dict[key] = cls.backend_avg_dict[key][-cls.load_balance_sample:]

        avg_time_dict = {}
        for backend_site in backend_sites:
            spend_time_list = cls.backend_avg_dict.get(backend_site, [])
            if spend_time_list and len(spend_time_list) >= cls.load_balance_sample:
                # trimmed mean: drop the single fastest and slowest sample
                sorted_list = sorted(spend_time_list)
                trimmed_list = sorted_list[1:-1]
                avg_time = sum(trimmed_list) / len(trimmed_list) if trimmed_list else None
                avg_time_dict[backend_site] = avg_time
            else:
                # not enough samples yet to trust an average
                avg_time_dict[backend_site] = None

        return avg_time_dict

    @classmethod
    async def set_backend_work_time(cls, spend_time, backend_site, total_images=1):
        """Record one normalized (per-image) generation time for a backend.

        The in-memory window is flushed to Redis only every
        ``load_balance_sample`` writes to avoid a round-trip per task.
        """
        spend_time_list = cls.backend_avg_dict.get(backend_site, [])
        spend_time_list.append(int(spend_time / total_images))

        if len(spend_time_list) >= cls.load_balance_sample:
            spend_time_list = spend_time_list[-cls.load_balance_sample:]

        cls.backend_avg_dict[backend_site] = spend_time_list

        cls.write_count[backend_site] = cls.write_count.get(backend_site, 0) + 1

        if cls.write_count.get(backend_site, 0) >= cls.load_balance_sample:
            cls.redis_client.set("backend_avg_time", json.dumps(cls.backend_avg_dict))
            cls.write_count[backend_site] = 0

    @classmethod
    def set_backend_image(cls, num=0, backend_site=None, get=False) -> Union[None, dict]:
        """Adjust a backend's in-flight image counter by ``num``.

        With ``get=True`` also return a snapshot ``{site: count}``; note the
        counter baseline is 1, not 0 (callers subtract 1 when displaying).
        """
        all_backend_dict = {}

        if backend_site:
            working_images = cls.backend_images.get(backend_site, 1)
            working_images += num
            cls.backend_images[backend_site] = working_images

        if get:
            for site in cls.backend_site_list:
                all_backend_dict[site] = cls.backend_images.get(site, 1)
            return all_backend_dict

    @classmethod
    def set_backend_list(cls, backend_dict):
        """Cache the list of backend sites (the dict's values) for lookups."""
        cls.backend_site_list = list(backend_dict.values())

    def __init__(
        self,
        payload=None,
        request: Request = None,
        path: str = None,
        select_backend: int = None,
        reutrn_instance: bool = False,  # (sic) name kept for caller compatibility
        model_to_backend: str = None,
        disable_loadbalance: bool = False,
        comfyui_json: str = "",
        override_model_select: bool = False,
    ):
        self.payload = payload
        self.instance_list = []
        self.result = None
        self.request = request
        self.path = path
        self.enable_backend = None
        self.reutrn_instance = reutrn_instance
        self.select_backend = select_backend
        self.model_to_backend = model_to_backend  # model name to route by
        self.disable_loadbalance = disable_loadbalance
        self.lock_to_backend = self.get_lock_to_backend() if override_model_select is False else None
        self.comfyui_json: str = comfyui_json

        # FIX: tolerate the payload=None default instead of raising AttributeError
        safe_payload = self.payload or {}
        self.total_images = (safe_payload.get("batch_size", 1) * safe_payload.get("n_iter", 1)) or 1

        self.ava_backend_url = None
        self.ava_backend_index = None

    @staticmethod
    def get_backend_name(model_name) -> Union[str, None]:
        """Return the backend key that serves ``model_name``, or None if unknown."""
        all_model: bytes = init_instance.redis_client.get('models')
        all_model: dict = json.loads(all_model.decode('utf-8'))
        for key, models in all_model.items():
            if isinstance(models, list):
                for model in models:
                    if model.get("title") == model_name or model.get("model_name") == model_name:
                        return key

    @staticmethod
    def get_backend_index(mapping_dict, key_to_find) -> Union[int, None]:
        """Return the positional index of ``key_to_find`` in the dict, or None."""
        keys = list(mapping_dict.keys())
        if key_to_find in keys:
            return keys.index(key_to_find)
        return None

    async def txt2img(self):
        """Build txt2img backend instances, pick one, run it and return the result."""
        self.instance_list, self.enable_backend = await TXT2IMGHandler(
            self.payload,
            comfyui_task=self.comfyui_json
        ).get_all_instance()

        await self.choice_backend()
        return self.result

    async def img2img(self):
        """Build img2img backend instances, pick one, run it and return the result."""
        self.instance_list, self.enable_backend = await IMG2IMGHandler(
            self.payload,
            comfyui_task=self.comfyui_json
        ).get_all_instance()

        await self.choice_backend()
        return self.result

    async def sd_api(self) -> Union[JSONResponse, list]:
        """Handle a raw A1111-compatible API request via the generic handler."""
        self.instance_list, self.enable_backend = await A1111WebuiHandlerAPI(
            self.payload,
            self.request,
            self.path
        ).get_all_instance()

        await self.choice_backend()
        return self.result

    async def choice_backend(self):
        """Select a backend and run the task.

        Priority: explicit model routing, then a locked backend, then the
        load balancer (lowest estimated total work time wins).
        """
        from DrawBridgeAPI.locales import _ as i18n

        if self.disable_loadbalance:
            return
        backend_url_dict = self.enable_backend
        self.set_backend_list(backend_url_dict)
        self.get_redis_client()
        reverse_dict = {value: key for key, value in backend_url_dict.items()}

        tasks = []
        is_avaiable = 0
        status_dict = {}
        ava_url = None
        n = -1
        e = -1
        normal_backend = None
        idle_backend = []

        logger = setup_logger(custom_prefix='[LOAD_BALANCE]')

        if self.reutrn_instance:
            self.result = self.instance_list
            return
        for i in self.instance_list:
            task = i.get_backend_working_progress()
            tasks.append(task)
        # query the API queue status
        key = self.get_backend_name(self.model_to_backend or self.lock_to_backend)
        if self.model_to_backend and key is not None:

            backend_index = self.get_backend_index(backend_url_dict, key)
            logger.info(f"{i18n('Manually select model')}: {self.model_to_backend}, {i18n('Backend select')}{key[:24]}")

            self.ava_backend_url = backend_url_dict[key]
            self.ava_backend_index = backend_index

            await self.exec_generate()

        elif self.lock_to_backend:
            # NOTE: if the locked backend's key cannot be resolved, nothing runs
            # (original behavior preserved)
            if key is not None:
                backend_index = self.get_backend_index(backend_url_dict, key)
                logger.info(f"{i18n('Backend locked')}: {key[:24]}")

                self.ava_backend_url = backend_url_dict[key]
                self.ava_backend_index = backend_index

                await self.exec_generate()

        else:
            all_resp = await asyncio.gather(*tasks, return_exceptions=True)
            logger.info(i18n('Starting backend selection'))
            for resp_tuple in all_resp:
                e += 1
                # FIX: the original `isinstance(resp_tuple, None or Exception)`
                # evaluated to `isinstance(resp_tuple, Exception)` and never
                # matched a None response.
                if resp_tuple is None or isinstance(resp_tuple, Exception):
                    logger.warning(i18n('Backend %s is down') % self.instance_list[e].workload_name[:24])
                else:
                    try:
                        if resp_tuple[3] in [200, 201]:
                            n += 1
                            status_dict[resp_tuple[2]] = resp_tuple[0]["eta_relative"]
                            normal_backend = (list(status_dict.keys()))
                        else:
                            raise RuntimeError
                    # FIX: `except RuntimeError or TypeError` only caught RuntimeError
                    except (RuntimeError, TypeError):
                        logger.warning(i18n('Backend %s is failed or locked') % self.instance_list[e].workload_name[:24])
                        continue
                    else:
                        # a backend at progress 0 is considered idle
                        if resp_tuple[0]["progress"] in [0, 0.0]:
                            is_avaiable += 1
                            idle_backend.append(normal_backend[n])
                        # render the backend's progress bar
                        total = 100
                        progress = int(resp_tuple[0]["progress"] * 100)
                        show_str = f"{list(backend_url_dict.keys())[e][:24]}"
                        show_str = show_str.ljust(50, "-")

                        bar_format = f"{Fore.CYAN}[Progress] {{l_bar}}{{bar}}|{Style.RESET_ALL}"

                        with tqdm(
                            total=total,
                            desc=show_str + "-->",
                            bar_format=bar_format
                        ) as pbar:
                            pbar.update(progress)

            # FIX: normal_backend stays None when every backend failed, so
            # `len(normal_backend)` raised TypeError instead of the intended error
            if not normal_backend:
                logger.error(i18n('No available backend'))
                raise RuntimeError(i18n('No available backend'))

            backend_total_work_time = {}
            avg_time_dict = await self.get_backend_avg_work_time()
            backend_image = self.set_backend_image(get=True)

            eta = 0

            for (site, time_), (_, image_count) in zip(avg_time_dict.items(), backend_image.items()):
                self.load_balance_logger.info(
                    i18n('Backend: %s Average work time: %s seconds, Current tasks: %s') % (site, time_, image_count - 1)
                )
                if site in normal_backend:
                    self.update_backend_status()
                    for key in self.backend_status:
                        if site in key:
                            end_time = self.backend_status[key].get('end_time', None)
                            start_time = self.backend_status[key].get('start_time', None)
                            if start_time:
                                if end_time:
                                    eta = 0
                                else:
                                    # task still running: count elapsed time as extra load
                                    current_time = time.time()
                                    eta = int(current_time - start_time)

                    effective_time = 1 if time_ is None else time_
                    total_work_time = effective_time * int(image_count)

                    eta = eta if time_ else 0
                    self.load_balance_logger.info(f"{i18n('Extra time weight')}{eta}")

                    backend_total_work_time[site] = total_work_time - eta if (total_work_time - eta) >= 0 else total_work_time

            total_time_dict = list(backend_total_work_time.values())
            # invert {site: time} -> {time: site}; duplicate times get a tuple key
            # so no candidate is silently dropped
            rev_dict = {}
            for key, value in backend_total_work_time.items():
                if value in rev_dict:
                    rev_dict[(value, key)] = value
                else:
                    rev_dict[value] = key

            sorted_list = sorted(total_time_dict)
            fastest_backend = sorted_list[0]
            ava_url = rev_dict[fastest_backend]
            self.load_balance_logger.info(i18n('Backend %s is the fastest, has been selected') % ava_url[:24])
            ava_url_index = list(backend_url_dict.values()).index(ava_url)

            self.ava_backend_url = ava_url
            self.ava_backend_index = ava_url_index

            await self.exec_generate()

    async def exec_generate(self):
        """Run the generation on the selected backend, keeping the in-flight
        counter and the average-time statistics consistent on every path."""
        self.set_backend_image(self.total_images, self.ava_backend_url)
        fifo = None
        try:
            fifo = await self.instance_list[self.ava_backend_index].send_result_to_api()
        except Exception:
            # best-effort: a failed generation simply leaves self.result as None
            pass
        finally:
            self.set_backend_image(-self.total_images, self.ava_backend_url)
            self.result = fifo.result if fifo is not None else None
            # FIX: fifo.spend_time was previously read unconditionally and
            # raised AttributeError whenever the request above failed
            if fifo is not None:
                await self.set_backend_work_time(fifo.spend_time, self.ava_backend_url, fifo.total_img_count)
907 |
+
|
908 |
+
|
909 |
+
|
DrawBridgeAPI/backend/base.py
ADDED
@@ -0,0 +1,984 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import random
|
2 |
+
import uuid
|
3 |
+
|
4 |
+
import aiofiles
|
5 |
+
import aiohttp
|
6 |
+
import json
|
7 |
+
import asyncio
|
8 |
+
import traceback
|
9 |
+
import time
|
10 |
+
import httpx
|
11 |
+
|
12 |
+
from tqdm import tqdm
|
13 |
+
from fastapi import Request
|
14 |
+
from fastapi.responses import JSONResponse
|
15 |
+
from pathlib import Path
|
16 |
+
from datetime import datetime
|
17 |
+
from typing import Union
|
18 |
+
|
19 |
+
from ..base_config import setup_logger
|
20 |
+
from ..base_config import init_instance
|
21 |
+
from ..utils import exceptions
|
22 |
+
from ..locales import _
|
23 |
+
|
24 |
+
import base64
|
25 |
+
from io import BytesIO
|
26 |
+
from PIL import Image, ImageDraw, ImageFont
|
27 |
+
from ..utils.shared import PATH_TO_COMFYUI_WORKFLOWS
|
28 |
+
|
29 |
+
|
30 |
+
class Backend:
|
31 |
+
|
32 |
+
queues = {}
|
33 |
+
locks = {}
|
34 |
+
task_count = 0
|
35 |
+
queue_logger = setup_logger('[QueueManager]')
|
36 |
+
|
37 |
+
@classmethod
|
38 |
+
def get_queue(cls, token):
|
39 |
+
if token not in cls.queues:
|
40 |
+
cls.queues[token] = asyncio.Queue()
|
41 |
+
return cls.queues[token]
|
42 |
+
|
43 |
+
@classmethod
|
44 |
+
def get_lock(cls, token):
|
45 |
+
if token not in cls.locks:
|
46 |
+
cls.locks[token] = asyncio.Lock()
|
47 |
+
return cls.locks[token]
|
48 |
+
|
49 |
+
@classmethod
|
50 |
+
async def add_to_queue(cls, token, request_func, *args, **kwargs):
|
51 |
+
queue = cls.get_queue(token)
|
52 |
+
future = asyncio.get_event_loop().create_future()
|
53 |
+
|
54 |
+
await queue.put((request_func, args, kwargs, future))
|
55 |
+
|
56 |
+
lock = cls.get_lock(token)
|
57 |
+
|
58 |
+
if not lock.locked():
|
59 |
+
asyncio.create_task(cls.process_queue(token))
|
60 |
+
|
61 |
+
return await future
|
62 |
+
|
63 |
+
    @classmethod
    async def process_queue(cls, token):
        # Worker coroutine: drain this token's queue one task at a time.
        # The per-token lock guarantees at most one worker per token.
        queue = cls.get_queue(token)
        lock = cls.get_lock(token)

        async with lock:
            while not queue.empty():

                request_func, args, kwargs, future = await queue.get()
                try:
                    result = await request_func(*args, **kwargs)
                    # future may already be cancelled by the awaiting caller
                    if not future.done():
                        future.set_result(result)
                    cls.queue_logger.info(f"Token: {token}, {_('Task completed successfully')}")
                except Exception as e:
                    # propagate the failure to the caller awaiting the future
                    if not future.done():
                        future.set_exception(e)
                    cls.queue_logger.info(f"Token: {token}, {_('Task failed')}: {e}")
                finally:
                    queue.task_done()

                cls.queue_logger.info(f"Token: {token}, {_('Remaining tasks in the queue')}")
            cls.queue_logger.info(f"Token: {token}, {_('No remaining tasks in the queue')}")
|
86 |
+
|
87 |
+
def __init__(
|
88 |
+
self,
|
89 |
+
login: bool = False,
|
90 |
+
backend_url: str = None,
|
91 |
+
token: str = "",
|
92 |
+
count: int = None,
|
93 |
+
payload: dict = {},
|
94 |
+
input_img: str = None,
|
95 |
+
request: Request = None,
|
96 |
+
path: str = None,
|
97 |
+
comfyui_api_json: str = None,
|
98 |
+
**kwargs,
|
99 |
+
):
|
100 |
+
|
101 |
+
|
102 |
+
self.tags: str = payload.get('prompt', '1girl')
|
103 |
+
self.ntags: str = payload.get('negative_prompt', '')
|
104 |
+
self.seed: int = payload.get('seed', random.randint(0, 4294967295))
|
105 |
+
self.seed_list: list[int] = [self.seed]
|
106 |
+
self.steps: int = payload.get('steps', 20)
|
107 |
+
self.scale: float = payload.get('cfg_scale', 7.0)
|
108 |
+
self.width: int = payload.get('width', 512)
|
109 |
+
self.height: int = payload.get('height', 512)
|
110 |
+
self.sampler: str = payload.get('sampler_name', "Euler")
|
111 |
+
self.restore_faces: bool = payload.get('restore_faces', False)
|
112 |
+
self.scheduler: str = payload.get('scheduler', 'Normal')
|
113 |
+
|
114 |
+
self.batch_size: int = payload.get('batch_size', 1)
|
115 |
+
self.batch_count: int = payload.get('n_iter', 1)
|
116 |
+
self.total_img_count: int = self.batch_size * self.batch_count
|
117 |
+
|
118 |
+
self.enable_hr: bool = payload.get('enable_hr', False)
|
119 |
+
self.hr_scale: float = payload.get('hr_scale', 1.5)
|
120 |
+
self.hr_second_pass_steps: int = payload.get('hr_second_pass_steps', self.steps)
|
121 |
+
self.hr_upscaler: str = payload.get('hr_upscaler', "")
|
122 |
+
self.denoising_strength: float = payload.get('denoising_strength', 1.0)
|
123 |
+
self.hr_resize_x: int = payload.get('hr_resize_x', 0)
|
124 |
+
self.hr_resize_y: int = payload.get('hr_resize_y', 0)
|
125 |
+
self.hr_sampiler: str = payload.get('hr_sampler_name', "Euler")
|
126 |
+
self.hr_scheduler: str = payload.get('hr_scheduler', 'Normal')
|
127 |
+
self.hr_prompt: str = payload.get('hr_prompt', '')
|
128 |
+
self.hr_negative_prompt: str = payload.get('hr_negative_prompt', '')
|
129 |
+
self.hr_distilled_cfg: float = payload.get('hr_distilled_cfg', 3.5)
|
130 |
+
|
131 |
+
self.init_images: list = payload.get('init_images', [])
|
132 |
+
|
133 |
+
self.xl = False
|
134 |
+
self.flux = False
|
135 |
+
self.clip_skip = 2
|
136 |
+
self.final_width = None
|
137 |
+
self.final_height = None
|
138 |
+
self.model = "DiaoDaia"
|
139 |
+
self.model_id = '20204'
|
140 |
+
self.model_hash = "c7352c5d2f"
|
141 |
+
self.model_list: list = []
|
142 |
+
self.model_path = "models\\1053-S.ckpt"
|
143 |
+
self.client_id = uuid.uuid4().hex
|
144 |
+
|
145 |
+
self.comfyui_api_json = comfyui_api_json
|
146 |
+
self.comfyui_api_json_reflex = None
|
147 |
+
|
148 |
+
self.result: list = []
|
149 |
+
self.time = time.strftime("%Y-%m-%d %H:%M:%S")
|
150 |
+
|
151 |
+
self.backend_url = backend_url # 后端url
|
152 |
+
self.backend_id = None # 用于区别后端, token或者ulr
|
153 |
+
self.headers = {
|
154 |
+
"Content-Type": "application/json",
|
155 |
+
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
|
156 |
+
} # 后端headers
|
157 |
+
self.login = login # 是否需要登录后端
|
158 |
+
self.token = token # 后端token
|
159 |
+
self.count = count # 适用于后端的负载均衡中遍历的后端编号
|
160 |
+
self.config = init_instance.config # 配置文件
|
161 |
+
self.backend_name = '' # 后端名称
|
162 |
+
self.current_config = None # 当前后端的配置
|
163 |
+
|
164 |
+
self.fail_on_login = None
|
165 |
+
self.fail_on_requesting = None
|
166 |
+
|
167 |
+
self.result = None # api返回的结果
|
168 |
+
self.img = [] # 返回的图片
|
169 |
+
self.img_url = []
|
170 |
+
self.img_btyes = []
|
171 |
+
self.input_img = input_img
|
172 |
+
|
173 |
+
self.payload = payload # post时使用的负载
|
174 |
+
self.request = request
|
175 |
+
self.path = path
|
176 |
+
|
177 |
+
self.logger = None
|
178 |
+
self.setup_logger = setup_logger
|
179 |
+
self.redis_client = init_instance.redis_client
|
180 |
+
|
181 |
+
self.parameters = None # 图片元数据
|
182 |
+
self.post_event = None
|
183 |
+
self.task_id = uuid.uuid4().hex
|
184 |
+
self.task_type = 'txt2img'
|
185 |
+
self.workload_name = None
|
186 |
+
self.current_date = datetime.now().strftime('%Y%m%d')
|
187 |
+
self.save_path = ''
|
188 |
+
|
189 |
+
self.start_time = None
|
190 |
+
self.end_time = None
|
191 |
+
self.spend_time = None
|
192 |
+
self.comment = None
|
193 |
+
|
194 |
+
self.current_process = None
|
195 |
+
|
196 |
+
self.build_info: dict = None
|
197 |
+
self.build_respond: dict = None
|
198 |
+
|
199 |
+
self.nsfw_detected = False
|
200 |
+
self.DBAPIExceptions = exceptions.DrawBridgeAPIException
|
201 |
+
|
202 |
+
self.reflex_dict = {}
|
203 |
+
|
204 |
+
def format_api_respond(self):
|
205 |
+
|
206 |
+
self.build_info = {
|
207 |
+
"prompt": self.tags,
|
208 |
+
"all_prompts": self.repeat(self.tags)
|
209 |
+
,
|
210 |
+
"negative_prompt": self.ntags,
|
211 |
+
"all_negative_prompts": self.repeat(self.ntags)
|
212 |
+
,
|
213 |
+
"seed": self.seed_list,
|
214 |
+
"all_seeds": self.seed_list,
|
215 |
+
"subseed": self.seed,
|
216 |
+
"all_subseeds": self.seed_list,
|
217 |
+
"subseed_strength": 0,
|
218 |
+
"width": self.width,
|
219 |
+
"height": self.height,
|
220 |
+
"sampler_name": self.sampler,
|
221 |
+
"cfg_scale": self.scale,
|
222 |
+
"steps": self.steps,
|
223 |
+
"batch_size": 1,
|
224 |
+
"restore_faces": False,
|
225 |
+
"face_restoration_model": None,
|
226 |
+
"sd_model_name": self.model,
|
227 |
+
"sd_model_hash": self.model_hash,
|
228 |
+
"sd_vae_name": 'no vae',
|
229 |
+
"sd_vae_hash": self.model_hash,
|
230 |
+
"seed_resize_from_w": -1,
|
231 |
+
"seed_resize_from_h": -1,
|
232 |
+
"denoising_strength": self.denoising_strength,
|
233 |
+
"extra_generation_params": {
|
234 |
+
|
235 |
+
},
|
236 |
+
"index_of_first_image": 0,
|
237 |
+
"infotexts": self.repeat(
|
238 |
+
f"{self.tags}\\nNegative prompt: {self.ntags}\\nSteps: {self.steps}, Sampler: {self.sampler}, CFG scale: {self.scale}, Seed: {self.seed_list}, Size: {self.final_width}x{self.final_height}, Model hash: c7352c5d2f, Model: {self.model}, Denoising strength: {self.denoising_strength}, Clip skip: {self.clip_skip}, Version: 1.1.4"
|
239 |
+
)
|
240 |
+
,
|
241 |
+
"styles": [
|
242 |
+
|
243 |
+
],
|
244 |
+
"job_timestamp": "0",
|
245 |
+
"clip_skip": self.clip_skip,
|
246 |
+
"is_using_inpainting_conditioning": False
|
247 |
+
}
|
248 |
+
|
249 |
+
self.build_respond = {
|
250 |
+
"images": self.img,
|
251 |
+
"videos": [],
|
252 |
+
"images_url": self.img_url,
|
253 |
+
"parameters": {
|
254 |
+
"prompt": self.tags,
|
255 |
+
"negative_prompt": self.ntags,
|
256 |
+
"seed": self.seed_list,
|
257 |
+
"subseed": -1,
|
258 |
+
"subseed_strength": 0,
|
259 |
+
"seed_resize_from_h": -1,
|
260 |
+
"seed_resize_from_w": -1,
|
261 |
+
"sampler_name": '',
|
262 |
+
"batch_size": 1,
|
263 |
+
"n_iter": self.total_img_count,
|
264 |
+
"steps": self.steps,
|
265 |
+
"cfg_scale": self.scale,
|
266 |
+
"width": self.width,
|
267 |
+
"height": self.height,
|
268 |
+
"restore_faces": None,
|
269 |
+
"tiling": None,
|
270 |
+
"do_not_save_samples": None,
|
271 |
+
"do_not_save_grid": None,
|
272 |
+
"eta": None,
|
273 |
+
"denoising_strength": 0,
|
274 |
+
"s_min_uncond": None,
|
275 |
+
"s_churn": None,
|
276 |
+
"s_tmax": None,
|
277 |
+
"s_tmin": None,
|
278 |
+
"s_noise": None,
|
279 |
+
"override_settings": None,
|
280 |
+
"override_settings_restore_afterwards": True,
|
281 |
+
"refiner_checkpoint": None,
|
282 |
+
"refiner_switch_at": None,
|
283 |
+
"disable_extra_networks": False,
|
284 |
+
"comments": None,
|
285 |
+
"enable_hr": True if self.enable_hr else False,
|
286 |
+
"firstphase_width": 0,
|
287 |
+
"firstphase_height": 0,
|
288 |
+
"hr_scale": self.hr_scale,
|
289 |
+
"hr_upscaler": None,
|
290 |
+
"hr_second_pass_steps": self.hr_second_pass_steps,
|
291 |
+
"hr_resize_x": 0,
|
292 |
+
"hr_resize_y": 0,
|
293 |
+
"hr_checkpoint_name": None,
|
294 |
+
"hr_sampler_name": None,
|
295 |
+
"hr_prompt": "",
|
296 |
+
"hr_negative_prompt": "",
|
297 |
+
"sampler_index": "Euler",
|
298 |
+
"script_name": None,
|
299 |
+
"script_args": [],
|
300 |
+
"send_images": True,
|
301 |
+
"save_images": False,
|
302 |
+
"alwayson_scripts": {}
|
303 |
+
},
|
304 |
+
|
305 |
+
"info": ''
|
306 |
+
}
|
307 |
+
image = Image.open(BytesIO(self.img_btyes[0]))
|
308 |
+
self.final_width, self.final_height = image.size
|
309 |
+
|
310 |
+
str_info = json.dumps(self.build_info)
|
311 |
+
self.build_respond['info'] = str_info
|
312 |
+
|
313 |
+
def format_models_resp(self, input_list=None):
|
314 |
+
models_resp_list = []
|
315 |
+
input_list = input_list if input_list else [self.model]
|
316 |
+
for i in input_list:
|
317 |
+
built_reps = {
|
318 |
+
"title": f"{i} [{self.model_hash}]",
|
319 |
+
"model_name": i,
|
320 |
+
"hash": f"{self.model_hash}",
|
321 |
+
"sha256": "03f33720f33b67634b5da3a8bf2e374ef90ea03e85ab157fcf89bf48213eee4e",
|
322 |
+
"filename": self.backend_name,
|
323 |
+
"config": None
|
324 |
+
}
|
325 |
+
models_resp_list.append(built_reps)
|
326 |
+
|
327 |
+
return models_resp_list
|
328 |
+
|
329 |
+
@staticmethod
|
330 |
+
async def write_image(img_data, save_path):
|
331 |
+
"""
|
332 |
+
异步保存图片数据到指定路径。
|
333 |
+
:param img_data: 图片的字节数据
|
334 |
+
:param save_path: 保存图片的完整路径
|
335 |
+
"""
|
336 |
+
if "view?filename=" in str(save_path):
|
337 |
+
save_path = Path(str(save_path).replace("view?filename=", ""))
|
338 |
+
async with aiofiles.open(save_path, 'wb') as img_file:
|
339 |
+
await img_file.write(img_data)
|
340 |
+
|
341 |
+
@staticmethod
|
342 |
+
async def run_later(func, delay=1):
|
343 |
+
loop = asyncio.get_running_loop()
|
344 |
+
loop.call_later(
|
345 |
+
delay,
|
346 |
+
lambda: loop.create_task(
|
347 |
+
func
|
348 |
+
)
|
349 |
+
)
|
350 |
+
|
351 |
+
    @staticmethod
    def format_progress_api_resp(progress, start_time) -> dict:
        # Build an A1111-compatible /sdapi/v1/progress response.
        # `progress` is passed through unchanged; all state counters are
        # zeroed placeholders, and `current_image` is a fixed embedded PNG
        # preview (base64) rather than a live render.
        build_resp = {
            "progress": progress,
            "eta_relative": 0.0,
            "state": {
                "skipped": False,
                "interrupted": False,
                "job": "",
                "job_count": 0,
                "job_timestamp": start_time,
                "job_no": 0,
                "sampling_step": 0,
                "sampling_steps": 0
            },
            "current_image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyEAIAAADBzcOlAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAABmJLR0T///////8JWPfcAAAAB3RJTUUH6AgIDSUYMECLgwAAB6lJREFUaN7tmXtQU1cawE9CQkIgAUKAiIEgL5GX8ioPjStGqHSLL0Tcugyl4NKVtshYpC4y1LXSsj4othbRbV0BC4JgXRAqD01Z3iwgCqIEwjvREPJOyIMk+0forB1mHK7sjrG9vz/P/e53vvnNOd89914DV5bTw6SwvLyc0RDx+v4+0+U8evZmzfNnZ3blPhl9/dX1viJqVjsPBoVAIpPJbDb7Ml6+ffvWe3q6u7m5WVhYWFJSkkqlUhgM5pqamhwdHRkMBjs7O3p6epqbm9PZ2VmjoyMSoVBIJpPJZDLZbfbq1atWrVq1cuXKFStWMGfPnq1Zs2aVKlVi48aNbGxsFBISEuXKlXvs2DE9e/ZMSkrKyMjI5OTkpqam1tbWpqamp0+fPj09vbS0tLW1de7cuc2aNSMuLo5t27ax2WyHDh2yZ88eJycnJCQk7Nq1a/fu3XV2dmZmZkZGRmxsbFOnTh0yZAh169aFh4fz+/fvR0ZGdnZ2rl+/XhMTE/r37x8REcHW1hYpKSm2bNnC3NwcZ2fnBw8eEBwcTGFhIb1eT21tLRYWFhgYGCgvLycxMZHk5GQSEhJIS0vj5s2bGBkZsX//fkJCQnj27BmrV68mPT2d/Px8Zs6cSXZ2NpMnT2b27NkkJiYyadIkpk2bRmJiIkVFRWzYsIF58+ZRWFjI0qVLWbZsGQsWLGD06NGsWrWK+fPns3z5cm7dusXs2bOZNGkSS5YsYd68eezdu5fIyEhWrlzJqlWrWLNmDStWrGD+/PkUFxczZ84cVq9ezZw5c1i8eDGLFy9m3rx5LF26lKVLl1JSUsLKlStZsWIFK1euZOnSpZw5c4aioiKWLl3KqlWrWLhwIYsXL2bp0qUsWLCAxYsXs3TpUhYuXMiiRYuYN28ex44dIyUlheLiYtauXctPP/3EggULWLZsGWvWrGH+/PmsXbuWNWvWsHbtWhYuXMiKFStYuXIlc+bMYdGiRSxfvpzly5ezZMkSDh8+zMqVK1m3bh1r165l+fLlLF26lLVr17JhwwaWL1/OunXrWLZsGWvXriUnJ4dFixaxZMkSVqxYwYoVK1i2bBmLFy9mxYoVrFixgqVLl7J48WL27dvH8uXLWbZsGUuWLGHRokUsXLiQZcuWsWzZMpYvX866detYt24dP/zwA0uWLGHVqlWsWLGCJUuWsGrVKpYtW8bKlStZvXo1q1evZunSpaxfv57Vq1ezfPlyFi9ezLJly1i4cCGLFy9m48aNbN68mc2bN7N+/XqWLVvGqnXr2LRpExs2bGDjxo2sX7+e5cuXs3LlSlavXs3q1atZtWoVS5YsYcmSJSxfvpzi4mIOHDjAzp072blzJ7t27WLXrl3s3r2bPXv2sHfvXvbu3cuBAweIi4vjzJkzxMXFsXfvXg4cOMDevXvZuXMnO3bsYMeOHezatYs9e/Zw8OBBDh06xJEjRzh+/DjHjx/n2LFjHD16lJMnT3Ly5ElOnTrF2bNnOXv2LKdOneLkyZMcP36cI0eOcOzYMU6fPs3Zs2c5d+4c58+f58KFC1y4cIGLFy9y8eJFLl26xOXLl7l06RIXLlzg3LlznD9/ngsXLnD58mWuXr3KtWvXuH79OtevX+fGjRvcvHmT27dvc/fuXe7evcudO3e4c+cO9+/fJyMjg8zMTLKyssjKyiIzM5MHDx7w4MEDMjMzycjIIDMzk+zsbHJycsjLy+PRo0fk5OTw6NEj8vLyyM/P5/Hjxzx58oQnT55QUFDA06dPKSws5NmzZxQVFVFUVERxcTElJSWUlpZSVlZGeXk5lZWVVFVVUVVVRXV1NTU1NdTW1lJXV0d9fT0NDQ00NjbS1NREc3MzLS0ttLa20tbWRnt7Ox0dHXR2dtLV1UV3dzc9PT309vbS19dHf38/AwMDDA4OMjQ0xPDwMCMjI4yOjjI2Nsb4+DgTExNMTk4yNTXF9PQ009PTzMzM8Pz5c2ZnZ3nx4gVzc3O8fPmSV69e8fr1a968ecPbt2959+4d8/PzLCws8P79ez58+MDi4iJLS0ssLy+zsrLC6uoqnz59Ym1tjc+fP/PlyxfW19f5+vUr3759Y2Njg+/fv/Pjxw9+/vzJr1+/+P37N3/+/OHv37/8+/ePfwAA3/8BmqQcs9cAAAAASUVORK5CYIIiVBORw0KGgoAAAANSUhEUgAAADIAAAAyEAIAAADBzcOlAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAABmJLR0T///////8JWPfcAAAAB3RJTUUH6AgIDSUYMECLgwAAB6lJREFUaN7tmXtQU1cawE9CQkIgAUKAiIEgL5GX8ioPjStGqHSLL0Tcugyl4NKVtshYpC4y1LXSsj4othbRbV0BC4JgXRAqD01Z3iwgCqIEwjvREPJOyIMk+0forB1mHK7sjrG9vz/P/e53vvnNOd89914DKfI1k5rE6yNjHUzz797ZeGabdmnMYvMWAgCSh3pFF/gmawRmawgDFizjTZjQl5v3tdyGJ873T3YmRtqsRlP3BG0npdneenF8R8ds0NOnAblWeBublcxrcLJmBuRS6QF1gpanKVh6dPzn76fYY+FTUhldevaLUX9a6C/WSF8sv3j2x6K4UeTjtFbFrDen5rbpTOT4eK16pmg8gn50ldj+2UpqM4hz1iRJli9hZAT0pLTFT0nldMnZsALyFgplgiQ9L2EU1od8EvZx5d8nPEfp0wNyqfTAUarnDj8/5H0EDhGwNNuPw+zoSVIKupPLWCvwV6Yo4t1SCAEWGc3S7XV7qSt5T3zFsmo8pwvGiYesO8/fY/hwLVWkqusXN9+OrDLdjHJHPWgx4Q5woj4t6s/tXPizaG2vtyo6yWHQuWk5ma9+NFo+hGtVcL04NX+lbXAP7ifHmSBxaSup9pXJ0n9dCuuuv1GVqW/YjTHhxbvqCAS0n7Hx85FDbaJmQUxaSbe2OW/CWpYvYQR8YpVsm+XUgD9GSCd/iL2Ow+nvQl8xska+r7PQlYJLOktQCgp1FqBUV7iwSxuu1QqtVBSlF99PmaKMFzirWpRtpEpMBDY1neq1w88Pk41sM3rDQGXpETqpWpRtnJJ5rTxvXaj5ZsuKF8f3ZMyJuVz9e+IDW4ExL3rcRRoi3s89osDOt+i/Z6kTtDxtwYvzpL3rwfMtyDrn80Fg3/KrNYie9b9lYbcuXKuVX13IXVhQntCEarrm8zWTmvfUCVqe5qIqQcvTFuhflUzijTJQEA5Pv0JZ/z8M7uhgyMCyIADLggAsCwKwLAj8B/xrbj+8eKAPAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDI0LTA4LTA4VDEzOjM3OjI0KzAwOjAwxx6klgAAACV0RVh0ZGF0ZTptb2RpZnkAMjAyNC0wOC0wOFQxMzozNzoyNCswMDowMLZDHCoAAAAASUVORK5CYII=",
            "textinfo": None
        }

        return build_resp
|
371 |
+
|
372 |
+
@staticmethod
def format_vram_api_resp():
    """Build a static, A1111-compatible memory/VRAM status payload.

    The numbers are fixed placeholder statistics; backends that cannot
    query real VRAM usage reuse this canned response.
    """
    ram_stats = {
        "ram": {
            "free": 61582063428.50122,
            "used": 2704183296,
            "total": 64286246724.50122
        },
        "cuda": {
            "system": {
                "free": 4281335808,
                "used": 2160787456,
                "total": 85899345920
            },
            "active": {
                "current": 699560960,
                "peak": 3680867328
            },
            "allocated": {
                "current": 699560960,
                "peak": 3680867328
            },
            "reserved": {
                "current": 713031680,
                "peak": 3751804928
            },
            "inactive": {
                "current": 13470720,
                "peak": 650977280
            },
            "events": {
                "retries": 0,
                "oom": 0
            }
        }
    }
    return ram_stats
|
410 |
+
|
411 |
+
@staticmethod
async def http_request(
    method,
    target_url,
    headers=None,
    params=None,
    content=None,
    format=True,
    timeout=300,
    verify=True,
    http2=False,
    use_aiohttp=False,
    proxy=False
) -> Union[dict, httpx.Response, bytes, list]:
    """Shared async HTTP helper used by every backend.

    :param method: HTTP verb, e.g. "GET" / "POST".
    :param target_url: full URL to request.
    :param headers: optional header mapping.
    :param params: optional query parameters.
    :param content: raw request body.
    :param format: when True, return parsed JSON; otherwise the raw
        response (httpx.Response) or bytes (aiohttp path).
    :param timeout: total request timeout in seconds.
    :param verify: TLS certificate verification flag.
    :param http2: enable HTTP/2 on the httpx client.
    :param use_aiohttp: use aiohttp instead of httpx.
    :param proxy: route through the configured server proxy.
    :return: parsed JSON, raw response/bytes, or an error dict with
        "error"/"details" keys on failure (httpx path only).
    """
    logger = setup_logger("[HTTP_REQUEST]")

    # Resolve the proxy URL once instead of re-evaluating per scheme.
    proxy_url = init_instance.config.server_settings['proxy'] if proxy else None

    if use_aiohttp:
        client_timeout = aiohttp.ClientTimeout(total=timeout)
        async with aiohttp.ClientSession(timeout=client_timeout) as session:
            async with session.request(
                method,
                target_url,
                headers=headers,
                params=params,
                data=content,
                ssl=verify,
                proxy=proxy_url
            ) as response:
                if format:
                    return await response.json()
                return await response.read()

    async with httpx.AsyncClient(
        verify=verify,
        http2=http2,
        proxies={"http://": proxy_url, "https://": proxy_url}
    ) as client:
        try:
            response = await client.request(
                method,
                target_url,
                headers=headers,
                params=params,
                content=content,
                timeout=timeout,
            )
            response.raise_for_status()
        except httpx.RequestError:
            error_info = {"error": "Request error", "details": traceback.format_exc()}
            logger.warning(error_info)
            return error_info
        except httpx.HTTPStatusError as e:
            error_info = {"error": "HTTP error", "status_code": e.response.status_code, "details": traceback.format_exc()}
            logger.warning(error_info)
            return error_info

    return response.json() if format else response
|
476 |
+
|
477 |
+
def repeat(self, input_):
    """Return *input_* repeated once per image in this task.

    The same object is referenced ``self.total_img_count`` times (no
    copies are made), one entry per generated image.
    """
    return [input_] * self.total_img_count
|
481 |
+
|
482 |
+
async def exec_login(self):
    """Hook for backends that require an authentication step; no-op by default."""
|
484 |
+
|
485 |
+
async def check_backend_usability(self):
    """Hook for backend health probing; concrete backends may override. No-op here."""
|
487 |
+
|
488 |
+
async def get_backend_working_progress(self):
    """Estimate this backend's current progress from Redis timing records.

    Progress is inferred by comparing the elapsed time of the running
    task against a trimmed mean of previously recorded task durations.

    :return: tuple ``(progress payload, status code, backend id, status code)``.
    """
    self.get_backend_id()

    # Fix: defaults so the return statement is always safe — previously
    # `build_resp`/`sc` were unbound (NameError) when the body raised.
    build_resp = {}
    sc = 500
    avg_time = 0
    try:
        if self.redis_client.exists("backend_avg_time"):
            backend_avg_dict = json.loads(self.redis_client.get("backend_avg_time"))
            spend_time_list = backend_avg_dict.get(self.backend_id, [])
            if spend_time_list and len(spend_time_list) >= 10:
                # Trimmed mean: drop the fastest and slowest sample.
                trimmed_list = sorted(spend_time_list)[1:-1]
                avg_time = sum(trimmed_list) / len(trimmed_list) if trimmed_list else None

        workload_dict = await self.set_backend_working_status(get=True)
        start_time = workload_dict.get('start_time', None)
        end_time = workload_dict.get('end_time', None)
        current_time = time.time()

        if end_time:
            # A finished task means the backend is idle.
            progress = 0.0
        elif start_time:
            spend_time = current_time - start_time
            self.logger.info(f"当前耗时: {spend_time}")
            if avg_time:
                progress = 0.99 if spend_time > avg_time else spend_time / avg_time
            else:
                # No historical average — report "almost done" as a guess.
                progress = 0.99
        else:
            progress = 0.0

        available = await self.set_backend_working_status(get=True, key="available")
        sc = 200 if available is True else 500
        build_resp = self.format_progress_api_resp(progress, self.start_time)
    except Exception:
        traceback.print_exc()

    return build_resp, sc, self.backend_id, sc
|
529 |
+
|
530 |
+
async def send_result_to_api(self):
    """Run the generation task (with retries) and collect the result.

    Marks the backend busy in Redis for the duration. When ``self.request``
    is set, the raw request is transparently proxied to the backend;
    otherwise the backend-specific ``posting`` coroutine is queued (once
    per image for backends without native multi-image support).

    :return: self, with ``self.result`` holding an A1111-webui-style response.
    """
    if self.backend_id is None:
        self.get_backend_id()
    max_attempts = self.config.retry_times

    for attempt in range(max_attempts):
        self.start_time = time.time()

        try:
            await self.set_backend_working_status(
                params={"start_time": self.start_time, "idle": False, "end_time": None}
            )
            if self.request:
                # A raw Request object was supplied — forward it as-is.
                target_url = f"{self.backend_url}/{self.path}"
                self.logger.info(f"{_('Forwarding request')} - {target_url}")

                body = await self.request.body()
                response = await self.http_request(
                    self.request.method,
                    target_url,
                    self.request.headers,
                    self.request.query_params,
                    body,
                    False
                )

                try:
                    resp = response.json()
                except json.JSONDecodeError:
                    self.logger.error(str(response.text))
                    raise RuntimeError(_('Backend returned error'))

                self.result = JSONResponse(content=resp, status_code=response.status_code)
            elif "comfyui" in self.backend_name:
                await self.add_to_queue(self.backend_id[:24], self.posting)
                self.logger.info(_('Comfyui Backend, not using built-in multi-image generation management'))
            elif "a1111" in self.backend_name:
                await self.add_to_queue(self.backend_id[:24], self.posting)
                self.logger.info(_('A1111 Backend, not using built-in multi-image generation management'))
            else:
                # Drive one queued posting per requested image, bumping the
                # seed for every image after the first.
                self.logger.info(f"{self.backend_name}: {self.backend_id[:24]} total {self.total_img_count} images")
                for image_index in range(self.total_img_count):
                    if image_index > 0:
                        self.seed += 1
                    self.seed_list.append(self.seed)
                    await self.add_to_queue(self.backend_id[:24], self.posting)

            if self.config.server_settings['enable_nsfw_check']:
                await self.pic_audit()
            break

        except Exception as e:
            self.logger.info(f"{attempt + 1} retries")
            self.logger.error(traceback.format_exc())

            if attempt == (max_attempts - 1):
                err = traceback.format_exc()
                self.logger.error(f"{_('Over maximum retry times, posting still failed')}: {err}")
                await self.return_build_image(text=f"Exception: {e}", title="FATAL")
                await self.err_formating_to_sd_style()
                return self

        finally:
            # Always record timing and release the backend, even on failure.
            self.end_time = time.time()
            self.spend_time = self.end_time - self.start_time
            self.logger.info(_("Request completed, took %s seconds") % int(self.spend_time))
            await self.set_backend_working_status(params={"end_time": self.end_time, "idle": True})

    return self
|
610 |
+
|
611 |
+
async def post_request(self):
    """POST the prepared payload to the A1111 txt2img/img2img endpoint.

    img2img is chosen when ``self.init_images`` is set. On an OOM error
    the model is unloaded and reloaded; on success the parsed response is
    stored in ``self.result``.

    :return: True when a response (of any status) was obtained.
    """
    try:
        endpoint = "img2img" if self.init_images else "txt2img"
        post_api = f"{self.backend_url}/sdapi/v1/{endpoint}"

        response = await self.http_request(
            method="POST",
            target_url=post_api,
            headers=self.headers,
            content=json.dumps(self.payload),
            format=False,
        )

        if isinstance(response, httpx.Response):
            resp_dict = response.json()

            if response.status_code not in [200, 201]:
                self.logger.error(resp_dict)
                if resp_dict.get("error") == "OutOfMemoryError":
                    self.logger.info(_("VRAM OOM detected, auto model unload and reload"))
                    await self.unload_and_reload(self.backend_url)
            else:
                self.result = resp_dict
                self.logger.info(_("Get a respond image, processing"))
        else:
            # http_request returned an error dict instead of a response.
            self.logger.error(f"{_('Request failed, error message:')} {response.get('details')}")
        return True

    except:
        traceback.print_exc()
|
643 |
+
|
644 |
+
async def posting(self):
    """Default posting implementation (A1111-webui style request).

    Backend subclasses override this with their own protocol.
    """
    await self.post_request()
|
660 |
+
|
661 |
+
async def download_img(self, image_list=None):
    """Download every URL in ``self.img_url`` and record the images.

    Each image is stored base64-encoded in ``self.img``, raw in
    ``self.img_btyes``, and persisted via ``save_image``.

    :param image_list: unused, kept for interface compatibility.
    :raises ConnectionError: when a download returns a non-200 status.
    """
    for url in self.img_url:
        response = await self.http_request(
            method="GET",
            target_url=url,
            headers=None,
            format=False,
            verify=False,
            proxy=True
        )

        if not isinstance(response, httpx.Response):
            # http_request returned an error dict; log and move on.
            self.logger.error(f"{_('Request failed, error message:')} {response.get('details')}")
            continue

        if response.status_code != 200:
            self.logger.error(f"{_('Image download failed!')}: {response.status_code}")
            raise ConnectionError(_('Image download failed!'))

        img_data = response.read()
        self.logger.info(_("Downloading image successful"))
        self.img.append(base64.b64encode(img_data).decode('utf-8'))
        self.img_btyes.append(img_data)
        await self.save_image(img_data)
|
688 |
+
|
689 |
+
async def save_image(self, img_data, base_path="txt2img"):
    """Persist one image under saved_images/<task_type>/<date>/<workload>.

    The actual write is deferred via ``run_later``.

    :param img_data: raw image bytes.
    :param base_path: unused, kept for interface compatibility.
    """
    target_dir = Path(f'saved_images/{self.task_type}/{self.current_date}/{self.workload_name[:12]}')
    target_dir.mkdir(parents=True, exist_ok=True)
    self.save_path = target_dir

    img_filename = target_dir / Path(self.task_id).name
    await self.run_later(self.write_image(img_data, img_filename), 1)
|
696 |
+
|
697 |
+
async def unload_and_reload(self, backend_url=None):
    """Free and then re-load the checkpoint on an A1111 backend (VRAM reset).

    :param backend_url: base URL of the backend to reset.
    """
    # Unload the checkpoint to release VRAM.
    response = await self.http_request(
        method="POST",
        target_url=f"{backend_url}/sdapi/v1/unload-checkpoint",
        headers=None
    )

    if isinstance(response, httpx.Response):
        if response.status_code not in [200, 201]:
            # Fix: httpx.Response.text is a property, not a coroutine —
            # the old `await response.text()` raised TypeError here.
            error_message = response.text
            self.logger.error(f"释放模型失败,可能是webui版本太旧,未支持此API,错误: {error_message}")
    else:
        self.logger.error(f"{_('Request failed, error message:')} {response.get('details')}")

    # Reload the checkpoint.
    response = await self.http_request(
        method="POST",
        target_url=f"{backend_url}/sdapi/v1/reload-checkpoint",
        headers=None
    )

    if isinstance(response, httpx.Response):
        if response.status_code not in [200, 201]:
            error_message = response.text
            self.logger.error(f"重载模型失败,错误: {error_message}")
        else:
            self.logger.info("重载模型成功")
    else:
        self.logger.error(f"{_('Request failed, error message:')} {response.get('details')}")
|
732 |
+
|
733 |
+
async def get_backend_status(self):
    """Shared helper: probe usability, then report working progress.

    Fix: ``get_backend_working_progress`` returns a 4-tuple
    ``(payload, sc, backend_id, sc)``; the old two-name unpack
    (``resp_json, resp_status = ...``) raised ValueError. The result is
    now passed through unchanged, whatever its arity.

    :return: whatever the concrete backend's progress method returns.
    """
    await self.check_backend_usability()
    return await self.get_backend_working_progress()
|
742 |
+
|
743 |
+
async def show_progress_bar(self):
    """Render a live tqdm progress bar for the running task in the console.

    Polls ``update_progress`` every 2 seconds until ``self.post_event``
    is set by the posting task.
    """
    label = f"[SD-A1111] [{self.time}] : {self.seed}".ljust(25, "-")
    with tqdm(total=1, desc=label + "-->", bar_format="{l_bar}{bar}|{postfix}\n") as pbar:
        while not self.post_event.is_set():
            self.current_process, eta = await self.update_progress()
            pbar.update(self.current_process - pbar.n)
            pbar.set_postfix({"eta": f"{int(eta)}秒"})
            await asyncio.sleep(2)
|
757 |
+
|
758 |
+
async def update_progress(self):
    """Poll the A1111 ``/sdapi/v1/progress`` endpoint.

    :return: tuple ``(progress, eta_relative)``; ``(0.404, 0)`` on any
        failure. Fix: the old error path returned a bare float, which
        crashed callers that unpack two values (e.g. ``show_progress_bar``).
    """
    try:
        response = await self.http_request(
            method="GET",
            target_url=f"{self.backend_url}/sdapi/v1/progress",
            headers=None
        )

        if isinstance(response, httpx.Response):
            if response.status_code == 200:
                resp_json = response.json()
                return resp_json.get("progress"), resp_json.get("eta_relative")
            self.logger.error(f"获取进度失败,状态码: {response.status_code}")
            raise RuntimeError(f"获取进度失败,状态码: {response.status_code}")
        else:
            self.logger.error(f"请求失败,错误信息: {response.get('details')}")
            raise RuntimeError(f"请求失败,错误信息: {response.get('details')}")
    except Exception:
        traceback.print_exc()
        # Sentinel progress + zero ETA keeps two-value unpacking safe.
        return 0.404, 0
|
783 |
+
|
784 |
+
async def set_backend_working_status(
    self,
    params: dict = None,
    get: bool = False,
    key: str = None,
) -> bool or None:
    """Read or update this backend's workload record in Redis.

    :param params: keys to update, e.g. ``{'start_time': ..., 'idle': True}``;
        only keys already present in the record are written.
    :param get: read-only mode — return the record (or one key of it).
    :param key: with ``get``, the single key to fetch.
    :return: the requested value in get mode, otherwise True.
    """
    all_workloads: dict = json.loads(self.redis_client.get('workload').decode('utf-8'))
    my_workload: dict = all_workloads.get(self.workload_name)

    if get:
        return my_workload if key is None else my_workload.get(key, None)

    if params:
        for param_key, param_value in params.items():
            if param_key in my_workload:
                my_workload[param_key] = param_value
        all_workloads[self.workload_name] = my_workload
        self.redis_client.set('workload', json.dumps(all_workloads))

    return True
|
816 |
+
|
817 |
+
async def get_models(self) -> dict:
    """Map this backend's workload name to its available model list.

    Non-A1111 backends return a canned model response; A1111 backends are
    queried live, falling back to the canned response on failure.
    """
    if self.backend_name != self.config.backend_name_list[1]:
        respond = self.format_models_resp()
    else:
        self.backend_url = self.config.a1111webui_setting['backend_url'][self.count]
        try:
            respond = await self.http_request(
                "GET",
                f"{self.backend_url}/sdapi/v1/sd-models",
            )
        except Exception:
            self.logger.warning(f"获取模型失败")
            respond = self.format_models_resp()

    return {self.workload_name: respond}
|
845 |
+
|
846 |
+
async def get_all_prompt_style(self) -> list:
    """Collect the available prompt styles / workflows for this backend.

    Comfyui backends list their bundled workflow JSON files; A1111
    backends query ``/sdapi/v1/prompt-styles``.

    :return: list of style dicts (empty on failure).
    """
    if self.backend_name == "comfyui":
        work_flows = []
        for json_file in PATH_TO_COMFYUI_WORKFLOWS.glob("**/*.json"):
            # *_reflex.json files are node mappings, not workflows.
            if json_file.name.endswith("_reflex.json"):
                continue
            prefixed_filename = f"comfyui-work-flows-{json_file.name}".replace('.json', '')
            work_flows.append({"name": prefixed_filename, "prompt": "", "negative_prompt": ""})
        return work_flows

    resp = []
    try:
        self.backend_url = self.config.a1111webui_setting['backend_url'][self.count]
        respond = await self.http_request(
            "GET",
            f"{self.backend_url}/sdapi/v1/prompt-styles",
            format=True
        )
        if isinstance(respond, dict) and respond.get('error', None):
            self.logger.warning(f"获取预设失败")
        else:
            # Fix: http_request(format=True) already returns parsed JSON;
            # the old `await respond.json()` always raised, so styles were
            # silently dropped. The A1111 API returns a plain list here.
            resp = respond
    except Exception:
        self.logger.warning(f"获取预设失败")
    finally:
        return resp
|
881 |
+
|
882 |
+
async def pic_audit(self):
    """Run the WD tagger NSFW check on every result image.

    Images flagged as NSFW are replaced by a generated warning placeholder.
    """
    from ..utils.tagger import wd_tagger_handler

    audited_images = []
    for image in self.result['images']:
        is_nsfw = await wd_tagger_handler.tagger_main(image, 0.35, [], True)
        if is_nsfw:
            audited_images.append(await self.return_build_image())
        else:
            audited_images.append(image)

    self.result['images'] = audited_images
|
895 |
+
|
896 |
+
async def return_build_image(self, title='Warning', text='NSFW Detected'):
    """Render a 512x512 placeholder image with a title and wrapped message.

    Used e.g. when NSFW content is detected or a fatal error occurred.
    The PNG is appended to ``self.img`` / ``self.img_btyes`` and returned
    as a base64 string.

    :param title: short headline drawn at the top.
    :param text: message body, greedily word-wrapped to the image width.
    :return: the PNG encoded as a base64 string.
    """

    def draw_rounded_rectangle(draw, xy, radius, fill):
        # Compose a rounded rectangle from three rectangles + four pie slices.
        x0, y0, x1, y1 = xy
        draw.rectangle([x0 + radius, y0, x1 - radius, y1], fill=fill)
        draw.rectangle([x0, y0 + radius, x0 + radius, y1 - radius], fill=fill)
        draw.rectangle([x1 - radius, y0 + radius, x1, y1 - radius], fill=fill)
        draw.pieslice([x0, y0, x0 + 2 * radius, y0 + 2 * radius], 180, 270, fill=fill)
        draw.pieslice([x1 - 2 * radius, y0, x1, y0 + 2 * radius], 270, 360, fill=fill)
        draw.pieslice([x0, y1 - 2 * radius, x0 + 2 * radius, y1], 90, 180, fill=fill)
        draw.pieslice([x1 - 2 * radius, y1 - 2 * radius, x1, y1], 0, 90, fill=fill)

    canvas = Image.new("RGB", (512, 512), color=(255, 255, 255))
    draw = ImageDraw.Draw(canvas)

    title_font = ImageFont.load_default()
    text_font = ImageFont.load_default()

    # Title on a rounded black backdrop, top-left aligned.
    title_x, title_y = 20, 20
    title_bbox = draw.textbbox((0, 0), title, font=title_font)
    draw_rounded_rectangle(
        draw,
        (title_x - 10, title_y - 10, title_x + title_bbox[2] + 10, title_y + title_bbox[3] + 10),
        radius=10, fill=(0, 0, 0))
    draw.text((title_x, title_y), title, fill=(255, 255, 255), font=title_font)

    # Greedy word-wrap of the body text to the drawable width.
    max_text_width = canvas.width - 40
    wrapped_text = []
    current_line = ""
    for word in text.split(' '):
        candidate = f"{current_line} {word}".strip()
        candidate_bbox = draw.textbbox((0, 0), candidate, font=text_font)
        if candidate_bbox[2] - candidate_bbox[0] <= max_text_width:
            current_line = candidate
        else:
            wrapped_text.append(current_line)
            current_line = word
    wrapped_text.append(current_line)  # flush the last line

    # Each wrapped line gets its own rounded backdrop.
    text_y = title_y + 40
    for line in wrapped_text:
        text_x = 20
        text_bbox = draw.textbbox((0, 0), line, font=text_font)
        draw_rounded_rectangle(
            draw,
            (text_x - 10, text_y - 5, text_x + text_bbox[2] + 10, text_y + text_bbox[3] + 5),
            radius=10, fill=(0, 0, 0))
        draw.text((text_x, text_y), line, fill=(255, 255, 255), font=text_font)
        text_y += text_bbox[3] - text_bbox[1] + 5  # line spacing

    # Serialize to PNG and record base64 + raw bytes on the instance.
    img_byte_array = BytesIO()
    canvas.save(img_byte_array, format='PNG')
    img_byte_array.seek(0)
    raw_bytes = img_byte_array.getvalue()
    base64_image = base64.b64encode(raw_bytes).decode('utf-8')
    self.img_btyes.append(raw_bytes)
    self.img.append(base64_image)

    return base64_image
|
976 |
+
|
977 |
+
def get_backend_id(self):
    """Derive a stable backend identifier: token when set, else the URL."""
    self.backend_id = self.token if self.token else self.backend_url
|
979 |
+
|
980 |
+
async def err_formating_to_sd_style(self):
    """Expose the current state as an A1111-style response in ``self.result``."""
    self.format_api_respond()
    self.result = self.build_respond
|
DrawBridgeAPI/backend/comfyui.py
ADDED
@@ -0,0 +1,423 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import copy
|
3 |
+
import json
|
4 |
+
import random
|
5 |
+
import time
|
6 |
+
import traceback
|
7 |
+
import uuid
|
8 |
+
from pathlib import Path
|
9 |
+
from tqdm import tqdm
|
10 |
+
import os
|
11 |
+
import base64
|
12 |
+
import aiohttp
|
13 |
+
|
14 |
+
from .base import Backend
|
15 |
+
|
16 |
+
# NOTE(review): a module-level `global` is a no-op; the name is presumably
# assigned elsewhere — confirm before removing.
global __ALL_SUPPORT_NODE__
# Exclusive upper bound for RNG seeds (32-bit unsigned range).
MAX_SEED = 2 ** 32
|
18 |
+
|
19 |
+
class AIDRAW(Backend):
|
20 |
+
|
21 |
+
def __init__(self, count, payload, **kwargs):
    """Comfyui drawing backend.

    :param count: index of this backend in the comfyui config lists.
    :param payload: generation request payload, forwarded to the base class.
    """
    super().__init__(count=count, payload=payload, **kwargs)
    # NOTE(review): fixed placeholder model hash reported to clients — confirm.
    self.model_hash = "c7352c5d2f"
    self.logger = self.setup_logger('[Comfyui]')

    backend = self.config.comfyui['name'][self.count]
    self.backend_name = self.config.backend_name_list[8]
    self.workload_name = f"{self.backend_name}-{backend}"

    self.current_config: dict = self.config.comfyui_setting
    self.model = f"Comfyui - {self.current_config['name'][self.count]}"
    self.backend_url = self.current_config['backend_url'][self.count]

    # A1111 sampler names -> comfyui sampler names.
    self.reflex_dict['sampler'] = {
        "DPM++ 2M": "dpmpp_2m",
        "DPM++ SDE": "dpmpp_sde",
        "DPM++ 2M SDE": "dpmpp_2m_sde",
        "DPM++ 2M SDE Heun": "dpmpp_2m_sde",
        "DPM++ 2S a": "dpmpp_2s_ancestral",
        "DPM++ 3M SDE": "dpmpp_3m_sde",
        "Euler a": "euler_ancestral",
        "Euler": "euler",
        "LMS": "lms",
        "Heun": "heun",
        "DPM2": "dpm_2",
        "DPM2 a": "dpm_2_ancestral",
        "DPM fast": "dpm_fast",
        "DPM adaptive": "dpm_adaptive",
        "Restart": "restart",
        "HeunPP2": "heunpp2",
        "IPNDM": "ipndm",
        "IPNDM_V": "ipndm_v",
        "DEIS": "deis",
        "DDIM": "ddim",
        "DDIM CFG++": "ddim",
        "PLMS": "plms",
        "UniPC": "uni_pc",
        "LCM": "lcm",
        "DDPM": "ddpm",
        # "[Forge] Flux Realistic": None,
        # "[Forge] Flux Realistic (Slow)": None,
    }
    # A1111 scheduler names -> comfyui scheduler names.
    self.reflex_dict['scheduler'] = {
        "Automatic": "normal",
        "Karras": "karras",
        "Exponential": "exponential",
        "SGM Uniform": "sgm_uniform",
        "Simple": "simple",
        "Normal": "normal",
        "DDIM": "ddim_uniform",
        "Beta": "beta"
    }

    self.reflex_dict['parameters'] = {}

    # Translate incoming A1111-style names, with safe fallbacks.
    self.scheduler = self.reflex_dict['scheduler'].get(self.scheduler, "normal")
    self.sampler = self.reflex_dict['sampler'].get(self.sampler, "euler")

    self.model_path = self.config.comfyui['model'][self.count]

    self.logger.info(f"选择工作流{self.comfyui_api_json}")
    workflow_name = self.comfyui_api_json
    if self.comfyui_api_json:
        # Load the workflow and its companion *_reflex node-mapping file.
        workflows_dir = Path(os.path.dirname(os.path.abspath(__file__))) / ".." / "comfyui_workflows"
        with open((workflows_dir / f"{workflow_name}.json").resolve(), 'r', encoding='utf-8') as f:
            self.comfyui_api_json = json.load(f)
        with open((workflows_dir / f"{workflow_name}_reflex.json").resolve(), 'r', encoding='utf-8') as f:
            self.comfyui_api_json_reflex = json.load(f)
|
91 |
+
|
92 |
+
async def heart_beat(self, id_):
    """Follow a queued prompt over the comfyui websocket until it finishes.

    Tracks per-step progress with tqdm and, when the prompt completes,
    resolves the generated image URLs from ``/history`` into
    ``self.img_url``.

    :param id_: the comfyui prompt id to monitor.
    """
    self.logger.info(f"{id_} 开始请求")

    async def collect_outputs():
        # Fetch the history entry and record one /view URL per output image.
        history = await self.http_request(
            method="GET",
            target_url=f"{self.backend_url}/history/{id_}",
        )
        if history:
            output_node = str(self.comfyui_api_json_reflex.get('output', 9))
            for img in history[id_]['outputs'][output_node]['images']:
                self.img_url.append(f"{self.backend_url}/view?filename={img['filename']}")

    async with aiohttp.ClientSession() as session:
        ws_url = f'{self.backend_url}/ws?clientId={self.client_id}'
        async with session.ws_connect(ws_url) as ws:

            self.logger.info(f"WS连接成功: {ws_url}")
            progress_bar = None

            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    ws_msg = json.loads(msg.data)

                    if ws_msg['type'] == 'progress':
                        value = ws_msg['data']['value']
                        max_value = ws_msg['data']['max']

                        if progress_bar is None:
                            progress_bar = await asyncio.to_thread(
                                tqdm, total=max_value,
                                desc=f"Prompt ID: {ws_msg['data']['prompt_id']}",
                                unit="steps"
                            )

                        await asyncio.to_thread(progress_bar.update, value - progress_bar.n)

                    if ws_msg['type'] == 'executing':
                        # A null node means the whole prompt finished executing.
                        if ws_msg['data']['node'] is None:
                            self.logger.info(f"{id_}绘画完成!")
                            await collect_outputs()
                            await ws.close()

                elif msg.type == aiohttp.WSMsgType.ERROR:
                    self.logger.error(f"Error: {msg.data}")
                    await ws.close()
                    break

            if progress_bar is not None:
                await asyncio.to_thread(progress_bar.close)
|
155 |
+
|
156 |
+
async def update_progress(self):
    """Override: comfyui progress is tracked via the websocket, not polling."""
|
159 |
+
|
160 |
+
async def get_backend_working_progress(self):
    """Report queue-based progress for the comfyui backend.

    :return: tuple ``(progress payload, status code, backend url, status code)``.
    """
    self.get_backend_id()

    # Fix: defaults so the return is always safe — previously build_resp/sc
    # could be unbound, and the return lived in a `finally` clause that
    # also swallowed every exception raised by the body.
    build_resp = {}
    sc = 500
    try:
        response = await self.http_request(
            method="GET",
            target_url=f"{self.backend_url}/queue",
        )
        available = not response.get("error", None)

        # Binary progress: 0 when the queue is idle, 0.99 while anything runs.
        progress = 0 if len(response["queue_running"]) == 0 else 0.99

        build_resp = self.format_progress_api_resp(progress, self.start_time)
        sc = 200 if available is True else 500
    except Exception:
        traceback.print_exc()

    return build_resp, sc, self.backend_url, sc
|
186 |
+
|
187 |
+
async def check_backend_usability(self):
    """No-op: comfyui availability is checked via the queue endpoint instead."""
|
189 |
+
|
190 |
+
async def err_formating_to_sd_style(self):
    """Download the finished images and expose an A1111-style result."""
    await self.download_img()
    self.format_api_respond()
    self.result = self.build_respond
|
195 |
+
|
196 |
+
async def posting(self):
    """Submit the prepared workflow to comfyui and wait for completion.

    Uploads any init images first, patches the workflow JSON, queues the
    prompt, then follows it via the websocket heart-beat.

    :raises RuntimeError: when queueing the prompt fails.
    """
    uploaded_images = []

    if self.init_images:
        for image in self.init_images:
            resp = await self.upload_base64_image(image, uuid.uuid4().hex)
            uploaded_images.append(resp)

    self.update_api_json(uploaded_images)

    queue_payload = {
        "client_id": self.client_id,
        "prompt": self.comfyui_api_json
    }

    response = await self.http_request(
        method="POST",
        target_url=f"{self.backend_url}/prompt",
        headers=self.headers,
        content=json.dumps(queue_payload)
    )

    if response.get("error", None):
        self.logger.error(response)
        # Fix: request-level failures carry no "status_code" key; the old
        # response["status_code"] raised KeyError instead of the intended
        # RuntimeError. Fall back to the error details.
        raise RuntimeError(response.get("status_code", response.get("details")))

    self.task_id = response['prompt_id']

    await self.heart_beat(self.task_id)
    await self.err_formating_to_sd_style()
|
226 |
+
|
227 |
+
def update_api_json(self, init_images):
    """Patch the loaded ComfyUI workflow JSON with the current request.

    Node ids come from ``self.comfyui_api_json_reflex`` (the *_reflex.json
    companion files); each reflex key names a node kind ("sampler",
    "prompt", ...) whose inputs are overwritten from the request payload.
    A reflex value may also be a dict mapping node-id -> {"override": ...}
    for per-node override actions (randint / keep / append_prompt /
    append_negative_prompt / upscale[_<factor>] / value_<v>_<type> /
    image_<n>).

    Args:
        init_images: upload responses from upload_base64_image(); item 0
            feeds the "load_image" node, others are selected via the
            "image_<n>" override action.
    """
    api_json = copy.deepcopy(self.comfyui_api_json)
    # Pristine copy consulted by "keep"/"append_*"/"value" overrides.
    raw_api_json = copy.deepcopy(self.comfyui_api_json)

    # Inputs written into each node kind; keys match the reflex files.
    update_mapping = {
        "sampler": {
            "seed": self.seed,
            "steps": self.steps,
            "cfg": self.scale,
            "sampler_name": self.sampler,
            "scheduler": self.scheduler,
            "denoise": self.denoising_strength
        },
        "seed": {
            "seed": self.seed,
            "noise_seed": self.seed
        },
        "image_size": {
            "width": self.width,
            "height": self.height,
            "batch_size": self.batch_size
        },
        "prompt": {
            "text": self.tags
        },
        "negative_prompt": {
            "text": self.ntags
        },
        "checkpoint": {
            "ckpt_name": self.model_path if self.model_path else None
        },
        "latentupscale": {
            "width": int(self.width * self.hr_scale) if not self.hr_resize_x else self.hr_resize_x,
            "height": int(self.height * self.hr_scale) if not self.hr_resize_y else self.hr_resize_y,
        },
        "load_image": {
            "image": init_images[0]['name'] if self.init_images else None
        },
        "resize": {
            "width": int(self.width * self.hr_scale) if not self.hr_resize_x else self.hr_resize_x,
            "height": int(self.height * self.hr_scale) if not self.hr_resize_y else self.hr_resize_y,
        },
        "hr_steps": {
            "seed": self.seed,
            "steps": self.hr_second_pass_steps,
            "cfg": self.hr_scale,
            "sampler_name": self.sampler,
            "scheduler": self.scheduler,
            "denoise": self.denoising_strength,
        },
        "hr_prompt": {
            "text": self.hr_prompt
        },
        "hr_negative_prompt": {
            "text": self.hr_negative_prompt
        },
        "tipo": {
            "width": self.width,
            "height": self.height,
            "seed": self.seed,
            "tags": self.tags,
        },
        "append_prompt": {
        }
    }

    for item, node_id in self.comfyui_api_json_reflex.items():

        if node_id and item not in ("override", "note"):

            org_node_id = node_id

            # Normalize node_id to a list of ids. NOTE: the original used
            # isinstance(node_id, int or str), which equals isinstance(node_id, int)
            # and silently mishandled string ids (iterating their characters).
            if isinstance(node_id, list):
                pass
            elif isinstance(node_id, (int, str)):
                node_id = [node_id]
            elif isinstance(node_id, dict):
                node_id = list(node_id.keys())

            for id_ in node_id:
                id_ = str(id_)
                update_dict = api_json.get(id_, None)
                if update_dict and item in update_mapping:
                    api_json[id_]['inputs'].update(update_mapping[item])

            if isinstance(org_node_id, dict):
                for node, override_dict in org_node_id.items():
                    single_node_or = override_dict.get("override", {})

                    if single_node_or:
                        for key, override_action in single_node_or.items():

                            if override_action == "randint":
                                api_json[node]['inputs'][key] = random.randint(0, MAX_SEED)

                            elif override_action == "keep":
                                # Restore the pristine value (api_json starts as a
                                # deepcopy, so this is effectively a no-op guard).
                                api_json[node]['inputs'][key] = raw_api_json[node]['inputs'][key]

                            elif override_action == "append_prompt":
                                prompt = raw_api_json[node]['inputs'][key]
                                prompt = self.tags + prompt
                                api_json[node]['inputs'][key] = prompt

                            elif override_action == "append_negative_prompt":
                                prompt = raw_api_json[node]['inputs'][key]
                                prompt = self.ntags + prompt
                                api_json[node]['inputs'][key] = prompt

                            elif "upscale" in override_action:
                                # "upscale" or "upscale_<factor>".
                                scale = 1.5
                                if "_" in override_action:
                                    # Bug fix: the factor was left as a str, making
                                    # int(res * scale) repeat-then-parse the string.
                                    scale = float(override_action.split("_")[1])

                                if key == 'width':
                                    res = self.width
                                elif key == 'height':
                                    res = self.height
                                else:
                                    # Only width/height are upscalable; previously
                                    # 'res' was unbound for any other key.
                                    continue

                                api_json[node]['inputs'][key] = int(res * scale)

                            elif "value" in override_action:
                                # "value" keeps the pristine value;
                                # "value_<v>_<type>" forces <v> cast to <type>.
                                override_value = raw_api_json[node]['inputs'][key]
                                if "_" in override_action:
                                    override_value = override_action.split("_")[1]
                                    override_type = override_action.split("_")[2]
                                    if override_type == "int":
                                        override_value = int(override_value)
                                    elif override_type == "float":
                                        override_value = float(override_value)
                                    elif override_type == "str":
                                        override_value = str(override_value)

                                api_json[node]['inputs'][key] = override_value

                            elif "image" in override_action:
                                # "image_<n>": use the n-th uploaded init image.
                                image_id = int(override_action.split("_")[1])
                                api_json[node]['inputs'][key] = init_images[image_id]['name']

                    else:
                        update_dict = api_json.get(node, None)
                        if update_dict and item in update_mapping:
                            api_json[node]['inputs'].update(update_mapping[item])

    # (Removed: stray debug print() calls and the unused test_dict /
    # __OVERRIDE_SUPPORT_KEYS__ / __ALL_SUPPORT_NODE__ locals.)
    self.comfyui_api_json = api_json
|
406 |
+
async def upload_base64_image(self, b64_image, name, image_type="input", overwrite=False):
    """Upload a data-URI base64 image to ComfyUI's /upload/image endpoint.

    Args:
        b64_image: a "data:image/<ext>;base64,<payload>" string.
        name: file stem used for the stored image.
        image_type: ComfyUI storage category (default "input").
        overwrite: whether an existing file of the same name is replaced.

    Returns:
        The endpoint's JSON response as a dict.

    Raises:
        ValueError: when b64_image is not a data-URI image string.
    """
    # Guard clause: only data-URI images are accepted.
    if not b64_image.startswith("data:image"):
        raise ValueError("Invalid base64 image format.")

    header, payload = b64_image.split(",", 1)
    file_type = header.split(";")[0].split(":")[1].split("/")[1]

    raw_bytes = base64.b64decode(payload)

    form = aiohttp.FormData()
    form.add_field('image', raw_bytes, filename=f"{name}.{file_type}", content_type=f'image/{file_type}')
    form.add_field('type', image_type)
    form.add_field('overwrite', str(overwrite).lower())

    async with aiohttp.ClientSession() as session:
        async with session.post(f"{self.backend_url}/upload/image", data=form) as response:
            return json.loads(await response.read())
|
DrawBridgeAPI/backend/liblibai.py
ADDED
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import json
|
3 |
+
import traceback
|
4 |
+
|
5 |
+
from .base import Backend
|
6 |
+
|
7 |
+
|
8 |
+
class AIDRAW(Backend):
    """LiblibAI drawing backend (supports the site's SD1.5, XL and FLUX models)."""

    def __init__(self, count, payload, **kwargs):
        # count selects which configured LiblibAI account/model slot to use.
        super().__init__(count=count, payload=payload, **kwargs)

        self.xl = self.config.liblibai_setting['xl'][self.count]
        self.flux = self.config.liblibai_setting['flux'][self.count]
        site_name = 'LiblibAI_XL' if self.xl else 'LiblibAI'
        self.model = f"{site_name} - {self.config.liblibai_setting['model_name'][self.count]}"
        self.model_id = self.config.liblibai_setting['model'][self.count]
        self.model_hash = "c7352c5d2f"  # static placeholder hash shown to SD-WebUI clients
        self.logger = self.setup_logger('[LiblibAI]')

        token = self.config.liblibai[self.count]
        self.token = token
        self.backend_name = self.config.backend_name_list[4]
        self.workload_name = f"{self.backend_name}-{token}"

    async def heart_beat(self, id_):
        """Poll the LiblibAI progress endpoint until images appear for task ``id_``.

        Polls up to 60 times, 5 s apart.
        NOTE(review): if all 60 polls return no images the loop falls through
        silently without raising — confirm that is intended.

        Raises:
            RuntimeError: when the server reports an error or abnormal status.
        """
        self.logger.info(f"{id_}开始请求")
        for i in range(60):

            response = await self.http_request(
                method="POST",
                target_url=f"https://liblib-api.vibrou.com/gateway/sd-api/generate/progress/msg/v3/{id_}",
                headers=self.headers,
                content=json.dumps({"flag": 0}),
                verify=False
            )

            # Check the request result and handle errors.
            if response.get('error') == "error":
                self.logger.warning(f"Failed to request: {response}")
                raise RuntimeError('服务器返回错误')
            if response['code'] != 0 or response['data']['statusMsg'] == '执行异常':
                raise RuntimeError('服务器返回错误')

            images = response['data']['images']

            if images is None:
                self.logger.info(f"第{i+1}次心跳,未返回结果")
                await asyncio.sleep(5)
                continue
            else:
                # await self.set_backend_working_status(available=True)
                # NOTE(review): this inner loop variable shadows the outer
                # heartbeat counter 'i'; harmless because we break right after,
                # but worth renaming.
                for i in images:
                    if 'porn' in i['previewPath']:
                        # The API serves NSFW-flagged results under a 'porn' path segment.
                        self.nsfw_detected = True
                        self.logger.warning("API侧检测到NSFW图片")
                    else:
                        self.logger.img(f"图片url: {i['previewPath']}")
                        self.img_url.append(i['previewPath'])
                        self.comment = i['imageInfo']
                break

    async def update_progress(self):
        # Overridden: LiblibAI progress is tracked via heart_beat() instead.
        pass

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):
        """Convert the finished task into an SD-WebUI-style response."""
        if self.nsfw_detected:
            # Blocked output: substitute a locally built placeholder image.
            await self.return_build_image()
        else:
            await self.download_img()

        self.format_api_respond()

        self.result = self.build_respond

    async def posting(self):
        """Build the LiblibAI request payload (SD / XL / FLUX variants) and submit it."""

        if self.xl or self.flux:
            # XL/FLUX models get site-specific pre-prompts prepended.
            if self.xl:
                pre_tag, pre_ntag = tuple(self.config.liblibai_setting.get('preference')[self.count]['pretags']['xl'])
            elif self.flux:
                pre_tag, pre_ntag = tuple(self.config.liblibai_setting.get('preference')[self.count]['pretags']['flux'])
            self.tags = pre_tag + self.tags
            self.ntags = pre_ntag + self.ntags
            if self.enable_hr:
                # XL/FLUX have no hires-fix: emulate it by requesting the
                # upscaled resolution directly.
                self.width = int(self.width * self.hr_scale)
                self.height = int(self.height * self.hr_scale)
                self.enable_hr = False
            elif self.width * self.height < 1048576:
                # Bump small requests to roughly >= 1 megapixel.
                self.width = int(self.width * 1.5)
                self.height = int(self.height * 1.5)

        self.steps = self.config.liblibai_setting.get('preference')[self.count].get('steps', 12)

        if self.flux:
            # FLUX uses the v3 text2img schema with a hard-coded site checkpoint id.
            input_ = {
                "checkpointId": 2295774,
                "generateType": 17,
                "frontCustomerReq": {
                    "windowId": "",
                    "tabType": "txt2img",
                    "conAndSegAndGen": "gen"
                },
                "adetailerEnable": 0,
                "text2imgV3": {
                    "clipSkip": 2,
                    "checkPointName": 2295774,
                    "prompt": self.tags,
                    "negPrompt": self.ntags,
                    "seed": self.seed,
                    "randnSource": 0,
                    "samplingMethod": 31,
                    "imgCount": self.batch_size,
                    "samplingStep": self.steps,
                    "cfgScale": self.scale,
                    "width": self.width,
                    "height": self.height
                },
                "taskQueuePriority": 1
            }

        else:
            input_ = {
                "checkpointId": self.model_id,
                "generateType": 1,
                "frontCustomerReq": {
                    # "frontId": "f46f8e35-5728-4ded-b163-832c3b85009d",
                    "windowId": "",
                    "tabType": "txt2img",
                    "conAndSegAndGen": "gen"
                },
                "adetailerEnable": 0,
                "text2img": {
                    "prompt": self.tags,
                    "negativePrompt": self.ntags,
                    "extraNetwork": "",
                    "samplingMethod": 0,
                    "samplingStep": self.steps,
                    "width": self.width,
                    "height": self.height,
                    "imgCount": self.batch_size,
                    "cfgScale": self.scale,
                    "seed": self.seed,
                    "seedExtra": 0,
                    "hiResFix": 0,
                    "restoreFaces": 0,
                    "tiling": 0,
                    "clipSkip": 2,
                    "randnSource": 0,
                    "tileDiffusion": None
                },
                "taskQueuePriority": 1
            }

        # Hires-fix only applies to the classic SD1.5 payload.
        if self.enable_hr and self.flux is False and self.xl is False:

            hr_payload = {
                "hiresSteps": self.hr_second_pass_steps,
                "denoisingStrength": self.denoising_strength,
                "hiResFix": 1 if self.enable_hr else 0,
                "hiResFixInfo": {
                    "upscaler": 6,
                    "upscaleBy": self.hr_scale,
                    "resizeWidth": int(self.width * self.hr_scale),
                    "resizeHeight": int(self.height * self.hr_scale)
                }
            }

            input_['text2img'].update(hr_payload)

        new_headers = {
            "Accept": "application/json, text/plain, */*",
            "Token": self.token
        }
        self.headers.update(new_headers)

        response = await self.http_request(
            method="POST",
            target_url="https://liblib-api.vibrou.com/gateway/sd-api/generate/image",
            headers=self.headers,
            content=json.dumps(input_),
            verify=False
        )

        # Check the request result.
        if response.get('error') == "error":
            self.logger.warning(f"Failed to request: {response}")
        else:
            task = response
            if task.get('msg') == 'Insufficient power':
                self.logger.warning('费用不足!')
            self.logger.info(f"API返回{task}")
            task_id = task['data']
            await self.heart_beat(task_id)

        await self.err_formating_to_sd_style()
+
|
DrawBridgeAPI/backend/midjourney.py
ADDED
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
|
3 |
+
import aiohttp
|
4 |
+
|
5 |
+
from .base import Backend
|
6 |
+
from PIL import Image
|
7 |
+
import asyncio
|
8 |
+
import json
|
9 |
+
import traceback
|
10 |
+
import math
|
11 |
+
import zipfile
|
12 |
+
import io
|
13 |
+
import os
|
14 |
+
import aiofiles
|
15 |
+
import base64
|
16 |
+
|
17 |
+
from pathlib import Path
|
18 |
+
|
19 |
+
class AIDRAW(Backend):
    """MidJourney backend (via a midjourney-proxy style HTTP API).

    One MJ render returns a single 2x2 grid image; split_image() cuts it
    into four quadrant images for the SD-WebUI-style response.
    """

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)

        self.model = f"MidJourney"
        self.model_hash = "c7352c5d2f"  # static placeholder hash shown to SD-WebUI clients
        self.logger = self.setup_logger('[MidJourney]')

        self.backend_url = self.config.midjourney['backend_url'][self.count]
        self.backend_name = self.config.backend_name_list[10]
        self.workload_name = f"{self.backend_name}-{self.config.midjourney['name'][self.count]}"

    async def heart_beat(self, id_):
        """Poll the proxy's fetch endpoint until task ``id_`` finishes.

        Returns:
            The image URL on SUCCESS, or None when a banned prompt was
            replaced by a locally built placeholder image.

        Raises:
            Exception: when the task reports FAILED/FAILURE for other reasons.
        """
        task_url = f"{self.backend_url}/mj/task/{id_}/fetch"

        while True:
            try:
                resp = await self.http_request("GET", task_url, format=True)
                status = resp.get('status')
                content = ''

                if status == "SUCCESS":
                    content = resp['imageUrl']
                    self.img_url.append(resp['imageUrl'])
                    self.logger.img(f"任务{id_}成功完成,图片URL:{resp['imageUrl']}")
                    return content

                elif status == "FAILED":
                    content = resp.get('failReason') or '未知原因'
                    self.logger.error(f"任务处理失败,原因:{content}")

                    raise Exception(f"任务处理失败,原因:{content}")

                elif status == "NOT_START":
                    content = '任务未开始'

                elif status == "IN_PROGRESS":
                    content = '任务正在运行'
                    if resp.get('progress'):
                        content += f",进度:{resp['progress']}"

                elif status == "SUBMITTED":
                    content = '任务已提交处理'

                elif status == "FAILURE":
                    fail_reason = resp.get('failReason') or '未知原因'
                    self.logger.error(f"任务处理失败,原因:{fail_reason}")
                    if "Banned prompt detected" in fail_reason:
                        # Banned prompt: emit a placeholder instead of failing.
                        await self.return_build_image("NSFW Prompt Detected")
                        return
                    else:
                        # Bug fix: the original raised with the still-empty
                        # `content` variable here, losing the actual reason.
                        raise Exception(f"任务处理失败,原因:{fail_reason}")

                else:
                    content = status

                self.logger.info(f"任务{id_}状态:{content}")

                await asyncio.sleep(5)

            except Exception as e:
                self.logger.error(f"任务{id_}心跳监控出错: {str(e)}")
                raise

    async def update_progress(self):
        # Overridden: MJ progress is tracked via heart_beat() instead.
        pass

    async def get_shape(self):
        """Return the request's aspect ratio reduced to lowest terms, e.g. "2:3"."""
        divisor = math.gcd(self.width, self.height)

        simplified_width = self.width // divisor
        simplified_height = self.height // divisor

        return f"{simplified_width}:{simplified_height}"

    async def check_backend_usability(self):
        pass

    async def split_image(self):
        """Split the downloaded 2x2 grid image into four quadrant images,
        appending their bytes and base64 forms to self.img_btyes / self.img."""
        img = Image.open(io.BytesIO(self.img_btyes[0]))
        width, height = img.size

        half_width = width // 2
        half_height = height // 2

        coordinates = [(0, 0, half_width, half_height),
                       (half_width, 0, width, half_height),
                       (0, half_height, half_width, height),
                       (half_width, half_height, width, height)]

        images = [img.crop(c) for c in coordinates]

        images_bytes = [io.BytesIO() for _ in range(4)]
        base64_images = []

        for i in range(4):
            images[i].save(images_bytes[i], format='PNG')

            images_bytes[i].seek(0)
            base64_image = base64.b64encode(images_bytes[i].getvalue()).decode('utf-8')

            base64_images.append(base64_image)

        self.img_btyes += images_bytes
        self.img += base64_images

    async def posting(self):
        """Submit an /mj/submit/imagine task and wait for its grid result."""
        accept_ratio = await self.get_shape()

        # MJ takes the negative prompt as a --no flag.
        ntags = f"--no {self.ntags}" if self.ntags else ""

        build_prompt = f"{self.tags} --ar {accept_ratio} --seed {self.seed}" + ' ' + ntags + ' '

        payload = {
            "prompt": build_prompt
        }

        # NOTE(review): 'auth_toekn' matches the (misspelled) config key —
        # renaming it requires a coordinated config-schema change.
        if self.config.midjourney['auth_toekn'][self.count]:
            self.headers.update({"mj-api-secret": self.config.midjourney['auth_toekn'][self.count]})

        resp = await self.http_request(
            "POST",
            f"{self.backend_url}/mj/submit/imagine",
            headers=self.headers,
            content=json.dumps(payload),
            format=True
        )

        if resp.get('code') == 24:
            # Proxy-side prompt ban.
            await self.return_build_image(text="NSFW Prompt Detected")

        elif resp.get('code') == 1:
            task_id = resp.get('result')
            self.task_id = task_id
            self.logger.info(f"任务提交成功,任务id: {task_id}")

            await self.heart_beat(task_id)
            await self.download_img()
            await self.split_image()

        self.format_api_respond()
        self.result = self.build_respond
DrawBridgeAPI/backend/novelai.py
ADDED
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
|
3 |
+
import aiohttp
|
4 |
+
|
5 |
+
from .base import Backend
|
6 |
+
import asyncio
|
7 |
+
import json
|
8 |
+
import traceback
|
9 |
+
import zipfile
|
10 |
+
import io
|
11 |
+
import os
|
12 |
+
import aiofiles
|
13 |
+
import base64
|
14 |
+
|
15 |
+
from pathlib import Path
|
16 |
+
|
17 |
+
class AIDRAW(Backend):
    """NovelAI image-generation backend.

    NovelAI returns a ZIP of PNGs; it is extracted to save_path and the
    files are re-encoded to base64 for the SD-WebUI-style response.
    """

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)

        self.model = f"NovelAI - {self.config.novelai_setting['model'][self.count]}"
        self.model_hash = "c7352c5d2f"  # static placeholder hash shown to SD-WebUI clients
        self.logger = self.setup_logger('[NovelAI]')

        token = self.config.novelai[self.count]
        self.token = token
        self.backend_name = self.config.backend_name_list[9]
        self.workload_name = f"{self.backend_name}-{token}"

        self.save_path = Path(f'saved_images/{self.task_type}/{self.current_date}/{self.workload_name[:12]}')

        # SD-WebUI sampler name -> NovelAI sampler id.
        self.reflex_dict['sampler'] = {
            "DPM++ 2M": "k_dpmpp_2m",
            "DPM++ SDE": "k_dpmpp_sde",
            "DPM++ 2M SDE": "k_dpmpp_2m_sde",
            "DPM++ 2S a": "k_dpmpp_2s_ancestral",
            "Euler a": "k_euler_ancestral",
            "Euler": "k_euler",
            "DDIM": "ddim_v3"
        }

    async def update_progress(self):
        # Overridden: NovelAI exposes no progress endpoint.
        pass

    async def get_shape(self):
        """Snap the requested size to the closest NovelAI-supported resolution
        (by aspect ratio); mutates self.width/self.height and returns the key."""
        aspect_ratio = self.width / self.height

        resolutions = {
            "832x1216": (832, 1216),
            "1216x832": (1216, 832),
            "1024x1024": (1024, 1024),
        }

        closest_resolution = min(resolutions.keys(),
                                 key=lambda r: abs((resolutions[r][0] / resolutions[r][1]) - aspect_ratio))

        self.width, self.height = resolutions[closest_resolution]

        return closest_resolution

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):
        """Convert the finished task into an SD-WebUI-style response."""
        if self.nsfw_detected:
            await self.return_build_image()

        self.format_api_respond()

        self.result = self.build_respond

    async def posting(self):
        """Send a generate-image request, unpack the ZIP result, build the response."""
        self.sampler = self.reflex_dict['sampler'].get(self.sampler, "k_euler_ancestral")

        header = {
            "authorization": "Bearer " + self.token,
            ":authority": "https://api.novelai.net",
            ":path": "/ai/generate-image",
            "content-type": "application/json",
            "referer": "https://novelai.net",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36",
        }

        post_api = "https://image.novelai.net/ai/generate-image"

        await self.get_shape()

        parameters = {
            "width": self.width,
            "height": self.height,
            "qualityToggle": False,
            "scale": self.scale,
            "sampler": self.sampler,
            "steps": self.steps,
            "seed": self.seed,
            "n_samples": 1,
            "ucPreset": 0,
            "negative_prompt": self.ntags,
        }

        json_data = {
            "input": self.tags,
            "model": self.config.novelai_setting['model'][self.count],
            "parameters": parameters
        }

        async def send_request():
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=300)) as session:
                # Retry only while rate-limited (HTTP 429); any other response
                # is handled once and the coroutine returns.
                while True:
                    async with session.post(
                        post_api,
                        headers=header,
                        json=json_data,
                        ssl=False,
                        proxy=self.config.server_settings['proxy']
                    ) as response:

                        if response.status == 429:
                            resp_text = await response.json()
                            if resp_text['message'] == 'Rate limited':
                                raise Exception("触发频率限制")
                            self.logger.warning(f"token繁忙中..., {resp_text}")
                            wait_time = 5
                            await asyncio.sleep(wait_time)
                            continue

                        response_data = await response.read()
                        try:
                            with zipfile.ZipFile(io.BytesIO(response_data)) as z:
                                z.extractall(self.save_path)
                            # Bug fix: the original never returned here, so the
                            # while-loop re-posted the request forever.
                            return
                        except zipfile.BadZipFile:
                            # Not a ZIP: try to interpret the body as a JSON error.
                            # Bug fix: the original checked statusCode inside the
                            # json-parse *except* branch, where resp_text could be
                            # unbound.
                            try:
                                resp_text = json.loads(response_data)
                            except (ValueError, UnicodeDecodeError):
                                self.logger.warning("NovelAI返回了无法解析的响应")
                                return
                            if resp_text.get('statusCode') == 402:
                                self.logger.warning(f"token余额不足, {resp_text}")
                            return

        # Robustness: ensure the extraction directory exists (was commented out).
        self.save_path.mkdir(parents=True, exist_ok=True)

        await send_request()

        await self.images_to_base64(self.save_path)

        await self.err_formating_to_sd_style()

    async def images_to_base64(self, save_path):
        """Load every extracted PNG under save_path into self.img (base64)
        and self.img_btyes (raw bytes)."""
        for filename in os.listdir(save_path):
            if filename.endswith('.png'):
                file_path = os.path.join(save_path, filename)
                async with aiofiles.open(file_path, "rb") as image_file:
                    image_data = await image_file.read()
                    encoded_string = base64.b64encode(image_data).decode('utf-8')
                    self.img.append(encoded_string)
                    self.img_btyes.append(image_data)
DrawBridgeAPI/backend/seaart.py
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import json
|
3 |
+
import traceback
|
4 |
+
|
5 |
+
from .base import Backend
|
6 |
+
|
7 |
+
|
8 |
+
class AIDRAW(Backend):
    """SeaArt drawing backend."""

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)
        # Needs updating (original upstream note kept).
        self.model = f"SeaArt - {self.config.seaart_setting['model'][self.count]}"
        self.model_hash = "c7352c5d2f"  # static placeholder hash shown to SD-WebUI clients
        self.logger = self.setup_logger('[SeaArt]')
        token = self.config.seaart[self.count]

        self.token = token
        self.backend_name = self.config.backend_name_list[6]
        self.workload_name = f"{self.backend_name}-{token}"

    async def heart_beat(self, id_):
        """Poll SeaArt's batch-progress endpoint until image URLs are returned.

        Polls up to 60 times, 5 s apart.

        Raises:
            RuntimeError: on a request error, or when 60 polls pass without
                a completed result.
        """
        self.logger.info(f"{id_} 开始请求")
        data = json.dumps({"task_ids": [id_]})
        for i in range(60):
            response = await self.http_request(
                method="POST",
                target_url="https://www.seaart.me/api/v1/task/batch-progress",
                headers=self.headers,
                content=data
            )

            if isinstance(response, dict) and 'error' in response:
                raise RuntimeError(f"请求失败,错误信息: {response.get('details')}")
            else:
                items = response.get('data', {}).get('items', [])

                if not items:
                    self.logger.info(f"第{i + 1}次心跳,未返回结果")
                    await asyncio.sleep(5)
                    continue

                for item in items:
                    urls = item.get("img_uris")

                    if urls is None:
                        # NOTE(review): this `continue` only advances to the
                        # next *item*, not the next heartbeat round — confirm
                        # that is the intended behavior.
                        self.logger.info(f"第{i + 1}次心跳,未返回结果")
                        await asyncio.sleep(5)
                        continue

                    elif isinstance(urls, list):
                        for url in urls:
                            self.logger.img(f"图片url: {url['url']}")
                            self.img_url.append(url['url'])
                        return

        raise RuntimeError(f"任务 {id_} 在60次心跳后仍未完成")

    async def update_progress(self):
        # Overridden: SeaArt progress is tracked via heart_beat() instead.
        pass

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):
        """Download results and convert them into an SD-WebUI-style response."""

        await self.download_img()

        self.format_api_respond()

        self.result = self.build_respond

    async def posting(self):
        """Build a SeaArt task-create payload and submit it."""

        input_ = {
            "action": 1,
            # NOTE(review): hard-coded art model number — presumably it should
            # be derived from seaart_setting['model']; confirm against the API.
            "art_model_no": "1a486c58c2aa0601b57ddc263fc350d0",
            "category": 1,
            "speed_type": 1,
            "meta":
            {
                "prompt": self.tags,
                "negative_prompt": self.ntags,
                "restore_faces": self.restore_faces,
                "seed": self.seed,
                "sampler_name": self.sampler,
                "width": self.width,
                "height": self.height,
                "steps": self.steps,
                "cfg_scale": self.scale,
                "lora_models": [],
                "vae": "vae-ft-mse-840000-ema-pruned",
                "clip_skip": 1,
                "hr_second_pass_steps": 20,
                "lcm_mode": 0,
                "n_iter": 1,
                "embeddings": []
            }
        }

        if self.enable_hr:
            # Hires-fix parameters are merged into the meta block.
            hr_payload = {
                "hr_second_pass_steps": self.hr_second_pass_steps,
                "enable_hr": True,
                "hr_upscaler": "4x-UltraSharp",
                "hr_scale": self.hr_scale,
            }

            input_['meta'].update(hr_payload)

        new_headers = {
            "Accept": "application/json, text/plain, */*",
            "Token": self.token
        }

        self.headers.update(new_headers)

        data = json.dumps(input_)
        response = await self.http_request(
            method="POST",
            target_url="https://www.seaart.me/api/v1/task/create",
            headers=self.headers,
            content=data
        )

        if isinstance(response, dict) and 'error' in response:
            self.logger.warning(f"{response.get('details')}")
        else:
            task = response
            task_id = task.get('data', {}).get('id')

            if task_id:
                await self.heart_beat(task_id)

        await self.err_formating_to_sd_style()
|
DrawBridgeAPI/backend/tusiart.py
ADDED
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import json
|
3 |
+
import traceback
|
4 |
+
|
5 |
+
from .base import Backend
|
6 |
+
|
7 |
+
|
8 |
+
class AIDRAW(Backend):
    """Drawing backend for TusiArt (tusiart.com).

    Submits a TXT2IMG task through the TusiArt web API, polls the task list
    until the generated image URLs appear, then converts the result into the
    SD-WebUI response shape shared by all backends.
    """

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)

        # Display name shown to clients; embeds the configured model id.
        self.model = f"TusiArt - tusiart.com/models/{self.config.tusiart_setting['model'][self.count]}"
        self.model_hash = "c7352c5d2f"
        self.logger = self.setup_logger('[TusiArt]')

        token = self.config.tusiart[self.count]
        self.token = token
        # Index 5 in backend_name_list is 'tusiart' (see base_config.py).
        self.backend_name = self.config.backend_name_list[5]
        self.workload_name = f"{self.backend_name}-{token}"

    async def heart_beat(self, id_):
        """Poll the task list every 5s until task *id_* finishes.

        Collects result URLs into ``self.img_url``.

        Raises:
            RuntimeError: if a poll request errors out, or the task has not
                finished after 60 polls (~5 minutes).
        """
        self.logger.info(f"{id_}开始请求")
        self.headers['referer'] = "https://tusiart.com/models"
        # posting() adds 'sec-ch-ua' before calling us; use pop() instead of
        # ``del`` so a missing key (e.g. a second call) cannot raise KeyError.
        self.headers.pop('sec-ch-ua', None)

        for i in range(60):
            await asyncio.sleep(5)
            self.logger.info(f"第{i + 1}次心跳")
            response = await self.http_request(
                method="GET",
                target_url='https://api.tusiart.cn/works/v1/works/tasks?size=20&cursor=0&returnAllTask=true',
                headers=self.headers
            )

            if isinstance(response, dict) and 'error' in response:
                raise RuntimeError(f"Request failed with error: {response.get('details')}")

            all_tasks = response['data']['tasks']
            task_found = False
            for task in all_tasks:
                if task['taskId'] != id_:
                    continue
                task_found = True
                if task['status'] == 'WAITING':
                    # Still queued; wait for the next poll.
                    break
                elif task['status'] == 'FINISH':
                    matched = False
                    for img in task['items']:
                        # Only URLs on the asset host are real results.
                        if 'workspace.tusiassets.com' in img['url']:
                            self.logger.img(f"图片url: {img['url']}")
                            self.img_url.append(img['url'])
                            matched = True

                    if matched:
                        return
                    self.logger.info(f"第{i + 1}次心跳,FINISH状态下未找到符合条件的URL")
                    await asyncio.sleep(5)
                    break
            if not task_found:
                self.logger.info(f"任务 {id_} 未找到")
                await asyncio.sleep(5)
                continue

        raise RuntimeError(f"任务 {id_} 在 {60} 次轮询后仍未完成")

    async def update_progress(self):
        # Progress is tracked by heart_beat(); override the base hook as a no-op.
        pass

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):
        """Download the generated images and build the SD-WebUI style response."""
        await self.download_img()
        self.format_api_respond()
        self.result = self.build_respond

    async def posting(self):
        """Build the TXT2IMG payload, submit the task and wait for completion."""

        self.sampler = "Euler a"

        input_ = {
            "params": {
                "baseModel": {
                    "modelId": self.config.tusiart_setting['model'][self.count],
                    "modelFileId": "708770380970509676"
                },
                "sdxl": {"refiner": False},
                "models": [],
                "embeddingModels": [],
                "sdVae": "Automatic",
                "prompt": self.tags,
                "negativePrompt": self.ntags,
                "height": self.height,
                "width": self.width,
                "imageCount": self.total_img_count,
                "steps": self.steps,
                "images": [],
                "cfgScale": self.scale,
                # TusiArt expects the seed as a string.
                "seed": str(self.seed),
                "clipSkip": 2,
                "etaNoiseSeedDelta": 31337,
                "v1Clip": False,
                "samplerName": self.sampler
            },
            "taskType": "TXT2IMG",
            "isRemix": False,
            "captchaType": "CLOUDFLARE_TURNSTILE"
        }

        if self.enable_hr:
            # Hires-fix parameters mirror SD-WebUI naming, in camelCase.
            hr_payload = {
                "enableHr": True,
                "hrUpscaler": "R-ESRGAN 4x+ Anime6B",
                "hrSecondPassSteps": self.hr_second_pass_steps,
                "denoisingStrength": self.denoising_strength,
                "hrResizeX": int(self.width * self.hr_scale),
                "hrResizeY": int(self.height * self.hr_scale)
            }

            input_['params'].update(hr_payload)

        new_headers = {
            "Authorization": f"Bearer {self.token}",
            "Token": self.token,
            "referer": self.config.tusiart_setting['referer'][self.count],
            "sec-ch-ua": 'Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127'
        }
        self.headers.update(new_headers)

        data = json.dumps(input_)

        response = await self.http_request(
            method="POST",
            target_url="https://api.tusiart.cn/works/v1/works/task",
            headers=self.headers,
            content=data
        )

        if isinstance(response, dict) and 'error' in response:
            # Was a silent ``pass``: log the failure so it is visible,
            # matching the error handling of the sibling backends.
            self.logger.warning(f"{response.get('details')}")
        else:
            task = response
            # Code 1300100 means a captcha challenge blocked the request.
            if task.get('code') == '1300100':
                error_text = f"""
后端:{self.config.tusiart_setting['note'][self.count]} 遇到人机验证,需到验证。
请前往https://tusiart.com/使用一次生图来触发验证码。
后端已被标记为不可使用,如需继续使用请重启API
"""
                self.logger.warning("遇到人机验证!")
                raise RuntimeError(error_text)
            task_id = task['data']['task']['taskId']
            await self.heart_beat(task_id)

        await self.err_formating_to_sd_style()
|
165 |
+
|
166 |
+
|
DrawBridgeAPI/backend/yunjie.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import json
|
3 |
+
import traceback
|
4 |
+
|
5 |
+
from .base import Backend
|
6 |
+
|
7 |
+
|
8 |
+
class AIDRAW(Backend):
    """Drawing backend for YunJie (yunjie.art).

    Submits an image-generation task and polls the progress endpoint until a
    result URL is available, then converts the result into the shared
    SD-WebUI response shape.
    """

    def __init__(self, count, payload, **kwargs):
        super().__init__(count=count, payload=payload, **kwargs)
        # TODO(review): original note said "需要更改" (needs changing) —
        # presumably the hard-coded model below; confirm per deployment.
        self.model = f"YunJie - {self.config.yunjie_setting['model'][self.count]}"
        self.model_hash = "c7352c5d2f"
        self.logger = self.setup_logger('[YunJie]')
        token = self.config.yunjie[self.count]

        self.token = token
        # Index 7 in backend_name_list is 'yunjie' (see base_config.py).
        self.backend_name = self.config.backend_name_list[7]
        self.workload_name = f"{self.backend_name}-{token}"

    async def heart_beat(self, id_):
        """Poll task progress every 5s until an image URL is returned.

        Raises:
            RuntimeError: on request error, expired token, or when no result
                arrives within 60 polls (~5 minutes).
        """
        self.logger.info(f"{id_} 开始请求")
        for i in range(60):
            await asyncio.sleep(5)

            data = json.dumps({"taskId": id_})
            response = await self.http_request(
                method="POST",
                target_url="https://www.yunjie.art/rayvision/aigc/customer/task/progress",
                headers=self.headers,
                content=data
            )

            if isinstance(response, dict) and 'error' in response:
                raise RuntimeError(f"请求失败,错误信息: {response.get('details')}")

            resp_json = response
            if resp_json['code'] == "Account.Token.Expired":
                error_text = f"""
后端:{self.config.yunjie_setting['note'][self.count]} token过期。
请前往https://www.yunjie.art/ 登录重新获取token
"""
                self.logger.warning("token过期")
                raise RuntimeError(error_text)
            items = resp_json.get('data', {}).get('data', [])

            if not items:
                # Fixed: this line previously logged on every heartbeat, even
                # when a result was present.
                self.logger.info(f"第{i + 1}次心跳,未返回结果")
                continue

            for item in items:
                url = item.get("url")

                if url:
                    self.logger.img(f"图片url: {url}")
                    self.img_url.append(url)
                    return

        raise RuntimeError(f"任务 {id_} 在60次心跳后仍未完成")

    async def update_progress(self):
        # Progress is tracked by heart_beat(); override the base hook as a no-op.
        pass

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):
        """Download the generated images and build the SD-WebUI style response."""
        await self.download_img()
        self.format_api_respond()
        self.result = self.build_respond

    async def posting(self):
        """Build the generation payload, submit it and wait for the result."""

        input_ = {
            "genModel": "advance",
            "initImage": "",
            # Hard-coded model UUID; presumably tied to the account — verify.
            "modelUuid": "MGC-17d172ee37c1b000",
            "samplingMethod": self.sampler,
            "cfgScale": self.scale,
            "samplingSteps": self.steps,
            "plugins": [],
            "clipSkip": 2,
            "etaNoiseSeedDelta": 31337,
            "prompt": self.tags,
            "negativePrompt": self.ntags,
            "resolutionX": self.width,
            "resolutionY": self.height,
            "genCount": self.total_img_count,
            "seed": self.seed,
            "tags": []
        }

        if self.enable_hr:
            hr_payload = {
                "hires": {
                    "hrSecondPassSteps": self.hr_second_pass_steps,
                    "denoisingStrength": self.denoising_strength,
                    "hrScale": self.hr_scale,
                    "hrUpscaler": "R-ESRGAN 4x+"
                }
            }

            input_.update(hr_payload)

        new_headers = {
            "Token": self.token
        }
        self.headers.update(new_headers)
        data = json.dumps(input_)

        # Submit the generation request via the shared http_request helper.
        response = await self.http_request(
            method="POST",
            target_url="https://www.yunjie.art/rayvision/aigc/customer/task/imageGen",
            headers=self.headers,
            content=data
        )

        # Fixed: was ``response.get("error", None)`` which raises
        # AttributeError if the helper ever returns a non-dict; guard with
        # isinstance, consistent with the sibling backends.
        if isinstance(response, dict) and response.get("error"):
            self.logger.error(f"请求失败,错误信息: {response.get('details')}")
        else:
            task = response
            task_id = task['data']['taskId']
            await self.heart_beat(task_id)
            await self.err_formating_to_sd_style()
|
133 |
+
|
DrawBridgeAPI/base_config.py
ADDED
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import yaml as yaml_
import shutil
import redis
import json
import logging
import os
import traceback
import sys

import pydantic
from packaging import version

pyd_version = pydantic.__version__

# pydantic v2 moved BaseSettings into the separate ``pydantic_settings``
# package; keep compatibility with both major versions.
if version.parse(pyd_version) < version.parse("2.0"):
    from pydantic import BaseSettings
else:
    try:
        from pydantic_settings import BaseSettings
    except ImportError:
        # Fixed: was a bare ``except:`` — only a missing module should
        # trigger the self-install fallback below.
        traceback.print_exc()
        import subprocess
        subprocess.run([sys.executable, "-m", "pip", "install", "pydantic_settings"])
        from pydantic_settings import BaseSettings

from pathlib import Path

# Module-level Redis handle placeholder (populated elsewhere).
redis_client = None

# Absolute directory of this package, used to locate bundled resources.
api_current_dir = os.path.dirname(os.path.abspath(__file__))
|
31 |
+
|
32 |
+
|
33 |
+
class CustomFormatter(logging.Formatter):
    # NOTE(review): this class is shadowed by the second ``CustomFormatter``
    # definition later in this module and is therefore dead code; kept only
    # to avoid changing module behavior in a documentation pass.
    def __init__(self, fmt=None, datefmt=None, style='%', prefix="[MAIN]"):
        super().__init__(fmt, datefmt, style)
        self.prefix = prefix

    def format(self, record):
        # Temporarily prepend the prefix to the record message, format it,
        # then restore the original so other handlers are unaffected.
        original_msg = record.msg
        record.msg = f"{self.prefix} {original_msg}"
        formatted_msg = super().format(record)
        record.msg = original_msg  # restore the original message
        return formatted_msg
|
44 |
+
|
45 |
+
|
46 |
+
# Default settings block for backends that have no token configured.
# (The original comment here referred to the logger cache, which actually
# lives further down as ``loggers``.)
empty_dict = {"token": None}
|
49 |
+
|
50 |
+
import logging
|
51 |
+
|
52 |
+
|
53 |
+
class CustomFormatter(logging.Formatter):
    """Formatter with a fixed-color prefix and per-level colored tags.

    Standard levels get a blue prefix; the custom IMG level (25) gets a
    yellow prefix. Unknown level numbers fall back to the IMG format.
    """

    def __init__(self, prefix="", img_prefix="", *args, **kwargs):
        super().__init__(*args, **kwargs)
        reset = "\033[0m"
        self.prefix = f"\033[94m{prefix}{reset}"       # fixed blue prefix
        self.img_prefix = f"\033[93m{img_prefix}{reset}"  # fixed yellow prefix
        # ANSI color code and tag text per standard level.
        level_styles = {
            logging.DEBUG: ("94", "DEBUG"),
            logging.INFO: ("92", "INFO"),
            logging.WARNING: ("93", "WARNING"),
            logging.ERROR: ("91", "ERROR"),
            logging.CRITICAL: ("95", "CRITICAL"),
        }
        self.FORMATS = {
            level: f"{self.prefix} \033[{color}m[{tag}]{reset} %(message)s"
            for level, (color, tag) in level_styles.items()
        }
        # Yellow-prefixed format used for the custom IMG level.
        self.FORMATS["IMG"] = f"{self.img_prefix} \033[93m[IMG]{reset} %(message)s"

    def format(self, record):
        """Format *record* with the level-appropriate colored layout."""
        fmt = self.FORMATS.get(record.levelno)
        if fmt is None:
            fmt = self.FORMATS.get("IMG")
        return logging.Formatter(fmt).format(record)
|
73 |
+
|
74 |
+
|
75 |
+
class CustomLogger(logging.Logger):
    """Logger subclass that adds an ``img`` method at custom level 25."""

    def __init__(self, name, level=logging.DEBUG):
        super().__init__(name, level)
        # Custom level between INFO (20) and WARNING (30); register its name.
        self.img_level = 25
        logging.addLevelName(self.img_level, "IMG")

    def img(self, msg, *args, **kwargs):
        """Log *msg* at the IMG level, honoring the logger's level filter."""
        lvl = self.img_level
        if not self.isEnabledFor(lvl):
            return
        self._log(lvl, msg, args, **kwargs)
|
86 |
+
|
87 |
+
|
88 |
+
# Cache of loggers already created by setup_logger(), keyed by prefix.
loggers = {}
|
89 |
+
|
90 |
+
|
91 |
+
def setup_logger(custom_prefix="[MAIN]"):
    """Create (or return a cached) CustomLogger for *custom_prefix*.

    Side effects: opens ``log.log``, ``log_error.log`` and ``log_img.log``
    in the current working directory.

    Returns:
        CustomLogger: logger with console + three file handlers attached.
    """
    # Reuse the existing logger for this prefix, if one was already built.
    if custom_prefix in loggers:
        return loggers[custom_prefix]

    # Use the custom Logger class so the extra ``img`` level/method exists.
    logger = CustomLogger(custom_prefix)
    logger.setLevel(logging.DEBUG)

    # Console handler at DEBUG level.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)

    # File handler saving all records to log.log.
    file_handler = logging.FileHandler('log.log')
    file_handler.setLevel(logging.DEBUG)

    # Error-and-above records go to log_error.log.
    error_file_handler = logging.FileHandler('log_error.log')
    error_file_handler.setLevel(logging.ERROR)

    # IMG-level (custom level 25) records go to log_img.log.
    img_file_handler = logging.FileHandler('log_img.log')
    img_file_handler.setLevel(logger.img_level)

    # One colored formatter shared by all handlers.
    formatter = CustomFormatter(prefix=custom_prefix, img_prefix=custom_prefix)
    console_handler.setFormatter(formatter)
    file_handler.setFormatter(formatter)
    error_file_handler.setFormatter(formatter)
    img_file_handler.setFormatter(formatter)

    # Attach all handlers to the logger.
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    logger.addHandler(error_file_handler)
    logger.addHandler(img_file_handler)

    # Cache so subsequent calls with the same prefix reuse this logger.
    loggers[custom_prefix] = logger

    return logger
|
133 |
+
|
134 |
+
|
135 |
+
class Config(BaseSettings):
    """Application settings, populated from the YAML config file at startup.

    Most list/dict fields start empty and are filled in by
    ``ConfigInit.init()`` after the YAML file is parsed.
    """

    # Ordered backend kind names; index positions are relied on by the
    # backend classes (e.g. tusiart uses backend_name_list[5]).
    backend_name_list: list = []

    # Per-backend raw settings blocks from the YAML file.
    civitai_setting: dict = empty_dict
    a1111webui_setting: dict = {"backend_url": None}
    fal_ai_setting: dict = empty_dict
    replicate_setting: dict = empty_dict
    liblibai_setting: dict = empty_dict
    tusiart_setting: dict = empty_dict
    seaart_setting: dict = empty_dict
    yunjie_setting: dict = empty_dict
    comfyui_setting: dict = empty_dict
    novelai_setting: dict = empty_dict
    midjourney_setting: dict = empty_dict

    # Flattened token/connection lists, assigned by ConfigInit.init().
    # Fixed: was ``list or None`` — that expression just evaluates to
    # ``list``, so the plain annotation is the actual (unchanged) behavior.
    civitai: list = []
    a1111webui: list = []
    fal_ai: list = []
    replicate: list = []
    liblibai: list = []
    tusiart: list = []
    seaart: list = []
    yunjie: list = []
    comfyui: list = []
    novelai: list = []
    midjourney: list = []

    # Display-name -> URL/token maps per backend kind.
    civitai_name: dict = {}
    a1111webui_name: dict = {}
    fal_ai_name: dict = {}
    replicate_name: dict = {}
    liblibai_name: dict = {}
    tusiart_name: dict = {}
    seaart_name: dict = {}
    yunjie_name: dict = {}
    comfyui_name: dict = {}
    novelai_name: dict = {}
    midjourney_name: dict = {}

    # Fixed: ``server_settings`` was declared twice (first as ``dict = None``,
    # then as ``dict = {}``); only the later declaration was effective, so a
    # single ``{}`` default preserves behavior.
    server_settings: dict = {}
    retry_times: int = 3
    proxy: str = ''

    # Runtime workload bookkeeping, keyed by "<backend>-<name>".
    workload_dict: dict = {}

    # Template entry copied into workload_dict for each backend instance.
    base_workload_dict: dict = {
        "start_time": None,
        "end_time": None,
        "idle": True,
        "available": True,
        "fault": False
    }

    models_list: list = []

    # NOTE(review): ConfigInit.init() actually assigns a (dict, tuple) pair
    # here despite the dict annotation — confirm intended shape.
    name_url: dict = {}
|
194 |
+
|
195 |
+
|
196 |
+
def package_import(copy_to_config_path):
    """Copy the bundled example config file to *copy_to_config_path*."""
    module_dir = Path(os.path.abspath(__file__)).parent
    template = (module_dir / "config_example.yaml").resolve()
    shutil.copy(template, copy_to_config_path)
|
200 |
+
|
201 |
+
|
202 |
+
class ConfigInit:
    """Loads the YAML config, builds workload/name tables and seeds Redis.

    Usage: call ``init(config_file_path)`` once at startup; the resulting
    Config object is then available as ``self.config``.
    """

    def __init__(self):
        self.config = None             # Config instance, set by init()
        self.config_file_path = None   # path to the YAML file, set by init()
        self.logger = setup_logger(custom_prefix="[INIT]")
        self.redis_client = None       # redis.Redis, connected in init()

    def load_config(self):
        """Parse the YAML file at ``self.config_file_path`` into a Config."""
        with open(self.config_file_path, "r", encoding="utf-8") as f:
            yaml_config = yaml_.load(f, Loader=yaml_.FullLoader)
            config = Config(**yaml_config)
        self.logger.info('Loading config file completed')

        return config

    def init(self, config_file_path):
        """Initialise global state: config, workload dict and Redis keys.

        Side effects: prints a banner, connects to Redis and overwrites the
        'workload', 'models' and 'styles' keys there.
        """

        self.config_file_path = config_file_path
        config = self.load_config()

        # Index positions here must match the indices used by the backend
        # classes (e.g. tusiart -> 5, yunjie -> 7, comfyui -> 8).
        config.backend_name_list = ['civitai', 'a1111', 'falai', 'replicate', 'liblibai', 'tusiart', 'seaart', 'yunjie',
                                    'comfyui', 'novelai', 'midjourney']

        welcome_txt = '''
欢迎使用
 _____                          ____           _      _                      _____   _____
|  __ \\                        |  _ \\         (_)    | |                    |  __ \\ |_   _|
| |  | |  _ __    __ _  __      __| |_) |  _ __  _    __| |  __ _    ___     / \\   | |__) |  | |
| |  | | | '__| / _` | \\ \\ /\\ / / |  _ <  | '__| | | / _` | / _` | / _ \\ / /\\ \\ | ___/  | |
| |__| | | |   | (_| |  \\ V  V /  | |_) | | |  | | | (_| | | (_| | | __/ / ____ \\ | |  _| |_
|_____/  |_|    \\__,_|   \\_/\\_/   |____/  |_|  |_|  \\__,_|  \\__, | \\___| /_/    \\_\\ |_| |_____|
                                                             __/ |
                                                            |___/
关注雕雕, 关注雕雕喵
项目地址/Project Re: https://github.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI
        '''

        print(welcome_txt)

        # Flatten each backend's settings block into its token/connection
        # list used by the backend classes.
        config.civitai = config.civitai_setting['token']
        config.a1111webui = config.a1111webui_setting
        config.fal_ai = config.fal_ai_setting['token']
        config.replicate = config.replicate_setting['token']
        config.liblibai = config.liblibai_setting['token']
        config.tusiart = config.tusiart_setting['token']
        config.seaart = config.seaart_setting['token']
        config.yunjie = config.yunjie_setting['token']
        config.comfyui = config.comfyui_setting
        config.novelai = config.novelai_setting['token']
        config.midjourney = config.midjourney_setting

        # (items, backend_index, name_dict) triples, one per backend kind.
        sources_list = [
            (config.civitai, 0, config.civitai_name),
            (config.a1111webui, 1, config.a1111webui_name),
            (config.fal_ai, 2, config.fal_ai_name),
            (config.replicate, 3, config.replicate_name),
            (config.liblibai, 4, config.liblibai_name),
            (config.tusiart, 5, config.tusiart_name),
            (config.seaart, 6, config.seaart_name),
            (config.yunjie, 7, config.yunjie_name),
            (config.comfyui, 8, config.comfyui_name),
            (config.novelai, 9, config.novelai_name),
            (config.midjourney, 10, config.midjourney_name),
        ]

        def process_items(config, items, backend_index, name_dict):
            # Register each configured backend instance in workload_dict and
            # map its display name to its URL (indices 1/8/10 use a settings
            # dict of parallel lists; the rest are plain token lists).
            # NOTE(review): every entry shares the SAME base_workload_dict
            # object (no copy) — confirm mutation of one entry affecting all
            # is intended.
            if backend_index == 1:  # a1111webui: dict of parallel lists
                for i in range(len(items['name'])):
                    key = f"{config.backend_name_list[backend_index]}-{items['name'][i]}"
                    config.workload_dict[key] = config.base_workload_dict
                    name_dict[f"a1111-{items['name'][i]}"] = items['backend_url'][i]
            elif backend_index == 8:  # comfyui: same parallel-list layout
                for i in range(len(items['name'])):
                    key = f"{config.backend_name_list[backend_index]}-{items['name'][i]}"
                    config.workload_dict[key] = config.base_workload_dict
                    name_dict[f"comfyui-{items['name'][i]}"] = items['backend_url'][i]
            elif backend_index == 10:  # midjourney: same parallel-list layout
                for i in range(len(items['name'])):
                    key = f"{config.backend_name_list[backend_index]}-{items['name'][i]}"
                    config.workload_dict[key] = config.base_workload_dict
                    name_dict[f"midjourney-{items['name'][i]}"] = items['backend_url'][i]
            else:  # token-list backends: the token doubles as the name
                for n in items:
                    key = f"{config.backend_name_list[backend_index]}-{n}"
                    config.workload_dict[key] = config.base_workload_dict
                    name_dict[key] = n

        for items, backend_index, name_dict in sources_list:
            process_items(config, items, backend_index, name_dict)

        def merge_and_count(*args):
            # Merge all name dicts and record how many instances each backend
            # kind contributes; returns (merged_dict, per-kind lengths).
            merged_dict = {}
            lengths = []
            for arg in args:
                merged_dict |= arg[2]
                lengths.append(len(arg[0]))
            return merged_dict, tuple(lengths)

        # NOTE(review): this stores a (dict, tuple) pair although Config
        # annotates name_url as dict — confirm consumers expect the pair.
        config.name_url = merge_and_count(*sources_list)

        # Per-backend model cache, seeded empty until models are fetched.
        models_dict = {}
        models_dict['is_loaded'] = False
        for back_name in list(config.workload_dict.keys()):
            models_dict[back_name] = config.models_list

        # Optional 4th element of redis_server is the DB index; default 15.
        try:
            db_index = config.server_settings['redis_server'][3]
        except IndexError:
            db_index = 15

        self.redis_client = redis.Redis(
            host=config.server_settings['redis_server'][0],
            port=config.server_settings['redis_server'][1],
            password=config.server_settings['redis_server'][2],
            db=db_index
        )

        self.logger.info('Redis connection successful')

        workload_json = json.dumps(config.workload_dict)

        # Seed the shared Redis keys in one pipeline round-trip.
        rp = self.redis_client.pipeline()
        rp.set('workload', workload_json)
        rp.set('models', json.dumps(models_dict))
        rp.set('styles', json.dumps([]))
        rp.execute()

        self.config = config
|
332 |
+
|
333 |
+
|
334 |
+
# Module-level singleton; callers run ``init_instance.init(path)`` at startup.
init_instance = ConfigInit()
|
DrawBridgeAPI/comfyui_workflows/diaopony-hr.json
ADDED
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"4": {
|
3 |
+
"inputs": {
|
4 |
+
"ckpt_name": "models\\DiaoDaiaPony - 100 Artists - testing.safetensors"
|
5 |
+
},
|
6 |
+
"class_type": "CheckpointLoaderSimple",
|
7 |
+
"_meta": {
|
8 |
+
"title": "Load Checkpoint"
|
9 |
+
}
|
10 |
+
},
|
11 |
+
"7": {
|
12 |
+
"inputs": {
|
13 |
+
"text": "score_3,poorly drawn,bad anatomy,bad proportions, watercolor painting, brush strokes,3d,2.5d,signature,watermark,bad face,distorted face,messed up eyes,deformed,(low quality, bad quality, worst quality:1.2),bad hand",
|
14 |
+
"clip": [
|
15 |
+
"4",
|
16 |
+
1
|
17 |
+
]
|
18 |
+
},
|
19 |
+
"class_type": "CLIPTextEncode",
|
20 |
+
"_meta": {
|
21 |
+
"title": "CLIP Text Encode (Negative Prompt)"
|
22 |
+
}
|
23 |
+
},
|
24 |
+
"53": {
|
25 |
+
"inputs": {
|
26 |
+
"width": 768,
|
27 |
+
"height": 1152,
|
28 |
+
"batch_size": 1
|
29 |
+
},
|
30 |
+
"class_type": "EmptyLatentImage",
|
31 |
+
"_meta": {
|
32 |
+
"title": "Empty Latent Image"
|
33 |
+
}
|
34 |
+
},
|
35 |
+
"79": {
|
36 |
+
"inputs": {
|
37 |
+
"seed": 657283391776279,
|
38 |
+
"steps": 30,
|
39 |
+
"cfg": 8,
|
40 |
+
"sampler_name": "euler",
|
41 |
+
"scheduler": "karras",
|
42 |
+
"denoise": 1,
|
43 |
+
"model": [
|
44 |
+
"4",
|
45 |
+
0
|
46 |
+
],
|
47 |
+
"positive": [
|
48 |
+
"103",
|
49 |
+
0
|
50 |
+
],
|
51 |
+
"negative": [
|
52 |
+
"7",
|
53 |
+
0
|
54 |
+
],
|
55 |
+
"latent_image": [
|
56 |
+
"53",
|
57 |
+
0
|
58 |
+
]
|
59 |
+
},
|
60 |
+
"class_type": "KSampler",
|
61 |
+
"_meta": {
|
62 |
+
"title": "KSampler"
|
63 |
+
}
|
64 |
+
},
|
65 |
+
"88": {
|
66 |
+
"inputs": {
|
67 |
+
"filename_prefix": "ComfyUI",
|
68 |
+
"images": [
|
69 |
+
"91",
|
70 |
+
0
|
71 |
+
]
|
72 |
+
},
|
73 |
+
"class_type": "SaveImage",
|
74 |
+
"_meta": {
|
75 |
+
"title": "Save Image"
|
76 |
+
}
|
77 |
+
},
|
78 |
+
"91": {
|
79 |
+
"inputs": {
|
80 |
+
"upscale_by": 2,
|
81 |
+
"seed": 291655160144038,
|
82 |
+
"steps": 12,
|
83 |
+
"cfg": 8,
|
84 |
+
"sampler_name": "dpmpp_2m",
|
85 |
+
"scheduler": "karras",
|
86 |
+
"denoise": 0.2,
|
87 |
+
"mode_type": "Linear",
|
88 |
+
"tile_width": 1024,
|
89 |
+
"tile_height": 1024,
|
90 |
+
"mask_blur": 8,
|
91 |
+
"tile_padding": 32,
|
92 |
+
"seam_fix_mode": "None",
|
93 |
+
"seam_fix_denoise": 1,
|
94 |
+
"seam_fix_width": 64,
|
95 |
+
"seam_fix_mask_blur": 8,
|
96 |
+
"seam_fix_padding": 16,
|
97 |
+
"force_uniform_tiles": true,
|
98 |
+
"tiled_decode": false,
|
99 |
+
"image": [
|
100 |
+
"92",
|
101 |
+
0
|
102 |
+
],
|
103 |
+
"model": [
|
104 |
+
"4",
|
105 |
+
0
|
106 |
+
],
|
107 |
+
"positive": [
|
108 |
+
"103",
|
109 |
+
0
|
110 |
+
],
|
111 |
+
"negative": [
|
112 |
+
"7",
|
113 |
+
0
|
114 |
+
],
|
115 |
+
"vae": [
|
116 |
+
"4",
|
117 |
+
2
|
118 |
+
],
|
119 |
+
"upscale_model": [
|
120 |
+
"93",
|
121 |
+
0
|
122 |
+
]
|
123 |
+
},
|
124 |
+
"class_type": "UltimateSDUpscale",
|
125 |
+
"_meta": {
|
126 |
+
"title": "Ultimate SD Upscale"
|
127 |
+
}
|
128 |
+
},
|
129 |
+
"92": {
|
130 |
+
"inputs": {
|
131 |
+
"samples": [
|
132 |
+
"99",
|
133 |
+
0
|
134 |
+
],
|
135 |
+
"vae": [
|
136 |
+
"4",
|
137 |
+
2
|
138 |
+
]
|
139 |
+
},
|
140 |
+
"class_type": "VAEDecode",
|
141 |
+
"_meta": {
|
142 |
+
"title": "VAE Decode"
|
143 |
+
}
|
144 |
+
},
|
145 |
+
"93": {
|
146 |
+
"inputs": {
|
147 |
+
"model_name": "4x-UltraSharp.pth"
|
148 |
+
},
|
149 |
+
"class_type": "UpscaleModelLoader",
|
150 |
+
"_meta": {
|
151 |
+
"title": "Load Upscale Model"
|
152 |
+
}
|
153 |
+
},
|
154 |
+
"98": {
|
155 |
+
"inputs": {
|
156 |
+
"upscale_method": "nearest-exact",
|
157 |
+
"width": 1152,
|
158 |
+
"height": 1536,
|
159 |
+
"crop": "disabled",
|
160 |
+
"samples": [
|
161 |
+
"79",
|
162 |
+
0
|
163 |
+
]
|
164 |
+
},
|
165 |
+
"class_type": "LatentUpscale",
|
166 |
+
"_meta": {
|
167 |
+
"title": "Upscale Latent"
|
168 |
+
}
|
169 |
+
},
|
170 |
+
"99": {
|
171 |
+
"inputs": {
|
172 |
+
"seed": 641400482051274,
|
173 |
+
"steps": 20,
|
174 |
+
"cfg": 8,
|
175 |
+
"sampler_name": "euler",
|
176 |
+
"scheduler": "normal",
|
177 |
+
"denoise": 1,
|
178 |
+
"model": [
|
179 |
+
"4",
|
180 |
+
0
|
181 |
+
],
|
182 |
+
"positive": [
|
183 |
+
"103",
|
184 |
+
0
|
185 |
+
],
|
186 |
+
"negative": [
|
187 |
+
"7",
|
188 |
+
0
|
189 |
+
],
|
190 |
+
"latent_image": [
|
191 |
+
"98",
|
192 |
+
0
|
193 |
+
]
|
194 |
+
},
|
195 |
+
"class_type": "KSampler",
|
196 |
+
"_meta": {
|
197 |
+
"title": "KSampler"
|
198 |
+
}
|
199 |
+
},
|
200 |
+
"103": {
|
201 |
+
"inputs": {
|
202 |
+
"text": ",(qianqianjie:1.1),(shinyo yukino:1),roku 6,(miyu (miy u1308):1.1),momoko (momopoco), score_9,score_8_up,score_7_up,score_anime,amazing quality,very aesthetic,absurdres,",
|
203 |
+
"clip": [
|
204 |
+
"4",
|
205 |
+
1
|
206 |
+
]
|
207 |
+
},
|
208 |
+
"class_type": "CLIPTextEncode",
|
209 |
+
"_meta": {
|
210 |
+
"title": "CLIP Text Encode (Prompt)"
|
211 |
+
}
|
212 |
+
}
|
213 |
+
}
|
DrawBridgeAPI/comfyui_workflows/diaopony-hr_reflex.json
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"prompt": {"103": {"override": {"text": "append_prompt"}}},
|
3 |
+
"negative_prompt": {"7": {"override": {"text": "append_negative_prompt"}}},
|
4 |
+
"sampler": ["79", "99"],
|
5 |
+
"image_size": {"53": {}, "98": {"override": {"width": "upscale", "height": "upscale"}}},
|
6 |
+
"output": 88
|
7 |
+
}
|
DrawBridgeAPI/comfyui_workflows/diaopony-tipo.json
ADDED
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"4": {
|
3 |
+
"inputs": {
|
4 |
+
"ckpt_name": "models\\DiaoDaiaPony - 100 Artists - testing.safetensors"
|
5 |
+
},
|
6 |
+
"class_type": "CheckpointLoaderSimple",
|
7 |
+
"_meta": {
|
8 |
+
"title": "Load Checkpoint"
|
9 |
+
}
|
10 |
+
},
|
11 |
+
"6": {
|
12 |
+
"inputs": {
|
13 |
+
"text": [
|
14 |
+
"50",
|
15 |
+
0
|
16 |
+
],
|
17 |
+
"clip": [
|
18 |
+
"4",
|
19 |
+
1
|
20 |
+
]
|
21 |
+
},
|
22 |
+
"class_type": "CLIPTextEncode",
|
23 |
+
"_meta": {
|
24 |
+
"title": "CLIP Text Encode (TIPO Prompt)"
|
25 |
+
}
|
26 |
+
},
|
27 |
+
"7": {
|
28 |
+
"inputs": {
|
29 |
+
"text": "score_3,poorly drawn,bad anatomy,bad proportions, watercolor painting, brush strokes,3d,2.5d,signature,watermark,bad face,distorted face,messed up eyes,deformed,(low quality, bad quality, worst quality:1.2),bad hand",
|
30 |
+
"clip": [
|
31 |
+
"4",
|
32 |
+
1
|
33 |
+
]
|
34 |
+
},
|
35 |
+
"class_type": "CLIPTextEncode",
|
36 |
+
"_meta": {
|
37 |
+
"title": "CLIP Text Encode (Negative Prompt)"
|
38 |
+
}
|
39 |
+
},
|
40 |
+
"8": {
|
41 |
+
"inputs": {
|
42 |
+
"samples": [
|
43 |
+
"52",
|
44 |
+
0
|
45 |
+
],
|
46 |
+
"vae": [
|
47 |
+
"4",
|
48 |
+
2
|
49 |
+
]
|
50 |
+
},
|
51 |
+
"class_type": "VAEDecode",
|
52 |
+
"_meta": {
|
53 |
+
"title": "VAE Decode"
|
54 |
+
}
|
55 |
+
},
|
56 |
+
"50": {
|
57 |
+
"inputs": {
|
58 |
+
"tags": "\n\nscore_9,score_8_up,score_7_up,score_anime,amazing quality,very aesthetic,absurdres",
|
59 |
+
"nl_prompt": "An illustration of",
|
60 |
+
"ban_tags": "text, censor, speech, say, illustrations, doll",
|
61 |
+
"tipo_model": "KBlueLeaf/TIPO-500M",
|
62 |
+
"format": "<|special|>, \n<|characters|>, <|copyrights|>, \n<|artist|>, \n\n<|general|>,\n\n<|extended|>.\n\n<|quality|>, <|meta|>, <|rating|>",
|
63 |
+
"width": 1024,
|
64 |
+
"height": 1024,
|
65 |
+
"temperature": 0.5,
|
66 |
+
"top_p": 0.95,
|
67 |
+
"min_p": 0.05,
|
68 |
+
"top_k": 80,
|
69 |
+
"tag_length": "long",
|
70 |
+
"nl_length": "long",
|
71 |
+
"seed": 1763
|
72 |
+
},
|
73 |
+
"class_type": "TIPO",
|
74 |
+
"_meta": {
|
75 |
+
"title": "TIPO"
|
76 |
+
}
|
77 |
+
},
|
78 |
+
"52": {
|
79 |
+
"inputs": {
|
80 |
+
"seed": 11451,
|
81 |
+
"steps": 20,
|
82 |
+
"cfg": 8,
|
83 |
+
"sampler_name": "euler",
|
84 |
+
"scheduler": "normal",
|
85 |
+
"denoise": 1,
|
86 |
+
"model": [
|
87 |
+
"4",
|
88 |
+
0
|
89 |
+
],
|
90 |
+
"positive": [
|
91 |
+
"6",
|
92 |
+
0
|
93 |
+
],
|
94 |
+
"negative": [
|
95 |
+
"7",
|
96 |
+
0
|
97 |
+
],
|
98 |
+
"latent_image": [
|
99 |
+
"53",
|
100 |
+
0
|
101 |
+
]
|
102 |
+
},
|
103 |
+
"class_type": "KSampler",
|
104 |
+
"_meta": {
|
105 |
+
"title": "KSampler"
|
106 |
+
}
|
107 |
+
},
|
108 |
+
"53": {
|
109 |
+
"inputs": {
|
110 |
+
"width": 1152,
|
111 |
+
"height": 1536,
|
112 |
+
"batch_size": 1
|
113 |
+
},
|
114 |
+
"class_type": "EmptyLatentImage",
|
115 |
+
"_meta": {
|
116 |
+
"title": "Empty Latent Image"
|
117 |
+
}
|
118 |
+
},
|
119 |
+
"72": {
|
120 |
+
"inputs": {
|
121 |
+
"filename_prefix": "ComfyUI",
|
122 |
+
"images": [
|
123 |
+
"8",
|
124 |
+
0
|
125 |
+
]
|
126 |
+
},
|
127 |
+
"class_type": "SaveImage",
|
128 |
+
"_meta": {
|
129 |
+
"title": "Save Image"
|
130 |
+
}
|
131 |
+
}
|
132 |
+
}
|
DrawBridgeAPI/comfyui_workflows/diaopony-tipo_reflex.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"tipo": {"50": {"override": {"tags": "append_prompt"}}},
|
3 |
+
"sampler": 52,
|
4 |
+
"image_size": 53,
|
5 |
+
"output": 72
|
6 |
+
}
|
DrawBridgeAPI/comfyui_workflows/flux-dev.json
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"1": {
|
3 |
+
"inputs": {
|
4 |
+
"ckpt_name": "models\\flux1-dev-bnb-nf4-v2.safetensors"
|
5 |
+
},
|
6 |
+
"class_type": "CheckpointLoaderNF4",
|
7 |
+
"_meta": {
|
8 |
+
"title": "CheckpointLoaderNF4"
|
9 |
+
}
|
10 |
+
},
|
11 |
+
"2": {
|
12 |
+
"inputs": {
|
13 |
+
"text": "a tank",
|
14 |
+
"clip": [
|
15 |
+
"1",
|
16 |
+
1
|
17 |
+
]
|
18 |
+
},
|
19 |
+
"class_type": "CLIPTextEncode",
|
20 |
+
"_meta": {
|
21 |
+
"title": "CLIP Text Encode (Prompt)"
|
22 |
+
}
|
23 |
+
},
|
24 |
+
"3": {
|
25 |
+
"inputs": {
|
26 |
+
"seed": 861133332082627,
|
27 |
+
"steps": 20,
|
28 |
+
"cfg": 1,
|
29 |
+
"sampler_name": "euler",
|
30 |
+
"scheduler": "simple",
|
31 |
+
"denoise": 1,
|
32 |
+
"model": [
|
33 |
+
"1",
|
34 |
+
0
|
35 |
+
],
|
36 |
+
"positive": [
|
37 |
+
"2",
|
38 |
+
0
|
39 |
+
],
|
40 |
+
"negative": [
|
41 |
+
"2",
|
42 |
+
0
|
43 |
+
],
|
44 |
+
"latent_image": [
|
45 |
+
"4",
|
46 |
+
0
|
47 |
+
]
|
48 |
+
},
|
49 |
+
"class_type": "KSampler",
|
50 |
+
"_meta": {
|
51 |
+
"title": "KSampler"
|
52 |
+
}
|
53 |
+
},
|
54 |
+
"4": {
|
55 |
+
"inputs": {
|
56 |
+
"width": 512,
|
57 |
+
"height": 768,
|
58 |
+
"batch_size": 1
|
59 |
+
},
|
60 |
+
"class_type": "EmptyLatentImage",
|
61 |
+
"_meta": {
|
62 |
+
"title": "Empty Latent Image"
|
63 |
+
}
|
64 |
+
},
|
65 |
+
"5": {
|
66 |
+
"inputs": {
|
67 |
+
"samples": [
|
68 |
+
"3",
|
69 |
+
0
|
70 |
+
],
|
71 |
+
"vae": [
|
72 |
+
"1",
|
73 |
+
2
|
74 |
+
]
|
75 |
+
},
|
76 |
+
"class_type": "VAEDecode",
|
77 |
+
"_meta": {
|
78 |
+
"title": "VAE Decode"
|
79 |
+
}
|
80 |
+
},
|
81 |
+
"6": {
|
82 |
+
"inputs": {
|
83 |
+
"filename_prefix": "ComfyUI",
|
84 |
+
"images": [
|
85 |
+
"5",
|
86 |
+
0
|
87 |
+
]
|
88 |
+
},
|
89 |
+
"class_type": "SaveImage",
|
90 |
+
"_meta": {
|
91 |
+
"title": "Save Image"
|
92 |
+
}
|
93 |
+
}
|
94 |
+
}
|
DrawBridgeAPI/comfyui_workflows/flux-dev_reflex.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"prompt": 2,
|
3 |
+
"image_size": 4,
|
4 |
+
"output": 6,
|
5 |
+
"seed": 3
|
6 |
+
}
|
DrawBridgeAPI/comfyui_workflows/flux-schnell.json
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"1": {
|
3 |
+
"inputs": {
|
4 |
+
"ckpt_name": "models\\flux1-schnell-bnb-nf4.safetensors"
|
5 |
+
},
|
6 |
+
"class_type": "CheckpointLoaderNF4",
|
7 |
+
"_meta": {
|
8 |
+
"title": "CheckpointLoaderNF4"
|
9 |
+
}
|
10 |
+
},
|
11 |
+
"2": {
|
12 |
+
"inputs": {
|
13 |
+
"text": "a tank",
|
14 |
+
"clip": [
|
15 |
+
"1",
|
16 |
+
1
|
17 |
+
]
|
18 |
+
},
|
19 |
+
"class_type": "CLIPTextEncode",
|
20 |
+
"_meta": {
|
21 |
+
"title": "CLIP Text Encode (Prompt)"
|
22 |
+
}
|
23 |
+
},
|
24 |
+
"3": {
|
25 |
+
"inputs": {
|
26 |
+
"seed": 0,
|
27 |
+
"steps": 4,
|
28 |
+
"cfg": 1,
|
29 |
+
"sampler_name": "euler",
|
30 |
+
"scheduler": "simple",
|
31 |
+
"denoise": 1,
|
32 |
+
"model": [
|
33 |
+
"1",
|
34 |
+
0
|
35 |
+
],
|
36 |
+
"positive": [
|
37 |
+
"2",
|
38 |
+
0
|
39 |
+
],
|
40 |
+
"negative": [
|
41 |
+
"2",
|
42 |
+
0
|
43 |
+
],
|
44 |
+
"latent_image": [
|
45 |
+
"4",
|
46 |
+
0
|
47 |
+
]
|
48 |
+
},
|
49 |
+
"class_type": "KSampler",
|
50 |
+
"_meta": {
|
51 |
+
"title": "KSampler"
|
52 |
+
}
|
53 |
+
},
|
54 |
+
"4": {
|
55 |
+
"inputs": {
|
56 |
+
"width": 512,
|
57 |
+
"height": 768,
|
58 |
+
"batch_size": 1
|
59 |
+
},
|
60 |
+
"class_type": "EmptyLatentImage",
|
61 |
+
"_meta": {
|
62 |
+
"title": "Empty Latent Image"
|
63 |
+
}
|
64 |
+
},
|
65 |
+
"5": {
|
66 |
+
"inputs": {
|
67 |
+
"samples": [
|
68 |
+
"3",
|
69 |
+
0
|
70 |
+
],
|
71 |
+
"vae": [
|
72 |
+
"1",
|
73 |
+
2
|
74 |
+
]
|
75 |
+
},
|
76 |
+
"class_type": "VAEDecode",
|
77 |
+
"_meta": {
|
78 |
+
"title": "VAE Decode"
|
79 |
+
}
|
80 |
+
},
|
81 |
+
"6": {
|
82 |
+
"inputs": {
|
83 |
+
"filename_prefix": "ComfyUI",
|
84 |
+
"images": [
|
85 |
+
"5",
|
86 |
+
0
|
87 |
+
]
|
88 |
+
},
|
89 |
+
"class_type": "SaveImage",
|
90 |
+
"_meta": {
|
91 |
+
"title": "Save Image"
|
92 |
+
}
|
93 |
+
}
|
94 |
+
}
|
DrawBridgeAPI/comfyui_workflows/flux-schnell_reflex.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"prompt": 2,
|
3 |
+
"image_size": 4,
|
4 |
+
"output": 6,
|
5 |
+
"seed": 3
|
6 |
+
}
|
DrawBridgeAPI/comfyui_workflows/flux修手.json
ADDED
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"1": {
|
3 |
+
"inputs": {
|
4 |
+
"context_expand_pixels": 100,
|
5 |
+
"context_expand_factor": 1,
|
6 |
+
"fill_mask_holes": true,
|
7 |
+
"blur_mask_pixels": 16,
|
8 |
+
"invert_mask": false,
|
9 |
+
"blend_pixels": 16,
|
10 |
+
"rescale_algorithm": "bicubic",
|
11 |
+
"mode": "ranged size",
|
12 |
+
"force_width": 1024,
|
13 |
+
"force_height": 1024,
|
14 |
+
"rescale_factor": 1,
|
15 |
+
"min_width": 512,
|
16 |
+
"min_height": 512,
|
17 |
+
"max_width": 768,
|
18 |
+
"max_height": 768,
|
19 |
+
"padding": 32,
|
20 |
+
"image": [
|
21 |
+
"47",
|
22 |
+
0
|
23 |
+
],
|
24 |
+
"mask": [
|
25 |
+
"50",
|
26 |
+
0
|
27 |
+
]
|
28 |
+
},
|
29 |
+
"class_type": "InpaintCrop",
|
30 |
+
"_meta": {
|
31 |
+
"title": "✂️ Inpaint Crop"
|
32 |
+
}
|
33 |
+
},
|
34 |
+
"2": {
|
35 |
+
"inputs": {
|
36 |
+
"rescale_algorithm": "bislerp",
|
37 |
+
"stitch": [
|
38 |
+
"1",
|
39 |
+
0
|
40 |
+
],
|
41 |
+
"inpainted_image": [
|
42 |
+
"15",
|
43 |
+
0
|
44 |
+
]
|
45 |
+
},
|
46 |
+
"class_type": "InpaintStitch",
|
47 |
+
"_meta": {
|
48 |
+
"title": "✂️ Inpaint Stitch"
|
49 |
+
}
|
50 |
+
},
|
51 |
+
"3": {
|
52 |
+
"inputs": {
|
53 |
+
"image": "a87ed50d8e69b8bfb62df848bac69d12.png",
|
54 |
+
"upload": "image"
|
55 |
+
},
|
56 |
+
"class_type": "LoadImage",
|
57 |
+
"_meta": {
|
58 |
+
"title": "Load Image"
|
59 |
+
}
|
60 |
+
},
|
61 |
+
"15": {
|
62 |
+
"inputs": {
|
63 |
+
"samples": [
|
64 |
+
"100",
|
65 |
+
0
|
66 |
+
],
|
67 |
+
"vae": [
|
68 |
+
"99",
|
69 |
+
2
|
70 |
+
]
|
71 |
+
},
|
72 |
+
"class_type": "VAEDecode",
|
73 |
+
"_meta": {
|
74 |
+
"title": "VAE Decode"
|
75 |
+
}
|
76 |
+
},
|
77 |
+
"19": {
|
78 |
+
"inputs": {
|
79 |
+
"positive": [
|
80 |
+
"32",
|
81 |
+
0
|
82 |
+
],
|
83 |
+
"negative": [
|
84 |
+
"32",
|
85 |
+
0
|
86 |
+
],
|
87 |
+
"vae": [
|
88 |
+
"99",
|
89 |
+
2
|
90 |
+
],
|
91 |
+
"pixels": [
|
92 |
+
"1",
|
93 |
+
1
|
94 |
+
],
|
95 |
+
"mask": [
|
96 |
+
"1",
|
97 |
+
2
|
98 |
+
]
|
99 |
+
},
|
100 |
+
"class_type": "InpaintModelConditioning",
|
101 |
+
"_meta": {
|
102 |
+
"title": "InpaintModelConditioning"
|
103 |
+
}
|
104 |
+
},
|
105 |
+
"25": {
|
106 |
+
"inputs": {
|
107 |
+
"rescale_algorithm": "bicubic",
|
108 |
+
"mode": "ensure minimum size",
|
109 |
+
"min_width": 0,
|
110 |
+
"min_height": 1536,
|
111 |
+
"rescale_factor": 1,
|
112 |
+
"image": [
|
113 |
+
"26",
|
114 |
+
0
|
115 |
+
],
|
116 |
+
"mask": [
|
117 |
+
"26",
|
118 |
+
1
|
119 |
+
]
|
120 |
+
},
|
121 |
+
"class_type": "InpaintResize",
|
122 |
+
"_meta": {
|
123 |
+
"title": "✂️ Resize Image Before Inpainting"
|
124 |
+
}
|
125 |
+
},
|
126 |
+
"26": {
|
127 |
+
"inputs": {
|
128 |
+
"sam_model": "sam_vit_h (2.56GB)",
|
129 |
+
"grounding_dino_model": "GroundingDINO_SwinB (938MB)",
|
130 |
+
"threshold": 0.3,
|
131 |
+
"detail_method": "VITMatte",
|
132 |
+
"detail_erode": 6,
|
133 |
+
"detail_dilate": 6,
|
134 |
+
"black_point": 0.15,
|
135 |
+
"white_point": 0.99,
|
136 |
+
"process_detail": false,
|
137 |
+
"prompt": "hand",
|
138 |
+
"device": "cuda",
|
139 |
+
"max_megapixels": 2,
|
140 |
+
"cache_model": false,
|
141 |
+
"image": [
|
142 |
+
"3",
|
143 |
+
0
|
144 |
+
]
|
145 |
+
},
|
146 |
+
"class_type": "LayerMask: SegmentAnythingUltra V2",
|
147 |
+
"_meta": {
|
148 |
+
"title": "LayerMask: SegmentAnythingUltra V2"
|
149 |
+
}
|
150 |
+
},
|
151 |
+
"32": {
|
152 |
+
"inputs": {
|
153 |
+
"text": "Masterpiece, High Definition, Real Person Portrait, 5 Fingers, Girl's Hand",
|
154 |
+
"clip": [
|
155 |
+
"99",
|
156 |
+
1
|
157 |
+
]
|
158 |
+
},
|
159 |
+
"class_type": "CLIPTextEncode",
|
160 |
+
"_meta": {
|
161 |
+
"title": "CLIP Text Encode (Prompt)"
|
162 |
+
}
|
163 |
+
},
|
164 |
+
"47": {
|
165 |
+
"inputs": {
|
166 |
+
"fill_background": false,
|
167 |
+
"background_color": "#000000",
|
168 |
+
"RGBA_image": [
|
169 |
+
"25",
|
170 |
+
0
|
171 |
+
],
|
172 |
+
"mask": [
|
173 |
+
"25",
|
174 |
+
1
|
175 |
+
]
|
176 |
+
},
|
177 |
+
"class_type": "LayerUtility: ImageRemoveAlpha",
|
178 |
+
"_meta": {
|
179 |
+
"title": "LayerUtility: ImageRemoveAlpha"
|
180 |
+
}
|
181 |
+
},
|
182 |
+
"50": {
|
183 |
+
"inputs": {
|
184 |
+
"expand": 30,
|
185 |
+
"incremental_expandrate": 0.1,
|
186 |
+
"tapered_corners": false,
|
187 |
+
"flip_input": false,
|
188 |
+
"blur_radius": 10,
|
189 |
+
"lerp_alpha": 1,
|
190 |
+
"decay_factor": 1,
|
191 |
+
"fill_holes": false,
|
192 |
+
"mask": [
|
193 |
+
"25",
|
194 |
+
1
|
195 |
+
]
|
196 |
+
},
|
197 |
+
"class_type": "GrowMaskWithBlur",
|
198 |
+
"_meta": {
|
199 |
+
"title": "Grow Mask With Blur"
|
200 |
+
}
|
201 |
+
},
|
202 |
+
"94": {
|
203 |
+
"inputs": {
|
204 |
+
"filename_prefix": "hand_fix",
|
205 |
+
"images": [
|
206 |
+
"2",
|
207 |
+
0
|
208 |
+
]
|
209 |
+
},
|
210 |
+
"class_type": "SaveImage",
|
211 |
+
"_meta": {
|
212 |
+
"title": "Save Image"
|
213 |
+
}
|
214 |
+
},
|
215 |
+
"99": {
|
216 |
+
"inputs": {
|
217 |
+
"ckpt_name": "models\\flux1-dev-bnb-nf4-v2.safetensors"
|
218 |
+
},
|
219 |
+
"class_type": "CheckpointLoaderNF4",
|
220 |
+
"_meta": {
|
221 |
+
"title": "CheckpointLoaderNF4"
|
222 |
+
}
|
223 |
+
},
|
224 |
+
"100": {
|
225 |
+
"inputs": {
|
226 |
+
"seed": 266696528873091,
|
227 |
+
"steps": 20,
|
228 |
+
"cfg": 1,
|
229 |
+
"sampler_name": "euler",
|
230 |
+
"scheduler": "simple",
|
231 |
+
"denoise": 0.5,
|
232 |
+
"model": [
|
233 |
+
"99",
|
234 |
+
0
|
235 |
+
],
|
236 |
+
"positive": [
|
237 |
+
"19",
|
238 |
+
0
|
239 |
+
],
|
240 |
+
"negative": [
|
241 |
+
"19",
|
242 |
+
1
|
243 |
+
],
|
244 |
+
"latent_image": [
|
245 |
+
"19",
|
246 |
+
2
|
247 |
+
]
|
248 |
+
},
|
249 |
+
"class_type": "KSampler",
|
250 |
+
"_meta": {
|
251 |
+
"title": "KSampler"
|
252 |
+
}
|
253 |
+
}
|
254 |
+
}
|
DrawBridgeAPI/comfyui_workflows/flux修手_reflex.json
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"prompt": 32,
|
3 |
+
"output": 94,
|
4 |
+
"load_image":3
|
5 |
+
}
|
DrawBridgeAPI/comfyui_workflows/sd3.5_txt2img.json
ADDED
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"4": {
|
3 |
+
"inputs": {
|
4 |
+
"ckpt_name": "models\\sd3.5_large.safetensors"
|
5 |
+
},
|
6 |
+
"class_type": "CheckpointLoaderSimple",
|
7 |
+
"_meta": {
|
8 |
+
"title": "Load Checkpoint"
|
9 |
+
}
|
10 |
+
},
|
11 |
+
"6": {
|
12 |
+
"inputs": {
|
13 |
+
"text": "beautiful scenery nature glass bottle landscape, purple galaxy bottle,",
|
14 |
+
"clip": [
|
15 |
+
"11",
|
16 |
+
0
|
17 |
+
]
|
18 |
+
},
|
19 |
+
"class_type": "CLIPTextEncode",
|
20 |
+
"_meta": {
|
21 |
+
"title": "CLIP Text Encode (Prompt)"
|
22 |
+
}
|
23 |
+
},
|
24 |
+
"8": {
|
25 |
+
"inputs": {
|
26 |
+
"samples": [
|
27 |
+
"294",
|
28 |
+
0
|
29 |
+
],
|
30 |
+
"vae": [
|
31 |
+
"4",
|
32 |
+
2
|
33 |
+
]
|
34 |
+
},
|
35 |
+
"class_type": "VAEDecode",
|
36 |
+
"_meta": {
|
37 |
+
"title": "VAE Decode"
|
38 |
+
}
|
39 |
+
},
|
40 |
+
"11": {
|
41 |
+
"inputs": {
|
42 |
+
"clip_name1": "clip_g.pth",
|
43 |
+
"clip_name2": "clip_l.safetensors",
|
44 |
+
"clip_name3": "t5xxl_fp16.safetensors"
|
45 |
+
},
|
46 |
+
"class_type": "TripleCLIPLoader",
|
47 |
+
"_meta": {
|
48 |
+
"title": "TripleCLIPLoader"
|
49 |
+
}
|
50 |
+
},
|
51 |
+
"13": {
|
52 |
+
"inputs": {
|
53 |
+
"shift": 3,
|
54 |
+
"model": [
|
55 |
+
"4",
|
56 |
+
0
|
57 |
+
]
|
58 |
+
},
|
59 |
+
"class_type": "ModelSamplingSD3",
|
60 |
+
"_meta": {
|
61 |
+
"title": "ModelSamplingSD3"
|
62 |
+
}
|
63 |
+
},
|
64 |
+
"67": {
|
65 |
+
"inputs": {
|
66 |
+
"conditioning": [
|
67 |
+
"71",
|
68 |
+
0
|
69 |
+
]
|
70 |
+
},
|
71 |
+
"class_type": "ConditioningZeroOut",
|
72 |
+
"_meta": {
|
73 |
+
"title": "ConditioningZeroOut"
|
74 |
+
}
|
75 |
+
},
|
76 |
+
"68": {
|
77 |
+
"inputs": {
|
78 |
+
"start": 0.1,
|
79 |
+
"end": 1,
|
80 |
+
"conditioning": [
|
81 |
+
"67",
|
82 |
+
0
|
83 |
+
]
|
84 |
+
},
|
85 |
+
"class_type": "ConditioningSetTimestepRange",
|
86 |
+
"_meta": {
|
87 |
+
"title": "ConditioningSetTimestepRange"
|
88 |
+
}
|
89 |
+
},
|
90 |
+
"69": {
|
91 |
+
"inputs": {
|
92 |
+
"conditioning_1": [
|
93 |
+
"68",
|
94 |
+
0
|
95 |
+
],
|
96 |
+
"conditioning_2": [
|
97 |
+
"70",
|
98 |
+
0
|
99 |
+
]
|
100 |
+
},
|
101 |
+
"class_type": "ConditioningCombine",
|
102 |
+
"_meta": {
|
103 |
+
"title": "Conditioning (Combine)"
|
104 |
+
}
|
105 |
+
},
|
106 |
+
"70": {
|
107 |
+
"inputs": {
|
108 |
+
"start": 0,
|
109 |
+
"end": 0.1,
|
110 |
+
"conditioning": [
|
111 |
+
"71",
|
112 |
+
0
|
113 |
+
]
|
114 |
+
},
|
115 |
+
"class_type": "ConditioningSetTimestepRange",
|
116 |
+
"_meta": {
|
117 |
+
"title": "ConditioningSetTimestepRange"
|
118 |
+
}
|
119 |
+
},
|
120 |
+
"71": {
|
121 |
+
"inputs": {
|
122 |
+
"text": "",
|
123 |
+
"clip": [
|
124 |
+
"11",
|
125 |
+
0
|
126 |
+
]
|
127 |
+
},
|
128 |
+
"class_type": "CLIPTextEncode",
|
129 |
+
"_meta": {
|
130 |
+
"title": "CLIP Text Encode (Prompt)"
|
131 |
+
}
|
132 |
+
},
|
133 |
+
"135": {
|
134 |
+
"inputs": {
|
135 |
+
"width": 1024,
|
136 |
+
"height": 1024,
|
137 |
+
"batch_size": 1
|
138 |
+
},
|
139 |
+
"class_type": "EmptySD3LatentImage",
|
140 |
+
"_meta": {
|
141 |
+
"title": "EmptySD3LatentImage"
|
142 |
+
}
|
143 |
+
},
|
144 |
+
"294": {
|
145 |
+
"inputs": {
|
146 |
+
"seed": 143084108695924,
|
147 |
+
"steps": 20,
|
148 |
+
"cfg": 4.5,
|
149 |
+
"sampler_name": "dpmpp_2m",
|
150 |
+
"scheduler": "sgm_uniform",
|
151 |
+
"denoise": 1,
|
152 |
+
"model": [
|
153 |
+
"13",
|
154 |
+
0
|
155 |
+
],
|
156 |
+
"positive": [
|
157 |
+
"6",
|
158 |
+
0
|
159 |
+
],
|
160 |
+
"negative": [
|
161 |
+
"69",
|
162 |
+
0
|
163 |
+
],
|
164 |
+
"latent_image": [
|
165 |
+
"135",
|
166 |
+
0
|
167 |
+
]
|
168 |
+
},
|
169 |
+
"class_type": "KSampler",
|
170 |
+
"_meta": {
|
171 |
+
"title": "KSampler"
|
172 |
+
}
|
173 |
+
},
|
174 |
+
"302": {
|
175 |
+
"inputs": {
|
176 |
+
"filename_prefix": "ComfyUI",
|
177 |
+
"images": [
|
178 |
+
"8",
|
179 |
+
0
|
180 |
+
]
|
181 |
+
},
|
182 |
+
"class_type": "SaveImage",
|
183 |
+
"_meta": {
|
184 |
+
"title": "Save Image"
|
185 |
+
}
|
186 |
+
}
|
187 |
+
}
|
DrawBridgeAPI/comfyui_workflows/sd3.5_txt2img_reflex.json
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"prompt": 6,
|
3 |
+
"negative_prompt": 71,
|
4 |
+
"image_size": 135,
|
5 |
+
"output": 302,
|
6 |
+
"seed": 294
|
7 |
+
}
|
DrawBridgeAPI/comfyui_workflows/sdbase_img2img.json
ADDED
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"3": {
|
3 |
+
"inputs": {
|
4 |
+
"seed": 280823642470253,
|
5 |
+
"steps": 20,
|
6 |
+
"cfg": 8,
|
7 |
+
"sampler_name": "dpmpp_2m",
|
8 |
+
"scheduler": "normal",
|
9 |
+
"denoise": 0.8700000000000001,
|
10 |
+
"model": [
|
11 |
+
"14",
|
12 |
+
0
|
13 |
+
],
|
14 |
+
"positive": [
|
15 |
+
"6",
|
16 |
+
0
|
17 |
+
],
|
18 |
+
"negative": [
|
19 |
+
"7",
|
20 |
+
0
|
21 |
+
],
|
22 |
+
"latent_image": [
|
23 |
+
"12",
|
24 |
+
0
|
25 |
+
]
|
26 |
+
},
|
27 |
+
"class_type": "KSampler",
|
28 |
+
"_meta": {
|
29 |
+
"title": "KSampler"
|
30 |
+
}
|
31 |
+
},
|
32 |
+
"6": {
|
33 |
+
"inputs": {
|
34 |
+
"text": "photograph of victorian woman with wings, sky clouds, meadow grass\n",
|
35 |
+
"clip": [
|
36 |
+
"14",
|
37 |
+
1
|
38 |
+
]
|
39 |
+
},
|
40 |
+
"class_type": "CLIPTextEncode",
|
41 |
+
"_meta": {
|
42 |
+
"title": "CLIP Text Encode (Prompt)"
|
43 |
+
}
|
44 |
+
},
|
45 |
+
"7": {
|
46 |
+
"inputs": {
|
47 |
+
"text": "watermark, text\n",
|
48 |
+
"clip": [
|
49 |
+
"14",
|
50 |
+
1
|
51 |
+
]
|
52 |
+
},
|
53 |
+
"class_type": "CLIPTextEncode",
|
54 |
+
"_meta": {
|
55 |
+
"title": "CLIP Text Encode (Prompt)"
|
56 |
+
}
|
57 |
+
},
|
58 |
+
"8": {
|
59 |
+
"inputs": {
|
60 |
+
"samples": [
|
61 |
+
"3",
|
62 |
+
0
|
63 |
+
],
|
64 |
+
"vae": [
|
65 |
+
"14",
|
66 |
+
2
|
67 |
+
]
|
68 |
+
},
|
69 |
+
"class_type": "VAEDecode",
|
70 |
+
"_meta": {
|
71 |
+
"title": "VAE Decode"
|
72 |
+
}
|
73 |
+
},
|
74 |
+
"9": {
|
75 |
+
"inputs": {
|
76 |
+
"filename_prefix": "ComfyUI",
|
77 |
+
"images": [
|
78 |
+
"8",
|
79 |
+
0
|
80 |
+
]
|
81 |
+
},
|
82 |
+
"class_type": "SaveImage",
|
83 |
+
"_meta": {
|
84 |
+
"title": "Save Image"
|
85 |
+
}
|
86 |
+
},
|
87 |
+
"10": {
|
88 |
+
"inputs": {
|
89 |
+
"image": "example.png",
|
90 |
+
"upload": "image"
|
91 |
+
},
|
92 |
+
"class_type": "LoadImage",
|
93 |
+
"_meta": {
|
94 |
+
"title": "Load Image"
|
95 |
+
}
|
96 |
+
},
|
97 |
+
"12": {
|
98 |
+
"inputs": {
|
99 |
+
"pixels": [
|
100 |
+
"10",
|
101 |
+
0
|
102 |
+
],
|
103 |
+
"vae": [
|
104 |
+
"14",
|
105 |
+
2
|
106 |
+
]
|
107 |
+
},
|
108 |
+
"class_type": "VAEEncode",
|
109 |
+
"_meta": {
|
110 |
+
"title": "VAE Encode"
|
111 |
+
}
|
112 |
+
},
|
113 |
+
"14": {
|
114 |
+
"inputs": {
|
115 |
+
"ckpt_name": "v1-5-pruned-emaonly.ckpt"
|
116 |
+
},
|
117 |
+
"class_type": "CheckpointLoaderSimple",
|
118 |
+
"_meta": {
|
119 |
+
"title": "Load Checkpoint"
|
120 |
+
}
|
121 |
+
}
|
122 |
+
}
|
DrawBridgeAPI/comfyui_workflows/sdbase_img2img_reflex.json
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"sampler": 3,
|
3 |
+
"prompt": 6,
|
4 |
+
"image_size": 5,
|
5 |
+
"negative_prompt": 7,
|
6 |
+
"checkpoint": 14,
|
7 |
+
"output": 9,
|
8 |
+
"load_image":10
|
9 |
+
}
|
DrawBridgeAPI/comfyui_workflows/sdbase_txt2img.json
ADDED
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"3": {
|
3 |
+
"inputs": {
|
4 |
+
"seed": 567570346829551,
|
5 |
+
"steps": 20,
|
6 |
+
"cfg": 8,
|
7 |
+
"sampler_name": "euler",
|
8 |
+
"scheduler": "normal",
|
9 |
+
"denoise": 1,
|
10 |
+
"model": [
|
11 |
+
"4",
|
12 |
+
0
|
13 |
+
],
|
14 |
+
"positive": [
|
15 |
+
"6",
|
16 |
+
0
|
17 |
+
],
|
18 |
+
"negative": [
|
19 |
+
"7",
|
20 |
+
0
|
21 |
+
],
|
22 |
+
"latent_image": [
|
23 |
+
"5",
|
24 |
+
0
|
25 |
+
]
|
26 |
+
},
|
27 |
+
"class_type": "KSampler",
|
28 |
+
"_meta": {
|
29 |
+
"title": "KSampler"
|
30 |
+
}
|
31 |
+
},
|
32 |
+
"4": {
|
33 |
+
"inputs": {
|
34 |
+
"ckpt_name": "models\\DiaoDaia_mix_4.5.ckpt"
|
35 |
+
},
|
36 |
+
"class_type": "CheckpointLoaderSimple",
|
37 |
+
"_meta": {
|
38 |
+
"title": "Load Checkpoint"
|
39 |
+
}
|
40 |
+
},
|
41 |
+
"5": {
|
42 |
+
"inputs": {
|
43 |
+
"width": 512,
|
44 |
+
"height": 512,
|
45 |
+
"batch_size": 1
|
46 |
+
},
|
47 |
+
"class_type": "EmptyLatentImage",
|
48 |
+
"_meta": {
|
49 |
+
"title": "Empty Latent Image"
|
50 |
+
}
|
51 |
+
},
|
52 |
+
"6": {
|
53 |
+
"inputs": {
|
54 |
+
"text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
|
55 |
+
"clip": [
|
56 |
+
"4",
|
57 |
+
1
|
58 |
+
]
|
59 |
+
},
|
60 |
+
"class_type": "CLIPTextEncode",
|
61 |
+
"_meta": {
|
62 |
+
"title": "CLIP Text Encode (Prompt)"
|
63 |
+
}
|
64 |
+
},
|
65 |
+
"7": {
|
66 |
+
"inputs": {
|
67 |
+
"text": "text, watermark",
|
68 |
+
"clip": [
|
69 |
+
"4",
|
70 |
+
1
|
71 |
+
]
|
72 |
+
},
|
73 |
+
"class_type": "CLIPTextEncode",
|
74 |
+
"_meta": {
|
75 |
+
"title": "CLIP Text Encode (Prompt)"
|
76 |
+
}
|
77 |
+
},
|
78 |
+
"8": {
|
79 |
+
"inputs": {
|
80 |
+
"samples": [
|
81 |
+
"3",
|
82 |
+
0
|
83 |
+
],
|
84 |
+
"vae": [
|
85 |
+
"4",
|
86 |
+
2
|
87 |
+
]
|
88 |
+
},
|
89 |
+
"class_type": "VAEDecode",
|
90 |
+
"_meta": {
|
91 |
+
"title": "VAE Decode"
|
92 |
+
}
|
93 |
+
},
|
94 |
+
"9": {
|
95 |
+
"inputs": {
|
96 |
+
"filename_prefix": "ComfyUI",
|
97 |
+
"images": [
|
98 |
+
"8",
|
99 |
+
0
|
100 |
+
]
|
101 |
+
},
|
102 |
+
"class_type": "SaveImage",
|
103 |
+
"_meta": {
|
104 |
+
"title": "Save Image"
|
105 |
+
}
|
106 |
+
}
|
107 |
+
}
|
DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_hr_fix.json
ADDED
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"3": {
|
3 |
+
"inputs": {
|
4 |
+
"seed": 213416933995644,
|
5 |
+
"steps": 20,
|
6 |
+
"cfg": 8,
|
7 |
+
"sampler_name": "euler_ancestral",
|
8 |
+
"scheduler": "normal",
|
9 |
+
"denoise": 1,
|
10 |
+
"model": [
|
11 |
+
"4",
|
12 |
+
0
|
13 |
+
],
|
14 |
+
"positive": [
|
15 |
+
"6",
|
16 |
+
0
|
17 |
+
],
|
18 |
+
"negative": [
|
19 |
+
"7",
|
20 |
+
0
|
21 |
+
],
|
22 |
+
"latent_image": [
|
23 |
+
"5",
|
24 |
+
0
|
25 |
+
]
|
26 |
+
},
|
27 |
+
"class_type": "KSampler",
|
28 |
+
"_meta": {
|
29 |
+
"title": "KSampler"
|
30 |
+
}
|
31 |
+
},
|
32 |
+
"4": {
|
33 |
+
"inputs": {
|
34 |
+
"ckpt_name": "models\\1053-S.ckpt"
|
35 |
+
},
|
36 |
+
"class_type": "CheckpointLoaderSimple",
|
37 |
+
"_meta": {
|
38 |
+
"title": "Load Checkpoint"
|
39 |
+
}
|
40 |
+
},
|
41 |
+
"5": {
|
42 |
+
"inputs": {
|
43 |
+
"width": 768,
|
44 |
+
"height": 512,
|
45 |
+
"batch_size": 1
|
46 |
+
},
|
47 |
+
"class_type": "EmptyLatentImage",
|
48 |
+
"_meta": {
|
49 |
+
"title": "Empty Latent Image"
|
50 |
+
}
|
51 |
+
},
|
52 |
+
"6": {
|
53 |
+
"inputs": {
|
54 |
+
"text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
|
55 |
+
"clip": [
|
56 |
+
"4",
|
57 |
+
1
|
58 |
+
]
|
59 |
+
},
|
60 |
+
"class_type": "CLIPTextEncode",
|
61 |
+
"_meta": {
|
62 |
+
"title": "CLIP Text Encode (Prompt)"
|
63 |
+
}
|
64 |
+
},
|
65 |
+
"7": {
|
66 |
+
"inputs": {
|
67 |
+
"text": "text, watermark",
|
68 |
+
"clip": [
|
69 |
+
"4",
|
70 |
+
1
|
71 |
+
]
|
72 |
+
},
|
73 |
+
"class_type": "CLIPTextEncode",
|
74 |
+
"_meta": {
|
75 |
+
"title": "CLIP Text Encode (Prompt)"
|
76 |
+
}
|
77 |
+
},
|
78 |
+
"8": {
|
79 |
+
"inputs": {
|
80 |
+
"samples": [
|
81 |
+
"3",
|
82 |
+
0
|
83 |
+
],
|
84 |
+
"vae": [
|
85 |
+
"4",
|
86 |
+
2
|
87 |
+
]
|
88 |
+
},
|
89 |
+
"class_type": "VAEDecode",
|
90 |
+
"_meta": {
|
91 |
+
"title": "VAE Decode"
|
92 |
+
}
|
93 |
+
},
|
94 |
+
"9": {
|
95 |
+
"inputs": {
|
96 |
+
"filename_prefix": "ComfyUI",
|
97 |
+
"images": [
|
98 |
+
"18",
|
99 |
+
0
|
100 |
+
]
|
101 |
+
},
|
102 |
+
"class_type": "SaveImage",
|
103 |
+
"_meta": {
|
104 |
+
"title": "Save Image"
|
105 |
+
}
|
106 |
+
},
|
107 |
+
"10": {
|
108 |
+
"inputs": {
|
109 |
+
"upscale_method": "nearest-exact",
|
110 |
+
"width": 1536,
|
111 |
+
"height": 1152,
|
112 |
+
"crop": "disabled",
|
113 |
+
"samples": [
|
114 |
+
"16",
|
115 |
+
0
|
116 |
+
]
|
117 |
+
},
|
118 |
+
"class_type": "LatentUpscale",
|
119 |
+
"_meta": {
|
120 |
+
"title": "Upscale Latent"
|
121 |
+
}
|
122 |
+
},
|
123 |
+
"12": {
|
124 |
+
"inputs": {
|
125 |
+
"model_name": "RealESRGAN_x4plus.pth"
|
126 |
+
},
|
127 |
+
"class_type": "UpscaleModelLoader",
|
128 |
+
"_meta": {
|
129 |
+
"title": "Load Upscale Model"
|
130 |
+
}
|
131 |
+
},
|
132 |
+
"14": {
|
133 |
+
"inputs": {
|
134 |
+
"upscale_model": [
|
135 |
+
"12",
|
136 |
+
0
|
137 |
+
],
|
138 |
+
"image": [
|
139 |
+
"8",
|
140 |
+
0
|
141 |
+
]
|
142 |
+
},
|
143 |
+
"class_type": "ImageUpscaleWithModel",
|
144 |
+
"_meta": {
|
145 |
+
"title": "Upscale Image (using Model)"
|
146 |
+
}
|
147 |
+
},
|
148 |
+
"15": {
|
149 |
+
"inputs": {
|
150 |
+
"upscale_method": "area",
|
151 |
+
"width": 1152,
|
152 |
+
"height": 768,
|
153 |
+
"crop": "disabled",
|
154 |
+
"image": [
|
155 |
+
"14",
|
156 |
+
0
|
157 |
+
]
|
158 |
+
},
|
159 |
+
"class_type": "ImageScale",
|
160 |
+
"_meta": {
|
161 |
+
"title": "Upscale Image"
|
162 |
+
}
|
163 |
+
},
|
164 |
+
"16": {
|
165 |
+
"inputs": {
|
166 |
+
"pixels": [
|
167 |
+
"15",
|
168 |
+
0
|
169 |
+
],
|
170 |
+
"vae": [
|
171 |
+
"4",
|
172 |
+
2
|
173 |
+
]
|
174 |
+
},
|
175 |
+
"class_type": "VAEEncode",
|
176 |
+
"_meta": {
|
177 |
+
"title": "VAE Encode"
|
178 |
+
}
|
179 |
+
},
|
180 |
+
"18": {
|
181 |
+
"inputs": {
|
182 |
+
"samples": [
|
183 |
+
"19",
|
184 |
+
0
|
185 |
+
],
|
186 |
+
"vae": [
|
187 |
+
"4",
|
188 |
+
2
|
189 |
+
]
|
190 |
+
},
|
191 |
+
"class_type": "VAEDecode",
|
192 |
+
"_meta": {
|
193 |
+
"title": "VAE Decode"
|
194 |
+
}
|
195 |
+
},
|
196 |
+
"19": {
|
197 |
+
"inputs": {
|
198 |
+
"seed": 1069147258069384,
|
199 |
+
"steps": 8,
|
200 |
+
"cfg": 8,
|
201 |
+
"sampler_name": "euler",
|
202 |
+
"scheduler": "sgm_uniform",
|
203 |
+
"denoise": 0.6,
|
204 |
+
"model": [
|
205 |
+
"4",
|
206 |
+
0
|
207 |
+
],
|
208 |
+
"positive": [
|
209 |
+
"21",
|
210 |
+
0
|
211 |
+
],
|
212 |
+
"negative": [
|
213 |
+
"22",
|
214 |
+
0
|
215 |
+
],
|
216 |
+
"latent_image": [
|
217 |
+
"10",
|
218 |
+
0
|
219 |
+
]
|
220 |
+
},
|
221 |
+
"class_type": "KSampler",
|
222 |
+
"_meta": {
|
223 |
+
"title": "KSampler"
|
224 |
+
}
|
225 |
+
},
|
226 |
+
"20": {
|
227 |
+
"inputs": {
|
228 |
+
"seed": 85387134314530,
|
229 |
+
"steps": 20,
|
230 |
+
"cfg": 5.74,
|
231 |
+
"sampler_name": "dpm_2",
|
232 |
+
"scheduler": "normal",
|
233 |
+
"denoise": 1
|
234 |
+
},
|
235 |
+
"class_type": "KSampler",
|
236 |
+
"_meta": {
|
237 |
+
"title": "KSampler"
|
238 |
+
}
|
239 |
+
},
|
240 |
+
"21": {
|
241 |
+
"inputs": {
|
242 |
+
"text": "",
|
243 |
+
"clip": [
|
244 |
+
"4",
|
245 |
+
1
|
246 |
+
]
|
247 |
+
},
|
248 |
+
"class_type": "CLIPTextEncode",
|
249 |
+
"_meta": {
|
250 |
+
"title": "CLIP Text Encode (Prompt)"
|
251 |
+
}
|
252 |
+
},
|
253 |
+
"22": {
|
254 |
+
"inputs": {
|
255 |
+
"text": "",
|
256 |
+
"clip": [
|
257 |
+
"4",
|
258 |
+
1
|
259 |
+
]
|
260 |
+
},
|
261 |
+
"class_type": "CLIPTextEncode",
|
262 |
+
"_meta": {
|
263 |
+
"title": "CLIP Text Encode (Prompt)"
|
264 |
+
}
|
265 |
+
}
|
266 |
+
}
|
DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_hr_fix_reflex.json
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"sampler": 3,
|
3 |
+
"prompt": 6,
|
4 |
+
"image_size": 5,
|
5 |
+
"negative_prompt": 7,
|
6 |
+
"checkpoint": 4,
|
7 |
+
"output": 9,
|
8 |
+
"latentupscale": 10,
|
9 |
+
"resize": 15,
|
10 |
+
"hr_steps": 19,
|
11 |
+
"hr_prompt": 21,
|
12 |
+
"hr_negative_prompt": 22
|
13 |
+
}
|
DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_reflex.json
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"sampler": 3,
|
3 |
+
"prompt": 6,
|
4 |
+
"image_size": 5,
|
5 |
+
"negative_prompt": 7,
|
6 |
+
"checkpoint": 4,
|
7 |
+
"output": 9
|
8 |
+
}
|
DrawBridgeAPI/comfyui_workflows/创意融字 工作流Jianan_创意融字海报.json
ADDED
@@ -0,0 +1,1789 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"last_node_id": 70,
|
3 |
+
"last_link_id": 130,
|
4 |
+
"nodes": [
|
5 |
+
{
|
6 |
+
"id": 68,
|
7 |
+
"type": "LineArtPreprocessor",
|
8 |
+
"pos": [
|
9 |
+
631,
|
10 |
+
-867
|
11 |
+
],
|
12 |
+
"size": {
|
13 |
+
"0": 315,
|
14 |
+
"1": 82
|
15 |
+
},
|
16 |
+
"flags": {
|
17 |
+
|
18 |
+
},
|
19 |
+
"order": 7,
|
20 |
+
"mode": 0,
|
21 |
+
"inputs": [
|
22 |
+
{
|
23 |
+
"name": "image",
|
24 |
+
"type": "IMAGE",
|
25 |
+
"link": 111,
|
26 |
+
"label": "图像"
|
27 |
+
}
|
28 |
+
],
|
29 |
+
"outputs": [
|
30 |
+
{
|
31 |
+
"name": "IMAGE",
|
32 |
+
"type": "IMAGE",
|
33 |
+
"links": [
|
34 |
+
112,
|
35 |
+
115
|
36 |
+
],
|
37 |
+
"shape": 3,
|
38 |
+
"label": "图像",
|
39 |
+
"slot_index": 0
|
40 |
+
}
|
41 |
+
],
|
42 |
+
"properties": {
|
43 |
+
"Node name for S&R": "LineArtPreprocessor"
|
44 |
+
},
|
45 |
+
"widgets_values": [
|
46 |
+
"disable",
|
47 |
+
512
|
48 |
+
],
|
49 |
+
"_widget_cache_map": {
|
50 |
+
|
51 |
+
}
|
52 |
+
},
|
53 |
+
{
|
54 |
+
"id": 18,
|
55 |
+
"type": "ControlNetLoader",
|
56 |
+
"pos": [
|
57 |
+
977,
|
58 |
+
-874
|
59 |
+
],
|
60 |
+
"size": {
|
61 |
+
"0": 339.6257019042969,
|
62 |
+
"1": 82
|
63 |
+
},
|
64 |
+
"flags": {
|
65 |
+
"collapsed": false
|
66 |
+
},
|
67 |
+
"order": 0,
|
68 |
+
"mode": 0,
|
69 |
+
"outputs": [
|
70 |
+
{
|
71 |
+
"name": "CONTROL_NET",
|
72 |
+
"type": "CONTROL_NET",
|
73 |
+
"links": [
|
74 |
+
25
|
75 |
+
],
|
76 |
+
"shape": 3,
|
77 |
+
"label": "ControlNet"
|
78 |
+
}
|
79 |
+
],
|
80 |
+
"properties": {
|
81 |
+
"Node name for S&R": "ControlNetLoader"
|
82 |
+
},
|
83 |
+
"widgets_values": [
|
84 |
+
""
|
85 |
+
],
|
86 |
+
"_widget_cache_map": {
|
87 |
+
|
88 |
+
}
|
89 |
+
},
|
90 |
+
{
|
91 |
+
"id": 42,
|
92 |
+
"type": "ControlNetLoader",
|
93 |
+
"pos": [
|
94 |
+
1349,
|
95 |
+
-876
|
96 |
+
],
|
97 |
+
"size": {
|
98 |
+
"0": 368.9059753417969,
|
99 |
+
"1": 82
|
100 |
+
},
|
101 |
+
"flags": {
|
102 |
+
"collapsed": false
|
103 |
+
},
|
104 |
+
"order": 1,
|
105 |
+
"mode": 0,
|
106 |
+
"outputs": [
|
107 |
+
{
|
108 |
+
"name": "CONTROL_NET",
|
109 |
+
"type": "CONTROL_NET",
|
110 |
+
"links": [
|
111 |
+
66
|
112 |
+
],
|
113 |
+
"shape": 3,
|
114 |
+
"label": "ControlNet"
|
115 |
+
}
|
116 |
+
],
|
117 |
+
"properties": {
|
118 |
+
"Node name for S&R": "ControlNetLoader"
|
119 |
+
},
|
120 |
+
"widgets_values": [
|
121 |
+
""
|
122 |
+
],
|
123 |
+
"_widget_cache_map": {
|
124 |
+
|
125 |
+
}
|
126 |
+
},
|
127 |
+
{
|
128 |
+
"id": 22,
|
129 |
+
"type": "PreviewImage",
|
130 |
+
"pos": [
|
131 |
+
899,
|
132 |
+
-726
|
133 |
+
],
|
134 |
+
"size": {
|
135 |
+
"0": 240.84320068359375,
|
136 |
+
"1": 246
|
137 |
+
},
|
138 |
+
"flags": {
|
139 |
+
|
140 |
+
},
|
141 |
+
"order": 11,
|
142 |
+
"mode": 0,
|
143 |
+
"inputs": [
|
144 |
+
{
|
145 |
+
"name": "images",
|
146 |
+
"type": "IMAGE",
|
147 |
+
"link": 112,
|
148 |
+
"label": "图像"
|
149 |
+
}
|
150 |
+
],
|
151 |
+
"properties": {
|
152 |
+
"Node name for S&R": "PreviewImage"
|
153 |
+
}
|
154 |
+
},
|
155 |
+
{
|
156 |
+
"id": 17,
|
157 |
+
"type": "ControlNetApply",
|
158 |
+
"pos": [
|
159 |
+
1240,
|
160 |
+
-653
|
161 |
+
],
|
162 |
+
"size": {
|
163 |
+
"0": 210,
|
164 |
+
"1": 98
|
165 |
+
},
|
166 |
+
"flags": {
|
167 |
+
|
168 |
+
},
|
169 |
+
"order": 15,
|
170 |
+
"mode": 0,
|
171 |
+
"inputs": [
|
172 |
+
{
|
173 |
+
"name": "conditioning",
|
174 |
+
"type": "CONDITIONING",
|
175 |
+
"link": 23,
|
176 |
+
"label": "条件"
|
177 |
+
},
|
178 |
+
{
|
179 |
+
"name": "control_net",
|
180 |
+
"type": "CONTROL_NET",
|
181 |
+
"link": 25,
|
182 |
+
"label": "ControlNet",
|
183 |
+
"slot_index": 1
|
184 |
+
},
|
185 |
+
{
|
186 |
+
"name": "image",
|
187 |
+
"type": "IMAGE",
|
188 |
+
"link": 115,
|
189 |
+
"label": "图像"
|
190 |
+
}
|
191 |
+
],
|
192 |
+
"outputs": [
|
193 |
+
{
|
194 |
+
"name": "CONDITIONING",
|
195 |
+
"type": "CONDITIONING",
|
196 |
+
"links": [
|
197 |
+
70
|
198 |
+
],
|
199 |
+
"shape": 3,
|
200 |
+
"label": "条件",
|
201 |
+
"slot_index": 0
|
202 |
+
}
|
203 |
+
],
|
204 |
+
"properties": {
|
205 |
+
"Node name for S&R": "ControlNetApply"
|
206 |
+
},
|
207 |
+
"widgets_values": [
|
208 |
+
0.7000000000000001
|
209 |
+
],
|
210 |
+
"_widget_cache_map": {
|
211 |
+
|
212 |
+
}
|
213 |
+
},
|
214 |
+
{
|
215 |
+
"id": 41,
|
216 |
+
"type": "ControlNetApply",
|
217 |
+
"pos": [
|
218 |
+
1493,
|
219 |
+
-652
|
220 |
+
],
|
221 |
+
"size": {
|
222 |
+
"0": 210,
|
223 |
+
"1": 98
|
224 |
+
},
|
225 |
+
"flags": {
|
226 |
+
|
227 |
+
},
|
228 |
+
"order": 16,
|
229 |
+
"mode": 0,
|
230 |
+
"inputs": [
|
231 |
+
{
|
232 |
+
"name": "conditioning",
|
233 |
+
"type": "CONDITIONING",
|
234 |
+
"link": 70,
|
235 |
+
"label": "条件"
|
236 |
+
},
|
237 |
+
{
|
238 |
+
"name": "control_net",
|
239 |
+
"type": "CONTROL_NET",
|
240 |
+
"link": 66,
|
241 |
+
"label": "ControlNet",
|
242 |
+
"slot_index": 1
|
243 |
+
},
|
244 |
+
{
|
245 |
+
"name": "image",
|
246 |
+
"type": "IMAGE",
|
247 |
+
"link": 119,
|
248 |
+
"label": "图像"
|
249 |
+
}
|
250 |
+
],
|
251 |
+
"outputs": [
|
252 |
+
{
|
253 |
+
"name": "CONDITIONING",
|
254 |
+
"type": "CONDITIONING",
|
255 |
+
"links": [
|
256 |
+
71
|
257 |
+
],
|
258 |
+
"shape": 3,
|
259 |
+
"label": "条件",
|
260 |
+
"slot_index": 0
|
261 |
+
}
|
262 |
+
],
|
263 |
+
"properties": {
|
264 |
+
"Node name for S&R": "ControlNetApply"
|
265 |
+
},
|
266 |
+
"widgets_values": [
|
267 |
+
0.8
|
268 |
+
],
|
269 |
+
"_widget_cache_map": {
|
270 |
+
|
271 |
+
}
|
272 |
+
},
|
273 |
+
{
|
274 |
+
"id": 20,
|
275 |
+
"type": "LoadImage",
|
276 |
+
"pos": [
|
277 |
+
59,
|
278 |
+
-877
|
279 |
+
],
|
280 |
+
"size": {
|
281 |
+
"0": 244.5690155029297,
|
282 |
+
"1": 338.8974304199219
|
283 |
+
},
|
284 |
+
"flags": {
|
285 |
+
"collapsed": false
|
286 |
+
},
|
287 |
+
"order": 2,
|
288 |
+
"mode": 0,
|
289 |
+
"outputs": [
|
290 |
+
{
|
291 |
+
"name": "IMAGE",
|
292 |
+
"type": "IMAGE",
|
293 |
+
"links": [
|
294 |
+
111,
|
295 |
+
119
|
296 |
+
],
|
297 |
+
"shape": 3,
|
298 |
+
"label": "图像",
|
299 |
+
"slot_index": 0
|
300 |
+
},
|
301 |
+
{
|
302 |
+
"name": "MASK",
|
303 |
+
"type": "MASK",
|
304 |
+
"links": null,
|
305 |
+
"shape": 3,
|
306 |
+
"label": "遮罩"
|
307 |
+
}
|
308 |
+
],
|
309 |
+
"properties": {
|
310 |
+
"Node name for S&R": "LoadImage"
|
311 |
+
},
|
312 |
+
"widgets_values": [
|
313 |
+
"1724338180087.png",
|
314 |
+
"image"
|
315 |
+
],
|
316 |
+
"_widget_cache_map": {
|
317 |
+
|
318 |
+
},
|
319 |
+
"color": "#322",
|
320 |
+
"bgcolor": "#533"
|
321 |
+
},
|
322 |
+
{
|
323 |
+
"id": 15,
|
324 |
+
"type": "LoraLoader",
|
325 |
+
"pos": [
|
326 |
+
14.33333365122489,
|
327 |
+
-380.3333536783855
|
328 |
+
],
|
329 |
+
"size": {
|
330 |
+
"0": 210,
|
331 |
+
"1": 150
|
332 |
+
},
|
333 |
+
"flags": {
|
334 |
+
|
335 |
+
},
|
336 |
+
"order": 9,
|
337 |
+
"mode": 0,
|
338 |
+
"inputs": [
|
339 |
+
{
|
340 |
+
"name": "model",
|
341 |
+
"type": "MODEL",
|
342 |
+
"link": 120,
|
343 |
+
"label": "模型"
|
344 |
+
},
|
345 |
+
{
|
346 |
+
"name": "clip",
|
347 |
+
"type": "CLIP",
|
348 |
+
"link": 121,
|
349 |
+
"label": "CLIP"
|
350 |
+
}
|
351 |
+
],
|
352 |
+
"outputs": [
|
353 |
+
{
|
354 |
+
"name": "MODEL",
|
355 |
+
"type": "MODEL",
|
356 |
+
"links": [
|
357 |
+
122
|
358 |
+
],
|
359 |
+
"shape": 3,
|
360 |
+
"label": "模型",
|
361 |
+
"slot_index": 0
|
362 |
+
},
|
363 |
+
{
|
364 |
+
"name": "CLIP",
|
365 |
+
"type": "CLIP",
|
366 |
+
"links": [
|
367 |
+
123
|
368 |
+
],
|
369 |
+
"shape": 3,
|
370 |
+
"label": "CLIP",
|
371 |
+
"slot_index": 1
|
372 |
+
}
|
373 |
+
],
|
374 |
+
"properties": {
|
375 |
+
"Node name for S&R": "LoraLoader"
|
376 |
+
},
|
377 |
+
"widgets_values": [
|
378 |
+
null,
|
379 |
+
0.8,
|
380 |
+
1
|
381 |
+
],
|
382 |
+
"_widget_cache_map": {
|
383 |
+
|
384 |
+
}
|
385 |
+
},
|
386 |
+
{
|
387 |
+
"id": 14,
|
388 |
+
"type": "LoraLoader",
|
389 |
+
"pos": [
|
390 |
+
285.33333365122496,
|
391 |
+
-380.3333536783855
|
392 |
+
],
|
393 |
+
"size": {
|
394 |
+
"0": 210,
|
395 |
+
"1": 150
|
396 |
+
},
|
397 |
+
"flags": {
|
398 |
+
|
399 |
+
},
|
400 |
+
"order": 12,
|
401 |
+
"mode": 0,
|
402 |
+
"inputs": [
|
403 |
+
{
|
404 |
+
"name": "model",
|
405 |
+
"type": "MODEL",
|
406 |
+
"link": 122,
|
407 |
+
"label": "模型"
|
408 |
+
},
|
409 |
+
{
|
410 |
+
"name": "clip",
|
411 |
+
"type": "CLIP",
|
412 |
+
"link": 123,
|
413 |
+
"label": "CLIP"
|
414 |
+
}
|
415 |
+
],
|
416 |
+
"outputs": [
|
417 |
+
{
|
418 |
+
"name": "MODEL",
|
419 |
+
"type": "MODEL",
|
420 |
+
"links": [
|
421 |
+
124
|
422 |
+
],
|
423 |
+
"shape": 3,
|
424 |
+
"label": "模型",
|
425 |
+
"slot_index": 0
|
426 |
+
},
|
427 |
+
{
|
428 |
+
"name": "CLIP",
|
429 |
+
"type": "CLIP",
|
430 |
+
"links": [
|
431 |
+
125
|
432 |
+
],
|
433 |
+
"shape": 3,
|
434 |
+
"label": "CLIP",
|
435 |
+
"slot_index": 1
|
436 |
+
}
|
437 |
+
],
|
438 |
+
"properties": {
|
439 |
+
"Node name for S&R": "LoraLoader"
|
440 |
+
},
|
441 |
+
"widgets_values": [
|
442 |
+
null,
|
443 |
+
0.9,
|
444 |
+
1
|
445 |
+
],
|
446 |
+
"_widget_cache_map": {
|
447 |
+
|
448 |
+
}
|
449 |
+
},
|
450 |
+
{
|
451 |
+
"id": 16,
|
452 |
+
"type": "LoraLoader",
|
453 |
+
"pos": [
|
454 |
+
533.333333651225,
|
455 |
+
-383.3333536783855
|
456 |
+
],
|
457 |
+
"size": {
|
458 |
+
"0": 235.24232482910156,
|
459 |
+
"1": 150
|
460 |
+
},
|
461 |
+
"flags": {
|
462 |
+
|
463 |
+
},
|
464 |
+
"order": 13,
|
465 |
+
"mode": 4,
|
466 |
+
"inputs": [
|
467 |
+
{
|
468 |
+
"name": "model",
|
469 |
+
"type": "MODEL",
|
470 |
+
"link": 124,
|
471 |
+
"label": "模型"
|
472 |
+
},
|
473 |
+
{
|
474 |
+
"name": "clip",
|
475 |
+
"type": "CLIP",
|
476 |
+
"link": 125,
|
477 |
+
"label": "CLIP"
|
478 |
+
}
|
479 |
+
],
|
480 |
+
"outputs": [
|
481 |
+
{
|
482 |
+
"name": "MODEL",
|
483 |
+
"type": "MODEL",
|
484 |
+
"links": [
|
485 |
+
22
|
486 |
+
],
|
487 |
+
"shape": 3,
|
488 |
+
"label": "模型",
|
489 |
+
"slot_index": 0
|
490 |
+
},
|
491 |
+
{
|
492 |
+
"name": "CLIP",
|
493 |
+
"type": "CLIP",
|
494 |
+
"links": [
|
495 |
+
20
|
496 |
+
],
|
497 |
+
"shape": 3,
|
498 |
+
"label": "CLIP",
|
499 |
+
"slot_index": 1
|
500 |
+
}
|
501 |
+
],
|
502 |
+
"properties": {
|
503 |
+
"Node name for S&R": "LoraLoader"
|
504 |
+
},
|
505 |
+
"widgets_values": [
|
506 |
+
null,
|
507 |
+
0.9,
|
508 |
+
1
|
509 |
+
],
|
510 |
+
"_widget_cache_map": {
|
511 |
+
|
512 |
+
}
|
513 |
+
},
|
514 |
+
{
|
515 |
+
"id": 7,
|
516 |
+
"type": "CLIPTextEncode",
|
517 |
+
"pos": [
|
518 |
+
906,
|
519 |
+
-187
|
520 |
+
],
|
521 |
+
"size": {
|
522 |
+
"0": 304.75079345703125,
|
523 |
+
"1": 132.6532440185547
|
524 |
+
},
|
525 |
+
"flags": {
|
526 |
+
|
527 |
+
},
|
528 |
+
"order": 8,
|
529 |
+
"mode": 0,
|
530 |
+
"inputs": [
|
531 |
+
{
|
532 |
+
"name": "clip",
|
533 |
+
"type": "CLIP",
|
534 |
+
"link": 5,
|
535 |
+
"label": "CLIP"
|
536 |
+
}
|
537 |
+
],
|
538 |
+
"outputs": [
|
539 |
+
{
|
540 |
+
"name": "CONDITIONING",
|
541 |
+
"type": "CONDITIONING",
|
542 |
+
"links": [
|
543 |
+
6,
|
544 |
+
109
|
545 |
+
],
|
546 |
+
"slot_index": 0,
|
547 |
+
"label": "条件"
|
548 |
+
}
|
549 |
+
],
|
550 |
+
"properties": {
|
551 |
+
"Node name for S&R": "CLIPTextEncode"
|
552 |
+
},
|
553 |
+
"widgets_values": [
|
554 |
+
"embedding:EasyNegativeV2,humans,people, "
|
555 |
+
],
|
556 |
+
"_widget_cache_map": {
|
557 |
+
|
558 |
+
},
|
559 |
+
"color": "#322",
|
560 |
+
"bgcolor": "#533"
|
561 |
+
},
|
562 |
+
{
|
563 |
+
"id": 60,
|
564 |
+
"type": "UpscaleModelLoader",
|
565 |
+
"pos": [
|
566 |
+
162.99199549854984,
|
567 |
+
213.91385230251254
|
568 |
+
],
|
569 |
+
"size": {
|
570 |
+
"0": 261.4676208496094,
|
571 |
+
"1": 84.79285430908203
|
572 |
+
},
|
573 |
+
"flags": {
|
574 |
+
|
575 |
+
},
|
576 |
+
"order": 3,
|
577 |
+
"mode": 0,
|
578 |
+
"outputs": [
|
579 |
+
{
|
580 |
+
"name": "UPSCALE_MODEL",
|
581 |
+
"type": "UPSCALE_MODEL",
|
582 |
+
"links": [
|
583 |
+
95
|
584 |
+
],
|
585 |
+
"shape": 3,
|
586 |
+
"label": "放大模型"
|
587 |
+
}
|
588 |
+
],
|
589 |
+
"properties": {
|
590 |
+
"Node name for S&R": "UpscaleModelLoader"
|
591 |
+
},
|
592 |
+
"widgets_values": [
|
593 |
+
"ESRGAN_4x"
|
594 |
+
],
|
595 |
+
"_widget_cache_map": {
|
596 |
+
|
597 |
+
}
|
598 |
+
},
|
599 |
+
{
|
600 |
+
"id": 59,
|
601 |
+
"type": "ImageUpscaleWithModel",
|
602 |
+
"pos": [
|
603 |
+
433.99199549855007,
|
604 |
+
215.91385230251257
|
605 |
+
],
|
606 |
+
"size": {
|
607 |
+
"0": 241.79998779296875,
|
608 |
+
"1": 46
|
609 |
+
},
|
610 |
+
"flags": {
|
611 |
+
"collapsed": true
|
612 |
+
},
|
613 |
+
"order": 20,
|
614 |
+
"mode": 0,
|
615 |
+
"inputs": [
|
616 |
+
{
|
617 |
+
"name": "upscale_model",
|
618 |
+
"type": "UPSCALE_MODEL",
|
619 |
+
"link": 95,
|
620 |
+
"label": "放大模型",
|
621 |
+
"slot_index": 0
|
622 |
+
},
|
623 |
+
{
|
624 |
+
"name": "image",
|
625 |
+
"type": "IMAGE",
|
626 |
+
"link": 127,
|
627 |
+
"label": "图像",
|
628 |
+
"slot_index": 1
|
629 |
+
}
|
630 |
+
],
|
631 |
+
"outputs": [
|
632 |
+
{
|
633 |
+
"name": "IMAGE",
|
634 |
+
"type": "IMAGE",
|
635 |
+
"links": [
|
636 |
+
101
|
637 |
+
],
|
638 |
+
"shape": 3,
|
639 |
+
"label": "图像",
|
640 |
+
"slot_index": 0
|
641 |
+
}
|
642 |
+
],
|
643 |
+
"properties": {
|
644 |
+
"Node name for S&R": "ImageUpscaleWithModel"
|
645 |
+
},
|
646 |
+
"color": "#322",
|
647 |
+
"bgcolor": "#533"
|
648 |
+
},
|
649 |
+
{
|
650 |
+
"id": 64,
|
651 |
+
"type": "ImageScaleBy",
|
652 |
+
"pos": [
|
653 |
+
439.99199549855,
|
654 |
+
260.9138523025126
|
655 |
+
],
|
656 |
+
"size": {
|
657 |
+
"0": 210,
|
658 |
+
"1": 95.07756805419922
|
659 |
+
},
|
660 |
+
"flags": {
|
661 |
+
|
662 |
+
},
|
663 |
+
"order": 21,
|
664 |
+
"mode": 0,
|
665 |
+
"inputs": [
|
666 |
+
{
|
667 |
+
"name": "image",
|
668 |
+
"type": "IMAGE",
|
669 |
+
"link": 101,
|
670 |
+
"label": "图像"
|
671 |
+
}
|
672 |
+
],
|
673 |
+
"outputs": [
|
674 |
+
{
|
675 |
+
"name": "IMAGE",
|
676 |
+
"type": "IMAGE",
|
677 |
+
"links": [
|
678 |
+
92,
|
679 |
+
99
|
680 |
+
],
|
681 |
+
"shape": 3,
|
682 |
+
"label": "图像",
|
683 |
+
"slot_index": 0
|
684 |
+
}
|
685 |
+
],
|
686 |
+
"properties": {
|
687 |
+
"Node name for S&R": "ImageScaleBy"
|
688 |
+
},
|
689 |
+
"widgets_values": [
|
690 |
+
"nearest-exact",
|
691 |
+
0.5
|
692 |
+
],
|
693 |
+
"_widget_cache_map": {
|
694 |
+
|
695 |
+
}
|
696 |
+
},
|
697 |
+
{
|
698 |
+
"id": 56,
|
699 |
+
"type": "TilePreprocessor",
|
700 |
+
"pos": [
|
701 |
+
350.9919954985504,
|
702 |
+
420.9138523025124
|
703 |
+
],
|
704 |
+
"size": {
|
705 |
+
"0": 315,
|
706 |
+
"1": 82
|
707 |
+
},
|
708 |
+
"flags": {
|
709 |
+
|
710 |
+
},
|
711 |
+
"order": 22,
|
712 |
+
"mode": 0,
|
713 |
+
"inputs": [
|
714 |
+
{
|
715 |
+
"name": "image",
|
716 |
+
"type": "IMAGE",
|
717 |
+
"link": 92,
|
718 |
+
"label": "图像"
|
719 |
+
}
|
720 |
+
],
|
721 |
+
"outputs": [
|
722 |
+
{
|
723 |
+
"name": "IMAGE",
|
724 |
+
"type": "IMAGE",
|
725 |
+
"links": [
|
726 |
+
94
|
727 |
+
],
|
728 |
+
"shape": 3,
|
729 |
+
"label": "图像",
|
730 |
+
"slot_index": 0
|
731 |
+
}
|
732 |
+
],
|
733 |
+
"properties": {
|
734 |
+
"Node name for S&R": "TilePreprocessor"
|
735 |
+
},
|
736 |
+
"widgets_values": [
|
737 |
+
2,
|
738 |
+
512
|
739 |
+
],
|
740 |
+
"_widget_cache_map": {
|
741 |
+
|
742 |
+
}
|
743 |
+
},
|
744 |
+
{
|
745 |
+
"id": 58,
|
746 |
+
"type": "ControlNetLoader",
|
747 |
+
"pos": [
|
748 |
+
388.9919954985502,
|
749 |
+
636.9138523025122
|
750 |
+
],
|
751 |
+
"size": {
|
752 |
+
"0": 315,
|
753 |
+
"1": 82
|
754 |
+
},
|
755 |
+
"flags": {
|
756 |
+
"collapsed": true
|
757 |
+
},
|
758 |
+
"order": 4,
|
759 |
+
"mode": 0,
|
760 |
+
"outputs": [
|
761 |
+
{
|
762 |
+
"name": "CONTROL_NET",
|
763 |
+
"type": "CONTROL_NET",
|
764 |
+
"links": [
|
765 |
+
93
|
766 |
+
],
|
767 |
+
"shape": 3,
|
768 |
+
"label": "ControlNet"
|
769 |
+
}
|
770 |
+
],
|
771 |
+
"properties": {
|
772 |
+
"Node name for S&R": "ControlNetLoader"
|
773 |
+
},
|
774 |
+
"widgets_values": [
|
775 |
+
""
|
776 |
+
],
|
777 |
+
"_widget_cache_map": {
|
778 |
+
|
779 |
+
}
|
780 |
+
},
|
781 |
+
{
|
782 |
+
"id": 62,
|
783 |
+
"type": "VAEEncode",
|
784 |
+
"pos": [
|
785 |
+
699.9919954985504,
|
786 |
+
635.9138523025122
|
787 |
+
],
|
788 |
+
"size": {
|
789 |
+
"0": 210,
|
790 |
+
"1": 46
|
791 |
+
},
|
792 |
+
"flags": {
|
793 |
+
"collapsed": true
|
794 |
+
},
|
795 |
+
"order": 23,
|
796 |
+
"mode": 0,
|
797 |
+
"inputs": [
|
798 |
+
{
|
799 |
+
"name": "pixels",
|
800 |
+
"type": "IMAGE",
|
801 |
+
"link": 99,
|
802 |
+
"label": "图像"
|
803 |
+
},
|
804 |
+
{
|
805 |
+
"name": "vae",
|
806 |
+
"type": "VAE",
|
807 |
+
"link": 105,
|
808 |
+
"label": "VAE"
|
809 |
+
}
|
810 |
+
],
|
811 |
+
"outputs": [
|
812 |
+
{
|
813 |
+
"name": "LATENT",
|
814 |
+
"type": "LATENT",
|
815 |
+
"links": [
|
816 |
+
98
|
817 |
+
],
|
818 |
+
"shape": 3,
|
819 |
+
"label": "Latent"
|
820 |
+
}
|
821 |
+
],
|
822 |
+
"properties": {
|
823 |
+
"Node name for S&R": "VAEEncode"
|
824 |
+
}
|
825 |
+
},
|
826 |
+
{
|
827 |
+
"id": 4,
|
828 |
+
"type": "CheckpointLoaderSimple",
|
829 |
+
"pos": [
|
830 |
+
-297,
|
831 |
+
-131
|
832 |
+
],
|
833 |
+
"size": {
|
834 |
+
"0": 322.34063720703125,
|
835 |
+
"1": 125.84071350097656
|
836 |
+
},
|
837 |
+
"flags": {
|
838 |
+
|
839 |
+
},
|
840 |
+
"order": 5,
|
841 |
+
"mode": 0,
|
842 |
+
"outputs": [
|
843 |
+
{
|
844 |
+
"name": "MODEL",
|
845 |
+
"type": "MODEL",
|
846 |
+
"links": [
|
847 |
+
106,
|
848 |
+
120
|
849 |
+
],
|
850 |
+
"slot_index": 0,
|
851 |
+
"label": "模型"
|
852 |
+
},
|
853 |
+
{
|
854 |
+
"name": "CLIP",
|
855 |
+
"type": "CLIP",
|
856 |
+
"links": [
|
857 |
+
5,
|
858 |
+
121
|
859 |
+
],
|
860 |
+
"slot_index": 1,
|
861 |
+
"label": "CLIP"
|
862 |
+
},
|
863 |
+
{
|
864 |
+
"name": "VAE",
|
865 |
+
"type": "VAE",
|
866 |
+
"links": [
|
867 |
+
42
|
868 |
+
],
|
869 |
+
"slot_index": 2,
|
870 |
+
"label": "VAE"
|
871 |
+
}
|
872 |
+
],
|
873 |
+
"properties": {
|
874 |
+
"Node name for S&R": "CheckpointLoaderSimple"
|
875 |
+
},
|
876 |
+
"widgets_values": [
|
877 |
+
null
|
878 |
+
],
|
879 |
+
"_widget_cache_map": {
|
880 |
+
|
881 |
+
}
|
882 |
+
},
|
883 |
+
{
|
884 |
+
"id": 11,
|
885 |
+
"type": "Reroute",
|
886 |
+
"pos": [
|
887 |
+
852,
|
888 |
+
63
|
889 |
+
],
|
890 |
+
"size": [
|
891 |
+
75,
|
892 |
+
26
|
893 |
+
],
|
894 |
+
"flags": {
|
895 |
+
|
896 |
+
},
|
897 |
+
"order": 10,
|
898 |
+
"mode": 0,
|
899 |
+
"inputs": [
|
900 |
+
{
|
901 |
+
"name": "",
|
902 |
+
"type": "*",
|
903 |
+
"link": 42
|
904 |
+
}
|
905 |
+
],
|
906 |
+
"outputs": [
|
907 |
+
{
|
908 |
+
"name": "",
|
909 |
+
"type": "VAE",
|
910 |
+
"links": [
|
911 |
+
12,
|
912 |
+
105,
|
913 |
+
107
|
914 |
+
],
|
915 |
+
"slot_index": 0
|
916 |
+
}
|
917 |
+
],
|
918 |
+
"properties": {
|
919 |
+
"showOutputText": false,
|
920 |
+
"horizontal": false
|
921 |
+
}
|
922 |
+
},
|
923 |
+
{
|
924 |
+
"id": 63,
|
925 |
+
"type": "VAEDecode",
|
926 |
+
"pos": [
|
927 |
+
1285.9919954985496,
|
928 |
+
241.91385230251265
|
929 |
+
],
|
930 |
+
"size": {
|
931 |
+
"0": 210,
|
932 |
+
"1": 46
|
933 |
+
},
|
934 |
+
"flags": {
|
935 |
+
"collapsed": true
|
936 |
+
},
|
937 |
+
"order": 26,
|
938 |
+
"mode": 0,
|
939 |
+
"inputs": [
|
940 |
+
{
|
941 |
+
"name": "samples",
|
942 |
+
"type": "LATENT",
|
943 |
+
"link": 100,
|
944 |
+
"label": "Latent"
|
945 |
+
},
|
946 |
+
{
|
947 |
+
"name": "vae",
|
948 |
+
"type": "VAE",
|
949 |
+
"link": 107,
|
950 |
+
"label": "VAE"
|
951 |
+
}
|
952 |
+
],
|
953 |
+
"outputs": [
|
954 |
+
{
|
955 |
+
"name": "IMAGE",
|
956 |
+
"type": "IMAGE",
|
957 |
+
"links": [
|
958 |
+
102
|
959 |
+
],
|
960 |
+
"shape": 3,
|
961 |
+
"label": "图像",
|
962 |
+
"slot_index": 0
|
963 |
+
}
|
964 |
+
],
|
965 |
+
"properties": {
|
966 |
+
"Node name for S&R": "VAEDecode"
|
967 |
+
}
|
968 |
+
},
|
969 |
+
{
|
970 |
+
"id": 61,
|
971 |
+
"type": "KSampler",
|
972 |
+
"pos": [
|
973 |
+
1028.9919954985496,
|
974 |
+
221.9138523025125
|
975 |
+
],
|
976 |
+
"size": {
|
977 |
+
"0": 315,
|
978 |
+
"1": 474
|
979 |
+
},
|
980 |
+
"flags": {
|
981 |
+
|
982 |
+
},
|
983 |
+
"order": 25,
|
984 |
+
"mode": 0,
|
985 |
+
"inputs": [
|
986 |
+
{
|
987 |
+
"name": "model",
|
988 |
+
"type": "MODEL",
|
989 |
+
"link": 106,
|
990 |
+
"label": "模型"
|
991 |
+
},
|
992 |
+
{
|
993 |
+
"name": "positive",
|
994 |
+
"type": "CONDITIONING",
|
995 |
+
"link": 129,
|
996 |
+
"label": "正面条件"
|
997 |
+
},
|
998 |
+
{
|
999 |
+
"name": "negative",
|
1000 |
+
"type": "CONDITIONING",
|
1001 |
+
"link": 97,
|
1002 |
+
"label": "负面条件"
|
1003 |
+
},
|
1004 |
+
{
|
1005 |
+
"name": "latent_image",
|
1006 |
+
"type": "LATENT",
|
1007 |
+
"link": 98,
|
1008 |
+
"label": "Latent",
|
1009 |
+
"slot_index": 3
|
1010 |
+
}
|
1011 |
+
],
|
1012 |
+
"outputs": [
|
1013 |
+
{
|
1014 |
+
"name": "LATENT",
|
1015 |
+
"type": "LATENT",
|
1016 |
+
"links": [
|
1017 |
+
100
|
1018 |
+
],
|
1019 |
+
"shape": 3,
|
1020 |
+
"label": "Latent",
|
1021 |
+
"slot_index": 0
|
1022 |
+
}
|
1023 |
+
],
|
1024 |
+
"properties": {
|
1025 |
+
"Node name for S&R": "KSampler"
|
1026 |
+
},
|
1027 |
+
"widgets_values": [
|
1028 |
+
368308997265100,
|
1029 |
+
"randomize",
|
1030 |
+
32,
|
1031 |
+
6,
|
1032 |
+
"euler_ancestral",
|
1033 |
+
"normal",
|
1034 |
+
0.4
|
1035 |
+
],
|
1036 |
+
"_widget_cache_map": {
|
1037 |
+
|
1038 |
+
},
|
1039 |
+
"color": "#323",
|
1040 |
+
"bgcolor": "#535"
|
1041 |
+
},
|
1042 |
+
{
|
1043 |
+
"id": 57,
|
1044 |
+
"type": "ControlNetApplyAdvanced",
|
1045 |
+
"pos": [
|
1046 |
+
683.9919954985504,
|
1047 |
+
214.91385230251257
|
1048 |
+
],
|
1049 |
+
"size": {
|
1050 |
+
"0": 315,
|
1051 |
+
"1": 166
|
1052 |
+
},
|
1053 |
+
"flags": {
|
1054 |
+
|
1055 |
+
},
|
1056 |
+
"order": 24,
|
1057 |
+
"mode": 0,
|
1058 |
+
"inputs": [
|
1059 |
+
{
|
1060 |
+
"name": "positive",
|
1061 |
+
"type": "CONDITIONING",
|
1062 |
+
"link": 130,
|
1063 |
+
"label": "正面条件"
|
1064 |
+
},
|
1065 |
+
{
|
1066 |
+
"name": "negative",
|
1067 |
+
"type": "CONDITIONING",
|
1068 |
+
"link": 109,
|
1069 |
+
"label": "负面条件"
|
1070 |
+
},
|
1071 |
+
{
|
1072 |
+
"name": "control_net",
|
1073 |
+
"type": "CONTROL_NET",
|
1074 |
+
"link": 93,
|
1075 |
+
"label": "ControlNet",
|
1076 |
+
"slot_index": 2
|
1077 |
+
},
|
1078 |
+
{
|
1079 |
+
"name": "image",
|
1080 |
+
"type": "IMAGE",
|
1081 |
+
"link": 94,
|
1082 |
+
"label": "图像"
|
1083 |
+
}
|
1084 |
+
],
|
1085 |
+
"outputs": [
|
1086 |
+
{
|
1087 |
+
"name": "positive",
|
1088 |
+
"type": "CONDITIONING",
|
1089 |
+
"links": [
|
1090 |
+
129
|
1091 |
+
],
|
1092 |
+
"shape": 3,
|
1093 |
+
"label": "正面条件",
|
1094 |
+
"slot_index": 0
|
1095 |
+
},
|
1096 |
+
{
|
1097 |
+
"name": "negative",
|
1098 |
+
"type": "CONDITIONING",
|
1099 |
+
"links": [
|
1100 |
+
97
|
1101 |
+
],
|
1102 |
+
"shape": 3,
|
1103 |
+
"label": "负面条件",
|
1104 |
+
"slot_index": 1
|
1105 |
+
}
|
1106 |
+
],
|
1107 |
+
"properties": {
|
1108 |
+
"Node name for S&R": "ControlNetApplyAdvanced"
|
1109 |
+
},
|
1110 |
+
"widgets_values": [
|
1111 |
+
1,
|
1112 |
+
0,
|
1113 |
+
1
|
1114 |
+
],
|
1115 |
+
"_widget_cache_map": {
|
1116 |
+
|
1117 |
+
}
|
1118 |
+
},
|
1119 |
+
{
|
1120 |
+
"id": 6,
|
1121 |
+
"type": "CLIPTextEncode",
|
1122 |
+
"pos": [
|
1123 |
+
909,
|
1124 |
+
-357
|
1125 |
+
],
|
1126 |
+
"size": {
|
1127 |
+
"0": 294.09674072265625,
|
1128 |
+
"1": 124.96588134765625
|
1129 |
+
},
|
1130 |
+
"flags": {
|
1131 |
+
|
1132 |
+
},
|
1133 |
+
"order": 14,
|
1134 |
+
"mode": 0,
|
1135 |
+
"inputs": [
|
1136 |
+
{
|
1137 |
+
"name": "clip",
|
1138 |
+
"type": "CLIP",
|
1139 |
+
"link": 20,
|
1140 |
+
"label": "CLIP"
|
1141 |
+
}
|
1142 |
+
],
|
1143 |
+
"outputs": [
|
1144 |
+
{
|
1145 |
+
"name": "CONDITIONING",
|
1146 |
+
"type": "CONDITIONING",
|
1147 |
+
"links": [
|
1148 |
+
23,
|
1149 |
+
130
|
1150 |
+
],
|
1151 |
+
"slot_index": 0,
|
1152 |
+
"label": "条件"
|
1153 |
+
}
|
1154 |
+
],
|
1155 |
+
"properties": {
|
1156 |
+
"Node name for S&R": "CLIPTextEncode"
|
1157 |
+
},
|
1158 |
+
"widgets_values": [
|
1159 |
+
"Masterpiece,best quality,detailed,(cake:1.1),cloud,flower,no_one,Conceptual product design,outdoor,c4dplus,hs,8k"
|
1160 |
+
],
|
1161 |
+
"_widget_cache_map": {
|
1162 |
+
|
1163 |
+
},
|
1164 |
+
"color": "#322",
|
1165 |
+
"bgcolor": "#533"
|
1166 |
+
},
|
1167 |
+
{
|
1168 |
+
"id": 65,
|
1169 |
+
"type": "SaveImage",
|
1170 |
+
"pos": [
|
1171 |
+
1376.646858661257,
|
1172 |
+
207.7515440348524
|
1173 |
+
],
|
1174 |
+
"size": {
|
1175 |
+
"0": 410.506103515625,
|
1176 |
+
"1": 485.4575500488281
|
1177 |
+
},
|
1178 |
+
"flags": {
|
1179 |
+
|
1180 |
+
},
|
1181 |
+
"order": 27,
|
1182 |
+
"mode": 0,
|
1183 |
+
"inputs": [
|
1184 |
+
{
|
1185 |
+
"name": "images",
|
1186 |
+
"type": "IMAGE",
|
1187 |
+
"link": 102,
|
1188 |
+
"label": "图像"
|
1189 |
+
}
|
1190 |
+
],
|
1191 |
+
"properties": {
|
1192 |
+
"Node name for S&R": "SaveImage"
|
1193 |
+
},
|
1194 |
+
"widgets_values": [
|
1195 |
+
"ComfyUI"
|
1196 |
+
],
|
1197 |
+
"_widget_cache_map": {
|
1198 |
+
|
1199 |
+
},
|
1200 |
+
"color": "#232",
|
1201 |
+
"bgcolor": "#353"
|
1202 |
+
},
|
1203 |
+
{
|
1204 |
+
"id": 5,
|
1205 |
+
"type": "EmptyLatentImage",
|
1206 |
+
"pos": [
|
1207 |
+
1260,
|
1208 |
+
-329
|
1209 |
+
],
|
1210 |
+
"size": {
|
1211 |
+
"0": 210,
|
1212 |
+
"1": 112.68038177490234
|
1213 |
+
},
|
1214 |
+
"flags": {
|
1215 |
+
|
1216 |
+
},
|
1217 |
+
"order": 6,
|
1218 |
+
"mode": 0,
|
1219 |
+
"outputs": [
|
1220 |
+
{
|
1221 |
+
"name": "LATENT",
|
1222 |
+
"type": "LATENT",
|
1223 |
+
"links": [
|
1224 |
+
2
|
1225 |
+
],
|
1226 |
+
"slot_index": 0,
|
1227 |
+
"label": "Latent"
|
1228 |
+
}
|
1229 |
+
],
|
1230 |
+
"properties": {
|
1231 |
+
"Node name for S&R": "EmptyLatentImage"
|
1232 |
+
},
|
1233 |
+
"widgets_values": [
|
1234 |
+
512,
|
1235 |
+
768,
|
1236 |
+
1
|
1237 |
+
],
|
1238 |
+
"_widget_cache_map": {
|
1239 |
+
|
1240 |
+
}
|
1241 |
+
},
|
1242 |
+
{
|
1243 |
+
"id": 3,
|
1244 |
+
"type": "KSampler",
|
1245 |
+
"pos": [
|
1246 |
+
1497,
|
1247 |
+
-388
|
1248 |
+
],
|
1249 |
+
"size": {
|
1250 |
+
"0": 263.527099609375,
|
1251 |
+
"1": 474
|
1252 |
+
},
|
1253 |
+
"flags": {
|
1254 |
+
|
1255 |
+
},
|
1256 |
+
"order": 17,
|
1257 |
+
"mode": 0,
|
1258 |
+
"inputs": [
|
1259 |
+
{
|
1260 |
+
"name": "model",
|
1261 |
+
"type": "MODEL",
|
1262 |
+
"link": 22,
|
1263 |
+
"label": "模型"
|
1264 |
+
},
|
1265 |
+
{
|
1266 |
+
"name": "positive",
|
1267 |
+
"type": "CONDITIONING",
|
1268 |
+
"link": 71,
|
1269 |
+
"label": "正面条件",
|
1270 |
+
"slot_index": 1
|
1271 |
+
},
|
1272 |
+
{
|
1273 |
+
"name": "negative",
|
1274 |
+
"type": "CONDITIONING",
|
1275 |
+
"link": 6,
|
1276 |
+
"label": "负面条件"
|
1277 |
+
},
|
1278 |
+
{
|
1279 |
+
"name": "latent_image",
|
1280 |
+
"type": "LATENT",
|
1281 |
+
"link": 2,
|
1282 |
+
"label": "Latent"
|
1283 |
+
}
|
1284 |
+
],
|
1285 |
+
"outputs": [
|
1286 |
+
{
|
1287 |
+
"name": "LATENT",
|
1288 |
+
"type": "LATENT",
|
1289 |
+
"links": [
|
1290 |
+
7
|
1291 |
+
],
|
1292 |
+
"slot_index": 0,
|
1293 |
+
"label": "Latent"
|
1294 |
+
}
|
1295 |
+
],
|
1296 |
+
"properties": {
|
1297 |
+
"Node name for S&R": "KSampler"
|
1298 |
+
},
|
1299 |
+
"widgets_values": [
|
1300 |
+
163854169040437,
|
1301 |
+
"fixed",
|
1302 |
+
50,
|
1303 |
+
7,
|
1304 |
+
"dpmpp_2m_sde",
|
1305 |
+
"karras",
|
1306 |
+
1
|
1307 |
+
],
|
1308 |
+
"_widget_cache_map": {
|
1309 |
+
|
1310 |
+
},
|
1311 |
+
"color": "#323",
|
1312 |
+
"bgcolor": "#535"
|
1313 |
+
},
|
1314 |
+
{
|
1315 |
+
"id": 8,
|
1316 |
+
"type": "VAEDecode",
|
1317 |
+
"pos": [
|
1318 |
+
1820,
|
1319 |
+
-366
|
1320 |
+
],
|
1321 |
+
"size": {
|
1322 |
+
"0": 210,
|
1323 |
+
"1": 46
|
1324 |
+
},
|
1325 |
+
"flags": {
|
1326 |
+
"collapsed": true
|
1327 |
+
},
|
1328 |
+
"order": 18,
|
1329 |
+
"mode": 0,
|
1330 |
+
"inputs": [
|
1331 |
+
{
|
1332 |
+
"name": "samples",
|
1333 |
+
"type": "LATENT",
|
1334 |
+
"link": 7,
|
1335 |
+
"label": "Latent"
|
1336 |
+
},
|
1337 |
+
{
|
1338 |
+
"name": "vae",
|
1339 |
+
"type": "VAE",
|
1340 |
+
"link": 12,
|
1341 |
+
"label": "VAE"
|
1342 |
+
}
|
1343 |
+
],
|
1344 |
+
"outputs": [
|
1345 |
+
{
|
1346 |
+
"name": "IMAGE",
|
1347 |
+
"type": "IMAGE",
|
1348 |
+
"links": [
|
1349 |
+
29,
|
1350 |
+
127
|
1351 |
+
],
|
1352 |
+
"slot_index": 0,
|
1353 |
+
"label": "图像"
|
1354 |
+
}
|
1355 |
+
],
|
1356 |
+
"properties": {
|
1357 |
+
"Node name for S&R": "VAEDecode"
|
1358 |
+
}
|
1359 |
+
},
|
1360 |
+
{
|
1361 |
+
"id": 21,
|
1362 |
+
"type": "PreviewImage",
|
1363 |
+
"pos": [
|
1364 |
+
1774,
|
1365 |
+
-312
|
1366 |
+
],
|
1367 |
+
"size": {
|
1368 |
+
"0": 344.7645263671875,
|
1369 |
+
"1": 368.7522277832031
|
1370 |
+
},
|
1371 |
+
"flags": {
|
1372 |
+
|
1373 |
+
},
|
1374 |
+
"order": 19,
|
1375 |
+
"mode": 0,
|
1376 |
+
"inputs": [
|
1377 |
+
{
|
1378 |
+
"name": "images",
|
1379 |
+
"type": "IMAGE",
|
1380 |
+
"link": 29,
|
1381 |
+
"label": "图像"
|
1382 |
+
}
|
1383 |
+
],
|
1384 |
+
"properties": {
|
1385 |
+
"Node name for S&R": "PreviewImage"
|
1386 |
+
},
|
1387 |
+
"color": "#232",
|
1388 |
+
"bgcolor": "#353"
|
1389 |
+
}
|
1390 |
+
],
|
1391 |
+
"links": [
|
1392 |
+
[
|
1393 |
+
2,
|
1394 |
+
5,
|
1395 |
+
0,
|
1396 |
+
3,
|
1397 |
+
3,
|
1398 |
+
"LATENT"
|
1399 |
+
],
|
1400 |
+
[
|
1401 |
+
5,
|
1402 |
+
4,
|
1403 |
+
1,
|
1404 |
+
7,
|
1405 |
+
0,
|
1406 |
+
"CLIP"
|
1407 |
+
],
|
1408 |
+
[
|
1409 |
+
6,
|
1410 |
+
7,
|
1411 |
+
0,
|
1412 |
+
3,
|
1413 |
+
2,
|
1414 |
+
"CONDITIONING"
|
1415 |
+
],
|
1416 |
+
[
|
1417 |
+
7,
|
1418 |
+
3,
|
1419 |
+
0,
|
1420 |
+
8,
|
1421 |
+
0,
|
1422 |
+
"LATENT"
|
1423 |
+
],
|
1424 |
+
[
|
1425 |
+
12,
|
1426 |
+
11,
|
1427 |
+
0,
|
1428 |
+
8,
|
1429 |
+
1,
|
1430 |
+
"VAE"
|
1431 |
+
],
|
1432 |
+
[
|
1433 |
+
20,
|
1434 |
+
16,
|
1435 |
+
1,
|
1436 |
+
6,
|
1437 |
+
0,
|
1438 |
+
"CLIP"
|
1439 |
+
],
|
1440 |
+
[
|
1441 |
+
22,
|
1442 |
+
16,
|
1443 |
+
0,
|
1444 |
+
3,
|
1445 |
+
0,
|
1446 |
+
"MODEL"
|
1447 |
+
],
|
1448 |
+
[
|
1449 |
+
23,
|
1450 |
+
6,
|
1451 |
+
0,
|
1452 |
+
17,
|
1453 |
+
0,
|
1454 |
+
"CONDITIONING"
|
1455 |
+
],
|
1456 |
+
[
|
1457 |
+
25,
|
1458 |
+
18,
|
1459 |
+
0,
|
1460 |
+
17,
|
1461 |
+
1,
|
1462 |
+
"CONTROL_NET"
|
1463 |
+
],
|
1464 |
+
[
|
1465 |
+
29,
|
1466 |
+
8,
|
1467 |
+
0,
|
1468 |
+
21,
|
1469 |
+
0,
|
1470 |
+
"IMAGE"
|
1471 |
+
],
|
1472 |
+
[
|
1473 |
+
42,
|
1474 |
+
4,
|
1475 |
+
2,
|
1476 |
+
11,
|
1477 |
+
0,
|
1478 |
+
"*"
|
1479 |
+
],
|
1480 |
+
[
|
1481 |
+
66,
|
1482 |
+
42,
|
1483 |
+
0,
|
1484 |
+
41,
|
1485 |
+
1,
|
1486 |
+
"CONTROL_NET"
|
1487 |
+
],
|
1488 |
+
[
|
1489 |
+
70,
|
1490 |
+
17,
|
1491 |
+
0,
|
1492 |
+
41,
|
1493 |
+
0,
|
1494 |
+
"CONDITIONING"
|
1495 |
+
],
|
1496 |
+
[
|
1497 |
+
71,
|
1498 |
+
41,
|
1499 |
+
0,
|
1500 |
+
3,
|
1501 |
+
1,
|
1502 |
+
"CONDITIONING"
|
1503 |
+
],
|
1504 |
+
[
|
1505 |
+
92,
|
1506 |
+
64,
|
1507 |
+
0,
|
1508 |
+
56,
|
1509 |
+
0,
|
1510 |
+
"IMAGE"
|
1511 |
+
],
|
1512 |
+
[
|
1513 |
+
93,
|
1514 |
+
58,
|
1515 |
+
0,
|
1516 |
+
57,
|
1517 |
+
2,
|
1518 |
+
"CONTROL_NET"
|
1519 |
+
],
|
1520 |
+
[
|
1521 |
+
94,
|
1522 |
+
56,
|
1523 |
+
0,
|
1524 |
+
57,
|
1525 |
+
3,
|
1526 |
+
"IMAGE"
|
1527 |
+
],
|
1528 |
+
[
|
1529 |
+
95,
|
1530 |
+
60,
|
1531 |
+
0,
|
1532 |
+
59,
|
1533 |
+
0,
|
1534 |
+
"UPSCALE_MODEL"
|
1535 |
+
],
|
1536 |
+
[
|
1537 |
+
97,
|
1538 |
+
57,
|
1539 |
+
1,
|
1540 |
+
61,
|
1541 |
+
2,
|
1542 |
+
"CONDITIONING"
|
1543 |
+
],
|
1544 |
+
[
|
1545 |
+
98,
|
1546 |
+
62,
|
1547 |
+
0,
|
1548 |
+
61,
|
1549 |
+
3,
|
1550 |
+
"LATENT"
|
1551 |
+
],
|
1552 |
+
[
|
1553 |
+
99,
|
1554 |
+
64,
|
1555 |
+
0,
|
1556 |
+
62,
|
1557 |
+
0,
|
1558 |
+
"IMAGE"
|
1559 |
+
],
|
1560 |
+
[
|
1561 |
+
100,
|
1562 |
+
61,
|
1563 |
+
0,
|
1564 |
+
63,
|
1565 |
+
0,
|
1566 |
+
"LATENT"
|
1567 |
+
],
|
1568 |
+
[
|
1569 |
+
101,
|
1570 |
+
59,
|
1571 |
+
0,
|
1572 |
+
64,
|
1573 |
+
0,
|
1574 |
+
"IMAGE"
|
1575 |
+
],
|
1576 |
+
[
|
1577 |
+
102,
|
1578 |
+
63,
|
1579 |
+
0,
|
1580 |
+
65,
|
1581 |
+
0,
|
1582 |
+
"IMAGE"
|
1583 |
+
],
|
1584 |
+
[
|
1585 |
+
105,
|
1586 |
+
11,
|
1587 |
+
0,
|
1588 |
+
62,
|
1589 |
+
1,
|
1590 |
+
"VAE"
|
1591 |
+
],
|
1592 |
+
[
|
1593 |
+
106,
|
1594 |
+
4,
|
1595 |
+
0,
|
1596 |
+
61,
|
1597 |
+
0,
|
1598 |
+
"MODEL"
|
1599 |
+
],
|
1600 |
+
[
|
1601 |
+
107,
|
1602 |
+
11,
|
1603 |
+
0,
|
1604 |
+
63,
|
1605 |
+
1,
|
1606 |
+
"VAE"
|
1607 |
+
],
|
1608 |
+
[
|
1609 |
+
109,
|
1610 |
+
7,
|
1611 |
+
0,
|
1612 |
+
57,
|
1613 |
+
1,
|
1614 |
+
"CONDITIONING"
|
1615 |
+
],
|
1616 |
+
[
|
1617 |
+
111,
|
1618 |
+
20,
|
1619 |
+
0,
|
1620 |
+
68,
|
1621 |
+
0,
|
1622 |
+
"IMAGE"
|
1623 |
+
],
|
1624 |
+
[
|
1625 |
+
112,
|
1626 |
+
68,
|
1627 |
+
0,
|
1628 |
+
22,
|
1629 |
+
0,
|
1630 |
+
"IMAGE"
|
1631 |
+
],
|
1632 |
+
[
|
1633 |
+
115,
|
1634 |
+
68,
|
1635 |
+
0,
|
1636 |
+
17,
|
1637 |
+
2,
|
1638 |
+
"IMAGE"
|
1639 |
+
],
|
1640 |
+
[
|
1641 |
+
119,
|
1642 |
+
20,
|
1643 |
+
0,
|
1644 |
+
41,
|
1645 |
+
2,
|
1646 |
+
"IMAGE"
|
1647 |
+
],
|
1648 |
+
[
|
1649 |
+
120,
|
1650 |
+
4,
|
1651 |
+
0,
|
1652 |
+
15,
|
1653 |
+
0,
|
1654 |
+
"MODEL"
|
1655 |
+
],
|
1656 |
+
[
|
1657 |
+
121,
|
1658 |
+
4,
|
1659 |
+
1,
|
1660 |
+
15,
|
1661 |
+
1,
|
1662 |
+
"CLIP"
|
1663 |
+
],
|
1664 |
+
[
|
1665 |
+
122,
|
1666 |
+
15,
|
1667 |
+
0,
|
1668 |
+
14,
|
1669 |
+
0,
|
1670 |
+
"MODEL"
|
1671 |
+
],
|
1672 |
+
[
|
1673 |
+
123,
|
1674 |
+
15,
|
1675 |
+
1,
|
1676 |
+
14,
|
1677 |
+
1,
|
1678 |
+
"CLIP"
|
1679 |
+
],
|
1680 |
+
[
|
1681 |
+
124,
|
1682 |
+
14,
|
1683 |
+
0,
|
1684 |
+
16,
|
1685 |
+
0,
|
1686 |
+
"MODEL"
|
1687 |
+
],
|
1688 |
+
[
|
1689 |
+
125,
|
1690 |
+
14,
|
1691 |
+
1,
|
1692 |
+
16,
|
1693 |
+
1,
|
1694 |
+
"CLIP"
|
1695 |
+
],
|
1696 |
+
[
|
1697 |
+
127,
|
1698 |
+
8,
|
1699 |
+
0,
|
1700 |
+
59,
|
1701 |
+
1,
|
1702 |
+
"IMAGE"
|
1703 |
+
],
|
1704 |
+
[
|
1705 |
+
129,
|
1706 |
+
57,
|
1707 |
+
0,
|
1708 |
+
61,
|
1709 |
+
1,
|
1710 |
+
"CONDITIONING"
|
1711 |
+
],
|
1712 |
+
[
|
1713 |
+
130,
|
1714 |
+
6,
|
1715 |
+
0,
|
1716 |
+
57,
|
1717 |
+
0,
|
1718 |
+
"CONDITIONING"
|
1719 |
+
]
|
1720 |
+
],
|
1721 |
+
"groups": [
|
1722 |
+
{
|
1723 |
+
"title": "4X /2 _tile upscale 锐化 高清放大",
|
1724 |
+
"bounding": [
|
1725 |
+
144,
|
1726 |
+
134,
|
1727 |
+
1716,
|
1728 |
+
582
|
1729 |
+
],
|
1730 |
+
"color": "#3f789e",
|
1731 |
+
"font_size": 24,
|
1732 |
+
"locked": false
|
1733 |
+
},
|
1734 |
+
{
|
1735 |
+
"title": "Contrlolnet",
|
1736 |
+
"bounding": [
|
1737 |
+
621,
|
1738 |
+
-950,
|
1739 |
+
1107,
|
1740 |
+
466
|
1741 |
+
],
|
1742 |
+
"color": "#3f789e",
|
1743 |
+
"font_size": 24,
|
1744 |
+
"locked": false
|
1745 |
+
},
|
1746 |
+
{
|
1747 |
+
"title": "LORA",
|
1748 |
+
"bounding": [
|
1749 |
+
4,
|
1750 |
+
-457,
|
1751 |
+
774,
|
1752 |
+
237
|
1753 |
+
],
|
1754 |
+
"color": "#3f789e",
|
1755 |
+
"font_size": 24,
|
1756 |
+
"locked": false
|
1757 |
+
},
|
1758 |
+
{
|
1759 |
+
"title": "prompt",
|
1760 |
+
"bounding": [
|
1761 |
+
896,
|
1762 |
+
-431,
|
1763 |
+
325,
|
1764 |
+
387
|
1765 |
+
],
|
1766 |
+
"color": "#3f789e",
|
1767 |
+
"font_size": 24,
|
1768 |
+
"locked": false
|
1769 |
+
}
|
1770 |
+
],
|
1771 |
+
"config": {
|
1772 |
+
|
1773 |
+
},
|
1774 |
+
"extra": {
|
1775 |
+
"0246.VERSION": [
|
1776 |
+
0,
|
1777 |
+
0,
|
1778 |
+
4
|
1779 |
+
],
|
1780 |
+
"ds": {
|
1781 |
+
"scale": 0.7247295000000004,
|
1782 |
+
"offset": [
|
1783 |
+
-386.35423203542075,
|
1784 |
+
673.0381820629576
|
1785 |
+
]
|
1786 |
+
}
|
1787 |
+
},
|
1788 |
+
"version": 0.4
|
1789 |
+
}
|
DrawBridgeAPI/config_example.yaml
ADDED
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
civitai_setting: # civitai API token
|
2 |
+
token:
|
3 |
+
- You token here
|
4 |
+
model:
|
5 |
+
''
|
6 |
+
proxy:
|
7 |
+
-
|
8 |
+
a1111webui_setting: # sd_webui 设置
|
9 |
+
backend_url: # 后端地址
|
10 |
+
- http://127.0.0.1:7860
|
11 |
+
- http://127.0.0.1:7861
|
12 |
+
name: # 后端备注名称
|
13 |
+
- 后端1
|
14 |
+
- 后端2
|
15 |
+
auth: # 是否需要登录
|
16 |
+
- false
|
17 |
+
- false
|
18 |
+
username: # 用户名
|
19 |
+
- admin
|
20 |
+
- admin
|
21 |
+
password: # 密码
|
22 |
+
- admin
|
23 |
+
- admin
|
24 |
+
max_resolution: # 最大分辨率,这个功能没写,暂时不生效
|
25 |
+
- null
|
26 |
+
- 1572864
|
27 |
+
fal_ai_setting: # {"token": []}
|
28 |
+
token: #
|
29 |
+
- You token here
|
30 |
+
model:
|
31 |
+
''
|
32 |
+
replicate_setting: # {"token": []}
|
33 |
+
token: # https://replicate.com/black-forest-labs/flux-schnell
|
34 |
+
- You token here
|
35 |
+
model:
|
36 |
+
''
|
37 |
+
liblibai_setting:
|
38 |
+
# https://www.liblib.art/ # 按下F12 -> 应用 -> cookies -> https://www.liblib.art -> usertoken 的值 d812c12d83c640.....
|
39 |
+
token: #
|
40 |
+
- d812c12d83c640...
|
41 |
+
- 只要token填上了也算一个后端哦
|
42 |
+
- token3
|
43 |
+
# 模型id获取方法 https://www.liblib.art/sd 先选择喜欢的模型 先按下F12 再 生图
|
44 |
+
# 回到开发者控制台,网络选项 -> 找到名为 image 的请求,点击 负载 , 请求负载 找到 checkpointId
|
45 |
+
model: # 模型id
|
46 |
+
- 2332049
|
47 |
+
- 2676318
|
48 |
+
- 2675606
|
49 |
+
model_name: # 模型名字,仅用作标记
|
50 |
+
- "liblib.art/modelinfo/d2f55cf374a7431cac13382182aed20c"
|
51 |
+
- "liblib.art/modelinfo/5ecc3218f1ef483ab63eeb4e4cff30cc"
|
52 |
+
- "liblib.art/modelinfo/fe3aac47589d4a20b24d0a6b045d607e"
|
53 |
+
xl: # 是否为XL模型
|
54 |
+
- false
|
55 |
+
- true
|
56 |
+
- false
|
57 |
+
flux: # 是否为FLUX模型
|
58 |
+
- false
|
59 |
+
- false
|
60 |
+
- false
|
61 |
+
preference:
|
62 |
+
- pretags: # 内置prompt
|
63 |
+
1.5: # 1.5模式下的预设词条,上面为正面,下面为负面
|
64 |
+
- '' # prompt
|
65 |
+
- '' # negative prompt
|
66 |
+
xl: # xl 同上
|
67 |
+
- ""
|
68 |
+
- ""
|
69 |
+
flux:
|
70 |
+
- ''
|
71 |
+
- ''
|
72 |
+
steps: 20 # 步数
|
73 |
+
- pretags:
|
74 |
+
1.5:
|
75 |
+
- ''
|
76 |
+
- ''
|
77 |
+
xl:
|
78 |
+
- ""
|
79 |
+
- ""
|
80 |
+
flux:
|
81 |
+
- ''
|
82 |
+
- ''
|
83 |
+
steps: 20
|
84 |
+
- pretags:
|
85 |
+
1.5:
|
86 |
+
- ''
|
87 |
+
- ''
|
88 |
+
xl:
|
89 |
+
- ""
|
90 |
+
- ""
|
91 |
+
flux:
|
92 |
+
- ''
|
93 |
+
- ''
|
94 |
+
steps: 20
|
95 |
+
|
96 |
+
tusiart_setting:
|
97 |
+
# 注意,有两个必填项,一个是token,一个是referer
|
98 |
+
# https://tusiart.com/
|
99 |
+
# 按下F12 -> 应用 -> cookies -> https://tusiart.com -> ta_token_prod 的值 eyJhbGciOiJI....
|
100 |
+
token: #
|
101 |
+
- eyJhbGciOiJI....
|
102 |
+
model: # 例如 https://tusiart.com/models/756170434619145524 # 取后面的数字
|
103 |
+
- 708770380971558251
|
104 |
+
note:
|
105 |
+
- 备注
|
106 |
+
referer: # 你的用户首页! 点击右上角头像,复制链接 必填!!
|
107 |
+
- https://tusiart.com/u/759763664390847335
|
108 |
+
seaart_setting:
|
109 |
+
# https://www.seaart.ai/ # 登录 按下F12 -> 应用 -> cookies -> https://www.seaart.ai -> T 的值 eyJhbGciOiJI....
|
110 |
+
token:
|
111 |
+
- You token here
|
112 |
+
model:
|
113 |
+
-
|
114 |
+
yunjie_setting:
|
115 |
+
# https://www.yunjie.art/ # 登录 按下F12 -> 应用 -> cookies -> https://www.yunjie.art -> rayvision_aigc_token 的值 rsat:9IS5EH6vY
|
116 |
+
token:
|
117 |
+
- You token here
|
118 |
+
model:
|
119 |
+
-
|
120 |
+
note:
|
121 |
+
- 移动
|
122 |
+
|
123 |
+
comfyui_setting:
|
124 |
+
backend_url:
|
125 |
+
- http://10.147.20.155:8188
|
126 |
+
name:
|
127 |
+
- default
|
128 |
+
model:
|
129 |
+
- models\\1053-S.ckpt
|
130 |
+
default_workflows:
|
131 |
+
- 'sdbase_txt2img'
|
132 |
+
|
133 |
+
novelai_setting:
|
134 |
+
token:
|
135 |
+
- eyJhbGciOi...
|
136 |
+
model:
|
137 |
+
- nai-diffusion-3
|
138 |
+
|
139 |
+
midjourney_setting:
|
140 |
+
backend_url:
|
141 |
+
- http://192.168.5.206:8081
|
142 |
+
name:
|
143 |
+
- default-mj-api
|
144 |
+
auth_toekn:
|
145 |
+
- null
|
146 |
+
|
147 |
+
server_settings:
|
148 |
+
# 重点! 需要启动的后端, 有些后端你没配置的话依然启动会导致API报错(虽然API会将它锁定,之后请求就不会到它)
|
149 |
+
# 怎么数呢? 比如在这个配置文件中 civitai 的第一个token是 0 a1111 的第一个后端是 1 , 第二个是2
|
150 |
+
# 所以 enable_txt2img_backends: [0,1] 表示启动 civitai第一个token 和 a1111的第一个后端
|
151 |
+
# 再比如 enable_txt2img_backends: [3, 4, 5] 表示启动 liblib 的所有两个token 和 tusiart的第一个token
|
152 |
+
enable_txt2img_backends: [13]
|
153 |
+
enable_img2img_backends: [1]
|
154 |
+
enable_sdapi_backends: [1]
|
155 |
+
redis_server: # 必填 Redis服务器
|
156 |
+
- 127.0.0.1 # 地址
|
157 |
+
- 6379 # 端口
|
158 |
+
- null # redis 密码
|
159 |
+
- 4 # redis数据库编号
|
160 |
+
enable_nsfw_check:
|
161 |
+
false
|
162 |
+
save_image: # 是否直接保存图片
|
163 |
+
true
|
164 |
+
build_in_tagger:
|
165 |
+
false
|
166 |
+
llm_caption: # 使用llm用自然语言打标
|
167 |
+
enable:
|
168 |
+
false
|
169 |
+
clip:
|
170 |
+
google/siglip-so400m-patch14-384
|
171 |
+
llm:
|
172 |
+
unsloth/Meta-Llama-3.1-8B-bnb-4bit
|
173 |
+
image_adapter: # https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha/tree/main/wpkklhc6
|
174 |
+
image_adapter.pt
|
175 |
+
build_in_photoai:
|
176 |
+
exec_path:
|
177 |
+
"C:\\Program Files\\Topaz Labs LLC\\Topaz Photo AI\\tpai.exe"
|
178 |
+
proxy:
|
179 |
+
"http://127.0.0.1:7890"
|
180 |
+
|
181 |
+
start_gradio:
|
182 |
+
False
|
183 |
+
same_port_with_api:
|
184 |
+
False
|
185 |
+
prompt_audit:
|
186 |
+
enable:
|
187 |
+
False
|
188 |
+
site:
|
189 |
+
api.openai.com
|
190 |
+
api_key:
|
191 |
+
null
|
192 |
+
http_proxy:
|
193 |
+
null
|
194 |
+
|
195 |
+
|
196 |
+
backend_name_list: # 不要动!
|
197 |
+
- civitai
|
198 |
+
- a1111
|
199 |
+
- falai
|
200 |
+
- replicate
|
201 |
+
- liblibai
|
202 |
+
- tusiart
|
203 |
+
- seaart
|
204 |
+
- yunjie
|
205 |
+
- comfyui
|
206 |
+
- novelai
|
207 |
+
- midjourney
|
208 |
+
|
DrawBridgeAPI/locales/__init__.py
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gettext
|
3 |
+
|
4 |
+
locale_dir = os.path.join(os.path.dirname(__file__))
|
5 |
+
|
6 |
+
lang = gettext.translation('messages', localedir=locale_dir, languages=['zh'], fallback=True)
|
7 |
+
lang.install()
|
8 |
+
|
9 |
+
_ = lang.gettext
|
10 |
+
i18n = _
|
DrawBridgeAPI/locales/zh/LC_MESSAGES/messages.po
ADDED
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
msgid "Loading config file completed"
|
2 |
+
msgstr "加载配置文件完成"
|
3 |
+
|
4 |
+
msgid "Redis connection successful"
|
5 |
+
msgstr "Redis连接成功"
|
6 |
+
|
7 |
+
msgid "Exec TXT2IMG"
|
8 |
+
msgstr "开始进行文生图"
|
9 |
+
|
10 |
+
msgid "IMG2IMG Requires image to start"
|
11 |
+
msgstr "图生图需要图片来启动"
|
12 |
+
|
13 |
+
msgid "Exec IMG2IMG"
|
14 |
+
msgstr "开始进行图生图"
|
15 |
+
|
16 |
+
msgid "Caption Successful"
|
17 |
+
msgstr "打标成功"
|
18 |
+
|
19 |
+
msgid "Lock to backend has configured"
|
20 |
+
msgstr "设置已经锁定后端"
|
21 |
+
|
22 |
+
msgid "URL detected"
|
23 |
+
msgstr "检测到url"
|
24 |
+
|
25 |
+
msgid "Image download failed!"
|
26 |
+
msgstr "图片下载失败!"
|
27 |
+
|
28 |
+
msgid "Exec forwarding"
|
29 |
+
msgstr "开始进行转发"
|
30 |
+
|
31 |
+
msgid "Waiting for API initialization"
|
32 |
+
msgstr "请等待API初始化"
|
33 |
+
|
34 |
+
msgid "Loading LLM"
|
35 |
+
msgstr "LLM加载中"
|
36 |
+
|
37 |
+
msgid "LLM loading completed, waiting for command"
|
38 |
+
msgstr "LLM加载完成,等待命令"
|
39 |
+
|
40 |
+
msgid "Loading Checkpoint"
|
41 |
+
msgstr "模型加载中"
|
42 |
+
|
43 |
+
msgid "Checkpoint loading completed, waiting for command"
|
44 |
+
msgstr "模型加载完成,等待命令"
|
45 |
+
|
46 |
+
msgid "Server is ready!"
|
47 |
+
msgstr "服务器准备就绪!"
|
48 |
+
|
49 |
+
msgid "Manually select model"
|
50 |
+
msgstr "手动选择模型"
|
51 |
+
|
52 |
+
msgid "Backend select"
|
53 |
+
msgstr "已选择后端"
|
54 |
+
|
55 |
+
msgid "Backend locked"
|
56 |
+
msgstr "已锁定后端"
|
57 |
+
|
58 |
+
msgid "Starting backend selection"
|
59 |
+
msgstr "开始进行后端选择"
|
60 |
+
|
61 |
+
msgid "Backend %s is down"
|
62 |
+
msgstr "后端%s掉线"
|
63 |
+
|
64 |
+
msgid "Backend %s is failed or locked"
|
65 |
+
msgstr "后端%s出错或者锁定中"
|
66 |
+
|
67 |
+
msgid "No available backend"
|
68 |
+
msgstr "没有可用后端"
|
69 |
+
|
70 |
+
msgid "Backend: %s Average work time: %s seconds, Current tasks: %s"
|
71 |
+
msgstr "后端: %s 平均工作时间: %s秒, 现在进行中的任务: %s"
|
72 |
+
|
73 |
+
msgid "Extra time weight"
|
74 |
+
msgstr "额外的时间权重"
|
75 |
+
|
76 |
+
msgid "Backend %s is the fastest, has been selected"
|
77 |
+
msgstr "后端%s最快, 已经选择"
|
78 |
+
|
79 |
+
msgid "Task completed successfully"
|
80 |
+
msgstr "任务成功完成"
|
81 |
+
|
82 |
+
msgid "Task failed"
|
83 |
+
msgstr "任务失败"
|
84 |
+
|
85 |
+
msgid "Remaining tasks in the queue"
|
86 |
+
msgstr "队列中的剩余任务"
|
87 |
+
|
88 |
+
msgid "No remaining tasks in the queue"
|
89 |
+
msgstr "队列中已无任务"
|
90 |
+
|
91 |
+
msgid "Forwarding request"
|
92 |
+
msgstr "已转发请求"
|
93 |
+
|
94 |
+
msgid "Backend returned error"
|
95 |
+
msgstr "后端返回错误"
|
96 |
+
|
97 |
+
msgid "Comfyui Backend, not using built-in multi-image generation management"
|
98 |
+
msgstr "Comfyui后端, 不使用内置多图生成管理"
|
99 |
+
|
100 |
+
msgid "A1111 Backend, not using built-in multi-image generation management"
|
101 |
+
msgstr "A1111后端, 不使用内置多图生成管理"
|
102 |
+
|
103 |
+
msgid "Over maximum retry times, posting still failed"
|
104 |
+
msgstr "超过最大重试次数之后依然失败"
|
105 |
+
|
106 |
+
msgid "Request completed, took %s seconds"
|
107 |
+
msgstr "请求完成,共耗%s秒"
|
108 |
+
|
109 |
+
msgid "VRAM OOM detected, auto model unload and reload"
|
110 |
+
msgstr "检测到爆显存,执行自动模型释放并加载"
|
111 |
+
|
112 |
+
msgid "Get a respond image, processing"
|
113 |
+
msgstr "获取到返回图片,正在处理"
|
114 |
+
|
115 |
+
msgid "Request failed, error message:"
|
116 |
+
msgstr "请求失败,错误信息:"
|
117 |
+
|
118 |
+
msgid "Downloading image successful"
|
119 |
+
msgstr "图片下载成功"
|
120 |
+
|
121 |
+
msgid "Selected ComfyUI style"
|
122 |
+
msgstr "已选择ComfyUI工作流"
|
DrawBridgeAPI/ui/__init__.py
ADDED
File without changes
|
DrawBridgeAPI/utils/__init__.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
|
3 |
+
import httpx
|
4 |
+
from fastapi.exceptions import HTTPException
|
5 |
+
from ..base_config import init_instance
|
6 |
+
config = init_instance.config
|
7 |
+
import asyncio
|
8 |
+
|
9 |
+
|
10 |
+
async def http_request(
        method,
        target_url,
        headers=None,
        params=None,
        content=None,
        format=True  # NOTE: shadows the builtin, but kept — callers pass it by keyword
):
    """Perform a one-shot async HTTP request.

    Raises HTTPException(500) on any non-200 response. When ``format`` is
    truthy the parsed JSON body is returned, otherwise the raw httpx response.
    """
    async with httpx.AsyncClient() as session:

        resp = await session.request(
            method,
            target_url,
            headers=headers,
            params=params,
            content=content
        )

        if resp.status_code != 200:
            raise HTTPException(500)
        return resp.json() if format else resp
|
34 |
+
|
35 |
+
|
36 |
+
async def run_later(func, delay=1):
    """Schedule coroutine object *func* to be started *delay* seconds from now.

    Fire-and-forget: the created task is not awaited or returned.
    """
    event_loop = asyncio.get_running_loop()

    def _kickoff():
        # call_later only accepts plain callbacks, so wrap task creation.
        event_loop.create_task(func)

    event_loop.call_later(delay, _kickoff)
|
44 |
+
|
45 |
+
|
46 |
+
async def txt_audit(
    msg,
    prompt='''
    接下来请你对一些聊天内容进行审核,
    如果内容出现政治/暴恐内容(特别是我国的政治人物/或者和我国相关的政治)则请你输出<yes>,
    如果没有则输出<no>
    '''
):
    """Moderate *msg* against an OpenAI-compatible chat completion endpoint.

    Returns the model's verdict text with leading punctuation stripped.
    On any failure this returns "yes" so callers fail closed (content is
    treated as flagged when the audit service is unreachable).
    """

    from ..backend import Backend

    system = [
        {"role": "system",
         "content": prompt}
    ]

    user_messages = [{"role": "user", "content": msg}]

    try:
        # NOTE(review): Backend.http_request is called without await; if it is a
        # coroutine function this yields an unawaited coroutine and the subscript
        # below raises, routing us into the fail-closed branch. Confirm against
        # Backend's definition.
        resp = Backend.http_request(
            "POST",
            f"http://{config['prompt_audit']['site']}/v1/chat/completions",
            {"Authorization": config['prompt_audit']['api_key']},
            timeout=300,
            format=True,
            content=json.dumps(
                {
                    "model": "gpt-3.5-turbo",
                    "messages": system + user_messages,
                    "max_tokens": 4000,
                }
            )
        )
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Keep the fail-closed verdict for real errors only.
        return "yes"
    else:
        res: str = remove_punctuation(resp['choices'][0]['message']['content'].strip())
        return res
|
84 |
+
|
85 |
+
|
86 |
+
def remove_punctuation(text):
    """Strip leading ASCII punctuation from *text*.

    Returns "" when the string is empty or consists solely of punctuation.
    Only characters in string.punctuation are removed; whitespace and
    non-ASCII punctuation are left untouched.
    """
    import string
    # Idiom fix: lstrip with a character set is exactly the original index
    # loop — drop every leading char found in string.punctuation.
    return text.lstrip(string.punctuation)
|
DrawBridgeAPI/utils/custom_class.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fal_client.client import AsyncClient
|
2 |
+
from fal_client.auth import fetch_credentials
|
3 |
+
|
4 |
+
import httpx
|
5 |
+
import os
|
6 |
+
|
7 |
+
USER_AGENT = "fal-client/0.2.2 (python)"
|
8 |
+
|
9 |
+
|
10 |
+
class CustomAsyncClient(AsyncClient):
    """fal.ai AsyncClient variant with env-var key fallback and a custom UA."""

    def __init__(self, key=None, default_timeout=120.0):
        # Fall back to the FAL_KEY environment variable when no key is given.
        resolved_key = os.getenv("FAL_KEY") if key is None else key
        super().__init__(key=resolved_key, default_timeout=resolved_key and default_timeout or default_timeout)

    @property
    def _client(self):
        # Resolve credentials lazily; a fresh httpx client is built on every
        # property access, matching the upstream behavior this overrides.
        credentials = self.key
        if credentials is None:
            credentials = fetch_credentials()

        return httpx.AsyncClient(
            headers={
                "Authorization": f"Key {credentials}",
                "User-Agent": USER_AGENT,
            },
            timeout=self.default_timeout,
        )
|
DrawBridgeAPI/utils/exceptions.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
class DrawBridgeAPIException(Exception):
|
2 |
+
|
3 |
+
class DBAPIExceptions(Exception):
|
4 |
+
pass
|
5 |
+
|
6 |
+
class TokenExpired(DBAPIExceptions):
|
7 |
+
def __init__(self, message="Token expired."):
|
8 |
+
self.message = message
|
9 |
+
super().__init__(self.message)
|
10 |
+
|
11 |
+
class NeedRecaptcha(DBAPIExceptions):
|
12 |
+
def __init__(self, message="Need Recaptcha."):
|
13 |
+
self.message = message
|
14 |
+
super().__init__(self.message)
|
15 |
+
|
DrawBridgeAPI/utils/llm_caption_requirements.txt
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
torch
|
2 |
+
numpy
|
3 |
+
pillow
|
4 |
+
transformers>=4.43.3
|
5 |
+
huggingface_hub
|
6 |
+
protobuf
|
7 |
+
bitsandbytes
|
8 |
+
sentencepiece
|
9 |
+
accelerate
|
DrawBridgeAPI/utils/llm_captions.py
ADDED
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import base64
|
3 |
+
import warnings
|
4 |
+
warnings.simplefilter(action='ignore', category=UserWarning)
|
5 |
+
from torch import nn
|
6 |
+
from io import BytesIO
|
7 |
+
from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, \
|
8 |
+
AutoModelForCausalLM
|
9 |
+
import torch
|
10 |
+
import torch.amp.autocast_mode
|
11 |
+
from PIL import Image
|
12 |
+
import numpy as np
|
13 |
+
from io import BytesIO
|
14 |
+
|
15 |
+
from ..base_config import init_instance , setup_logger
|
16 |
+
from ..locales import _
|
17 |
+
|
18 |
+
llm_logger = setup_logger('[LLM-Caption]')
|
19 |
+
|
20 |
+
class JoyPipeline:
    """Plain container bundling the loaded JoyCaption model components."""

    # All model slots that clearCache() resets; `parent` (the owning loader)
    # is deliberately NOT part of this set and survives a cache clear.
    _COMPONENTS = ("clip_model", "clip_processor", "tokenizer",
                   "text_model", "image_adapter")

    def __init__(self):
        for slot in self._COMPONENTS:
            setattr(self, slot, None)
        self.parent = None

    def clearCache(self):
        """Drop references to every loaded model component."""
        for slot in self._COMPONENTS:
            setattr(self, slot, None)
|
35 |
+
|
36 |
+
|
37 |
+
class ImageAdapter(nn.Module):
    """Two-layer GELU MLP projecting vision features to the LLM hidden size."""

    def __init__(self, input_features: int, output_features: int):
        super().__init__()
        # Attribute names must stay stable: checkpoints are restored via
        # load_state_dict and key on "linear1"/"linear2".
        self.linear1 = nn.Linear(input_features, output_features)
        self.activation = nn.GELU()
        self.linear2 = nn.Linear(output_features, output_features)

    def forward(self, vision_outputs: torch.Tensor):
        """Map (..., input_features) activations to (..., output_features)."""
        return self.linear2(self.activation(self.linear1(vision_outputs)))
|
49 |
+
|
50 |
+
|
51 |
+
class Joy_caption_load:
    """Loads and caches the JoyCaption stack (CLIP vision tower, LLM, adapter).

    `gen()` is the entry point: it reloads every component onto the GPU
    whenever the requested checkpoint identifier changes.
    """

    def __init__(self):
        self.model = None  # identifier of the currently loaded checkpoint
        self.pipeline = JoyPipeline()
        self.pipeline.parent = self
        self.config = init_instance.config

    def loadCheckPoint(self):
        """(Re)load all pipeline components according to the config."""
        # Drop previously loaded components first.
        if self.pipeline is not None:
            self.pipeline.clearCache()

        # CLIP vision tower.
        model_id = self.config.server_settings['llm_caption']['clip']

        clip_processor = AutoProcessor.from_pretrained(model_id)
        # Fix: the original loaded this checkpoint a second time into an unused
        # local (`model = AutoModel.from_pretrained(model_id)`), doubling load
        # time and memory; that call was removed.
        clip_model = AutoModel.from_pretrained(
            model_id,
            trust_remote_code=True
        )

        clip_model = clip_model.vision_model
        clip_model.eval()
        clip_model.requires_grad_(False)
        clip_model.to("cuda")

        # LLM.
        model_path_llm = self.config.server_settings['llm_caption']['llm']
        tokenizer = AutoTokenizer.from_pretrained(model_path_llm, use_fast=False)
        assert isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)), \
            f"Tokenizer is of type {type(tokenizer)}"

        text_model = AutoModelForCausalLM.from_pretrained(model_path_llm, device_map="auto", trust_remote_code=True)
        text_model.eval()

        # Image adapter: projects CLIP hidden states into the LLM embedding space.
        image_adapter = ImageAdapter(clip_model.config.hidden_size,
                                     text_model.config.hidden_size)
        image_adapter.load_state_dict(
            torch.load(self.config.server_settings['llm_caption']['image_adapter'],
                       map_location="cpu", weights_only=True))
        image_adapter.eval()
        image_adapter.to("cuda")

        self.pipeline.clip_model = clip_model
        self.pipeline.clip_processor = clip_processor
        self.pipeline.tokenizer = tokenizer
        self.pipeline.text_model = text_model
        self.pipeline.image_adapter = image_adapter

    def clearCache(self):
        """Release all cached pipeline components."""
        if self.pipeline is not None:
            self.pipeline.clearCache()

    def gen(self, model):
        """Return ``(pipeline,)``, (re)loading checkpoints when *model* changed."""
        if self.model is None or self.model != model or self.pipeline is None:
            self.model = model
            self.loadCheckPoint()
        return (self.pipeline,)
|
113 |
+
|
114 |
+
|
115 |
+
class Joy_caption:
    """Caption an image by projecting CLIP vision features through an image
    adapter and prefixing them to a causal language model's input embeddings.

    NOTE(review): the ``joy_pipeline`` and ``image`` parameter defaults of
    :meth:`gen` are the *classes* ``JoyPipeline`` / ``Image``, not instances;
    callers are expected to always pass real objects.
    """

    def __init__(self):
        pass

    @staticmethod
    def tensor2pil(t_image: torch.Tensor) -> Image:
        # Scale a float tensor (assumed in [0, 1] — TODO confirm) to uint8
        # [0, 255] and convert it into a PIL image.
        return Image.fromarray(np.clip(255.0 * t_image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

    def gen(
            self,
            joy_pipeline=JoyPipeline,
            image=Image,
            prompt="A descriptive caption for this image",
            max_new_tokens=300,
            temperature=0.5,
            cache=False
    ):
        """Generate a text caption for ``image``.

        :param joy_pipeline: loaded JoyPipeline holding CLIP, tokenizer, LLM
            and the image adapter.
        :param image: PIL image to caption.
        :param prompt: instruction text prepended (as embeddings) to the image.
        :param max_new_tokens: generation length cap.
        :param temperature: sampling temperature for ``generate``.
        :param cache: when False, the parent loader's cache is cleared after
            generation (frees GPU memory between calls).
        :return: one-tuple ``(caption_string,)``.
        """
        # Lazily (re)load model weights if the pipeline was cleared earlier.
        if joy_pipeline.clip_processor == None:
            joy_pipeline.parent.loadCheckPoint()

        clip_processor = joy_pipeline.clip_processor
        tokenizer = joy_pipeline.tokenizer
        clip_model = joy_pipeline.clip_model
        image_adapter = joy_pipeline.image_adapter
        text_model = joy_pipeline.text_model

        input_image = image

        # Preprocess image into CLIP pixel values.
        pImge = clip_processor(images=input_image, return_tensors='pt').pixel_values
        pImge = pImge.to('cuda')

        # Tokenize the prompt (no special tokens: BOS is inserted manually below).
        prompt = tokenizer.encode(prompt, return_tensors='pt', padding=False, truncation=False,
                                  add_special_tokens=False)
        # Embed image: the adapter consumes the second-to-last CLIP hidden state.
        with torch.amp.autocast_mode.autocast('cuda', enabled=True):
            vision_outputs = clip_model(pixel_values=pImge, output_hidden_states=True)
            image_features = vision_outputs.hidden_states[-2]
            embedded_images = image_adapter(image_features)
            embedded_images = embedded_images.to('cuda')

        # Embed prompt tokens with the LLM's own embedding table.
        prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))
        assert prompt_embeds.shape == (1, prompt.shape[1],
                                       text_model.config.hidden_size), f"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}"
        embedded_bos = text_model.model.embed_tokens(
            torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))

        # Construct prompts: [BOS] + image embeddings + instruction embeddings.
        inputs_embeds = torch.cat([
            embedded_bos.expand(embedded_images.shape[0], -1, -1),
            embedded_images.to(dtype=embedded_bos.dtype),
            prompt_embeds.expand(embedded_images.shape[0], -1, -1),
        ], dim=1)

        # Matching token ids; image positions are filled with zeros since only
        # their embeddings matter to `generate`.
        input_ids = torch.cat([
            torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),
            torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),
            prompt,
        ], dim=1).to('cuda')
        attention_mask = torch.ones_like(input_ids)

        generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask,
                                           max_new_tokens=max_new_tokens, do_sample=True, top_k=10,
                                           temperature=temperature, suppress_tokens=None)

        # Trim off the prompt (generate echoes the input ids first).
        generate_ids = generate_ids[:, input_ids.shape[1]:]
        if generate_ids[0][-1] == tokenizer.eos_token_id:
            generate_ids = generate_ids[:, :-1]

        caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]
        r = caption.strip()

        # Optionally free GPU memory once the caption is produced.
        if cache == False:
            joy_pipeline.parent.clearCache()

        return (r,)
|
196 |
+
|
197 |
+
|
198 |
+
class JoyCaptionHandler:
    """Owns a loaded JoyPipeline + Joy_caption pair and exposes an async
    captioning entry point for the server."""

    def __init__(self, config):
        self.config = config
        self.pipeline, self.joy_caption = self._initialize()

    def _initialize(self):
        """Load the CLIP/LLM captioning pipeline once at start-up."""
        llm_logger.info(_("Loading LLM"))
        joy_caption_load = Joy_caption_load()
        model_path = self.config.server_settings['llm_caption']['llm']
        pipeline, = joy_caption_load.gen(model_path)
        joy_caption = Joy_caption()
        llm_logger.info(_("LLM loading completed, waiting for command"))
        return pipeline, joy_caption

    async def get_caption(self, image, ntags=None):
        """Caption a base64-encoded PNG.

        :param image: base64 string, optionally with a ``data:image/png;base64,``
            data-URI prefix.
        :param ntags: tag names the model is asked not to describe.
        :return: the caption string.
        """
        # Avoid the shared mutable-default pitfall of `ntags=[]`.
        ntags = ntags or []

        # BUG FIX: `image` is a str (it is str.replace()'d just below), so the
        # prefix must be a str too — the original bytes literal
        # b"data:image/png;base64," raised TypeError on every data-URI input.
        if image.startswith("data:image/png;base64,"):
            image = image.replace("data:image/png;base64,", "")
        image = Image.open(BytesIO(base64.b64decode(image))).convert(mode="RGB")

        extra_ = f"do not describe {','.join(ntags)} if it exist" if ntags else ''
        loop = asyncio.get_event_loop()

        # Joy_caption.gen blocks on GPU work; run it off the event loop.
        caption = await loop.run_in_executor(
            None,
            self.joy_caption.gen,
            self.pipeline,
            image,
            f"A descriptive caption for this image, do not describe a signature or text in the image,{extra_}",
            300,
            0.5,
            True
        )

        return caption[0]
|
232 |
+
|
233 |
+
|
234 |
+
# Module-level bootstrap: only instantiate the (heavy, GPU-loading) caption
# handler when the feature is switched on in the server configuration.
config = init_instance.config
if config.server_settings['llm_caption']['enable']:
    joy_caption_handler = JoyCaptionHandler(config)
|
DrawBridgeAPI/utils/request_model.py
ADDED
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import field
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import random

from pydantic import BaseModel, Field, conint
|
6 |
+
|
7 |
+
|
8 |
+
class RequetModelClass(BaseModel):
    """Common base for all request payload models.

    NOTE: the name is a typo for ``RequestModelClass``; it is kept as-is
    because other modules import it under this spelling.
    """
    pass
|
10 |
+
|
11 |
+
|
12 |
+
class Txt2ImgRequest(RequetModelClass):
    """Payload for the A1111-webui-compatible txt2img endpoint.

    Field names and defaults mirror stable-diffusion-webui's API schema.
    """
    prompt: Optional[str] = ""
    negative_prompt: Optional[str] = ""
    styles: List[str] = []
    # BUG FIX: the original `= random.randint(...)` defaults were evaluated
    # once at class-definition time, so every request in the process shared
    # the same "random" seed.  default_factory draws a fresh seed per request.
    seed: int = Field(default_factory=lambda: random.randint(0, 4294967295))
    subseed: int = Field(default_factory=lambda: random.randint(0, 4294967295))
    subseed_strength: float = 0
    seed_resize_from_h: int = -1
    seed_resize_from_w: int = -1
    sampler_name: str = "Euler a"
    batch_size: int = 1
    n_iter: int = 1
    steps: int = 20
    cfg_scale: float = 7
    width: int = 512
    height: int = 512
    restore_faces: bool = False
    tiling: bool = False
    do_not_save_samples: bool = False
    do_not_save_grid: bool = False
    eta: float = 0
    denoising_strength: float = 1
    s_min_uncond: float = 0
    s_churn: float = 0
    s_tmax: float = 0
    s_tmin: float = 0
    s_noise: float = 0
    override_settings: Dict[str, Any] = {}
    override_settings_restore_afterwards: bool = False
    refiner_checkpoint: str = ""
    refiner_switch_at: int = 0
    disable_extra_networks: bool = False
    comments: Dict[str, Any] = {}
    enable_hr: bool = False
    firstphase_width: int = 0
    firstphase_height: int = 0
    hr_scale: float = 2
    hr_upscaler: str = ""
    hr_second_pass_steps: int = 10
    hr_resize_x: int = 0
    hr_resize_y: int = 0
    hr_checkpoint_name: str = ""
    hr_sampler_name: str = ""
    hr_prompt: str = ""
    hr_negative_prompt: str = ""
    sampler_index: str = "Euler a"
    script_name: str = ""
    script_args: List[Any] = []
    send_images: bool = True
    save_images: bool = True
    alwayson_scripts: Dict[str, Any] = {}
    scheduler: str = "Automatic"
|
64 |
+
|
65 |
+
|
66 |
+
class Img2ImgRequest(RequetModelClass):
    """Payload for the A1111-webui-compatible img2img endpoint.

    Field names and defaults mirror stable-diffusion-webui's API schema.
    """
    prompt: Optional[str] = ""
    negative_prompt: Optional[str] = ""
    styles: List[str] = []
    # BUG FIX: per-request random seeds via default_factory; the original
    # module-load-time `random.randint(...)` froze one seed for the process.
    seed: int = Field(default_factory=lambda: random.randint(0, 4294967295))
    subseed: int = Field(default_factory=lambda: random.randint(0, 4294967295))
    subseed_strength: float = 0
    seed_resize_from_h: int = -1
    seed_resize_from_w: int = -1
    sampler_name: str = "Euler a"
    batch_size: int = 1
    n_iter: int = 1
    steps: int = 50
    cfg_scale: float = 7
    width: int = 512
    height: int = 512
    restore_faces: bool = False
    tiling: bool = False
    do_not_save_samples: bool = False
    do_not_save_grid: bool = False
    eta: float = 0
    denoising_strength: float = 0.75
    s_min_uncond: float = 0
    s_churn: float = 0
    s_tmax: float = 0
    s_tmin: float = 0
    s_noise: float = 0
    override_settings: Dict[str, Any] = {}
    override_settings_restore_afterwards: bool = False
    refiner_checkpoint: str = ""
    refiner_switch_at: int = 0
    disable_extra_networks: bool = False
    comments: Dict[str, Any] = {}
    init_images: List[str] = [""]
    resize_mode: int = 0
    image_cfg_scale: float = 0
    # FIX: annotated Optional so the None default is actually valid.
    mask: Optional[str] = None
    mask_blur_x: int = 4
    mask_blur_y: int = 4
    mask_blur: int = 0
    inpainting_fill: int = 0
    inpaint_full_res: bool = True
    inpaint_full_res_padding: int = 0
    inpainting_mask_invert: int = 0
    initial_noise_multiplier: float = 0
    latent_mask: str = ""
    sampler_index: str = "Euler a"
    include_init_images: bool = False
    script_name: str = ""
    script_args: List[Any] = []
    send_images: bool = True
    save_images: bool = True
    alwayson_scripts: Dict[str, Any] = {}
    scheduler: str = "Automatic"
    # Extension fields below
|
121 |
+
|
122 |
+
|
123 |
+
class TaggerRequest(RequetModelClass):
    """Payload for the built-in wd14 tagger endpoint."""
    # BUG FIX: the original lines ended with stray commas
    # (`image: str = '',`), which turned the defaults into one-element
    # *tuples* (('',), (0.35,)) instead of the intended scalars.
    image: str = ''
    model: Optional[str] = 'wd14-vit-v2'
    threshold: Optional[float] = 0.35
    exclude_tags: Optional[List[str]] = []
|
128 |
+
|
129 |
+
|
130 |
+
class TopazAiRequest(BaseModel):
    """Payload for invoking the Topaz Photo AI command-line interface."""
    image: Optional[str] = None
    # BUG FIX: `Optional[str or Path]` evaluated `str or Path` at
    # class-creation time, which is just `str`; a real Union keeps Path
    # inputs valid.  The explicit None default keeps the field optional.
    input_folder: Optional[Union[str, Path]] = None
    output_folder: Optional[str] = None
    overwrite: Optional[bool] = False
    recursive: Optional[bool] = False
    format: Optional[str] = "preserve"  # one of: jpg, jpeg, png, tif, tiff, dng, preserve
    quality: Optional[conint(ge=0, le=100)] = 95  # JPEG quality, 0-100
    compression: Optional[conint(ge=0, le=10)] = 2  # PNG compression, 0-10
    bit_depth: Optional[conint(strict=True, ge=8, le=16)] = 16  # TIFF bit depth, 8 or 16
    tiff_compression: Optional[str] = "zip"  # one of: none, lzw, zip
    show_settings: Optional[bool] = False
    skip_processing: Optional[bool] = False
    verbose: Optional[bool] = False
    upscale: Optional[bool] = None
    noise: Optional[bool] = None
    sharpen: Optional[bool] = None
    lighting: Optional[bool] = None
    color: Optional[bool] = None
|
149 |
+
|
150 |
+
|
151 |
+
class SetConfigRequest(BaseModel):
    """Free-form configuration-update payload."""
    class Config:
        # Accept arbitrary extra keys so any setting can be forwarded.
        extra = "allow"
|
DrawBridgeAPI/utils/shared.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pathlib import Path
|
2 |
+
import os
|
3 |
+
|
4 |
+
# Absolute path to the bundled ComfyUI workflow JSON files, resolved relative
# to this module so it works regardless of the process's working directory.
# (pathlib replaces the original os.path string concatenation.)
PATH_TO_COMFYUI_WORKFLOWS = Path(__file__).resolve().parent.parent / "comfyui_workflows"
|
5 |
+
|
DrawBridgeAPI/utils/tagger-requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
pandas
|
2 |
+
numpy
|
3 |
+
pillow
|
4 |
+
huggingface_hub
|
5 |
+
onnxruntime
|
DrawBridgeAPI/utils/tagger.py
ADDED
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import asyncio
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
import numpy as np
|
6 |
+
import base64
|
7 |
+
|
8 |
+
from typing import Tuple, List, Dict
|
9 |
+
from io import BytesIO
|
10 |
+
from PIL import Image
|
11 |
+
|
12 |
+
from pathlib import Path
|
13 |
+
from huggingface_hub import hf_hub_download
|
14 |
+
|
15 |
+
from ..base_config import setup_logger, init_instance
|
16 |
+
from ..locales import _
|
17 |
+
|
18 |
+
|
19 |
+
# Force CPU inference for the ONNX tagger; set False to allow CUDA.
use_cpu = True
# TensorFlow-style device string; appears unused by the ONNX path below,
# which derives its execution providers from `use_cpu` directly.
tf_device_name = '/gpu:0' if not use_cpu else '/cpu:0'

wd_logger = setup_logger('[TAGGER]')
# https://github.com/toriato/stable-diffusion-webui-wd14-tagger
|
24 |
+
|
25 |
+
|
26 |
+
class Interrogator:
    """Base class for image → (ratings, tags) confidence interrogators."""

    @staticmethod
    def postprocess_tags(
            tags: Dict[str, float],
            threshold=0.35,
            additional_tags: List[str] = None,
            exclude_tags: List[str] = None,
            sort_by_alphabetical_order=False,
            add_confident_as_weight=False,
            replace_underscore=False,
            replace_underscore_excludes: List[str] = None,
            escape_tag=False
    ) -> Dict[str, float]:
        """Filter, sort and reformat raw tag confidences.

        :param tags: mapping of tag name -> confidence (not mutated).
        :param threshold: minimum confidence to keep a tag.
        :param additional_tags: tags injected with confidence 1.0.
        :param exclude_tags: tags dropped regardless of confidence.
        :param sort_by_alphabetical_order: sort by name instead of confidence.
        :param add_confident_as_weight: emit ``(tag:confidence)`` keys.
        :param replace_underscore: turn ``_`` into spaces (webui convention).
        :param replace_underscore_excludes: tags exempt from the replacement.
        :param escape_tag: backslash-escape remaining underscores.
        :return: processed tag -> confidence mapping.
        """
        # Avoid shared mutable defaults; copy so the caller's dict is not
        # mutated by the additional-tag injection below.
        additional_tags = additional_tags or []
        exclude_tags = exclude_tags or []
        replace_underscore_excludes = replace_underscore_excludes or []
        tags = dict(tags)

        for t in additional_tags:
            tags[t] = 1.0

        # Sort (by confidence desc, or name asc) and apply threshold/excludes.
        tags = {
            t: c
            for t, c in sorted(
                tags.items(),
                key=lambda i: i[0 if sort_by_alphabetical_order else 1],
                reverse=not sort_by_alphabetical_order
            )
            if (
                c >= threshold
                and t not in exclude_tags
            )
        }

        new_tags = []
        for tag in list(tags):
            new_tag = tag

            if replace_underscore and tag not in replace_underscore_excludes:
                new_tag = new_tag.replace('_', ' ')

            if escape_tag:
                # BUG FIX: escaping previously restarted from the original
                # `tag`, silently discarding the underscore replacement above.
                new_tag = new_tag.replace('_', '\\_')

            if add_confident_as_weight:
                new_tag = f'({new_tag}:{tags[tag]})'

            new_tags.append((new_tag, tags[tag]))

        return dict(new_tags)

    def __init__(self, name: str) -> None:
        self.name = name

    def load(self):
        """Load model weights; implemented by subclasses."""
        raise NotImplementedError()

    def unload(self) -> bool:
        """Drop model/tag-table references; True if a model was actually freed."""
        unloaded = False

        if hasattr(self, 'model') and self.model is not None:
            del self.model
            unloaded = True
            print(f'Unloaded {self.name}')

        if hasattr(self, 'tags'):
            del self.tags

        return unloaded

    def interrogate(
            self,
            image: Image
    ) -> Tuple[
        Dict[str, float],  # rating confidents
        Dict[str, float]  # tag confidents
    ]:
        raise NotImplementedError()
|
100 |
+
|
101 |
+
|
102 |
+
class WaifuDiffusionInterrogator(Interrogator):
    """WD14-style tagger backed by an ONNX model fetched from Hugging Face Hub."""

    def __init__(
            self,
            name: str,
            model_path='model.onnx',
            tags_path='selected_tags.csv',
            **kwargs
    ) -> None:
        # `kwargs` is forwarded verbatim to hf_hub_download
        # (repo_id, revision, ...).
        super().__init__(name)
        self.model_path = model_path
        self.tags_path = tags_path
        self.kwargs = kwargs

    def download(self) -> Tuple[os.PathLike, os.PathLike]:
        """Fetch (and locally cache) the model and tag-list files.

        :return: (local model path, local tags CSV path).
        """
        wd_logger.info(f"Loading {self.name} model file from {self.kwargs['repo_id']}")

        model_path = Path(hf_hub_download(
            **self.kwargs, filename=self.model_path))
        tags_path = Path(hf_hub_download(
            **self.kwargs, filename=self.tags_path))
        return model_path, tags_path

    def load(self) -> None:
        """Create the ONNX inference session and load the tag table."""
        model_path, tags_path = self.download()

        # Imported lazily so the module stays importable when onnxruntime is
        # absent and the tagger feature is disabled.
        from onnxruntime import InferenceSession

        # Prefer CUDA when allowed; fall back to CPU.
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
        if use_cpu:
            providers.pop(0)

        self.model = InferenceSession(str(model_path), providers=providers)

        wd_logger.info(f'Loaded {self.name} model from {model_path}')

        self.tags = pd.read_csv(tags_path)

    def interrogate(
            self,
            image: Image
    ) -> Tuple[
        Dict[str, float],  # rating confidents
        Dict[str, float]  # tag confidents
    ]:
        """Run the tagger on one PIL image.

        :return: (rating confidences, tag confidences) dicts.
        """
        # Lazy-load on first use (e.g. after unload()).
        if not hasattr(self, 'model') or self.model is None:
            self.load()

        # Model input is square: (batch, height, width, channels).
        _, height, _, _ = self.model.get_inputs()[0].shape

        # Flatten any transparency onto a white background.
        image = image.convert('RGBA')
        new_image = Image.new('RGBA', image.size, 'WHITE')
        new_image.paste(image, mask=image)
        image = new_image.convert('RGB')
        image = np.asarray(image)

        # RGB -> BGR channel order for the WD14 models.
        image = image[:, :, ::-1]

        # Emulate dbimutils' make_square and smart_resize behaviour.
        image = self.make_square(image, height)
        image = self.smart_resize(image, height)
        image = image.astype(np.float32)
        image = np.expand_dims(image, 0)

        input_name = self.model.get_inputs()[0].name
        label_name = self.model.get_outputs()[0].name
        confidents = self.model.run([label_name], {input_name: image})[0]

        tags = self.tags[:][['name']]
        tags['confidents'] = confidents[0]

        # The first four rows of selected_tags.csv are the rating categories.
        ratings = dict(tags[:4].values)
        tags = dict(tags[4:].values)

        return ratings, tags

    @staticmethod
    def make_square(image, size):
        # Scale `image` (H,W,C ndarray) to fit a size x size canvas, centered.
        # NOTE(review): old_size is (height, width) but PIL's resize/paste
        # expect (width, height); for non-square inputs the axes appear
        # swapped — confirm against the original dbimutils implementation.
        old_size = image.shape[:2]
        ratio = float(size) / max(old_size)
        new_size = tuple([int(x * ratio) for x in old_size])
        image = Image.fromarray(image)
        image = image.resize(new_size, Image.LANCZOS)
        new_image = Image.new("RGB", (size, size))
        new_image.paste(image, ((size - new_size[0]) // 2,
                                (size - new_size[1]) // 2))
        return np.array(new_image)

    @staticmethod
    def smart_resize(image, size):
        # Force-resize an ndarray image to exactly size x size (LANCZOS).
        image = Image.fromarray(image)
        image = image.resize((size, size), Image.LANCZOS)
        return np.array(image)
|
194 |
+
|
195 |
+
|
196 |
+
class WaifuDiffusionTaggerHandler:
    """Loads a WaifuDiffusionInterrogator at start-up and serves async tagging."""

    def __init__(self, name, repo_id, revision, model_path, tags_path):
        self.name = name
        self.repo_id = repo_id
        self.revision = revision
        self.model_path = model_path
        self.tags_path = tags_path
        self.wd_instance = self._initialize()

    def _initialize(self):
        """Build the interrogator and eagerly load its checkpoint."""
        wd_instance = WaifuDiffusionInterrogator(
            name=self.name,
            repo_id=self.repo_id,
            revision=self.revision,
            model_path=self.model_path,
            tags_path=self.tags_path
        )
        wd_logger.info(_("Loading Checkpoint"))
        wd_instance.load()
        wd_logger.info(_("Checkpoint loading completed, waiting for command"))
        return wd_instance

    async def tagger_main(self, base64_img, threshold, ntags=None, audit=False, ratings=False):
        """Tag a base64-encoded image.

        :param base64_img: base64 string, optionally with a data-URI prefix.
        :param threshold: confidence threshold for kept tags.
        :param ntags: extra tag names to exclude from the result.
        :param audit: return a bool NSFW verdict instead of tags.
        :param ratings: return only the rating-confidence dict.
        """
        ntags = ntags or []  # avoid the shared mutable-default pitfall
        # BUG FIX: the boolean `ratings` flag was clobbered by the
        # `ratings, tags = ...` unpack below; the (always non-empty) ratings
        # dict then made `if ratings:` true, so the function *always*
        # returned early.  Capture the flag before it is overwritten.
        want_ratings = ratings

        if base64_img.startswith("data:image/png;base64,"):
            base64_img = base64_img.replace("data:image/png;base64,", "")

        image_data = base64.b64decode(base64_img)
        image = Image.open(BytesIO(image_data))

        # The ONNX inference is blocking; run it off the event loop.
        loop = asyncio.get_event_loop()
        ratings, tags = await loop.run_in_executor(
            None,
            self.wd_instance.interrogate,
            image
        )
        if want_ratings:
            return ratings
        if audit:
            # NSFW audit: flag when the top-confidence rating category is
            # "questionable" or "explicit".
            top_rating = max(ratings, key=ratings.get)
            return top_rating in ("questionable", "explicit")

        # Filter/weight the raw tag confidences.
        processed_tags = Interrogator.postprocess_tags(
            tags=tags,
            threshold=threshold,
            additional_tags=['best quality', 'highres'],
            exclude_tags=['lowres'] + ntags,
            sort_by_alphabetical_order=False,
            add_confident_as_weight=True,
            replace_underscore=True,
            replace_underscore_excludes=[],
            escape_tag=False
        )

        def strip_weight(tag_dict):
            # Undo the "(tag:weight)" wrapping to recover plain tag names.
            cleaned = {}
            for key, value in tag_dict.items():
                cleaned[key.strip('()').split(':')[0]] = value
            return cleaned

        return {**ratings, **strip_weight(processed_tags)}
|
262 |
+
|
263 |
+
|
264 |
+
# Module-level bootstrap: instantiate the tagger only when enabled in the
# server config, since loading the ONNX checkpoint is expensive.
config = init_instance.config
if config.server_settings['build_in_tagger']:
    wd_tagger_handler = WaifuDiffusionTaggerHandler(
        name='WaifuDiffusion',
        repo_id='SmilingWolf/wd-v1-4-convnextv2-tagger-v2',
        revision='v2.0',
        model_path='model.onnx',
        tags_path='selected_tags.csv'
    )
|
DrawBridgeAPI/utils/topaz.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import subprocess
|
2 |
+
from ..base_config import init_instance, setup_logger
|
3 |
+
|
4 |
+
topazai_logger = setup_logger('[TopaAI]')


def run_tpai(
        input_folder, output_folder=None, overwrite=False, recursive=False,
        format="preserve", quality=95, compression=2, bit_depth=16,
        tiff_compression="zip", show_settings=False, skip_processing=False,
        verbose=False, upscale=None, noise=None, sharpen=None,
        lighting=None, color=None, **kwargs
):
    """Run the Topaz Photo AI CLI over `input_folder` and return
    (stdout, stderr, returncode).

    NOTE(review): the command is built as a *list of pre-quoted strings* and
    executed with shell=True — that combination only behaves as intended on
    Windows (where subprocess joins the list via list2cmdline); on POSIX only
    the first element would run.  Confirm this tool is Windows-only.
    """
    # Base command: the configured tpai executable plus the input folder.
    command = [rf'"{init_instance.config.server_settings["build_in_photoai"]["exec_path"]}"', f'"{input_folder}"']

    # Output folder
    if output_folder:
        command.extend(["--output", f'"{output_folder}"'])

    # Overwrite existing files
    if overwrite:
        command.append("--overwrite")

    # Recurse into subfolders
    if recursive:
        command.append("--recursive")

    # File format options
    if format:
        command.extend(["--format", format])
    if quality is not None:
        command.extend(["--quality", str(quality)])
    if compression is not None:
        command.extend(["--compression", str(compression)])
    if bit_depth is not None:
        command.extend(["--bit-depth", str(bit_depth)])
    if tiff_compression:
        command.extend(["--tiff-compression", tiff_compression])

    # Debug options
    if show_settings:
        command.append("--showSettings")
    if skip_processing:
        command.append("--skipProcessing")
    if verbose:
        command.append("--verbose")

    # Enhancement toggles (experimental): only emitted when explicitly set,
    # so None means "use the application default".
    if upscale is not None:
        command.extend(["--upscale", f"enabled={str(upscale).lower()}"])
    if noise is not None:
        command.extend(["--noise", f"enabled={str(noise).lower()}"])
    if sharpen is not None:
        command.extend(["--sharpen", f"enabled={str(sharpen).lower()}"])
    if lighting is not None:
        command.extend(["--lighting", f"enabled={str(lighting).lower()}"])
    if color is not None:
        command.extend(["--color", f"enabled={str(color).lower()}"])

    # Log and execute the command.
    topazai_logger.info(str(" ".join(command)))
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    # Return decoded output, ignoring undecodable characters.
    return result.stdout.decode(errors='ignore'), result.stderr.decode(errors='ignore'), result.returncode
|
66 |
+
|