Verdant committed
Commit d249643 · 1 Parent(s): 0c590a3

Upload 13 files

mikazuki/__pycache__/app.cpython-310.pyc ADDED
Binary file (4.03 kB).
 
mikazuki/__pycache__/models.cpython-310.pyc ADDED
Binary file (888 Bytes).
 
mikazuki/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.42 kB).
 
mikazuki/app.py ADDED
@@ -0,0 +1,113 @@
+ import json
+ import os
+ import subprocess
+ import sys
+ from datetime import datetime
+ from threading import Lock
+
+ import starlette.responses as starlette_responses
+ from fastapi import BackgroundTasks, FastAPI, Request
+ from fastapi.responses import FileResponse
+ from fastapi.staticfiles import StaticFiles
+
+ import toml
+
+ from mikazuki.models import TaggerInterrogateRequest
+ from mikazuki.tagger.interrogator import WaifuDiffusionInterrogator, on_interrogate
+
+ app = FastAPI()
+ lock = Lock()
+ interrogator = WaifuDiffusionInterrogator('wd14-convnextv2-v2', repo_id='SmilingWolf/wd-v1-4-convnextv2-tagger-v2', revision='v2.0')
+
+ # fix mimetype errors on some systems
+ _origin_guess_type = starlette_responses.guess_type
+
+
+ def _hooked_guess_type(*args, **kwargs):
+     url = args[0]
+     r = _origin_guess_type(*args, **kwargs)
+     if url.endswith(".js"):
+         r = ("application/javascript", None)
+     elif url.endswith(".css"):
+         r = ("text/css", None)
+     return r
+
+
+ starlette_responses.guess_type = _hooked_guess_type
+
+
+ def run_train(toml_path: str):
+     print(f"Training started with config file / 训练开始,使用配置文件: {toml_path}")
+     args = [
+         sys.executable, "-m", "accelerate.commands.launch", "--num_cpu_threads_per_process", "8",
+         "./sd-scripts/train_network.py",
+         "--config_file", toml_path,
+     ]
+     try:
+         result = subprocess.run(args, env=os.environ)
+         if result.returncode != 0:
+             print("Training failed / 训练失败")
+         else:
+             print("Training finished / 训练完成")
+     except Exception as e:
+         print(f"An error occurred when training / 创建训练进程时出现致命错误: {e}")
+     finally:
+         lock.release()
+
+
+ @app.middleware("http")
+ async def add_cache_control_header(request, call_next):
+     response = await call_next(request)
+     response.headers["Cache-Control"] = "max-age=0"
+     return response
+
+
+ @app.post("/api/run")
+ async def create_toml_file(request: Request, background_tasks: BackgroundTasks):
+     acquired = lock.acquire(blocking=False)
+
+     if not acquired:
+         print("Training is already running / 已有正在进行的训练")
+         return {"status": "fail", "detail": "Training is already running"}
+
+     timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
+     toml_file = os.path.join(os.getcwd(), "toml", "autosave", f"{timestamp}.toml")
+     toml_data = await request.body()
+     j = json.loads(toml_data.decode("utf-8"))
+     with open(toml_file, "w") as f:
+         f.write(toml.dumps(j))
+     background_tasks.add_task(run_train, toml_file)
+     return {"status": "success"}
+
+
+ @app.post("/api/interrogate")
+ async def run_interrogate(req: TaggerInterrogateRequest, background_tasks: BackgroundTasks):
+     background_tasks.add_task(on_interrogate,
+                               image=None,
+                               batch_input_glob=req.path,
+                               batch_input_recursive=False,
+                               batch_output_dir="",
+                               batch_output_filename_format="[name].[output_extension]",
+                               batch_output_action_on_conflict=req.batch_output_action_on_conflict,
+                               batch_remove_duplicated_tag=True,
+                               batch_output_save_json=False,
+                               interrogator=interrogator,
+                               threshold=req.threshold,
+                               additional_tags=req.additional_tags,
+                               exclude_tags=req.exclude_tags,
+                               sort_by_alphabetical_order=False,
+                               add_confident_as_weight=False,
+                               replace_underscore=req.replace_underscore,
+                               replace_underscore_excludes=req.replace_underscore_excludes,
+                               escape_tag=req.escape_tag,
+                               unload_model_after_running=True
+                               )
+     return {"status": "success"}
+
+
+ @app.get("/")
+ async def index():
+     return FileResponse("./frontend/dist/index.html")
+
+
+ app.mount("/", StaticFiles(directory="frontend/dist"), name="static")
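A minimal client sketch for the two endpoints above, assuming the app is served locally on port 8000 (the launch command is not part of this commit), that the toml/autosave directory exists, and that the requests package is available; the training keys shown are illustrative placeholders, not values taken from this commit.

# Hypothetical client for /api/run and /api/interrogate; the server address, port,
# and all config values below are assumptions, not part of this commit.
import requests

BASE = "http://127.0.0.1:8000"

# /api/run: the JSON body is written verbatim as a TOML config for sd-scripts' train_network.py.
train_config = {
    "pretrained_model_name_or_path": "./sd-models/model.ckpt",  # placeholder
    "train_data_dir": "./train/my-dataset",                     # placeholder
    "output_name": "my-lora",                                   # placeholder
}
print(requests.post(f"{BASE}/api/run", json=train_config).json())

# /api/interrogate: fields come from TaggerInterrogateRequest (see mikazuki/models.py).
print(requests.post(f"{BASE}/api/interrogate",
                    json={"path": "./train/my-dataset", "threshold": 0.35}).json())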
mikazuki/models.py ADDED
@@ -0,0 +1,18 @@
+ from pydantic import BaseModel, Field
+
+
+ class TaggerInterrogateRequest(BaseModel):
+     path: str
+     threshold: float = Field(
+         default=0.35,
+         ge=0,
+         le=1
+     )
+     additional_tags: str = ""
+     exclude_tags: str = ""
+     escape_tag: bool = True
+     batch_output_action_on_conflict: str = "ignore"
+     replace_underscore: bool = True
+     replace_underscore_excludes: str = Field(
+         default="0_0, (o)_(o), +_+, +_-, ._., <o>_<o>, <|>_<|>, =_=, >_<, 3_3, 6_9, >_o, @_@, ^_^, o_o, u_u, x_x, |_|, ||_||"
+     )
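A quick sketch of how this model validates an /api/interrogate payload; the field values are examples only.

from mikazuki.models import TaggerInterrogateRequest

req = TaggerInterrogateRequest(path="./train/my-dataset", threshold=0.5)
print(req.threshold)   # 0.5
print(req.escape_tag)  # True (default)

# threshold is constrained by Field(ge=0, le=1), so out-of-range values are rejected.
try:
    TaggerInterrogateRequest(path="./train/my-dataset", threshold=2.0)
except Exception as err:  # pydantic ValidationError
    print(type(err).__name__)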
mikazuki/requirements.txt ADDED
File without changes
mikazuki/tagger/__pycache__/dbimutils.cpython-310.pyc ADDED
Binary file (1.63 kB).
 
mikazuki/tagger/__pycache__/format.cpython-310.pyc ADDED
Binary file (1.63 kB).
 
mikazuki/tagger/__pycache__/interrogator.cpython-310.pyc ADDED
Binary file (8.46 kB).
 
mikazuki/tagger/dbimutils.py ADDED
@@ -0,0 +1,54 @@
+ # DanBooru IMage Utility functions
+
+ import cv2
+ import numpy as np
+ from PIL import Image
+
+
+ def smart_imread(img, flag=cv2.IMREAD_UNCHANGED):
+     if img.endswith(".gif"):
+         img = Image.open(img)
+         img = img.convert("RGB")
+         img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
+     else:
+         img = cv2.imread(img, flag)
+     return img
+
+
+ def smart_24bit(img):
+     if img.dtype == np.dtype(np.uint16):
+         img = (img / 257).astype(np.uint8)
+
+     if len(img.shape) == 2:
+         img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+     elif img.shape[2] == 4:
+         trans_mask = img[:, :, 3] == 0
+         img[trans_mask] = [255, 255, 255, 255]
+         img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
+     return img
+
+
+ def make_square(img, target_size):
+     old_size = img.shape[:2]
+     desired_size = max(old_size)
+     desired_size = max(desired_size, target_size)
+
+     delta_w = desired_size - old_size[1]
+     delta_h = desired_size - old_size[0]
+     top, bottom = delta_h // 2, delta_h - (delta_h // 2)
+     left, right = delta_w // 2, delta_w - (delta_w // 2)
+
+     color = [255, 255, 255]
+     new_im = cv2.copyMakeBorder(
+         img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
+     )
+     return new_im
+
+
+ def smart_resize(img, size):
+     # Assumes the image has already gone through make_square
+     if img.shape[0] > size:
+         img = cv2.resize(img, (size, size), interpolation=cv2.INTER_AREA)
+     elif img.shape[0] < size:
+         img = cv2.resize(img, (size, size), interpolation=cv2.INTER_CUBIC)
+     return img
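A small sketch chaining these helpers the way a caller might before inference; "example.png" is a placeholder path and 448 is an assumed model input size (typical for the wd14 taggers), not a value taken from this commit.

from mikazuki.tagger import dbimutils

img = dbimutils.smart_imread("example.png")  # BGR array; GIFs are routed through PIL
img = dbimutils.smart_24bit(img)             # 16-bit / grayscale / RGBA -> 8-bit BGR
img = dbimutils.make_square(img, 448)        # pad to a square with a white border
img = dbimutils.smart_resize(img, 448)       # scale to the target size
print(img.shape)                             # (448, 448, 3)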
mikazuki/tagger/format.py ADDED
@@ -0,0 +1,46 @@
+ import re
+ import hashlib
+
+ from typing import Dict, Callable, NamedTuple
+ from pathlib import Path
+
+
+ class Info(NamedTuple):
+     path: Path
+     output_ext: str
+
+
+ def hash(i: Info, algo='sha1') -> str:
+     try:
+         hash = hashlib.new(algo)
+     except ValueError:
+         raise ValueError(f"'{algo}' is not a valid hash algorithm")
+
+     # TODO: is it okay to hash a large image?
+     with open(i.path, 'rb') as file:
+         hash.update(file.read())
+
+     return hash.hexdigest()
+
+
+ pattern = re.compile(r'\[([\w:]+)\]')
+
+ # every function must return a string or raise TypeError or ValueError;
+ # any other exception is treated as an error in the extension
+ available_formats: Dict[str, Callable] = {
+     'name': lambda i: i.path.stem,
+     'extension': lambda i: i.path.suffix[1:],
+     'hash': hash,
+
+     'output_extension': lambda i: i.output_ext
+ }
+
+
+ def format(match: re.Match, info: Info) -> str:
+     matches = match[1].split(':')
+     name, args = matches[0], matches[1:]
+
+     if name not in available_formats:
+         return match[0]
+
+     return available_formats[name](info, *args)
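A short sketch of how an output-filename pattern is expanded, mirroring the call in interrogator.py; the input path is a placeholder.

from pathlib import Path
from mikazuki.tagger import format

info = format.Info(Path("./train/my-dataset/001.png"), "txt")
name = format.pattern.sub(lambda m: format.format(m, info), "[name].[output_extension]")
print(name)  # 001.txt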
mikazuki/tagger/interrogator.py ADDED
@@ -0,0 +1,366 @@
+ # from https://github.com/toriato/stable-diffusion-webui-wd14-tagger
+ import json
+ import os
+ import re
+ from collections import OrderedDict
+ from glob import glob
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import numpy as np
+ import pandas as pd
+ from PIL import Image
+ from PIL import UnidentifiedImageError
+ from huggingface_hub import hf_hub_download
+
+ from mikazuki.tagger import dbimutils, format
+
+ tag_escape_pattern = re.compile(r'([\\()])')
+
+
+ class Interrogator:
+     @staticmethod
+     def postprocess_tags(
+         tags: Dict[str, float],
+
+         threshold=0.35,
+         additional_tags: List[str] = [],
+         exclude_tags: List[str] = [],
+         sort_by_alphabetical_order=False,
+         add_confident_as_weight=False,
+         replace_underscore=False,
+         replace_underscore_excludes: List[str] = [],
+         escape_tag=False
+     ) -> Dict[str, float]:
+         for t in additional_tags:
+             tags[t] = 1.0
+
+         # these lines are not exactly "pythonic" but they read better to me
+         tags = {
+             t: c
+
+             # sort by tag name or confidence
+             for t, c in sorted(
+                 tags.items(),
+                 key=lambda i: i[0 if sort_by_alphabetical_order else 1],
+                 reverse=not sort_by_alphabetical_order
+             )
+
+             # filter tags
+             if (
+                 c >= threshold
+                 and t not in exclude_tags
+             )
+         }
+
+         new_tags = []
+         for tag in list(tags):
+             new_tag = tag
+
+             if replace_underscore and tag not in replace_underscore_excludes:
+                 new_tag = new_tag.replace('_', ' ')
+
+             if escape_tag:
+                 new_tag = tag_escape_pattern.sub(r'\\\1', new_tag)
+
+             if add_confident_as_weight:
+                 new_tag = f'({new_tag}:{tags[tag]})'
+
+             new_tags.append((new_tag, tags[tag]))
+         tags = dict(new_tags)
+
+         return tags
+
+     def __init__(self, name: str) -> None:
+         self.name = name
+
+     def load(self):
+         raise NotImplementedError()
+
+     def unload(self) -> bool:
+         unloaded = False
+
+         if hasattr(self, 'model') and self.model is not None:
+             del self.model
+             unloaded = True
+             print(f'Unloaded {self.name}')
+
+         if hasattr(self, 'tags'):
+             del self.tags
+
+         return unloaded
+
+     def interrogate(
+         self,
+         image: Image
+     ) -> Tuple[
+         Dict[str, float],  # rating confidences
+         Dict[str, float]   # tag confidences
+     ]:
+         raise NotImplementedError()
+
+
+ class WaifuDiffusionInterrogator(Interrogator):
+     def __init__(
+         self,
+         name: str,
+         model_path='model.onnx',
+         tags_path='selected_tags.csv',
+         **kwargs
+     ) -> None:
+         super().__init__(name)
+         self.model_path = model_path
+         self.tags_path = tags_path
+         self.kwargs = kwargs
+
+     def download(self) -> Tuple[os.PathLike, os.PathLike]:
+         print(f"Loading {self.name} model file from {self.kwargs['repo_id']}")
+
+         model_path = Path(hf_hub_download(
+             **self.kwargs, filename=self.model_path))
+         tags_path = Path(hf_hub_download(
+             **self.kwargs, filename=self.tags_path))
+         return model_path, tags_path
+
+     def load(self) -> None:
+         model_path, tags_path = self.download()
+
+         # only one of these packages should be installed at a time in any one environment
+         # https://onnxruntime.ai/docs/get-started/with-python.html#install-onnx-runtime
+         # TODO: remove the old package when the environment changes?
+         from mikazuki.utils import is_installed, run_pip
+         if not is_installed('onnxruntime'):
+             package = os.environ.get(
+                 'ONNXRUNTIME_PACKAGE',
+                 'onnxruntime-gpu'
+             )
+
+             run_pip(f'install {package}', 'onnxruntime')
+
+         # Import torch so the CUDA libraries bundled with it are available to onnxruntime; do not delete this.
+         import torch
+         from onnxruntime import InferenceSession
+
+         # https://onnxruntime.ai/docs/execution-providers/
+         # https://github.com/toriato/stable-diffusion-webui-wd14-tagger/commit/e4ec460122cf674bbf984df30cdb10b4370c1224#r92654958
+         providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+
+         self.model = InferenceSession(str(model_path), providers=providers)
+
+         print(f'Loaded {self.name} model from {model_path}')
+
+         self.tags = pd.read_csv(tags_path)
+
+     def interrogate(
+         self,
+         image: Image
+     ) -> Tuple[
+         Dict[str, float],  # rating confidences
+         Dict[str, float]   # tag confidences
+     ]:
+         # init model
+         if not hasattr(self, 'model') or self.model is None:
+             self.load()
+
+         # the code for converting the image and running the model is taken from the link below
+         # thanks, SmilingWolf!
+         # https://huggingface.co/spaces/SmilingWolf/wd-v1-4-tags/blob/main/app.py
+
+         # convert the image to fit the model
+         _, height, _, _ = self.model.get_inputs()[0].shape
+
+         # alpha to white
+         image = image.convert('RGBA')
+         new_image = Image.new('RGBA', image.size, 'WHITE')
+         new_image.paste(image, mask=image)
+         image = new_image.convert('RGB')
+         image = np.asarray(image)
+
+         # PIL RGB to OpenCV BGR
+         image = image[:, :, ::-1]
+
+         image = dbimutils.make_square(image, height)
+         image = dbimutils.smart_resize(image, height)
+         image = image.astype(np.float32)
+         image = np.expand_dims(image, 0)
+
+         # evaluate model
+         input_name = self.model.get_inputs()[0].name
+         label_name = self.model.get_outputs()[0].name
+         confidents = self.model.run([label_name], {input_name: image})[0]
+
+         tags = self.tags[:][['name']]
+         tags['confidents'] = confidents[0]
+
+         # the first 4 items are ratings (general, sensitive, questionable, explicit)
+         ratings = dict(tags[:4].values)
+
+         # the rest are regular tags
+         tags = dict(tags[4:].values)
+
+         return ratings, tags
+
+
+ def split_str(s: str, separator=',') -> List[str]:
+     return [x.strip() for x in s.split(separator) if x]
+
+
+ def on_interrogate(
+     image: Image,
+     batch_input_glob: str,
+     batch_input_recursive: bool,
+     batch_output_dir: str,
+     batch_output_filename_format: str,
+     batch_output_action_on_conflict: str,
+     batch_remove_duplicated_tag: bool,
+     batch_output_save_json: bool,
+
+     interrogator: Interrogator,
+     threshold: float,
+     additional_tags: str,
+     exclude_tags: str,
+     sort_by_alphabetical_order: bool,
+     add_confident_as_weight: bool,
+     replace_underscore: bool,
+     replace_underscore_excludes: str,
+     escape_tag: bool,
+
+     unload_model_after_running: bool
+ ):
+
+     postprocess_opts = (
+         threshold,
+         split_str(additional_tags),
+         split_str(exclude_tags),
+         sort_by_alphabetical_order,
+         add_confident_as_weight,
+         replace_underscore,
+         split_str(replace_underscore_excludes),
+         escape_tag
+     )
+
+     # batch process
+     batch_input_glob = batch_input_glob.strip()
+     batch_output_dir = batch_output_dir.strip()
+     batch_output_filename_format = batch_output_filename_format.strip()
+
+     if batch_input_glob != '':
+         # if there is no glob pattern, append one automatically
+         if not batch_input_glob.endswith('*'):
+             if not batch_input_glob.endswith(os.sep):
+                 batch_input_glob += os.sep
+             batch_input_glob += '*'
+
+         # get the root directory of the input glob pattern
+         base_dir = batch_input_glob.replace('?', '*')
+         base_dir = base_dir.split(os.sep + '*').pop(0)
+
+         # check the input directory path
+         if not os.path.isdir(base_dir):
+             print("input path is not a directory / 输入的路径不是文件夹,终止识别")
+             return 'input path is not a directory'
+
+         # this is computed here because, for some reason,
+         # PIL.Image.registered_extensions() returns only PNG if it is called too early
+         supported_extensions = [
+             e
+             for e, f in Image.registered_extensions().items()
+             if f in Image.OPEN
+         ]
+
+         paths = [
+             Path(p)
+             for p in glob(batch_input_glob, recursive=batch_input_recursive)
+             if '.' + p.split('.').pop().lower() in supported_extensions
+         ]
+
+         print(f'found {len(paths)} image(s)')
+
+         for path in paths:
+             try:
+                 image = Image.open(path)
+             except UnidentifiedImageError:
+                 # just in case the user has a mysterious file...
+                 print(f'{path} is not a supported image type')
+                 continue
+
+             # guess the output path
+             base_dir_last = Path(base_dir).parts[-1]
+             base_dir_last_idx = path.parts.index(base_dir_last)
+             output_dir = Path(
+                 batch_output_dir) if batch_output_dir else Path(base_dir)
+             output_dir = output_dir.joinpath(
+                 *path.parts[base_dir_last_idx + 1:]).parent
+
+             output_dir.mkdir(0o777, True, True)
+
+             # format the output filename
+             format_info = format.Info(path, 'txt')
+
+             try:
+                 formatted_output_filename = format.pattern.sub(
+                     lambda m: format.format(m, format_info),
+                     batch_output_filename_format
+                 )
+             except (TypeError, ValueError) as error:
+                 return str(error)
+
+             output_path = output_dir.joinpath(
+                 formatted_output_filename
+             )
+
+             output = []
+
+             if output_path.is_file():
+                 output.append(output_path.read_text(errors='ignore').strip())
+
+                 if batch_output_action_on_conflict == 'ignore':
+                     print(f'skipping {path}')
+                     continue
+
+             ratings, tags = interrogator.interrogate(image)
+             processed_tags = Interrogator.postprocess_tags(
+                 tags,
+                 *postprocess_opts
+             )
+
+             # TODO: add a switch for less printing
+             print(
+                 f'found {len(processed_tags)} tags out of {len(tags)} from {path}'
+             )
+
+             plain_tags = ', '.join(processed_tags)
+
+             if batch_output_action_on_conflict == 'copy':
+                 output = [plain_tags]
+             elif batch_output_action_on_conflict == 'prepend':
+                 output.insert(0, plain_tags)
+             else:
+                 output.append(plain_tags)
+
+             if batch_remove_duplicated_tag:
+                 output_path.write_text(
+                     ', '.join(
+                         OrderedDict.fromkeys(
+                             map(str.strip, ','.join(output).split(','))
+                         )
+                     ),
+                     encoding='utf-8'
+                 )
+             else:
+                 output_path.write_text(
+                     ', '.join(output),
+                     encoding='utf-8'
+                 )
+
+             if batch_output_save_json:
+                 output_path.with_suffix('.json').write_text(
+                     json.dumps([ratings, tags])
+                 )
+
+         print('all done / 识别完成')
+
+     if unload_model_after_running:
+         interrogator.unload()
+
+     return "Succeed"
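A sketch of driving the tagger directly, outside the FastAPI app; "example.png" is a placeholder, and the first interrogate() call downloads the ONNX model and tag list from the Hugging Face Hub (and may pip-install onnxruntime), so it needs network access.

from PIL import Image
from mikazuki.tagger.interrogator import Interrogator, WaifuDiffusionInterrogator

wd14 = WaifuDiffusionInterrogator(
    'wd14-convnextv2-v2',
    repo_id='SmilingWolf/wd-v1-4-convnextv2-tagger-v2',
    revision='v2.0'
)
ratings, tags = wd14.interrogate(Image.open("example.png"))
processed = Interrogator.postprocess_tags(tags, threshold=0.35, replace_underscore=True)
print(ratings)
print(', '.join(processed))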
mikazuki/utils.py ADDED
@@ -0,0 +1,46 @@
+ import os
+ import sys
+ import subprocess
+ import importlib.util
+
+ python_bin = sys.executable
+
+
+ def is_installed(package):
+     try:
+         spec = importlib.util.find_spec(package)
+     except ModuleNotFoundError:
+         return False
+
+     return spec is not None
+
+
+ def run(command, desc=None, errdesc=None, custom_env=None, live=False):
+     if desc is not None:
+         print(desc)
+
+     if live:
+         result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env)
+         if result.returncode != 0:
+             raise RuntimeError(f"""{errdesc or 'Error running command'}.
+ Command: {command}
+ Error code: {result.returncode}""")
+
+         return ""
+
+     result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
+
+     if result.returncode != 0:
+         message = f"""{errdesc or 'Error running command'}.
+ Command: {command}
+ Error code: {result.returncode}
+ stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout) > 0 else '<empty>'}
+ stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr) > 0 else '<empty>'}
+ """
+         raise RuntimeError(message)
+
+     return result.stdout.decode(encoding="utf8", errors="ignore")
+
+
+ def run_pip(command, desc=None, live=False):
+     return run(f'"{python_bin}" -m pip {command}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}", live=live)
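A minimal sketch of the two helpers, mirroring how interrogator.load() installs onnxruntime on demand.

from mikazuki.utils import is_installed, run_pip

if not is_installed('onnxruntime'):
    # runs "<python> -m pip install onnxruntime-gpu" and raises RuntimeError on failure
    run_pip('install onnxruntime-gpu', 'onnxruntime')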