Delete main.py
main.py
DELETED
@@ -1,247 +0,0 @@
import argparse
import os
import shutil
import subprocess
import time
from pathlib import Path

from git import Repo


def clone_hf_with_git(username: str, model_name: str, saved_dir: str):
    full_model_name = f"{username}/{model_name}"
    url = f"https://huggingface.co/{full_model_name}"
    saved = f"{saved_dir}/{model_name}"

    # perform `git lfs install`
    subprocess.run(["git", "lfs", "install"])

    print(f"[INFO] Cloning {model_name} from {url} ...")
    Repo.clone_from(url, saved)

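# Hypothetical usage example; the username, model name, and target directory
# are placeholders. This clones over HTTPS, so no SSH key is required:
#   clone_hf_with_git("username", "model_name", "models")

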
def download_hf_with_git(full_name: str, saved_dir: str):
    model_name = full_name.split("/")[1]
    url = f"git@huggingface.co:{full_name}"
    saved = f"{saved_dir}/{model_name}"

    # perform `git lfs install`
    subprocess.run(["git", "lfs", "install"])

    print(f"Cloning {model_name} from {url} ...")
    subprocess.run(["git", "clone", "--progress", url, saved])

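# download_hf_with_git clones over SSH instead, so it assumes an SSH key is
# registered with the Hugging Face account. Hypothetical example:
#   download_hf_with_git("username/model_name", "models")

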
def convert_hf_to_gguf(
    script_path: str,
    dir_raw_model: str,
    gguf_model_path: str,
    pad_vocab: bool = False,
):
    if pad_vocab:
        args = [
            "--outfile",
            gguf_model_path,
            # "--vocab-type",
            # "bpe",
            "--pad-vocab",
            dir_raw_model,
        ]
    else:
        args = ["--outfile", gguf_model_path, dir_raw_model]
    # convert.py for llama-3
    # args = ["--outfile", gguf_model_path, "--vocab-type", "bpe", dir_raw_model]
    res = subprocess.run(["python", script_path] + args)
    print(res)

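# With pad_vocab=True this builds a command along these lines (paths are
# hypothetical, assuming a llama.cpp checkout):
#   python llama.cpp/convert.py --outfile model-f16.gguf --pad-vocab models/model_name

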
def quantize_model(
    quantizer: str,
    f16_gguf_model_path: str,
    quantized_gguf_model_path: str,
    quant_type: str = "q4_0",
):
    print(f"[INFO] quantizer: {quantizer}")
    print(f"[INFO] quant_type: {quant_type}")
    print(f"[INFO] f16_gguf_model_path: {f16_gguf_model_path}")
    print(f"[INFO] quantized_model_filename: {quantized_gguf_model_path}")
    subprocess.run(
        [
            quantizer,
            f16_gguf_model_path,
            quantized_gguf_model_path,
            quant_type,
        ]
    )

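# The quantizer is expected to be the quantize binary built from llama.cpp
# (named `quantize` or `llama-quantize` depending on the version), so the call
# above is equivalent to, e.g.:
#   llama.cpp/quantize model-f16.gguf model-Q5_K_M.gguf Q5_K_M

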
def main():
    parser = argparse.ArgumentParser(description="Convert and quantize gguf models.")
    parser.add_argument(
        "--full-name",
        type=str,
        help="Hugging Face model full name, e.g. `username/model_name`",
    )
    parser.add_argument(
        "-s",
        "--saved-dir",
        type=str,
        default="models",
        help="The directory to save the model.",
    )
    parser.add_argument(
        "--enable-converter",
        action="store_true",
        help="Enable the converter. Notice that `--converter` must be specified.",
    )
    parser.add_argument(
        "-c",
        "--converter",
        type=str,
        help="The path to the converter. Notice that `--enable-converter` must be specified if using this option.",
    )
    parser.add_argument(
        "--pad-vocab",
        action="store_true",
        help="Add pad tokens when the model vocab expects more tokens than the tokenizer metadata provides. Notice that `--enable-converter` must be specified.",
    )
    parser.add_argument(
        "--enable-quantizer",
        action="store_true",
        help="Enable the quantizer. Notice that `--quantizer` must be specified.",
    )
    parser.add_argument(
        "-q",
        "--quantizer",
        type=str,
        help="The path to the quantizer. Notice that `--enable-quantizer` must be specified if using this option.",
    )
    parser.add_argument(
        "-t",
        "--quant-type",
        type=str,
        default=None,
        help="The quantization type. Notice that `--enable-quantizer` must be specified if using this option.",
    )

    args = parser.parse_args()

    print(args)

    print("Download model ...")
    full_name = args.full_name
    username, model_name = full_name.split("/")
    saved_dir = args.saved_dir
    # try:
    #     download_hf_with_git(full_name, saved_dir)
    #     print(f"The raw model is saved in {saved_dir}.")

    # except Exception as e:
    #     print(f"Failed to download model. {e}")
    #     return

    # Compute the model paths up front so that the quantizer can run even when
    # the converter step is skipped.
    raw_model_dir = f"{saved_dir}/{model_name}"
    gguf_model_dir = Path(raw_model_dir).parent / f"{model_name}-gguf"
    f16_gguf_model_path = gguf_model_dir / f"{model_name}-f16.gguf"

    if args.enable_converter:
        print("[CONVERTER] Convert model ...")
        converter = args.converter

        print(f"[CONVERTER] raw_model_dir: {raw_model_dir}")

        if not gguf_model_dir.exists():
            gguf_model_dir.mkdir()

        print(f"[CONVERTER] f16_gguf_model_path: {f16_gguf_model_path}")

        # try:
        #     convert_hf_to_gguf(
        #         converter,
        #         raw_model_dir,
        #         str(f16_gguf_model_path),
        #         args.pad_vocab,
        #     )
        #     print(f"The converted gguf model is saved in {f16_gguf_model_path}.")

        # except Exception as e:
        #     print(f"Failed to convert model. {e}")
        #     return

    if args.enable_quantizer:
        print("[QUANTIZER] Quantize model ...")
        quantizer = args.quantizer
        print(f"[QUANTIZER] quantizer: {quantizer}")

        if args.quant_type is not None:
            quant_type = args.quant_type
            quantized_gguf_model_path = (
                gguf_model_dir / f"{model_name}-{quant_type}.gguf"
            )

            print(f"[QUANTIZER] quant_type: {quant_type}")
            print(f"[QUANTIZER] quantized_model_filename: {quantized_gguf_model_path}")

            try:
                quantize_model(
                    quantizer,
                    str(f16_gguf_model_path),
                    str(quantized_gguf_model_path),
                    quant_type,
                )
                print(
                    f"The quantized gguf model is saved in {quantized_gguf_model_path}."
                )

            except Exception as e:
                print(e)
                print("Failed to quantize model.")
                return
        else:
            # No quant type given: quantize to a default set of types. The
            # commented-out entries can be re-enabled as needed.
            for quant_type in [
                # "Q2_K",
                # "Q3_K_L",
                # "Q3_K_M",
                # "Q3_K_S",
                # "Q4_0",
                # "Q4_K_M",
                # "Q4_K_S",
                # "Q5_0",
                "Q5_K_M",
                # "Q5_K_S",
                "Q6_K",
                "Q8_0",
            ]:
                quantized_gguf_model_path = (
                    gguf_model_dir / f"{model_name}-{quant_type}.gguf"
                )

                print(f"[QUANTIZER] quant_type: {quant_type}")
                print(
                    f"[QUANTIZER] quantized_model_filename: {quantized_gguf_model_path}"
                )

                try:
                    quantize_model(
                        quantizer,
                        str(f16_gguf_model_path),
                        str(quantized_gguf_model_path),
                        quant_type,
                    )
                    print(
                        f"The quantized gguf model is saved in {quantized_gguf_model_path}."
                    )

                except Exception as e:
                    print(e)
                    print("Failed to quantize model.")
                    return

    # # remove the raw model dir for saving space
    # print(f"The quantization is done. Remove {raw_model_dir}")
    # shutil.rmtree(raw_model_dir)

    print("Done.")


if __name__ == "__main__":
    main()
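
A typical invocation, with hypothetical paths to a llama.cpp checkout for the converter script and quantize binary:

    python main.py --full-name username/model_name \
        --enable-converter --converter llama.cpp/convert.py \
        --enable-quantizer --quantizer llama.cpp/quantize \
        --quant-type Q5_K_M

Omitting --quant-type quantizes to the default set (Q5_K_M, Q6_K, Q8_0). Note that the download and converter calls are commented out in this version of the script, so the f16 gguf file must already exist for the quantizer step to succeed.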