K00B404 committed (verified)
Commit 5490186 · Parent(s): c517de9

Create quent_models.py

Files changed (1)
  1. quent_models.py +47 -0
quent_models.py ADDED
@@ -0,0 +1,47 @@
+
+ import os
+ from huggingface_hub import hf_hub_download
+ from typing import List
+
+ def QuantizeModel(model_name: str, lvl: str = 'q4_0'):
+     """
+     Quantize a model via the local ./bin/sd converter and save it in GGUF format.
+
+     :param model_name: Path to the model file (.safetensors or .ckpt)
+     :param lvl: Quantization level (e.g., 'q4_0')
+     """
+     gguf_name = os.path.splitext(model_name)[0] + f"_{lvl}.gguf"  # works for .ckpt as well as .safetensors
+     os.system(f'./bin/sd -M convert -m {model_name} -o {gguf_name} -v --type {lvl}')
+
+
+ def download_models(models: List[str]):
+     """
+     Download models from Hugging Face using their direct file URLs.
+
+     :param models: List of Hugging Face file URLs
+     """
+     for model_url in models:
+         os.system(f'curl -L -O {model_url}')  # -L follows redirects, -O keeps the remote filename
+
+
+ # Example usage
+ if __name__ == "__main__":
+     models_to_download = [
+         "https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips_t5xxlfp16.safetensors",
+         "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt",
+         "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors",
+         "https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-nonema-pruned.safetensors"
+     ]
+
+     download_models(models_to_download)
+
+     # Quantize models
+     model_names = [
+         'sd-v1-4.ckpt',
+         'v1-5-pruned-emaonly.safetensors',
+         'v2-1_768-nonema-pruned.safetensors',
+         'sd3_medium_incl_clips_t5xxlfp16.safetensors'
+     ]
+
+     for model_name in model_names:
+         QuantizeModel(model_name, lvl='q4_0')
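
Note that quent_models.py imports hf_hub_download but never calls it; downloads go through curl -L -O instead. Below is a rough sketch of what a huggingface_hub-based download path could look like. It is not part of the committed file: the repo_id/filename pairs are read off the URLs above, and local_dir="." is an assumption made to mimic curl -O saving into the working directory (gated repositories may additionally require a logged-in Hugging Face token).

# Sketch only, not part of quent_models.py: download via hf_hub_download instead of curl.
from huggingface_hub import hf_hub_download

# repo_id / filename pairs derived from the URLs in quent_models.py
MODEL_FILES = [
    ("stabilityai/stable-diffusion-3-medium", "sd3_medium_incl_clips_t5xxlfp16.safetensors"),
    ("CompVis/stable-diffusion-v-1-4-original", "sd-v1-4.ckpt"),
    ("runwayml/stable-diffusion-v1-5", "v1-5-pruned-emaonly.safetensors"),
    ("stabilityai/stable-diffusion-2-1", "v2-1_768-nonema-pruned.safetensors"),
]

def download_models_hf(files=MODEL_FILES):
    """Download each file into the current directory and return the local paths."""
    return [
        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=".")
        for repo_id, filename in files
    ]

Unlike curl, hf_hub_download can reuse the local Hub cache when a file is already present, so repeated runs need not re-download multi-gigabyte checkpoints.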
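
As a design note, both helpers shell out with os.system and ignore the exit code, so a failed conversion passes silently. A minimal sketch of the same ./bin/sd conversion call using subprocess.run, again not part of the committed file, that raises when the converter exits non-zero:

import os
import subprocess

def quantize_model_checked(model_name: str, lvl: str = "q4_0") -> str:
    """Run the same ./bin/sd conversion as QuantizeModel, but raise if it fails."""
    gguf_name = os.path.splitext(model_name)[0] + f"_{lvl}.gguf"
    subprocess.run(
        ["./bin/sd", "-M", "convert", "-m", model_name, "-o", gguf_name, "-v", "--type", lvl],
        check=True,  # raises CalledProcessError on a non-zero exit code
    )
    return gguf_name

Passing the arguments as a list also sidesteps shell quoting issues with unusual file names.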