Dracones committed on
Commit
de9c6fb
1 Parent(s): 043ef30

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ gemma-2-27b-it-BF16-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
37
+ gemma-2-27b-it-BF16-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
38
+ gemma-2-27b-it-F32-00001-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
39
+ gemma-2-27b-it-F32-00002-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
40
+ gemma-2-27b-it-F32-00003-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
41
+ gemma-2-27b-it-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
42
+ gemma-2-27b-it-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
43
+ gemma-2-27b-it-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
44
+ gemma-2-27b-it-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ gemma-2-27b-it-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ gemma-2-27b-it-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
47
+ gemma-2-27b-it-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
48
+ gemma-2-27b-it-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
49
+ gemma-2-27b-it-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: gemma
3
+ library_name: transformers
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - conversational
7
+ - gguf
8
+ - llamacpp
9
+ ---
10
+
11
+
12
+
13
+ # Gemma 2 27b Instruction Tuned - GGUF
14
+
15
+ These are GGUF quants of [google/gemma-2-27b-it](https://huggingface.co/google/gemma-2-27b-it)
16
+
17
+ Details about the model can be found at the above model page.
18
+
19
+ ## Llamacpp Version
20
+
21
+ These quants were made with llamacpp tag b3408.
22
+
23
+ If you have problems loading these models, please update your software to use the latest llamacpp version.
24
+
25
+
26
+ ## Perplexity Scoring
27
+
28
+ Below are the perplexity scores for the GGUF models. A lower score is better.
29
+
30
+ | Quant Level | Perplexity Score | Standard Deviation |
31
+ |-------------|------------------|--------------------|
32
+ | F32 | 7.1853 | 0.04922 |
33
+ | BF16 | 7.1853 | 0.04922 |
34
+ | Q8_0 | 7.1879 | 0.04924 |
35
+ | Q6_K | 7.2182 | 0.04948 |
36
+ | Q5_K_M | 7.2333 | 0.04953 |
37
+ | Q5_K_S | 7.2204 | 0.04931 |
38
+ | Q4_K_M | 7.4192 | 0.05149 |
39
+ | Q4_K_S | 7.5403 | 0.05231 |
40
+ | Q3_K_L | 7.4623 | 0.05128 |
41
+ | Q3_K_M | 7.7375 | 0.05362 |
42
+ | Q3_K_S | 8.0426 | 0.05546 |
43
+
44
+
45
+ ## Quant Details
46
+
47
+ This is the script used for quantization.
48
+
49
```bash
#!/bin/bash
# Quantize a Hugging Face model into a set of GGUF files using llama.cpp.
# Requires: convert_hf_to_gguf.py and ./llama-quantize in the working
# directory, and the source model under ${HOME}/src/models/${MODEL_NAME}.
set -euo pipefail

# Model to quantize
MODEL_NAME="gemma-2-27b-it"

# Define the output directory and create it if it doesn't exist
outputDir="${MODEL_NAME}-GGUF"
mkdir -p "${outputDir}"

# Make the F32 quant (the master file every other quant is derived from)
f32file="${outputDir}/${MODEL_NAME}-F32.gguf"
if [ -f "${f32file}" ]; then
  echo "Skipping f32 as ${f32file} already exists."
else
  # NOTE: tilde does not expand inside double quotes, so use ${HOME}
  # instead of "~" to build the model path.
  python convert_hf_to_gguf.py "${HOME}/src/models/${MODEL_NAME}" --outfile "${f32file}" --outtype "f32"
fi

# Abort if the F32 conversion did not produce a file
if [ ! -f "${f32file}" ]; then
  echo "No ${f32file} found." >&2
  exit 1
fi

# Quantization levels to produce, highest precision first
quants=("Q8_0" "Q6_K" "Q5_K_M" "Q5_K_S" "Q4_K_M" "Q4_K_S" "Q3_K_L" "Q3_K_M" "Q3_K_S")

# Loop through the quants array; skip any file already generated so the
# script can be re-run to resume after an interruption.
for quant in "${quants[@]}"; do
  outfile="${outputDir}/${MODEL_NAME}-${quant}.gguf"

  if [ -f "${outfile}" ]; then
    echo "Skipping ${quant} as ${outfile} already exists."
  else
    # Run the quantizer; set -e aborts the script if it fails, so the
    # success message below is only printed for a real success.
    ./llama-quantize "${f32file}" "${outfile}" "${quant}"

    echo "Processed ${quant} and generated ${outfile}"
  fi
done
```
gemma-2-27b-it-BF16-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c661a821eeae60935de25496a866d5a3828b83e0d30b37e48c35042f343913e0
3
+ size 29885957344
gemma-2-27b-it-BF16-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:377ca838eea7965ff525b16f7bbdbe54ded8c4bcfdbc8cbe81e99f4a2651767d
3
+ size 24576063776
gemma-2-27b-it-F32-00001-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77a695b7d965d5d6474fdadc54b4a9436b0554f18e292e1320f71ab522cb9d70
3
+ size 39605587584
gemma-2-27b-it-F32-00002-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee1299b5d2d728b578c7d9e309ff87d13b4701dd01a7b268c4d2393e128e881a
3
+ size 39864004064
gemma-2-27b-it-F32-00003-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7c97c2904b8af2f09fa1d83304f6b6d3640f3b00474685e994b49fed6c31e8f
3
+ size 29444981280
gemma-2-27b-it-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f733ba602927488de0ed6cf4f2fe6b9de2827034840c79a8cf5cc37586de3da
3
+ size 14519360800
gemma-2-27b-it-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61c15ce2450a199e61156be5b8a138975cd52df9f21f3ffc447756e2464675cc
3
+ size 13424647456
gemma-2-27b-it-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:679b79cae38d33fdc310524994f63b13ec1daef1c3cc749858f42da0fa3ae404
3
+ size 12169059616
gemma-2-27b-it-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ecac990a76e7ae24360ebd42879a9b0850b683ada4e2caa676c68e790f14fc0e
3
+ size 16645381408
gemma-2-27b-it-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6d976b24ee5d73b6906463da84da1f7463fc808c75a8926088c0e3b98131815
3
+ size 15739264288
gemma-2-27b-it-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f72c7113e5a2064b24c0bb59c2d9a47cc8bb731259bb617df24f17b270f97826
3
+ size 19408117024
gemma-2-27b-it-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c1a4ee1d3b06f027baa653fdb23bdfc0dd73f6e94ef5011513ea128f5e31ee9
3
+ size 18884205856
gemma-2-27b-it-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f08e22a5be47dff774a42ecdda11e5de5bf1dcb088a621506093290a58c2c91d
3
+ size 22343523616
gemma-2-27b-it-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:219fdccce6f40298436319b581c2bbd47df0f9f5df8d8cbc73641ed12ff4daf1
3
+ size 28937387296