morriszms commited on
Commit
6ce2196
·
verified ·
1 Parent(s): 663b4f6

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ TinyMistral-248M-v3-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ TinyMistral-248M-v3-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ TinyMistral-248M-v3-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ TinyMistral-248M-v3-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ TinyMistral-248M-v3-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ TinyMistral-248M-v3-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ TinyMistral-248M-v3-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ TinyMistral-248M-v3-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ TinyMistral-248M-v3-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ TinyMistral-248M-v3-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ TinyMistral-248M-v3-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ TinyMistral-248M-v3-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: apache-2.0
5
+ datasets:
6
+ - Locutusque/TM-DATA-V2
7
+ - LLM360/TxT360
8
+ - mlfoundations/dclm-baseline-1.0
9
+ - Skylion007/openwebtext
10
+ - JeanKaddour/minipile
11
+ - eminorhan/gutenberg_en
12
+ tags:
13
+ - TensorBlock
14
+ - GGUF
15
+ base_model: M4-ai/TinyMistral-248M-v3
16
+ model-index:
17
+ - name: TinyMistral-248M-v3
18
+ results:
19
+ - task:
20
+ type: text-generation
21
+ name: Text Generation
22
+ dataset:
23
+ name: IFEval (0-Shot)
24
+ type: HuggingFaceH4/ifeval
25
+ args:
26
+ num_few_shot: 0
27
+ metrics:
28
+ - type: inst_level_strict_acc and prompt_level_strict_acc
29
+ value: 16.39
30
+ name: strict accuracy
31
+ source:
32
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=M4-ai/TinyMistral-248M-v3
33
+ name: Open LLM Leaderboard
34
+ - task:
35
+ type: text-generation
36
+ name: Text Generation
37
+ dataset:
38
+ name: BBH (3-Shot)
39
+ type: BBH
40
+ args:
41
+ num_few_shot: 3
42
+ metrics:
43
+ - type: acc_norm
44
+ value: 1.78
45
+ name: normalized accuracy
46
+ source:
47
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=M4-ai/TinyMistral-248M-v3
48
+ name: Open LLM Leaderboard
49
+ - task:
50
+ type: text-generation
51
+ name: Text Generation
52
+ dataset:
53
+ name: MATH Lvl 5 (4-Shot)
54
+ type: hendrycks/competition_math
55
+ args:
56
+ num_few_shot: 4
57
+ metrics:
58
+ - type: exact_match
59
+ value: 0.0
60
+ name: exact match
61
+ source:
62
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=M4-ai/TinyMistral-248M-v3
63
+ name: Open LLM Leaderboard
64
+ - task:
65
+ type: text-generation
66
+ name: Text Generation
67
+ dataset:
68
+ name: GPQA (0-shot)
69
+ type: Idavidrein/gpqa
70
+ args:
71
+ num_few_shot: 0
72
+ metrics:
73
+ - type: acc_norm
74
+ value: 0.0
75
+ name: acc_norm
76
+ source:
77
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=M4-ai/TinyMistral-248M-v3
78
+ name: Open LLM Leaderboard
79
+ - task:
80
+ type: text-generation
81
+ name: Text Generation
82
+ dataset:
83
+ name: MuSR (0-shot)
84
+ type: TAUR-Lab/MuSR
85
+ args:
86
+ num_few_shot: 0
87
+ metrics:
88
+ - type: acc_norm
89
+ value: 5.15
90
+ name: acc_norm
91
+ source:
92
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=M4-ai/TinyMistral-248M-v3
93
+ name: Open LLM Leaderboard
94
+ - task:
95
+ type: text-generation
96
+ name: Text Generation
97
+ dataset:
98
+ name: MMLU-PRO (5-shot)
99
+ type: TIGER-Lab/MMLU-Pro
100
+ config: main
101
+ split: test
102
+ args:
103
+ num_few_shot: 5
104
+ metrics:
105
+ - type: acc
106
+ value: 1.47
107
+ name: accuracy
108
+ source:
109
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=M4-ai/TinyMistral-248M-v3
110
+ name: Open LLM Leaderboard
111
+ ---
112
+
113
+ <div style="width: auto; margin-left: auto; margin-right: auto">
114
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
115
+ </div>
116
+ <div style="display: flex; justify-content: space-between; width: 100%;">
117
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
118
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
119
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
120
+ </p>
121
+ </div>
122
+ </div>
123
+
124
+ ## M4-ai/TinyMistral-248M-v3 - GGUF
125
+
126
+ This repo contains GGUF format model files for [M4-ai/TinyMistral-248M-v3](https://huggingface.co/M4-ai/TinyMistral-248M-v3).
127
+
128
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b5165](https://github.com/ggml-org/llama.cpp/commit/1d735c0b4fa0551c51c2f4ac888dd9a01f447985).
129
+
130
+ ## Our projects
131
+ <table border="1" cellspacing="0" cellpadding="10">
132
+ <tr>
133
+ <th style="font-size: 25px;">Awesome MCP Servers</th>
134
+ <th style="font-size: 25px;">TensorBlock Studio</th>
135
+ </tr>
136
+ <tr>
137
+ <th><img src="https://imgur.com/2Xov7B7.jpeg" alt="Project A" width="450"/></th>
138
+ <th><img src="https://imgur.com/pJcmF5u.jpeg" alt="Project B" width="450"/></th>
139
+ </tr>
140
+ <tr>
141
+ <th>A comprehensive collection of Model Context Protocol (MCP) servers.</th>
142
+ <th>A lightweight, open, and extensible multi-LLM interaction studio.</th>
143
+ </tr>
144
+ <tr>
145
+ <th>
146
+ <a href="https://github.com/TensorBlock/awesome-mcp-servers" target="_blank" style="
147
+ display: inline-block;
148
+ padding: 8px 16px;
149
+ background-color: #FF7F50;
150
+ color: white;
151
+ text-decoration: none;
152
+ border-radius: 6px;
153
+ font-weight: bold;
154
+ font-family: sans-serif;
155
+ ">👀 See what we built 👀</a>
156
+ </th>
157
+ <th>
158
+ <a href="https://github.com/TensorBlock/TensorBlock-Studio" target="_blank" style="
159
+ display: inline-block;
160
+ padding: 8px 16px;
161
+ background-color: #FF7F50;
162
+ color: white;
163
+ text-decoration: none;
164
+ border-radius: 6px;
165
+ font-weight: bold;
166
+ font-family: sans-serif;
167
+ ">👀 See what we built 👀</a>
168
+ </th>
169
+ </tr>
170
+ </table>
171
+
172
+ ## Prompt template
173
+
174
+ ```
175
+ <|im_start|>system
176
+ {system_prompt}<|im_end|>
177
+ <|im_start|>user
178
+ {prompt}<|im_end|>
179
+ <|im_start|>assistant
180
+ ```
181
+
182
+ ## Model file specification
183
+
184
+ | Filename | Quant type | File Size | Description |
185
+ | -------- | ---------- | --------- | ----------- |
186
+ | [TinyMistral-248M-v3-Q2_K.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q2_K.gguf) | Q2_K | 0.105 GB | smallest, significant quality loss - not recommended for most purposes |
187
+ | [TinyMistral-248M-v3-Q3_K_S.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q3_K_S.gguf) | Q3_K_S | 0.120 GB | very small, high quality loss |
188
+ | [TinyMistral-248M-v3-Q3_K_M.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q3_K_M.gguf) | Q3_K_M | 0.129 GB | very small, high quality loss |
189
+ | [TinyMistral-248M-v3-Q3_K_L.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q3_K_L.gguf) | Q3_K_L | 0.137 GB | small, substantial quality loss |
190
+ | [TinyMistral-248M-v3-Q4_0.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q4_0.gguf) | Q4_0 | 0.149 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
191
+ | [TinyMistral-248M-v3-Q4_K_S.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q4_K_S.gguf) | Q4_K_S | 0.149 GB | small, greater quality loss |
192
+ | [TinyMistral-248M-v3-Q4_K_M.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q4_K_M.gguf) | Q4_K_M | 0.156 GB | medium, balanced quality - recommended |
193
+ | [TinyMistral-248M-v3-Q5_0.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q5_0.gguf) | Q5_0 | 0.176 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
194
+ | [TinyMistral-248M-v3-Q5_K_S.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q5_K_S.gguf) | Q5_K_S | 0.176 GB | large, low quality loss - recommended |
195
+ | [TinyMistral-248M-v3-Q5_K_M.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q5_K_M.gguf) | Q5_K_M | 0.179 GB | large, very low quality loss - recommended |
196
+ | [TinyMistral-248M-v3-Q6_K.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q6_K.gguf) | Q6_K | 0.204 GB | very large, extremely low quality loss |
197
+ | [TinyMistral-248M-v3-Q8_0.gguf](https://huggingface.co/tensorblock/M4-ai_TinyMistral-248M-v3-GGUF/blob/main/TinyMistral-248M-v3-Q8_0.gguf) | Q8_0 | 0.264 GB | very large, extremely low quality loss - not recommended |
198
+
199
+
200
+ ## Downloading instruction
201
+
202
+ ### Command line
203
+
204
+ Firstly, install Huggingface Client
205
+
206
+ ```shell
207
+ pip install -U "huggingface_hub[cli]"
208
+ ```
209
+
210
+ Then, download the individual model file to a local directory
211
+
212
+ ```shell
213
+ huggingface-cli download tensorblock/M4-ai_TinyMistral-248M-v3-GGUF --include "TinyMistral-248M-v3-Q2_K.gguf" --local-dir MY_LOCAL_DIR
214
+ ```
215
+
216
+ If you want to download multiple model files with a pattern (e.g., `*Q4_K*gguf`), you can try:
217
+
218
+ ```shell
219
+ huggingface-cli download tensorblock/M4-ai_TinyMistral-248M-v3-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
220
+ ```
TinyMistral-248M-v3-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d65f3ae3ee2912eda84a1f14c2fb6ee1185ed9a599138cce039c536d6483351
3
+ size 105461376
TinyMistral-248M-v3-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5496e32315ec808c8f90335e4f6164a9f34b656efff4a3fabb81a709c3b6a2c
3
+ size 137224320
TinyMistral-248M-v3-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c9334d6df342445ee75ade25fba8ee93a3fb5e91cd19c594cd7f4b553948b0d
3
+ size 129032320
TinyMistral-248M-v3-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:725c23865c7035d7d09136049587ed0126489883f40434ff8d8647b65e5ea396
3
+ size 120193152
TinyMistral-248M-v3-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97c34d73f7f76623c4f3fc27b20e68f1f20137143b8e36ec0c4e081f0ef865d4
3
+ size 148777760
TinyMistral-248M-v3-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:852cd0ea7d1dabf7e312ccc542d4b31b41bd4d2946b67e10c5e3f375f551e7ed
3
+ size 155671328
TinyMistral-248M-v3-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3ee5a85a916b1bc81e0470a9ed47035486e0ef56834f6c1dd9f40ba6d084b86
3
+ size 149433120
TinyMistral-248M-v3-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73fd591545a5129594af40931ef2f1d392f8d788d0acdd176b70444cfc1b509a
3
+ size 175680928
TinyMistral-248M-v3-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bf9528321366edd00152bd1a1294b078f3f93d82df54e9053c9797b4feb8b20
3
+ size 179232160
TinyMistral-248M-v3-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d13be274e6503483d442ba7380cef4c9509c168fbc1778625e8ee24afb370b46
3
+ size 175680928
TinyMistral-248M-v3-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45d5d4c6dfbef01b37979c30320ce5e0d41a8a7d31f2b89a5c78086f7f3a7644
3
+ size 204265568
TinyMistral-248M-v3-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c94c6f59fb8737c6856e3c65cac8bcd331be7f077f0999cba939eb178a5c83ad
3
+ size 264327648