morriszms commited on
Commit
791eefc
·
verified ·
1 Parent(s): 99b51c3

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,18 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Llama3.3-70B-CogniLink-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ Llama3.3-70B-CogniLink-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ Llama3.3-70B-CogniLink-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ Llama3.3-70B-CogniLink-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ Llama3.3-70B-CogniLink-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ Llama3.3-70B-CogniLink-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ Llama3.3-70B-CogniLink-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ Llama3.3-70B-CogniLink-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ Llama3.3-70B-CogniLink-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ Llama3.3-70B-CogniLink-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ Llama3.3-70B-CogniLink-Q6_K/Llama3.3-70B-CogniLink-Q6_K-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
47
+ Llama3.3-70B-CogniLink-Q6_K/Llama3.3-70B-CogniLink-Q6_K-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
48
+ Llama3.3-70B-CogniLink-Q8_0/Llama3.3-70B-CogniLink-Q8_0-00001-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
49
+ Llama3.3-70B-CogniLink-Q8_0/Llama3.3-70B-CogniLink-Q8_0-00002-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
50
+ Llama3.3-70B-CogniLink-Q8_0/Llama3.3-70B-CogniLink-Q8_0-00003-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
Llama3.3-70B-CogniLink-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e4cf99700a712fe0eaaf66487018af098652ccc9eb9fe054d3ee3045bdea501
3
+ size 26375110528
Llama3.3-70B-CogniLink-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d8ea0c350475fdd85bf2e4073e06cd05e708f48ea218d3a92d66fffcdbb1edc
3
+ size 37140594560
Llama3.3-70B-CogniLink-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1c3dd6dd8d5e3f2ec83dd83ba9bdd86328bf41e97bee5176bf03ff4dd7ee97c
3
+ size 34267496320
Llama3.3-70B-CogniLink-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e2a4e9fd1540da208d9b6989ef986ce1e3cd628028ec1828054b522439a16cc
3
+ size 30912053120
Llama3.3-70B-CogniLink-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d329dcdb8cb5fbc7d8f6c93dbc1106a63a6dbdc81e5f559fa6e34ab57cfa58f6
3
+ size 39969734528
Llama3.3-70B-CogniLink-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c694fcc864a05af65e0417688e9ba5fc186b3747de8a4cfef0b05bcd6a10ff8e
3
+ size 42520395648
Llama3.3-70B-CogniLink-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:878a80b11d7ecd7e73fa78046bf24cbeaadd1a2c03f6a8e7613fca42ab60ac7b
3
+ size 40347221888
Llama3.3-70B-CogniLink-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a29969e857706550d670ca65e2165f07717d009b6a772e134ff4c91789bfb31c
3
+ size 48657448832
Llama3.3-70B-CogniLink-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ef4b18fbd2a8ea79a4130a071d4b2f7fc86081e160e45c9d5e11006158d8586
3
+ size 49949818752
Llama3.3-70B-CogniLink-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64a9d2410f94e4a720021b78d040cf5d6961c25921c565e30e97c2c91d65bc35
3
+ size 48657448832
Llama3.3-70B-CogniLink-Q6_K/Llama3.3-70B-CogniLink-Q6_K-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1c4cd8fa3a57f39a25a42adb519d18ebf0240515855e099261dcb5d16cd6129
3
+ size 34847472544
Llama3.3-70B-CogniLink-Q6_K/Llama3.3-70B-CogniLink-Q6_K-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:854d012a1763ad2e73febf58d74f02f4a53d26604e72c4a0571e834ff715c74e
3
+ size 23040672928
Llama3.3-70B-CogniLink-Q8_0/Llama3.3-70B-CogniLink-Q8_0-00001-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a66a8369d2da0a80e4a23612be9766c4914a84d71e3feace5ea765f7f052a7cb
3
+ size 34980012992
Llama3.3-70B-CogniLink-Q8_0/Llama3.3-70B-CogniLink-Q8_0-00002-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f27033c46022c83a40108543bfd8597f8b3b542ccb118545548b70a64dbab94
3
+ size 34949976384
Llama3.3-70B-CogniLink-Q8_0/Llama3.3-70B-CogniLink-Q8_0-00003-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa14cbd5e6403b3753615babedefc3ac65b849d6d07f1a1184bb10027f3c313f
3
+ size 5045062560
README.md ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: Daemontatox/Llama3.3-70B-CogniLink
3
+ tags:
4
+ - state-of-the-art
5
+ - reasoning
6
+ - chain-of-thought
7
+ - text-generation
8
+ - transformers
9
+ - llama
10
+ - instruction-tuning
11
+ - TensorBlock
12
+ - GGUF
13
+ license: apache-2.0
14
+ language:
15
+ - en
16
+ datasets:
17
+ - Daemontatox/Deepthinking-COT
18
+ - gghfez/QwQ-LongCoT-130K-cleaned
19
+ pipeline_tag: text-generation
20
+ library_name: transformers
21
+ model-index:
22
+ - name: Llama3.3-70B-CogniLink
23
+ results:
24
+ - task:
25
+ type: text-generation
26
+ name: Text Generation
27
+ dataset:
28
+ name: IFEval (0-Shot)
29
+ type: wis-k/instruction-following-eval
30
+ split: train
31
+ args:
32
+ num_few_shot: 0
33
+ metrics:
34
+ - type: inst_level_strict_acc and prompt_level_strict_acc
35
+ value: 69.31
36
+ name: averaged accuracy
37
+ source:
38
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=Daemontatox%2FLlama3.3-70B-CogniLink
39
+ name: Open LLM Leaderboard
40
+ - task:
41
+ type: text-generation
42
+ name: Text Generation
43
+ dataset:
44
+ name: BBH (3-Shot)
45
+ type: SaylorTwift/bbh
46
+ split: test
47
+ args:
48
+ num_few_shot: 3
49
+ metrics:
50
+ - type: acc_norm
51
+ value: 52.12
52
+ name: normalized accuracy
53
+ source:
54
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=Daemontatox%2FLlama3.3-70B-CogniLink
55
+ name: Open LLM Leaderboard
56
+ - task:
57
+ type: text-generation
58
+ name: Text Generation
59
+ dataset:
60
+ name: MATH Lvl 5 (4-Shot)
61
+ type: lighteval/MATH-Hard
62
+ split: test
63
+ args:
64
+ num_few_shot: 4
65
+ metrics:
66
+ - type: exact_match
67
+ value: 39.58
68
+ name: exact match
69
+ source:
70
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=Daemontatox%2FLlama3.3-70B-CogniLink
71
+ name: Open LLM Leaderboard
72
+ - task:
73
+ type: text-generation
74
+ name: Text Generation
75
+ dataset:
76
+ name: GPQA (0-shot)
77
+ type: Idavidrein/gpqa
78
+ split: train
79
+ args:
80
+ num_few_shot: 0
81
+ metrics:
82
+ - type: acc_norm
83
+ value: 26.06
84
+ name: acc_norm
85
+ source:
86
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=Daemontatox%2FLlama3.3-70B-CogniLink
87
+ name: Open LLM Leaderboard
88
+ - task:
89
+ type: text-generation
90
+ name: Text Generation
91
+ dataset:
92
+ name: MuSR (0-shot)
93
+ type: TAUR-Lab/MuSR
94
+ args:
95
+ num_few_shot: 0
96
+ metrics:
97
+ - type: acc_norm
98
+ value: 21.4
99
+ name: acc_norm
100
+ source:
101
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=Daemontatox%2FLlama3.3-70B-CogniLink
102
+ name: Open LLM Leaderboard
103
+ - task:
104
+ type: text-generation
105
+ name: Text Generation
106
+ dataset:
107
+ name: MMLU-PRO (5-shot)
108
+ type: TIGER-Lab/MMLU-Pro
109
+ config: main
110
+ split: test
111
+ args:
112
+ num_few_shot: 5
113
+ metrics:
114
+ - type: acc
115
+ value: 46.37
116
+ name: accuracy
117
+ source:
118
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard#/?search=Daemontatox%2FLlama3.3-70B-CogniLink
119
+ name: Open LLM Leaderboard
120
+ ---
121
+
122
+ <div style="width: auto; margin-left: auto; margin-right: auto">
123
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
124
+ </div>
125
+ <div style="display: flex; justify-content: space-between; width: 100%;">
126
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
127
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
128
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
129
+ </p>
130
+ </div>
131
+ </div>
132
+
133
+ ## Daemontatox/Llama3.3-70B-CogniLink - GGUF
134
+
135
+ This repo contains GGUF format model files for [Daemontatox/Llama3.3-70B-CogniLink](https://huggingface.co/Daemontatox/Llama3.3-70B-CogniLink).
136
+
137
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4823](https://github.com/ggml-org/llama.cpp/commit/5bbe6a9fe9a8796a9389c85accec89dbc4d91e39).
138
+
139
+ <div style="text-align: left; margin: 20px 0;">
140
+ <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
141
+ Run them on the TensorBlock client using your local machine ↗
142
+ </a>
143
+ </div>
144
+
145
+ ## Prompt template
146
+
147
+ ```
148
+ <|begin_of_text|><|start_header_id|>system<|end_header_id|>
149
+
150
+ {system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
151
+
152
+ {prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
153
+ ```
154
+
155
+ ## Model file specification
156
+
157
+ | Filename | Quant type | File Size | Description |
158
+ | -------- | ---------- | --------- | ----------- |
159
+ | [Llama3.3-70B-CogniLink-Q2_K.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q2_K.gguf) | Q2_K | 26.375 GB | smallest, significant quality loss - not recommended for most purposes |
160
+ | [Llama3.3-70B-CogniLink-Q3_K_S.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q3_K_S.gguf) | Q3_K_S | 30.912 GB | very small, high quality loss |
161
+ | [Llama3.3-70B-CogniLink-Q3_K_M.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q3_K_M.gguf) | Q3_K_M | 34.267 GB | very small, high quality loss |
162
+ | [Llama3.3-70B-CogniLink-Q3_K_L.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q3_K_L.gguf) | Q3_K_L | 37.141 GB | small, substantial quality loss |
163
+ | [Llama3.3-70B-CogniLink-Q4_0.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q4_0.gguf) | Q4_0 | 39.970 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
164
+ | [Llama3.3-70B-CogniLink-Q4_K_S.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q4_K_S.gguf) | Q4_K_S | 40.347 GB | small, greater quality loss |
165
+ | [Llama3.3-70B-CogniLink-Q4_K_M.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q4_K_M.gguf) | Q4_K_M | 42.520 GB | medium, balanced quality - recommended |
166
+ | [Llama3.3-70B-CogniLink-Q5_0.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q5_0.gguf) | Q5_0 | 48.657 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
167
+ | [Llama3.3-70B-CogniLink-Q5_K_S.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q5_K_S.gguf) | Q5_K_S | 48.657 GB | large, low quality loss - recommended |
168
+ | [Llama3.3-70B-CogniLink-Q5_K_M.gguf](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q5_K_M.gguf) | Q5_K_M | 49.950 GB | large, very low quality loss - recommended |
169
+ | [Llama3.3-70B-CogniLink-Q6_K](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q6_K) | Q6_K | 57.888 GB | very large, extremely low quality loss |
170
+ | [Llama3.3-70B-CogniLink-Q8_0](https://huggingface.co/tensorblock/Llama3.3-70B-CogniLink-GGUF/blob/main/Llama3.3-70B-CogniLink-Q8_0) | Q8_0 | 74.975 GB | very large, extremely low quality loss - not recommended |
171
+
172
+
173
+ ## Downloading instruction
174
+
175
+ ### Command line
176
+
177
+ First, install the Hugging Face CLI
178
+
179
+ ```shell
180
+ pip install -U "huggingface_hub[cli]"
181
+ ```
182
+
183
+ Then, download the individual model file to a local directory
184
+
185
+ ```shell
186
+ huggingface-cli download tensorblock/Llama3.3-70B-CogniLink-GGUF --include "Llama3.3-70B-CogniLink-Q2_K.gguf" --local-dir MY_LOCAL_DIR
187
+ ```
188
+
189
+ If you want to download multiple model files with a pattern (e.g., `*Q4_K*gguf`), you can try:
190
+
191
+ ```shell
192
+ huggingface-cli download tensorblock/Llama3.3-70B-CogniLink-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
193
+ ```