Upload folder using huggingface_hub
- .gitattributes +2 -0
- EtherealAurora-12B-v2.png +3 -0
- README.md +26 -30
- config.json +38 -0
- mergekit_config.yml +8 -0
- model.safetensors +3 -0
- quantization_config.json +0 -0
- special_tokens_map.json +30 -0
- tokenizer.json +3 -0
- tokenizer_config.json +0 -0
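"Upload folder using huggingface_hub" is the default commit title produced by `HfApi.upload_folder`. A minimal sketch of the call that creates a commit like this one; the local folder path is an assumption, and the repo id is taken from the quant links in the old README:

```python
from huggingface_hub import HfApi

api = HfApi()
# Default commit message for upload_folder is
# "Upload folder using huggingface_hub", matching this commit's title.
# folder_path is a hypothetical local layout.
api.upload_folder(
    repo_id="ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3",
    folder_path="./EtherealAurora-12B-v2-EXL3",
    repo_type="model",
)
```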
.gitattributes
CHANGED

```diff
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+EtherealAurora-12B-v2.png filter=lfs diff=lfs merge=lfs -text
```
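The two new rules route `tokenizer.json` and the PNG through Git LFS, so a plain `git clone` without git-lfs installed leaves three-line pointer stubs in their place (the pointer format appears verbatim in the `model.safetensors` section below). A quick stdlib-only check:

```python
# Detect whether a checked-out file is still an LFS pointer stub
# rather than the real blob.
with open("tokenizer.json", "rb") as f:
    head = f.read(64)
if head.startswith(b"version https://git-lfs.github.com/spec/v1"):
    print("LFS pointer only; run `git lfs pull` to fetch the real file")
```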
EtherealAurora-12B-v2.png
ADDED

Binary image, stored via Git LFS.
README.md
CHANGED

````diff
@@ -1,48 +1,44 @@
 ---
 base_model:
-
-
+- yamatazen/EtherealAurora-12B
+- natong19/Mistral-Nemo-Instruct-2407-abliterated
 library_name: transformers
 tags:
 - mergekit
 - merge
 - chatml
-- exl3
 language:
 - en
 - ja
 ---
-
-
-
-
-| Quant(Revision) | Bits per Weight | Head Bits |
-| -------- | ---------- | --------- |
-| [3.0_H6](https://huggingface.co/ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3/tree/3.0bpw_H6) | 3.0 | 6 |
-| [3.5_H6](https://huggingface.co/ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3/tree/3.5bpw_H6) | 3.5 | 6 |
-| [4.0_H6](https://huggingface.co/ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3/tree/4.0bpw_H6) | 4.0 | 6 |
-| [4.5_H6](https://huggingface.co/ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3/tree/4.5bpw_H6) | 4.5 | 6 |
-| [5.0_H6](https://huggingface.co/ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3/tree/5.0bpw_H6) | 5.0 | 6 |
-| [6.0_H6](https://huggingface.co/ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3/tree/6.0bpw_H6) | 6.0 | 6 |
-| [8.0_H6](https://huggingface.co/ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3/tree/8.0bpw_H6) | 8.0 | 6 |
-| [8.0_H8](https://huggingface.co/ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3/tree/8.0bpw_H8) | 8.0 | 8 |
-
-###
-
-
-
-
-pip install -U "huggingface_hub[cli]"
-```
-
-
-
-
-
-
+![EtherealAurora-12B-v2](EtherealAurora-12B-v2.png)
+This is a ChatML model.
+# merge
+
+This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
+
+## Merge Details
+### Merge Method
+
+This model was merged using the [SLERP](https://en.wikipedia.org/wiki/Slerp) merge method.
+
+### Models Merged
+
+The following models were included in the merge:
+* [yamatazen/EtherealAurora-12B](https://huggingface.co/yamatazen/EtherealAurora-12B)
+* [natong19/Mistral-Nemo-Instruct-2407-abliterated](https://huggingface.co/natong19/Mistral-Nemo-Instruct-2407-abliterated)
+
+### Configuration
+
+The following YAML configuration was used to produce this model:
+
+```yaml
+base_model: yamatazen/EtherealAurora-12B
+models:
+  - model: natong19/Mistral-Nemo-Instruct-2407-abliterated
+merge_method: slerp
+dtype: bfloat16
+parameters:
+  normalize: true
+  t: [0.25,0.3,0.5,0.3,0.25]
+```
````
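The new card names SLERP: interpolating along the great circle between two weight tensors instead of the straight line, which preserves weight norms better than plain averaging. A minimal numpy sketch of the idea, not mergekit's exact implementation:

```python
import numpy as np

def slerp(t: float, v0: np.ndarray, v1: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Spherical linear interpolation between two same-shaped weight tensors."""
    a, b = v0.ravel(), v1.ravel()
    # Cosine of the angle between the flattened tensors, clipped for safety.
    dot = np.clip(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + eps), -1.0, 1.0)
    theta = np.arccos(dot)
    if theta < eps:  # nearly colinear: fall back to linear interpolation
        return (1 - t) * v0 + t * v1
    w0 = np.sin((1 - t) * theta) / np.sin(theta)
    w1 = np.sin(t * theta) / np.sin(theta)
    return w0 * v0 + w1 * v1
```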
config.json
ADDED

```json
{
  "_name_or_path": "yamatazen/EtherealAurora-12B",
  "architectures": [
    "MistralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 1024000,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 40,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.49.0",
  "use_cache": false,
  "vocab_size": 131072,
  "quantization_config": {
    "quant_method": "exl3",
    "version": "0.0.3",
    "bits": 2.5,
    "head_bits": 6,
    "calibration": {
      "rows": 100,
      "cols": 2048
    },
    "out_scales": "auto"
  }
}
```
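One detail worth noting: this Mistral-Nemo-style config sets `head_dim` explicitly, so the attention width (32 heads × 128 = 4096) differs from `hidden_size` (5120), making the q/o projections non-square, with 8 KV heads (GQA). A quick sanity check, assuming `AutoConfig` parses the file despite the EXL3 `quantization_config` block, which transformers does not load natively:

```python
from transformers import AutoConfig

# Assumption: the config loads from this repo; EXL3 weights themselves
# are consumed by exllamav3, not transformers.
cfg = AutoConfig.from_pretrained("ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3")
attn_width = cfg.num_attention_heads * cfg.head_dim  # 32 * 128 = 4096
kv_width = cfg.num_key_value_heads * cfg.head_dim    # 8 * 128 = 1024 (GQA)
print(attn_width, cfg.hidden_size, kv_width)         # 4096 != 5120
```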
mergekit_config.yml
ADDED

```yaml
base_model: yamatazen/EtherealAurora-12B
models:
  - model: natong19/Mistral-Nemo-Instruct-2407-abliterated
merge_method: slerp
dtype: bfloat16
parameters:
  normalize: true
  t: [0.25,0.3,0.5,0.3,0.25]
```
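The five-value `t` list is a gradient, not per-layer values: mergekit spreads the anchors across the 40 layers, so interpolation strength peaks at 0.5 mid-stack and eases to 0.25 at both ends. Roughly, a sketch assuming linear interpolation between evenly spaced anchors:

```python
import numpy as np

# Expand the 5-anchor t gradient over 40 layers (approximately how
# mergekit treats a list-valued parameter).
anchors = [0.25, 0.3, 0.5, 0.3, 0.25]
layers = 40
t_per_layer = np.interp(
    np.linspace(0.0, 1.0, layers),
    np.linspace(0.0, 1.0, len(anchors)),
    anchors,
)
print(t_per_layer.round(3))  # 0.25 at the ends, 0.5 near the middle
```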
model.safetensors
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:aba2e1cdd5ef14381cc9552143d65de65bfb7db957f9c8bb0538f8a4b2ec8817
size 5261697160
```
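Once the ~5.3 GB blob is downloaded, it can be verified against the pointer's oid and size; a stdlib-only sketch:

```python
import hashlib
import os

# Values copied from the LFS pointer above.
expected_oid = "aba2e1cdd5ef14381cc9552143d65de65bfb7db957f9c8bb0538f8a4b2ec8817"
expected_size = 5261697160

assert os.path.getsize("model.safetensors") == expected_size
h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected_oid
```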
quantization_config.json
ADDED

The diff for this file is too large to render. See raw diff.
special_tokens_map.json
ADDED

```json
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
```
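These mappings are what make the model ChatML-compatible at the tokenizer level: generation stops on `<|im_end|>` rather than `</s>`. A quick check, assuming the repo's tokenizer files load with `AutoTokenizer`:

```python
from transformers import AutoTokenizer

# Assumption: the tokenizer loads straight from this repo.
tok = AutoTokenizer.from_pretrained("ArtusDev/yamatazen_EtherealAurora-12B-v2-EXL3")
print(tok.bos_token)  # expected: <s>
print(tok.eos_token)  # expected: <|im_end|>  (ChatML end-of-turn)
print(tok.pad_token)  # expected: <pad>
```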
tokenizer.json
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:ee57bd3271d8c200a06df40f9d7c94f8bb8f085b5cfb2d40907a794560df2ec3
size 17078342
```
tokenizer_config.json
ADDED

The diff for this file is too large to render. See raw diff.