Upload folder using huggingface_hub
- README.md +5 -8
- config.json +4 -2
- mergekit_config.yml +3 -6
- model.safetensors +2 -2
- special_tokens_map.json +1 -1
- tokenizer.json +2 -2
- tokenizer_config.json +7 -4
README.md
CHANGED
@@ -1,8 +1,8 @@
 ---
 base_model:
-- google/gemma-3-1b-pt
 - NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0
 - google/gemma-3-1b-it
+- google/gemma-3-1b-pt
 library_name: transformers
 tags:
 - mergekit
@@ -21,9 +21,9 @@ This model was merged using the Passthrough merge method.
 ### Models Merged
 
 The following models were included in the merge:
-* [google/gemma-3-1b-pt](https://huggingface.co/google/gemma-3-1b-pt)
 * [NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0](https://huggingface.co/NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0)
 * [google/gemma-3-1b-it](https://huggingface.co/google/gemma-3-1b-it)
+* [google/gemma-3-1b-pt](https://huggingface.co/google/gemma-3-1b-pt)
 
 ### Configuration
 
@@ -35,19 +35,16 @@ dtype: bfloat16
 merge_method: passthrough
 slices:
 - sources:
-  - layer_range: [
+  - layer_range: [2, 8]
     model: google/gemma-3-1b-pt
 - sources:
   - layer_range: [0, 26]
     model: google/gemma-3-1b-it
 - sources:
-  - layer_range: [10,
+  - layer_range: [10, 18]
     model: NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0
 - sources:
-  - layer_range: [
-    model: google/gemma-3-1b-it
-- sources:
-  - layer_range: [25, 26]
+  - layer_range: [24, 26]
     model: google/gemma-3-1b-pt
 
 ```
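Not part of the commit, but for orientation: a minimal sketch of loading the merged checkpoint with transformers. The `./merged-model` path is a hypothetical placeholder for a local copy of this repo.

```python
# Minimal sketch (not from the commit). "./merged-model" is a hypothetical
# local path to this repo's files.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./merged-model")
model = AutoModelForCausalLM.from_pretrained(
    "./merged-model",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
)

inputs = tokenizer("Hello", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```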
config.json
CHANGED
@@ -7,7 +7,7 @@
   "attn_logit_softcapping": null,
   "bos_token_id": 2,
   "cache_implementation": "hybrid",
-  "eos_token_id":
+  "eos_token_id": 106,
   "final_logit_softcapping": null,
   "head_dim": 256,
   "hidden_activation": "gelu_pytorch_tanh",
@@ -17,7 +17,7 @@
   "max_position_embeddings": 32768,
   "model_type": "gemma3_text",
   "num_attention_heads": 4,
-  "num_hidden_layers":
+  "num_hidden_layers": 42,
   "num_key_value_heads": 1,
   "pad_token_id": 0,
   "query_pre_attn_scalar": 256,
@@ -29,6 +29,8 @@
   "sliding_window_pattern": 6,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.51.1",
+  "unsloth_fixed": true,
+  "unsloth_version": "2025.3.19",
   "use_cache": true,
   "vocab_size": 262144
 }
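The new `"eos_token_id": 106` lines up with the `eos_token` changes in special_tokens_map.json and tokenizer_config.json below: in the Gemma 3 vocabulary, `<end_of_turn>` is token id 106. A sketch of a consistency check, again assuming a hypothetical local `./merged-model` copy of this repo:

```python
# Sketch: check that the eos settings changed in this commit agree with
# each other. "./merged-model" is a hypothetical local path to this repo.
from transformers import AutoConfig, AutoTokenizer

config = AutoConfig.from_pretrained("./merged-model")
tokenizer = AutoTokenizer.from_pretrained("./merged-model")

assert config.eos_token_id == 106              # config.json (this commit)
assert tokenizer.eos_token == "<end_of_turn>"  # tokenizer_config.json (this commit)
assert tokenizer.convert_tokens_to_ids("<end_of_turn>") == config.eos_token_id
```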
mergekit_config.yml
CHANGED
@@ -3,17 +3,14 @@ dtype: bfloat16
 merge_method: passthrough
 slices:
 - sources:
-  - layer_range: [
+  - layer_range: [2, 8]
     model: google/gemma-3-1b-pt
 - sources:
   - layer_range: [0, 26]
     model: google/gemma-3-1b-it
 - sources:
-  - layer_range: [10,
+  - layer_range: [10, 18]
     model: NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0
 - sources:
-  - layer_range: [
-    model: google/gemma-3-1b-it
-- sources:
-  - layer_range: [25, 26]
+  - layer_range: [24, 26]
     model: google/gemma-3-1b-pt
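The four passthrough slices also explain the `"num_hidden_layers": 42` written to config.json: mergekit `layer_range` values are half-open `[start, end)`, so the slice widths are 6 + 26 + 8 + 2. A quick arithmetic check:

```python
# Sanity check: the passthrough slice widths in mergekit_config.yml sum to
# the 42 hidden layers declared in config.json (layer_range is [start, end)).
slices = [
    ("google/gemma-3-1b-pt", 2, 8),
    ("google/gemma-3-1b-it", 0, 26),
    ("NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0", 10, 18),
    ("google/gemma-3-1b-pt", 24, 26),
]
widths = [end - start for _, start, end in slices]
assert widths == [6, 26, 8, 2]
assert sum(widths) == 42
```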
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b39fe6af7173bccd6769050d489e5b66d7e4b6cc50bf090ea70f9e82997e0ad3
+size 2858783032
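The weights file is stored as a Git LFS pointer, so the commit only updates the object hash and byte size. A sketch of verifying a pulled blob against the pointer:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer in
# this commit (sha256 and size are taken verbatim from the diff above).
import hashlib
import os

PATH = "model.safetensors"
assert os.path.getsize(PATH) == 2858783032

digest = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == "b39fe6af7173bccd6769050d489e5b66d7e4b6cc50bf090ea70f9e82997e0ad3"
```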
special_tokens_map.json
CHANGED
@@ -9,7 +9,7 @@
   },
   "eoi_token": "<end_of_image>",
   "eos_token": {
-    "content": "<
+    "content": "<end_of_turn>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+size 33384568
tokenizer_config.json
CHANGED
@@ -2160,7 +2160,7 @@
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special":
+      "special": true
     },
     "256000": {
       "content": "<end_of_image>",
@@ -2168,7 +2168,7 @@
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special":
+      "special": true
     },
     "256001": {
       "content": "<unused99>",
@@ -51325,17 +51325,20 @@
   },
   "boi_token": "<start_of_image>",
   "bos_token": "<bos>",
+  "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n    {%- if messages[0]['content'] is string -%}\n        {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n    {%- else -%}\n        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n    {%- endif -%}\n    {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n    {%- set first_user_prefix = \"\" -%}\n    {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n        {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n    {%- endif -%}\n    {%- if (message['role'] == 'assistant') -%}\n        {%- set role = \"model\" -%}\n    {%- else -%}\n        {%- set role = message['role'] -%}\n    {%- endif -%}\n    {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n    {%- if message['content'] is string -%}\n        {{ message['content'] | trim }}\n    {%- elif message['content'] is iterable -%}\n        {%- for item in message['content'] -%}\n            {%- if item['type'] == 'image' -%}\n                {{ '<start_of_image>' }}\n            {%- elif item['type'] == 'text' -%}\n                {{ item['text'] | trim }}\n            {%- endif -%}\n        {%- endfor -%}\n    {%- else -%}\n        {{ raise_exception(\"Invalid content type\") }}\n    {%- endif -%}\n    {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n    {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
   "clean_up_tokenization_spaces": false,
   "eoi_token": "<end_of_image>",
-  "eos_token": "<
+  "eos_token": "<end_of_turn>",
   "extra_special_tokens": {
     "boi_token": "<start_of_image>",
     "eoi_token": "<end_of_image>",
     "image_token": "<image_soft_token>"
   },
   "image_token": "<image_soft_token>",
-  "model_max_length":
+  "model_max_length": 32768,
   "pad_token": "<pad>",
+  "padding_side": "right",
+  "processor_class": "Gemma3Processor",
   "sp_model_kwargs": null,
   "spaces_between_special_tokens": false,
   "tokenizer_class": "GemmaTokenizer",
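The added `chat_template` is the standard Gemma 3 turn format (user/model turns wrapped in `<start_of_turn>`/`<end_of_turn>`). A sketch of what it renders for a single user turn; the repo path is again a hypothetical placeholder:

```python
# Sketch: render the chat_template added in this commit for one user turn.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./merged-model")  # hypothetical path
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Expected, reading the template above:
# <bos><start_of_turn>user
# Hello!<end_of_turn>
# <start_of_turn>model
```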