Io2007 committed (verified)
Commit 71f1d91 · 1 Parent(s): 3fc7ebf

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,8 +1,8 @@
 ---
 base_model:
+- NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0
 - google/gemma-3-1b-it
 - google/gemma-3-1b-pt
-- NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0
 library_name: transformers
 tags:
 - mergekit
@@ -21,9 +21,9 @@ This model was merged using the Passthrough merge method.
 ### Models Merged
 
 The following models were included in the merge:
+* [NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0](https://huggingface.co/NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0)
 * [google/gemma-3-1b-it](https://huggingface.co/google/gemma-3-1b-it)
 * [google/gemma-3-1b-pt](https://huggingface.co/google/gemma-3-1b-pt)
-* [NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0](https://huggingface.co/NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0)
 
 ### Configuration
 
@@ -35,16 +35,16 @@ dtype: bfloat16
 merge_method: passthrough
 slices:
 - sources:
-  - layer_range: [2, 5]
+  - layer_range: [0, 2]
     model: google/gemma-3-1b-pt
 - sources:
-  - layer_range: [0, 26]
+  - layer_range: [3, 25]
     model: google/gemma-3-1b-it
 - sources:
-  - layer_range: [10, 16]
+  - layer_range: [10, 14]
     model: NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0
 - sources:
-  - layer_range: [23, 26]
+  - layer_range: [24, 26]
     model: google/gemma-3-1b-pt
 
 ```
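Note (not part of the commit): the new slices stack 2 + 22 + 4 + 2 layers, which is why `num_hidden_layers` drops from 38 to 30 in the config.json change below. A minimal sketch of that arithmetic, assuming mergekit's `layer_range` is half-open, which is consistent with the counts in this commit:

```python
# Sanity check (not part of this commit): a passthrough merge concatenates the
# listed slices, so the resulting depth is the sum of the layer_range widths.
# layer_range is assumed half-open: [start, end).
old_slices = [
    ("google/gemma-3-1b-pt", 2, 5),
    ("google/gemma-3-1b-it", 0, 26),
    ("NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0", 10, 16),
    ("google/gemma-3-1b-pt", 23, 26),
]
new_slices = [
    ("google/gemma-3-1b-pt", 0, 2),
    ("google/gemma-3-1b-it", 3, 25),
    ("NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0", 10, 14),
    ("google/gemma-3-1b-pt", 24, 26),
]

def total_layers(slices):
    return sum(end - start for _, start, end in slices)

print(total_layers(old_slices))  # 38 -> previous num_hidden_layers
print(total_layers(new_slices))  # 30 -> num_hidden_layers after this commit
```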
config.json CHANGED
@@ -7,10 +7,7 @@
   "attn_logit_softcapping": null,
   "bos_token_id": 2,
   "cache_implementation": "hybrid",
-  "eos_token_id": [
-    1,
-    106
-  ],
+  "eos_token_id": 106,
   "final_logit_softcapping": null,
   "head_dim": 256,
   "hidden_activation": "gelu_pytorch_tanh",
@@ -20,7 +17,7 @@
   "max_position_embeddings": 32768,
   "model_type": "gemma3_text",
   "num_attention_heads": 4,
-  "num_hidden_layers": 38,
+  "num_hidden_layers": 30,
   "num_key_value_heads": 1,
   "pad_token_id": 0,
   "query_pre_attn_scalar": 256,
@@ -31,7 +28,9 @@
   "sliding_window": 512,
   "sliding_window_pattern": 6,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.51.1",
+  "transformers_version": "4.51.3",
+  "unsloth_fixed": true,
+  "unsloth_version": "2025.3.19",
   "use_cache": true,
   "vocab_size": 262144
 }
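A minimal sketch (not from this repo) of checking the edited config values once the model can be downloaded; `Io2007/<merged-repo>` is a placeholder for the actual repo id:

```python
# Sketch: load the uploaded config with transformers and confirm the edits above.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("Io2007/<merged-repo>")  # placeholder repo id
print(cfg.num_hidden_layers)  # expected: 30 after this commit
print(cfg.eos_token_id)       # expected: 106 after this commit
```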
mergekit_config.yml CHANGED
@@ -3,14 +3,14 @@ dtype: bfloat16
 merge_method: passthrough
 slices:
 - sources:
-  - layer_range: [2, 5]
+  - layer_range: [0, 2]
     model: google/gemma-3-1b-pt
 - sources:
-  - layer_range: [0, 26]
+  - layer_range: [3, 25]
     model: google/gemma-3-1b-it
 - sources:
-  - layer_range: [10, 16]
+  - layer_range: [10, 14]
     model: NuclearAi/Nuke_X_Gemma3_1B_Reasoner_v1.0
 - sources:
-  - layer_range: [23, 26]
+  - layer_range: [24, 26]
     model: google/gemma-3-1b-pt
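For reference, a merge with this config is typically reproduced via mergekit's `mergekit-yaml` entry point; the sketch below assumes mergekit is installed, and the output path and flags are illustrative rather than taken from this commit:

```python
# Sketch: re-run the passthrough merge from the config shown in this diff.
import subprocess

subprocess.run(
    [
        "mergekit-yaml",
        "mergekit_config.yml",    # config file from this repo
        "./merged-gemma-3-1b",    # hypothetical output directory
        "--copy-tokenizer",       # also copy tokenizer files into the output
    ],
    check=True,
)
```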
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:adab3f4224d4c4b0b4e2c41863166817e92aad4933d1be0a689f94daa547ff15
-size 2644040072
+oid sha256:d3e37bd2e4eeaf13e20f9116fc3f7b9117287d134ae8378f7180b9893dd03ae3
+size 2214554160
special_tokens_map.json CHANGED
@@ -9,7 +9,7 @@
   },
   "eoi_token": "<end_of_image>",
   "eos_token": {
-    "content": "<eos>",
+    "content": "<end_of_turn>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer_config.json CHANGED
@@ -51328,15 +51328,16 @@
   "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n    {%- if messages[0]['content'] is string -%}\n        {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n    {%- else -%}\n        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n    {%- endif -%}\n    {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n    {%- set first_user_prefix = \"\" -%}\n    {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n        {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n    {%- endif -%}\n    {%- if (message['role'] == 'assistant') -%}\n        {%- set role = \"model\" -%}\n    {%- else -%}\n        {%- set role = message['role'] -%}\n    {%- endif -%}\n    {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n    {%- if message['content'] is string -%}\n        {{ message['content'] | trim }}\n    {%- elif message['content'] is iterable -%}\n        {%- for item in message['content'] -%}\n            {%- if item['type'] == 'image' -%}\n                {{ '<start_of_image>' }}\n            {%- elif item['type'] == 'text' -%}\n                {{ item['text'] | trim }}\n            {%- endif -%}\n        {%- endfor -%}\n    {%- else -%}\n        {{ raise_exception(\"Invalid content type\") }}\n    {%- endif -%}\n    {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n    {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
   "clean_up_tokenization_spaces": false,
   "eoi_token": "<end_of_image>",
-  "eos_token": "<eos>",
+  "eos_token": "<end_of_turn>",
   "extra_special_tokens": {
     "boi_token": "<start_of_image>",
     "eoi_token": "<end_of_image>",
     "image_token": "<image_soft_token>"
   },
   "image_token": "<image_soft_token>",
-  "model_max_length": 1000000000000000019884624838656,
+  "model_max_length": 32768,
   "pad_token": "<pad>",
+  "padding_side": "right",
   "processor_class": "Gemma3Processor",
   "sp_model_kwargs": null,
   "spaces_between_special_tokens": false,