David Catalano committed on
Commit 084b59b · 1 Parent(s): 9144e16

chore: add default Hugging Face project files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ calme_3.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,5 +1,183 @@
- ---
- license: other
- license_name: qwen
- license_link: https://huggingface.co/Qwen/Qwen2.5-72B-Instruct/resolve/main/LICENSE
- ---
+ ---
+ language:
+ - en
+ license: other
+ library_name: transformers
+ tags:
+ - chat
+ - qwen
+ - qwen2.5
+ - finetune
+ - english
+ base_model: MaziyarPanahi/calme-3-selfmerge-qwen2-78b
+ model_name: calme-3.2-instruct-78b
+ license_name: qwen
+ license_link: https://huggingface.co/Qwen/Qwen2.5-72B-Instruct/blob/main/LICENSE
+ pipeline_tag: text-generation
+ inference: false
+ model_creator: MaziyarPanahi
+ quantized_by: MaziyarPanahi
+ model-index:
+ - name: calme-3.2-instruct-78b
+   results:
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: IFEval (0-Shot)
+       type: HuggingFaceH4/ifeval
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: inst_level_strict_acc and prompt_level_strict_acc
+       value: 80.63
+       name: strict accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-3.2-instruct-78b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: BBH (3-Shot)
+       type: BBH
+       args:
+         num_few_shot: 3
+     metrics:
+     - type: acc_norm
+       value: 62.61
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-3.2-instruct-78b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MATH Lvl 5 (4-Shot)
+       type: hendrycks/competition_math
+       args:
+         num_few_shot: 4
+     metrics:
+     - type: exact_match
+       value: 39.95
+       name: exact match
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-3.2-instruct-78b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GPQA (0-shot)
+       type: Idavidrein/gpqa
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 20.36
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-3.2-instruct-78b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MuSR (0-shot)
+       type: TAUR-Lab/MuSR
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 38.53
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-3.2-instruct-78b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU-PRO (5-shot)
+       type: TIGER-Lab/MMLU-Pro
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 70.03
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-3.2-instruct-78b
+       name: Open LLM Leaderboard
+ ---
+ 
+ <img src="./calme_3.png" alt="Calme-3 Models" width="800" style="margin-left: auto; margin-right: auto; display: block;"/>
+ 
+ > [!TIP]
+ > This is an experimental model, so it may not perform well on some prompts and can be sensitive to hyperparameters. I would appreciate any feedback that helps me fix issues in the next iteration. ❤️
+ 
+ # MaziyarPanahi/calme-3.2-instruct-78b
+ 
+ This model is an advanced iteration of the powerful `Qwen/Qwen2.5-72B`, fine-tuned to enhance its capabilities in generic domains. The `Qwen2.5-72B` base model was first merged with itself to create a larger model, and the result was then fine-tuned on custom datasets.
+ 
+ # ⚡ Quantized GGUF
+ 
+ Here are the GGUF models, thanks to [https://huggingface.co/bartowski](https://huggingface.co/bartowski): [calme-3.2-instruct-78b-GGUF](https://huggingface.co/bartowski/calme-3.2-instruct-78b-GGUF)
+ 
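+ A sketch for fetching just one quantization from that repo with `huggingface_hub` (the `Q4_K_M` pattern here is illustrative; check the GGUF repo for the exact filenames):
+ 
+ ```python
+ from huggingface_hub import snapshot_download
+ 
+ # Download only the Q4_K_M shards instead of the whole repo
+ snapshot_download(
+     repo_id="bartowski/calme-3.2-instruct-78b-GGUF",
+     allow_patterns=["*Q4_K_M*"],
+     local_dir="calme-3.2-instruct-78b-GGUF",
+ )
+ ```
+ 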
+ # 🏆 [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
+ 
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_MaziyarPanahi__calme-3.2-instruct-78b).
+ 
+ | Metric              | Value |
+ |---------------------|------:|
+ | Avg.                | 52.02 |
+ | IFEval (0-Shot)     | 80.63 |
+ | BBH (3-Shot)        | 62.61 |
+ | MATH Lvl 5 (4-Shot) | 39.95 |
+ | GPQA (0-shot)       | 20.36 |
+ | MuSR (0-shot)       | 38.53 |
+ | MMLU-PRO (5-shot)   | 70.03 |
+ 
+ # Prompt Template
+ 
+ This model uses the `ChatML` prompt template:
+ 
+ ```text
+ <|im_start|>system
+ {System}
+ <|im_end|>
+ <|im_start|>user
+ {User}
+ <|im_end|>
+ <|im_start|>assistant
+ {Assistant}
+ ```
+ 
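+ The tokenizer ships this template, so you rarely need to build the string by hand. A minimal sketch using `apply_chat_template` (`add_generation_prompt=True` appends the final `<|im_start|>assistant` header for the model to complete):
+ 
+ ```python
+ from transformers import AutoTokenizer
+ 
+ tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-3.2-instruct-78b")
+ 
+ messages = [
+     {"role": "system", "content": "You are a helpful assistant."},
+     {"role": "user", "content": "Who are you?"},
+ ]
+ 
+ # Render the ChatML prompt as a string instead of token ids
+ prompt = tokenizer.apply_chat_template(
+     messages, tokenize=False, add_generation_prompt=True
+ )
+ print(prompt)
+ ```
+ 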
+ # How to use
+ 
+ ```python
+ # Use a pipeline as a high-level helper
+ from transformers import pipeline
+ 
+ messages = [
+     {"role": "user", "content": "Who are you?"},
+ ]
+ pipe = pipeline("text-generation", model="MaziyarPanahi/calme-3.2-instruct-78b")
+ pipe(messages)
+ 
+ # Or load the tokenizer and model directly
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ 
+ tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-3.2-instruct-78b")
+ model = AutoModelForCausalLM.from_pretrained("MaziyarPanahi/calme-3.2-instruct-78b")
+ ```
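+ 
+ Note that the full-precision weights are roughly 312 GB (per the safetensors index below), far beyond a single GPU. A sharded-loading sketch, assuming `accelerate` is installed (`device_map="auto"` spreads layers across available GPUs and offloads the rest):
+ 
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ 
+ model_id = "MaziyarPanahi/calme-3.2-instruct-78b"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
+     device_map="auto",           # shard across GPUs, offload overflow to CPU
+ )
+ 
+ inputs = tokenizer.apply_chat_template(
+     [{"role": "user", "content": "Who are you?"}],
+     add_generation_prompt=True,
+     return_tensors="pt",
+ ).to(model.device)
+ 
+ outputs = model.generate(inputs, max_new_tokens=128)
+ print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
+ ```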
+ 
+ # Ethical Considerations
+ 
+ As with any large language model, users should be aware of potential biases and limitations. We recommend implementing appropriate safeguards and human oversight when deploying this model in production environments.
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
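
These are the ChatML control tokens used in the prompt template. A quick check that the tokenizer agrees with added_tokens.json (a sketch, run after downloading the tokenizer):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-3.2-instruct-78b")

# Expected: 151644, 151645, 151643 per added_tokens.json
for tok in ["<|im_start|>", "<|im_end|>", "<|endoftext|>"]:
    print(tok, tokenizer.convert_tokens_to_ids(tok))
```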
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "MaziyarPanahi/calme-3-selfmerge-qwen2-78b",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 8192,
+   "initializer_range": 0.02,
+   "intermediate_size": 29568,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 80,
+   "model_type": "qwen2",
+   "num_attention_heads": 64,
+   "num_hidden_layers": 86,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.1",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151646
+ }
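
These hyperparameters can be inspected without downloading any weights. A small sketch using `AutoConfig` (expected values in the comments come from the file above):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("MaziyarPanahi/calme-3.2-instruct-78b")

print(config.model_type)           # qwen2
print(config.num_hidden_layers)    # 86 -- deeper than the 80-layer Qwen2.5-72B base, from the self-merge
print(config.hidden_size)          # 8192
print(config.num_key_value_heads)  # 8 (grouped-query attention)
```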
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.4.4", "total_size": 311861854208}, "weight_map": {"lm_head.weight": "model-00001-of-00067.safetensors", "model.embed_tokens.weight": "model-00002-of-00067.safetensors", "model.layers.0.input_layernorm.weight": "model-00002-of-00067.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00003-of-00067.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00003-of-00067.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00003-of-00067.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00003-of-00067.safetensors", "model.layers.0.self_attn.k_proj.bias": "model-00003-of-00067.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00003-of-00067.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00003-of-00067.safetensors", "model.layers.0.self_attn.q_proj.bias": "model-00003-of-00067.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00003-of-00067.safetensors", "model.layers.0.self_attn.v_proj.bias": "model-00003-of-00067.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00003-of-00067.safetensors", "model.layers.1.input_layernorm.weight": "model-00003-of-00067.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00003-of-00067.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00004-of-00067.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00004-of-00067.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00004-of-00067.safetensors", "model.layers.1.self_attn.k_proj.bias": "model-00004-of-00067.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00004-of-00067.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00004-of-00067.safetensors", "model.layers.1.self_attn.q_proj.bias": "model-00004-of-00067.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00004-of-00067.safetensors", "model.layers.1.self_attn.v_proj.bias": "model-00004-of-00067.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00004-of-00067.safetensors", "model.layers.10.input_layernorm.weight": "model-00004-of-00067.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00004-of-00067.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00004-of-00067.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00005-of-00067.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00005-of-00067.safetensors", "model.layers.10.self_attn.k_proj.bias": "model-00005-of-00067.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00005-of-00067.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00005-of-00067.safetensors", "model.layers.10.self_attn.q_proj.bias": "model-00005-of-00067.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00005-of-00067.safetensors", "model.layers.10.self_attn.v_proj.bias": "model-00005-of-00067.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00005-of-00067.safetensors", "model.layers.11.input_layernorm.weight": "model-00005-of-00067.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00005-of-00067.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00005-of-00067.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00005-of-00067.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00005-of-00067.safetensors", "model.layers.11.self_attn.k_proj.bias": "model-00005-of-00067.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00005-of-00067.safetensors", 
"model.layers.11.self_attn.o_proj.weight": "model-00005-of-00067.safetensors", "model.layers.11.self_attn.q_proj.bias": "model-00005-of-00067.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00006-of-00067.safetensors", "model.layers.11.self_attn.v_proj.bias": "model-00006-of-00067.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00006-of-00067.safetensors", "model.layers.12.input_layernorm.weight": "model-00006-of-00067.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00006-of-00067.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00006-of-00067.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00006-of-00067.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00006-of-00067.safetensors", "model.layers.12.self_attn.k_proj.bias": "model-00006-of-00067.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00006-of-00067.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00006-of-00067.safetensors", "model.layers.12.self_attn.q_proj.bias": "model-00006-of-00067.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00006-of-00067.safetensors", "model.layers.12.self_attn.v_proj.bias": "model-00006-of-00067.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00006-of-00067.safetensors", "model.layers.13.input_layernorm.weight": "model-00006-of-00067.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00006-of-00067.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00007-of-00067.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00007-of-00067.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00007-of-00067.safetensors", "model.layers.13.self_attn.k_proj.bias": "model-00007-of-00067.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00007-of-00067.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00007-of-00067.safetensors", "model.layers.13.self_attn.q_proj.bias": "model-00007-of-00067.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00007-of-00067.safetensors", "model.layers.13.self_attn.v_proj.bias": "model-00007-of-00067.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00007-of-00067.safetensors", "model.layers.14.input_layernorm.weight": "model-00007-of-00067.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00007-of-00067.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00007-of-00067.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00008-of-00067.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00008-of-00067.safetensors", "model.layers.14.self_attn.k_proj.bias": "model-00008-of-00067.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00008-of-00067.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00008-of-00067.safetensors", "model.layers.14.self_attn.q_proj.bias": "model-00008-of-00067.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00008-of-00067.safetensors", "model.layers.14.self_attn.v_proj.bias": "model-00008-of-00067.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00008-of-00067.safetensors", "model.layers.15.input_layernorm.weight": "model-00008-of-00067.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00008-of-00067.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00008-of-00067.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00008-of-00067.safetensors", 
"model.layers.15.post_attention_layernorm.weight": "model-00008-of-00067.safetensors", "model.layers.15.self_attn.k_proj.bias": "model-00008-of-00067.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00008-of-00067.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00008-of-00067.safetensors", "model.layers.15.self_attn.q_proj.bias": "model-00008-of-00067.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00009-of-00067.safetensors", "model.layers.15.self_attn.v_proj.bias": "model-00009-of-00067.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00009-of-00067.safetensors", "model.layers.16.input_layernorm.weight": "model-00009-of-00067.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00009-of-00067.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00009-of-00067.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00009-of-00067.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00009-of-00067.safetensors", "model.layers.16.self_attn.k_proj.bias": "model-00009-of-00067.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00009-of-00067.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00009-of-00067.safetensors", "model.layers.16.self_attn.q_proj.bias": "model-00009-of-00067.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00009-of-00067.safetensors", "model.layers.16.self_attn.v_proj.bias": "model-00009-of-00067.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00009-of-00067.safetensors", "model.layers.17.input_layernorm.weight": "model-00009-of-00067.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00009-of-00067.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00010-of-00067.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00010-of-00067.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00010-of-00067.safetensors", "model.layers.17.self_attn.k_proj.bias": "model-00010-of-00067.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00010-of-00067.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00010-of-00067.safetensors", "model.layers.17.self_attn.q_proj.bias": "model-00010-of-00067.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00010-of-00067.safetensors", "model.layers.17.self_attn.v_proj.bias": "model-00010-of-00067.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00010-of-00067.safetensors", "model.layers.18.input_layernorm.weight": "model-00010-of-00067.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00010-of-00067.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00010-of-00067.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00011-of-00067.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00011-of-00067.safetensors", "model.layers.18.self_attn.k_proj.bias": "model-00011-of-00067.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00011-of-00067.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00011-of-00067.safetensors", "model.layers.18.self_attn.q_proj.bias": "model-00011-of-00067.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00011-of-00067.safetensors", "model.layers.18.self_attn.v_proj.bias": "model-00011-of-00067.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00011-of-00067.safetensors", "model.layers.19.input_layernorm.weight": "model-00011-of-00067.safetensors", 
"model.layers.19.mlp.down_proj.weight": "model-00011-of-00067.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00011-of-00067.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00011-of-00067.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00011-of-00067.safetensors", "model.layers.19.self_attn.k_proj.bias": "model-00011-of-00067.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00011-of-00067.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00011-of-00067.safetensors", "model.layers.19.self_attn.q_proj.bias": "model-00011-of-00067.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00012-of-00067.safetensors", "model.layers.19.self_attn.v_proj.bias": "model-00012-of-00067.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00012-of-00067.safetensors", "model.layers.2.input_layernorm.weight": "model-00012-of-00067.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00012-of-00067.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00012-of-00067.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00012-of-00067.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00012-of-00067.safetensors", "model.layers.2.self_attn.k_proj.bias": "model-00012-of-00067.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00012-of-00067.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00012-of-00067.safetensors", "model.layers.2.self_attn.q_proj.bias": "model-00012-of-00067.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00012-of-00067.safetensors", "model.layers.2.self_attn.v_proj.bias": "model-00012-of-00067.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00012-of-00067.safetensors", "model.layers.20.input_layernorm.weight": "model-00012-of-00067.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00012-of-00067.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00013-of-00067.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00013-of-00067.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00013-of-00067.safetensors", "model.layers.20.self_attn.k_proj.bias": "model-00013-of-00067.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00013-of-00067.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00013-of-00067.safetensors", "model.layers.20.self_attn.q_proj.bias": "model-00013-of-00067.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00013-of-00067.safetensors", "model.layers.20.self_attn.v_proj.bias": "model-00013-of-00067.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00013-of-00067.safetensors", "model.layers.21.input_layernorm.weight": "model-00013-of-00067.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00013-of-00067.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00013-of-00067.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00014-of-00067.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00014-of-00067.safetensors", "model.layers.21.self_attn.k_proj.bias": "model-00014-of-00067.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00014-of-00067.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00014-of-00067.safetensors", "model.layers.21.self_attn.q_proj.bias": "model-00014-of-00067.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00014-of-00067.safetensors", "model.layers.21.self_attn.v_proj.bias": 
"model-00014-of-00067.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00014-of-00067.safetensors", "model.layers.22.input_layernorm.weight": "model-00014-of-00067.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00014-of-00067.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00014-of-00067.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00014-of-00067.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00014-of-00067.safetensors", "model.layers.22.self_attn.k_proj.bias": "model-00014-of-00067.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00014-of-00067.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00014-of-00067.safetensors", "model.layers.22.self_attn.q_proj.bias": "model-00014-of-00067.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00015-of-00067.safetensors", "model.layers.22.self_attn.v_proj.bias": "model-00015-of-00067.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00015-of-00067.safetensors", "model.layers.23.input_layernorm.weight": "model-00015-of-00067.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00015-of-00067.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00015-of-00067.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00015-of-00067.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00015-of-00067.safetensors", "model.layers.23.self_attn.k_proj.bias": "model-00015-of-00067.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00015-of-00067.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00015-of-00067.safetensors", "model.layers.23.self_attn.q_proj.bias": "model-00015-of-00067.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00015-of-00067.safetensors", "model.layers.23.self_attn.v_proj.bias": "model-00015-of-00067.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00015-of-00067.safetensors", "model.layers.24.input_layernorm.weight": "model-00015-of-00067.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00015-of-00067.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00016-of-00067.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00016-of-00067.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00016-of-00067.safetensors", "model.layers.24.self_attn.k_proj.bias": "model-00016-of-00067.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00016-of-00067.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00016-of-00067.safetensors", "model.layers.24.self_attn.q_proj.bias": "model-00016-of-00067.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00016-of-00067.safetensors", "model.layers.24.self_attn.v_proj.bias": "model-00016-of-00067.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00016-of-00067.safetensors", "model.layers.25.input_layernorm.weight": "model-00016-of-00067.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00016-of-00067.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00016-of-00067.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00017-of-00067.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00017-of-00067.safetensors", "model.layers.25.self_attn.k_proj.bias": "model-00017-of-00067.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00017-of-00067.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00017-of-00067.safetensors", 
"model.layers.25.self_attn.q_proj.bias": "model-00017-of-00067.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00017-of-00067.safetensors", "model.layers.25.self_attn.v_proj.bias": "model-00017-of-00067.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00017-of-00067.safetensors", "model.layers.26.input_layernorm.weight": "model-00017-of-00067.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00017-of-00067.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00017-of-00067.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00017-of-00067.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00017-of-00067.safetensors", "model.layers.26.self_attn.k_proj.bias": "model-00017-of-00067.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00017-of-00067.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00017-of-00067.safetensors", "model.layers.26.self_attn.q_proj.bias": "model-00017-of-00067.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00018-of-00067.safetensors", "model.layers.26.self_attn.v_proj.bias": "model-00018-of-00067.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00018-of-00067.safetensors", "model.layers.27.input_layernorm.weight": "model-00018-of-00067.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00018-of-00067.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00018-of-00067.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00018-of-00067.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00018-of-00067.safetensors", "model.layers.27.self_attn.k_proj.bias": "model-00018-of-00067.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00018-of-00067.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00018-of-00067.safetensors", "model.layers.27.self_attn.q_proj.bias": "model-00018-of-00067.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00018-of-00067.safetensors", "model.layers.27.self_attn.v_proj.bias": "model-00018-of-00067.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00018-of-00067.safetensors", "model.layers.28.input_layernorm.weight": "model-00018-of-00067.safetensors", "model.layers.28.mlp.down_proj.weight": "model-00018-of-00067.safetensors", "model.layers.28.mlp.gate_proj.weight": "model-00019-of-00067.safetensors", "model.layers.28.mlp.up_proj.weight": "model-00019-of-00067.safetensors", "model.layers.28.post_attention_layernorm.weight": "model-00019-of-00067.safetensors", "model.layers.28.self_attn.k_proj.bias": "model-00019-of-00067.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00019-of-00067.safetensors", "model.layers.28.self_attn.o_proj.weight": "model-00019-of-00067.safetensors", "model.layers.28.self_attn.q_proj.bias": "model-00019-of-00067.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00019-of-00067.safetensors", "model.layers.28.self_attn.v_proj.bias": "model-00019-of-00067.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00019-of-00067.safetensors", "model.layers.29.input_layernorm.weight": "model-00019-of-00067.safetensors", "model.layers.29.mlp.down_proj.weight": "model-00019-of-00067.safetensors", "model.layers.29.mlp.gate_proj.weight": "model-00019-of-00067.safetensors", "model.layers.29.mlp.up_proj.weight": "model-00020-of-00067.safetensors", "model.layers.29.post_attention_layernorm.weight": "model-00020-of-00067.safetensors", 
"model.layers.29.self_attn.k_proj.bias": "model-00020-of-00067.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00020-of-00067.safetensors", "model.layers.29.self_attn.o_proj.weight": "model-00020-of-00067.safetensors", "model.layers.29.self_attn.q_proj.bias": "model-00020-of-00067.safetensors", "model.layers.29.self_attn.q_proj.weight": "model-00020-of-00067.safetensors", "model.layers.29.self_attn.v_proj.bias": "model-00020-of-00067.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00020-of-00067.safetensors", "model.layers.3.input_layernorm.weight": "model-00020-of-00067.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00020-of-00067.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00020-of-00067.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00020-of-00067.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00020-of-00067.safetensors", "model.layers.3.self_attn.k_proj.bias": "model-00020-of-00067.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00020-of-00067.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00020-of-00067.safetensors", "model.layers.3.self_attn.q_proj.bias": "model-00020-of-00067.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00021-of-00067.safetensors", "model.layers.3.self_attn.v_proj.bias": "model-00021-of-00067.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00021-of-00067.safetensors", "model.layers.30.input_layernorm.weight": "model-00021-of-00067.safetensors", "model.layers.30.mlp.down_proj.weight": "model-00021-of-00067.safetensors", "model.layers.30.mlp.gate_proj.weight": "model-00021-of-00067.safetensors", "model.layers.30.mlp.up_proj.weight": "model-00021-of-00067.safetensors", "model.layers.30.post_attention_layernorm.weight": "model-00021-of-00067.safetensors", "model.layers.30.self_attn.k_proj.bias": "model-00021-of-00067.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00021-of-00067.safetensors", "model.layers.30.self_attn.o_proj.weight": "model-00021-of-00067.safetensors", "model.layers.30.self_attn.q_proj.bias": "model-00021-of-00067.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00021-of-00067.safetensors", "model.layers.30.self_attn.v_proj.bias": "model-00021-of-00067.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00021-of-00067.safetensors", "model.layers.31.input_layernorm.weight": "model-00021-of-00067.safetensors", "model.layers.31.mlp.down_proj.weight": "model-00021-of-00067.safetensors", "model.layers.31.mlp.gate_proj.weight": "model-00022-of-00067.safetensors", "model.layers.31.mlp.up_proj.weight": "model-00022-of-00067.safetensors", "model.layers.31.post_attention_layernorm.weight": "model-00022-of-00067.safetensors", "model.layers.31.self_attn.k_proj.bias": "model-00022-of-00067.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00022-of-00067.safetensors", "model.layers.31.self_attn.o_proj.weight": "model-00022-of-00067.safetensors", "model.layers.31.self_attn.q_proj.bias": "model-00022-of-00067.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00022-of-00067.safetensors", "model.layers.31.self_attn.v_proj.bias": "model-00022-of-00067.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00022-of-00067.safetensors", "model.layers.32.input_layernorm.weight": "model-00022-of-00067.safetensors", "model.layers.32.mlp.down_proj.weight": "model-00022-of-00067.safetensors", "model.layers.32.mlp.gate_proj.weight": 
"model-00022-of-00067.safetensors", "model.layers.32.mlp.up_proj.weight": "model-00023-of-00067.safetensors", "model.layers.32.post_attention_layernorm.weight": "model-00023-of-00067.safetensors", "model.layers.32.self_attn.k_proj.bias": "model-00023-of-00067.safetensors", "model.layers.32.self_attn.k_proj.weight": "model-00023-of-00067.safetensors", "model.layers.32.self_attn.o_proj.weight": "model-00023-of-00067.safetensors", "model.layers.32.self_attn.q_proj.bias": "model-00023-of-00067.safetensors", "model.layers.32.self_attn.q_proj.weight": "model-00023-of-00067.safetensors", "model.layers.32.self_attn.v_proj.bias": "model-00023-of-00067.safetensors", "model.layers.32.self_attn.v_proj.weight": "model-00023-of-00067.safetensors", "model.layers.33.input_layernorm.weight": "model-00023-of-00067.safetensors", "model.layers.33.mlp.down_proj.weight": "model-00023-of-00067.safetensors", "model.layers.33.mlp.gate_proj.weight": "model-00023-of-00067.safetensors", "model.layers.33.mlp.up_proj.weight": "model-00023-of-00067.safetensors", "model.layers.33.post_attention_layernorm.weight": "model-00023-of-00067.safetensors", "model.layers.33.self_attn.k_proj.bias": "model-00023-of-00067.safetensors", "model.layers.33.self_attn.k_proj.weight": "model-00023-of-00067.safetensors", "model.layers.33.self_attn.o_proj.weight": "model-00023-of-00067.safetensors", "model.layers.33.self_attn.q_proj.bias": "model-00023-of-00067.safetensors", "model.layers.33.self_attn.q_proj.weight": "model-00024-of-00067.safetensors", "model.layers.33.self_attn.v_proj.bias": "model-00024-of-00067.safetensors", "model.layers.33.self_attn.v_proj.weight": "model-00024-of-00067.safetensors", "model.layers.34.input_layernorm.weight": "model-00024-of-00067.safetensors", "model.layers.34.mlp.down_proj.weight": "model-00024-of-00067.safetensors", "model.layers.34.mlp.gate_proj.weight": "model-00024-of-00067.safetensors", "model.layers.34.mlp.up_proj.weight": "model-00024-of-00067.safetensors", "model.layers.34.post_attention_layernorm.weight": "model-00024-of-00067.safetensors", "model.layers.34.self_attn.k_proj.bias": "model-00024-of-00067.safetensors", "model.layers.34.self_attn.k_proj.weight": "model-00024-of-00067.safetensors", "model.layers.34.self_attn.o_proj.weight": "model-00024-of-00067.safetensors", "model.layers.34.self_attn.q_proj.bias": "model-00024-of-00067.safetensors", "model.layers.34.self_attn.q_proj.weight": "model-00024-of-00067.safetensors", "model.layers.34.self_attn.v_proj.bias": "model-00024-of-00067.safetensors", "model.layers.34.self_attn.v_proj.weight": "model-00024-of-00067.safetensors", "model.layers.35.input_layernorm.weight": "model-00024-of-00067.safetensors", "model.layers.35.mlp.down_proj.weight": "model-00024-of-00067.safetensors", "model.layers.35.mlp.gate_proj.weight": "model-00025-of-00067.safetensors", "model.layers.35.mlp.up_proj.weight": "model-00025-of-00067.safetensors", "model.layers.35.post_attention_layernorm.weight": "model-00025-of-00067.safetensors", "model.layers.35.self_attn.k_proj.bias": "model-00025-of-00067.safetensors", "model.layers.35.self_attn.k_proj.weight": "model-00025-of-00067.safetensors", "model.layers.35.self_attn.o_proj.weight": "model-00025-of-00067.safetensors", "model.layers.35.self_attn.q_proj.bias": "model-00025-of-00067.safetensors", "model.layers.35.self_attn.q_proj.weight": "model-00025-of-00067.safetensors", "model.layers.35.self_attn.v_proj.bias": "model-00025-of-00067.safetensors", "model.layers.35.self_attn.v_proj.weight": 
"model-00025-of-00067.safetensors", "model.layers.36.input_layernorm.weight": "model-00025-of-00067.safetensors", "model.layers.36.mlp.down_proj.weight": "model-00025-of-00067.safetensors", "model.layers.36.mlp.gate_proj.weight": "model-00025-of-00067.safetensors", "model.layers.36.mlp.up_proj.weight": "model-00026-of-00067.safetensors", "model.layers.36.post_attention_layernorm.weight": "model-00026-of-00067.safetensors", "model.layers.36.self_attn.k_proj.bias": "model-00026-of-00067.safetensors", "model.layers.36.self_attn.k_proj.weight": "model-00026-of-00067.safetensors", "model.layers.36.self_attn.o_proj.weight": "model-00026-of-00067.safetensors", "model.layers.36.self_attn.q_proj.bias": "model-00026-of-00067.safetensors", "model.layers.36.self_attn.q_proj.weight": "model-00026-of-00067.safetensors", "model.layers.36.self_attn.v_proj.bias": "model-00026-of-00067.safetensors", "model.layers.36.self_attn.v_proj.weight": "model-00026-of-00067.safetensors", "model.layers.37.input_layernorm.weight": "model-00026-of-00067.safetensors", "model.layers.37.mlp.down_proj.weight": "model-00026-of-00067.safetensors", "model.layers.37.mlp.gate_proj.weight": "model-00026-of-00067.safetensors", "model.layers.37.mlp.up_proj.weight": "model-00026-of-00067.safetensors", "model.layers.37.post_attention_layernorm.weight": "model-00026-of-00067.safetensors", "model.layers.37.self_attn.k_proj.bias": "model-00026-of-00067.safetensors", "model.layers.37.self_attn.k_proj.weight": "model-00026-of-00067.safetensors", "model.layers.37.self_attn.o_proj.weight": "model-00026-of-00067.safetensors", "model.layers.37.self_attn.q_proj.bias": "model-00026-of-00067.safetensors", "model.layers.37.self_attn.q_proj.weight": "model-00027-of-00067.safetensors", "model.layers.37.self_attn.v_proj.bias": "model-00027-of-00067.safetensors", "model.layers.37.self_attn.v_proj.weight": "model-00027-of-00067.safetensors", "model.layers.38.input_layernorm.weight": "model-00027-of-00067.safetensors", "model.layers.38.mlp.down_proj.weight": "model-00027-of-00067.safetensors", "model.layers.38.mlp.gate_proj.weight": "model-00027-of-00067.safetensors", "model.layers.38.mlp.up_proj.weight": "model-00027-of-00067.safetensors", "model.layers.38.post_attention_layernorm.weight": "model-00027-of-00067.safetensors", "model.layers.38.self_attn.k_proj.bias": "model-00027-of-00067.safetensors", "model.layers.38.self_attn.k_proj.weight": "model-00027-of-00067.safetensors", "model.layers.38.self_attn.o_proj.weight": "model-00027-of-00067.safetensors", "model.layers.38.self_attn.q_proj.bias": "model-00027-of-00067.safetensors", "model.layers.38.self_attn.q_proj.weight": "model-00027-of-00067.safetensors", "model.layers.38.self_attn.v_proj.bias": "model-00027-of-00067.safetensors", "model.layers.38.self_attn.v_proj.weight": "model-00027-of-00067.safetensors", "model.layers.39.input_layernorm.weight": "model-00027-of-00067.safetensors", "model.layers.39.mlp.down_proj.weight": "model-00027-of-00067.safetensors", "model.layers.39.mlp.gate_proj.weight": "model-00028-of-00067.safetensors", "model.layers.39.mlp.up_proj.weight": "model-00028-of-00067.safetensors", "model.layers.39.post_attention_layernorm.weight": "model-00028-of-00067.safetensors", "model.layers.39.self_attn.k_proj.bias": "model-00028-of-00067.safetensors", "model.layers.39.self_attn.k_proj.weight": "model-00028-of-00067.safetensors", "model.layers.39.self_attn.o_proj.weight": "model-00028-of-00067.safetensors", "model.layers.39.self_attn.q_proj.bias": "model-00028-of-00067.safetensors", 
"model.layers.39.self_attn.q_proj.weight": "model-00028-of-00067.safetensors", "model.layers.39.self_attn.v_proj.bias": "model-00028-of-00067.safetensors", "model.layers.39.self_attn.v_proj.weight": "model-00028-of-00067.safetensors", "model.layers.4.input_layernorm.weight": "model-00028-of-00067.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00028-of-00067.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00028-of-00067.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00029-of-00067.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00029-of-00067.safetensors", "model.layers.4.self_attn.k_proj.bias": "model-00029-of-00067.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00029-of-00067.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00029-of-00067.safetensors", "model.layers.4.self_attn.q_proj.bias": "model-00029-of-00067.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00029-of-00067.safetensors", "model.layers.4.self_attn.v_proj.bias": "model-00029-of-00067.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00029-of-00067.safetensors", "model.layers.40.input_layernorm.weight": "model-00029-of-00067.safetensors", "model.layers.40.mlp.down_proj.weight": "model-00029-of-00067.safetensors", "model.layers.40.mlp.gate_proj.weight": "model-00029-of-00067.safetensors", "model.layers.40.mlp.up_proj.weight": "model-00029-of-00067.safetensors", "model.layers.40.post_attention_layernorm.weight": "model-00029-of-00067.safetensors", "model.layers.40.self_attn.k_proj.bias": "model-00029-of-00067.safetensors", "model.layers.40.self_attn.k_proj.weight": "model-00029-of-00067.safetensors", "model.layers.40.self_attn.o_proj.weight": "model-00029-of-00067.safetensors", "model.layers.40.self_attn.q_proj.bias": "model-00029-of-00067.safetensors", "model.layers.40.self_attn.q_proj.weight": "model-00030-of-00067.safetensors", "model.layers.40.self_attn.v_proj.bias": "model-00030-of-00067.safetensors", "model.layers.40.self_attn.v_proj.weight": "model-00030-of-00067.safetensors", "model.layers.41.input_layernorm.weight": "model-00030-of-00067.safetensors", "model.layers.41.mlp.down_proj.weight": "model-00030-of-00067.safetensors", "model.layers.41.mlp.gate_proj.weight": "model-00030-of-00067.safetensors", "model.layers.41.mlp.up_proj.weight": "model-00030-of-00067.safetensors", "model.layers.41.post_attention_layernorm.weight": "model-00030-of-00067.safetensors", "model.layers.41.self_attn.k_proj.bias": "model-00030-of-00067.safetensors", "model.layers.41.self_attn.k_proj.weight": "model-00030-of-00067.safetensors", "model.layers.41.self_attn.o_proj.weight": "model-00030-of-00067.safetensors", "model.layers.41.self_attn.q_proj.bias": "model-00030-of-00067.safetensors", "model.layers.41.self_attn.q_proj.weight": "model-00030-of-00067.safetensors", "model.layers.41.self_attn.v_proj.bias": "model-00030-of-00067.safetensors", "model.layers.41.self_attn.v_proj.weight": "model-00030-of-00067.safetensors", "model.layers.42.input_layernorm.weight": "model-00030-of-00067.safetensors", "model.layers.42.mlp.down_proj.weight": "model-00030-of-00067.safetensors", "model.layers.42.mlp.gate_proj.weight": "model-00031-of-00067.safetensors", "model.layers.42.mlp.up_proj.weight": "model-00031-of-00067.safetensors", "model.layers.42.post_attention_layernorm.weight": "model-00031-of-00067.safetensors", "model.layers.42.self_attn.k_proj.bias": "model-00031-of-00067.safetensors", "model.layers.42.self_attn.k_proj.weight": 
"model-00031-of-00067.safetensors", "model.layers.42.self_attn.o_proj.weight": "model-00031-of-00067.safetensors", "model.layers.42.self_attn.q_proj.bias": "model-00031-of-00067.safetensors", "model.layers.42.self_attn.q_proj.weight": "model-00031-of-00067.safetensors", "model.layers.42.self_attn.v_proj.bias": "model-00031-of-00067.safetensors", "model.layers.42.self_attn.v_proj.weight": "model-00031-of-00067.safetensors", "model.layers.43.input_layernorm.weight": "model-00031-of-00067.safetensors", "model.layers.43.mlp.down_proj.weight": "model-00031-of-00067.safetensors", "model.layers.43.mlp.gate_proj.weight": "model-00031-of-00067.safetensors", "model.layers.43.mlp.up_proj.weight": "model-00032-of-00067.safetensors", "model.layers.43.post_attention_layernorm.weight": "model-00032-of-00067.safetensors", "model.layers.43.self_attn.k_proj.bias": "model-00032-of-00067.safetensors", "model.layers.43.self_attn.k_proj.weight": "model-00032-of-00067.safetensors", "model.layers.43.self_attn.o_proj.weight": "model-00032-of-00067.safetensors", "model.layers.43.self_attn.q_proj.bias": "model-00032-of-00067.safetensors", "model.layers.43.self_attn.q_proj.weight": "model-00032-of-00067.safetensors", "model.layers.43.self_attn.v_proj.bias": "model-00032-of-00067.safetensors", "model.layers.43.self_attn.v_proj.weight": "model-00032-of-00067.safetensors", "model.layers.44.input_layernorm.weight": "model-00032-of-00067.safetensors", "model.layers.44.mlp.down_proj.weight": "model-00032-of-00067.safetensors", "model.layers.44.mlp.gate_proj.weight": "model-00032-of-00067.safetensors", "model.layers.44.mlp.up_proj.weight": "model-00032-of-00067.safetensors", "model.layers.44.post_attention_layernorm.weight": "model-00032-of-00067.safetensors", "model.layers.44.self_attn.k_proj.bias": "model-00032-of-00067.safetensors", "model.layers.44.self_attn.k_proj.weight": "model-00032-of-00067.safetensors", "model.layers.44.self_attn.o_proj.weight": "model-00032-of-00067.safetensors", "model.layers.44.self_attn.q_proj.bias": "model-00032-of-00067.safetensors", "model.layers.44.self_attn.q_proj.weight": "model-00033-of-00067.safetensors", "model.layers.44.self_attn.v_proj.bias": "model-00033-of-00067.safetensors", "model.layers.44.self_attn.v_proj.weight": "model-00033-of-00067.safetensors", "model.layers.45.input_layernorm.weight": "model-00033-of-00067.safetensors", "model.layers.45.mlp.down_proj.weight": "model-00033-of-00067.safetensors", "model.layers.45.mlp.gate_proj.weight": "model-00033-of-00067.safetensors", "model.layers.45.mlp.up_proj.weight": "model-00033-of-00067.safetensors", "model.layers.45.post_attention_layernorm.weight": "model-00033-of-00067.safetensors", "model.layers.45.self_attn.k_proj.bias": "model-00033-of-00067.safetensors", "model.layers.45.self_attn.k_proj.weight": "model-00033-of-00067.safetensors", "model.layers.45.self_attn.o_proj.weight": "model-00033-of-00067.safetensors", "model.layers.45.self_attn.q_proj.bias": "model-00033-of-00067.safetensors", "model.layers.45.self_attn.q_proj.weight": "model-00033-of-00067.safetensors", "model.layers.45.self_attn.v_proj.bias": "model-00033-of-00067.safetensors", "model.layers.45.self_attn.v_proj.weight": "model-00033-of-00067.safetensors", "model.layers.46.input_layernorm.weight": "model-00033-of-00067.safetensors", "model.layers.46.mlp.down_proj.weight": "model-00033-of-00067.safetensors", "model.layers.46.mlp.gate_proj.weight": "model-00034-of-00067.safetensors", "model.layers.46.mlp.up_proj.weight": "model-00034-of-00067.safetensors", 
"model.layers.46.post_attention_layernorm.weight": "model-00034-of-00067.safetensors", "model.layers.46.self_attn.k_proj.bias": "model-00034-of-00067.safetensors", "model.layers.46.self_attn.k_proj.weight": "model-00034-of-00067.safetensors", "model.layers.46.self_attn.o_proj.weight": "model-00034-of-00067.safetensors", "model.layers.46.self_attn.q_proj.bias": "model-00034-of-00067.safetensors", "model.layers.46.self_attn.q_proj.weight": "model-00034-of-00067.safetensors", "model.layers.46.self_attn.v_proj.bias": "model-00034-of-00067.safetensors", "model.layers.46.self_attn.v_proj.weight": "model-00034-of-00067.safetensors", "model.layers.47.input_layernorm.weight": "model-00034-of-00067.safetensors", "model.layers.47.mlp.down_proj.weight": "model-00034-of-00067.safetensors", "model.layers.47.mlp.gate_proj.weight": "model-00034-of-00067.safetensors", "model.layers.47.mlp.up_proj.weight": "model-00035-of-00067.safetensors", "model.layers.47.post_attention_layernorm.weight": "model-00035-of-00067.safetensors", "model.layers.47.self_attn.k_proj.bias": "model-00035-of-00067.safetensors", "model.layers.47.self_attn.k_proj.weight": "model-00035-of-00067.safetensors", "model.layers.47.self_attn.o_proj.weight": "model-00035-of-00067.safetensors", "model.layers.47.self_attn.q_proj.bias": "model-00035-of-00067.safetensors", "model.layers.47.self_attn.q_proj.weight": "model-00035-of-00067.safetensors", "model.layers.47.self_attn.v_proj.bias": "model-00035-of-00067.safetensors", "model.layers.47.self_attn.v_proj.weight": "model-00035-of-00067.safetensors", "model.layers.48.input_layernorm.weight": "model-00035-of-00067.safetensors", "model.layers.48.mlp.down_proj.weight": "model-00035-of-00067.safetensors", "model.layers.48.mlp.gate_proj.weight": "model-00035-of-00067.safetensors", "model.layers.48.mlp.up_proj.weight": "model-00035-of-00067.safetensors", "model.layers.48.post_attention_layernorm.weight": "model-00035-of-00067.safetensors", "model.layers.48.self_attn.k_proj.bias": "model-00035-of-00067.safetensors", "model.layers.48.self_attn.k_proj.weight": "model-00035-of-00067.safetensors", "model.layers.48.self_attn.o_proj.weight": "model-00035-of-00067.safetensors", "model.layers.48.self_attn.q_proj.bias": "model-00035-of-00067.safetensors", "model.layers.48.self_attn.q_proj.weight": "model-00036-of-00067.safetensors", "model.layers.48.self_attn.v_proj.bias": "model-00036-of-00067.safetensors", "model.layers.48.self_attn.v_proj.weight": "model-00036-of-00067.safetensors", "model.layers.49.input_layernorm.weight": "model-00036-of-00067.safetensors", "model.layers.49.mlp.down_proj.weight": "model-00036-of-00067.safetensors", "model.layers.49.mlp.gate_proj.weight": "model-00036-of-00067.safetensors", "model.layers.49.mlp.up_proj.weight": "model-00036-of-00067.safetensors", "model.layers.49.post_attention_layernorm.weight": "model-00036-of-00067.safetensors", "model.layers.49.self_attn.k_proj.bias": "model-00036-of-00067.safetensors", "model.layers.49.self_attn.k_proj.weight": "model-00036-of-00067.safetensors", "model.layers.49.self_attn.o_proj.weight": "model-00036-of-00067.safetensors", "model.layers.49.self_attn.q_proj.bias": "model-00036-of-00067.safetensors", "model.layers.49.self_attn.q_proj.weight": "model-00036-of-00067.safetensors", "model.layers.49.self_attn.v_proj.bias": "model-00036-of-00067.safetensors", "model.layers.49.self_attn.v_proj.weight": "model-00036-of-00067.safetensors", "model.layers.5.input_layernorm.weight": "model-00036-of-00067.safetensors", 
"model.layers.5.mlp.down_proj.weight": "model-00036-of-00067.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00037-of-00067.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00037-of-00067.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00037-of-00067.safetensors", "model.layers.5.self_attn.k_proj.bias": "model-00037-of-00067.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00037-of-00067.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00037-of-00067.safetensors", "model.layers.5.self_attn.q_proj.bias": "model-00037-of-00067.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00037-of-00067.safetensors", "model.layers.5.self_attn.v_proj.bias": "model-00037-of-00067.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00037-of-00067.safetensors", "model.layers.50.input_layernorm.weight": "model-00037-of-00067.safetensors", "model.layers.50.mlp.down_proj.weight": "model-00037-of-00067.safetensors", "model.layers.50.mlp.gate_proj.weight": "model-00037-of-00067.safetensors", "model.layers.50.mlp.up_proj.weight": "model-00038-of-00067.safetensors", "model.layers.50.post_attention_layernorm.weight": "model-00038-of-00067.safetensors", "model.layers.50.self_attn.k_proj.bias": "model-00038-of-00067.safetensors", "model.layers.50.self_attn.k_proj.weight": "model-00038-of-00067.safetensors", "model.layers.50.self_attn.o_proj.weight": "model-00038-of-00067.safetensors", "model.layers.50.self_attn.q_proj.bias": "model-00038-of-00067.safetensors", "model.layers.50.self_attn.q_proj.weight": "model-00038-of-00067.safetensors", "model.layers.50.self_attn.v_proj.bias": "model-00038-of-00067.safetensors", "model.layers.50.self_attn.v_proj.weight": "model-00038-of-00067.safetensors", "model.layers.51.input_layernorm.weight": "model-00038-of-00067.safetensors", "model.layers.51.mlp.down_proj.weight": "model-00038-of-00067.safetensors", "model.layers.51.mlp.gate_proj.weight": "model-00038-of-00067.safetensors", "model.layers.51.mlp.up_proj.weight": "model-00038-of-00067.safetensors", "model.layers.51.post_attention_layernorm.weight": "model-00038-of-00067.safetensors", "model.layers.51.self_attn.k_proj.bias": "model-00038-of-00067.safetensors", "model.layers.51.self_attn.k_proj.weight": "model-00038-of-00067.safetensors", "model.layers.51.self_attn.o_proj.weight": "model-00038-of-00067.safetensors", "model.layers.51.self_attn.q_proj.bias": "model-00038-of-00067.safetensors", "model.layers.51.self_attn.q_proj.weight": "model-00039-of-00067.safetensors", "model.layers.51.self_attn.v_proj.bias": "model-00039-of-00067.safetensors", "model.layers.51.self_attn.v_proj.weight": "model-00039-of-00067.safetensors", "model.layers.52.input_layernorm.weight": "model-00039-of-00067.safetensors", "model.layers.52.mlp.down_proj.weight": "model-00039-of-00067.safetensors", "model.layers.52.mlp.gate_proj.weight": "model-00039-of-00067.safetensors", "model.layers.52.mlp.up_proj.weight": "model-00039-of-00067.safetensors", "model.layers.52.post_attention_layernorm.weight": "model-00039-of-00067.safetensors", "model.layers.52.self_attn.k_proj.bias": "model-00039-of-00067.safetensors", "model.layers.52.self_attn.k_proj.weight": "model-00039-of-00067.safetensors", "model.layers.52.self_attn.o_proj.weight": "model-00039-of-00067.safetensors", "model.layers.52.self_attn.q_proj.bias": "model-00039-of-00067.safetensors", "model.layers.52.self_attn.q_proj.weight": "model-00039-of-00067.safetensors", "model.layers.52.self_attn.v_proj.bias": 
"model-00039-of-00067.safetensors", "model.layers.52.self_attn.v_proj.weight": "model-00039-of-00067.safetensors", "model.layers.53.input_layernorm.weight": "model-00039-of-00067.safetensors", "model.layers.53.mlp.down_proj.weight": "model-00039-of-00067.safetensors", "model.layers.53.mlp.gate_proj.weight": "model-00040-of-00067.safetensors", "model.layers.53.mlp.up_proj.weight": "model-00040-of-00067.safetensors", "model.layers.53.post_attention_layernorm.weight": "model-00040-of-00067.safetensors", "model.layers.53.self_attn.k_proj.bias": "model-00040-of-00067.safetensors", "model.layers.53.self_attn.k_proj.weight": "model-00040-of-00067.safetensors", "model.layers.53.self_attn.o_proj.weight": "model-00040-of-00067.safetensors", "model.layers.53.self_attn.q_proj.bias": "model-00040-of-00067.safetensors", "model.layers.53.self_attn.q_proj.weight": "model-00040-of-00067.safetensors", "model.layers.53.self_attn.v_proj.bias": "model-00040-of-00067.safetensors", "model.layers.53.self_attn.v_proj.weight": "model-00040-of-00067.safetensors", "model.layers.54.input_layernorm.weight": "model-00040-of-00067.safetensors", "model.layers.54.mlp.down_proj.weight": "model-00040-of-00067.safetensors", "model.layers.54.mlp.gate_proj.weight": "model-00040-of-00067.safetensors", "model.layers.54.mlp.up_proj.weight": "model-00041-of-00067.safetensors", "model.layers.54.post_attention_layernorm.weight": "model-00041-of-00067.safetensors", "model.layers.54.self_attn.k_proj.bias": "model-00041-of-00067.safetensors", "model.layers.54.self_attn.k_proj.weight": "model-00041-of-00067.safetensors", "model.layers.54.self_attn.o_proj.weight": "model-00041-of-00067.safetensors", "model.layers.54.self_attn.q_proj.bias": "model-00041-of-00067.safetensors", "model.layers.54.self_attn.q_proj.weight": "model-00041-of-00067.safetensors", "model.layers.54.self_attn.v_proj.bias": "model-00041-of-00067.safetensors", "model.layers.54.self_attn.v_proj.weight": "model-00041-of-00067.safetensors", "model.layers.55.input_layernorm.weight": "model-00041-of-00067.safetensors", "model.layers.55.mlp.down_proj.weight": "model-00041-of-00067.safetensors", "model.layers.55.mlp.gate_proj.weight": "model-00041-of-00067.safetensors", "model.layers.55.mlp.up_proj.weight": "model-00041-of-00067.safetensors", "model.layers.55.post_attention_layernorm.weight": "model-00041-of-00067.safetensors", "model.layers.55.self_attn.k_proj.bias": "model-00041-of-00067.safetensors", "model.layers.55.self_attn.k_proj.weight": "model-00041-of-00067.safetensors", "model.layers.55.self_attn.o_proj.weight": "model-00041-of-00067.safetensors", "model.layers.55.self_attn.q_proj.bias": "model-00041-of-00067.safetensors", "model.layers.55.self_attn.q_proj.weight": "model-00042-of-00067.safetensors", "model.layers.55.self_attn.v_proj.bias": "model-00042-of-00067.safetensors", "model.layers.55.self_attn.v_proj.weight": "model-00042-of-00067.safetensors", "model.layers.56.input_layernorm.weight": "model-00042-of-00067.safetensors", "model.layers.56.mlp.down_proj.weight": "model-00042-of-00067.safetensors", "model.layers.56.mlp.gate_proj.weight": "model-00042-of-00067.safetensors", "model.layers.56.mlp.up_proj.weight": "model-00042-of-00067.safetensors", "model.layers.56.post_attention_layernorm.weight": "model-00042-of-00067.safetensors", "model.layers.56.self_attn.k_proj.bias": "model-00042-of-00067.safetensors", "model.layers.56.self_attn.k_proj.weight": "model-00042-of-00067.safetensors", "model.layers.56.self_attn.o_proj.weight": "model-00042-of-00067.safetensors", 
"model.layers.56.self_attn.q_proj.bias": "model-00042-of-00067.safetensors", "model.layers.56.self_attn.q_proj.weight": "model-00042-of-00067.safetensors", "model.layers.56.self_attn.v_proj.bias": "model-00042-of-00067.safetensors", "model.layers.56.self_attn.v_proj.weight": "model-00042-of-00067.safetensors", "model.layers.57.input_layernorm.weight": "model-00042-of-00067.safetensors", "model.layers.57.mlp.down_proj.weight": "model-00042-of-00067.safetensors", "model.layers.57.mlp.gate_proj.weight": "model-00043-of-00067.safetensors", "model.layers.57.mlp.up_proj.weight": "model-00043-of-00067.safetensors", "model.layers.57.post_attention_layernorm.weight": "model-00043-of-00067.safetensors", "model.layers.57.self_attn.k_proj.bias": "model-00043-of-00067.safetensors", "model.layers.57.self_attn.k_proj.weight": "model-00043-of-00067.safetensors", "model.layers.57.self_attn.o_proj.weight": "model-00043-of-00067.safetensors", "model.layers.57.self_attn.q_proj.bias": "model-00043-of-00067.safetensors", "model.layers.57.self_attn.q_proj.weight": "model-00043-of-00067.safetensors", "model.layers.57.self_attn.v_proj.bias": "model-00043-of-00067.safetensors", "model.layers.57.self_attn.v_proj.weight": "model-00043-of-00067.safetensors", "model.layers.58.input_layernorm.weight": "model-00043-of-00067.safetensors", "model.layers.58.mlp.down_proj.weight": "model-00043-of-00067.safetensors", "model.layers.58.mlp.gate_proj.weight": "model-00043-of-00067.safetensors", "model.layers.58.mlp.up_proj.weight": "model-00044-of-00067.safetensors", "model.layers.58.post_attention_layernorm.weight": "model-00044-of-00067.safetensors", "model.layers.58.self_attn.k_proj.bias": "model-00044-of-00067.safetensors", "model.layers.58.self_attn.k_proj.weight": "model-00044-of-00067.safetensors", "model.layers.58.self_attn.o_proj.weight": "model-00044-of-00067.safetensors", "model.layers.58.self_attn.q_proj.bias": "model-00044-of-00067.safetensors", "model.layers.58.self_attn.q_proj.weight": "model-00044-of-00067.safetensors", "model.layers.58.self_attn.v_proj.bias": "model-00044-of-00067.safetensors", "model.layers.58.self_attn.v_proj.weight": "model-00044-of-00067.safetensors", "model.layers.59.input_layernorm.weight": "model-00044-of-00067.safetensors", "model.layers.59.mlp.down_proj.weight": "model-00044-of-00067.safetensors", "model.layers.59.mlp.gate_proj.weight": "model-00044-of-00067.safetensors", "model.layers.59.mlp.up_proj.weight": "model-00044-of-00067.safetensors", "model.layers.59.post_attention_layernorm.weight": "model-00044-of-00067.safetensors", "model.layers.59.self_attn.k_proj.bias": "model-00044-of-00067.safetensors", "model.layers.59.self_attn.k_proj.weight": "model-00044-of-00067.safetensors", "model.layers.59.self_attn.o_proj.weight": "model-00044-of-00067.safetensors", "model.layers.59.self_attn.q_proj.bias": "model-00044-of-00067.safetensors", "model.layers.59.self_attn.q_proj.weight": "model-00045-of-00067.safetensors", "model.layers.59.self_attn.v_proj.bias": "model-00045-of-00067.safetensors", "model.layers.59.self_attn.v_proj.weight": "model-00045-of-00067.safetensors", "model.layers.6.input_layernorm.weight": "model-00045-of-00067.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00045-of-00067.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00045-of-00067.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00045-of-00067.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00045-of-00067.safetensors", "model.layers.6.self_attn.k_proj.bias": 
"model-00045-of-00067.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00045-of-00067.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00045-of-00067.safetensors", "model.layers.6.self_attn.q_proj.bias": "model-00045-of-00067.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00045-of-00067.safetensors", "model.layers.6.self_attn.v_proj.bias": "model-00045-of-00067.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00045-of-00067.safetensors", "model.layers.60.input_layernorm.weight": "model-00045-of-00067.safetensors", "model.layers.60.mlp.down_proj.weight": "model-00045-of-00067.safetensors", "model.layers.60.mlp.gate_proj.weight": "model-00046-of-00067.safetensors", "model.layers.60.mlp.up_proj.weight": "model-00046-of-00067.safetensors", "model.layers.60.post_attention_layernorm.weight": "model-00046-of-00067.safetensors", "model.layers.60.self_attn.k_proj.bias": "model-00046-of-00067.safetensors", "model.layers.60.self_attn.k_proj.weight": "model-00046-of-00067.safetensors", "model.layers.60.self_attn.o_proj.weight": "model-00046-of-00067.safetensors", "model.layers.60.self_attn.q_proj.bias": "model-00046-of-00067.safetensors", "model.layers.60.self_attn.q_proj.weight": "model-00046-of-00067.safetensors", "model.layers.60.self_attn.v_proj.bias": "model-00046-of-00067.safetensors", "model.layers.60.self_attn.v_proj.weight": "model-00046-of-00067.safetensors", "model.layers.61.input_layernorm.weight": "model-00046-of-00067.safetensors", "model.layers.61.mlp.down_proj.weight": "model-00046-of-00067.safetensors", "model.layers.61.mlp.gate_proj.weight": "model-00046-of-00067.safetensors", "model.layers.61.mlp.up_proj.weight": "model-00047-of-00067.safetensors", "model.layers.61.post_attention_layernorm.weight": "model-00047-of-00067.safetensors", "model.layers.61.self_attn.k_proj.bias": "model-00047-of-00067.safetensors", "model.layers.61.self_attn.k_proj.weight": "model-00047-of-00067.safetensors", "model.layers.61.self_attn.o_proj.weight": "model-00047-of-00067.safetensors", "model.layers.61.self_attn.q_proj.bias": "model-00047-of-00067.safetensors", "model.layers.61.self_attn.q_proj.weight": "model-00047-of-00067.safetensors", "model.layers.61.self_attn.v_proj.bias": "model-00047-of-00067.safetensors", "model.layers.61.self_attn.v_proj.weight": "model-00047-of-00067.safetensors", "model.layers.62.input_layernorm.weight": "model-00047-of-00067.safetensors", "model.layers.62.mlp.down_proj.weight": "model-00047-of-00067.safetensors", "model.layers.62.mlp.gate_proj.weight": "model-00047-of-00067.safetensors", "model.layers.62.mlp.up_proj.weight": "model-00047-of-00067.safetensors", "model.layers.62.post_attention_layernorm.weight": "model-00047-of-00067.safetensors", "model.layers.62.self_attn.k_proj.bias": "model-00047-of-00067.safetensors", "model.layers.62.self_attn.k_proj.weight": "model-00047-of-00067.safetensors", "model.layers.62.self_attn.o_proj.weight": "model-00047-of-00067.safetensors", "model.layers.62.self_attn.q_proj.bias": "model-00047-of-00067.safetensors", "model.layers.62.self_attn.q_proj.weight": "model-00048-of-00067.safetensors", "model.layers.62.self_attn.v_proj.bias": "model-00048-of-00067.safetensors", "model.layers.62.self_attn.v_proj.weight": "model-00048-of-00067.safetensors", "model.layers.63.input_layernorm.weight": "model-00048-of-00067.safetensors", "model.layers.63.mlp.down_proj.weight": "model-00048-of-00067.safetensors", "model.layers.63.mlp.gate_proj.weight": "model-00048-of-00067.safetensors", 
"model.layers.63.mlp.up_proj.weight": "model-00048-of-00067.safetensors", "model.layers.63.post_attention_layernorm.weight": "model-00048-of-00067.safetensors", "model.layers.63.self_attn.k_proj.bias": "model-00048-of-00067.safetensors", "model.layers.63.self_attn.k_proj.weight": "model-00048-of-00067.safetensors", "model.layers.63.self_attn.o_proj.weight": "model-00048-of-00067.safetensors", "model.layers.63.self_attn.q_proj.bias": "model-00048-of-00067.safetensors", "model.layers.63.self_attn.q_proj.weight": "model-00048-of-00067.safetensors", "model.layers.63.self_attn.v_proj.bias": "model-00048-of-00067.safetensors", "model.layers.63.self_attn.v_proj.weight": "model-00048-of-00067.safetensors", "model.layers.64.input_layernorm.weight": "model-00048-of-00067.safetensors", "model.layers.64.mlp.down_proj.weight": "model-00048-of-00067.safetensors", "model.layers.64.mlp.gate_proj.weight": "model-00049-of-00067.safetensors", "model.layers.64.mlp.up_proj.weight": "model-00049-of-00067.safetensors", "model.layers.64.post_attention_layernorm.weight": "model-00049-of-00067.safetensors", "model.layers.64.self_attn.k_proj.bias": "model-00049-of-00067.safetensors", "model.layers.64.self_attn.k_proj.weight": "model-00049-of-00067.safetensors", "model.layers.64.self_attn.o_proj.weight": "model-00049-of-00067.safetensors", "model.layers.64.self_attn.q_proj.bias": "model-00049-of-00067.safetensors", "model.layers.64.self_attn.q_proj.weight": "model-00049-of-00067.safetensors", "model.layers.64.self_attn.v_proj.bias": "model-00049-of-00067.safetensors", "model.layers.64.self_attn.v_proj.weight": "model-00049-of-00067.safetensors", "model.layers.65.input_layernorm.weight": "model-00049-of-00067.safetensors", "model.layers.65.mlp.down_proj.weight": "model-00049-of-00067.safetensors", "model.layers.65.mlp.gate_proj.weight": "model-00049-of-00067.safetensors", "model.layers.65.mlp.up_proj.weight": "model-00050-of-00067.safetensors", "model.layers.65.post_attention_layernorm.weight": "model-00050-of-00067.safetensors", "model.layers.65.self_attn.k_proj.bias": "model-00050-of-00067.safetensors", "model.layers.65.self_attn.k_proj.weight": "model-00050-of-00067.safetensors", "model.layers.65.self_attn.o_proj.weight": "model-00050-of-00067.safetensors", "model.layers.65.self_attn.q_proj.bias": "model-00050-of-00067.safetensors", "model.layers.65.self_attn.q_proj.weight": "model-00050-of-00067.safetensors", "model.layers.65.self_attn.v_proj.bias": "model-00050-of-00067.safetensors", "model.layers.65.self_attn.v_proj.weight": "model-00050-of-00067.safetensors", "model.layers.66.input_layernorm.weight": "model-00050-of-00067.safetensors", "model.layers.66.mlp.down_proj.weight": "model-00050-of-00067.safetensors", "model.layers.66.mlp.gate_proj.weight": "model-00050-of-00067.safetensors", "model.layers.66.mlp.up_proj.weight": "model-00050-of-00067.safetensors", "model.layers.66.post_attention_layernorm.weight": "model-00050-of-00067.safetensors", "model.layers.66.self_attn.k_proj.bias": "model-00050-of-00067.safetensors", "model.layers.66.self_attn.k_proj.weight": "model-00050-of-00067.safetensors", "model.layers.66.self_attn.o_proj.weight": "model-00050-of-00067.safetensors", "model.layers.66.self_attn.q_proj.bias": "model-00050-of-00067.safetensors", "model.layers.66.self_attn.q_proj.weight": "model-00051-of-00067.safetensors", "model.layers.66.self_attn.v_proj.bias": "model-00051-of-00067.safetensors", "model.layers.66.self_attn.v_proj.weight": "model-00051-of-00067.safetensors", 
"model.layers.67.input_layernorm.weight": "model-00051-of-00067.safetensors", "model.layers.67.mlp.down_proj.weight": "model-00051-of-00067.safetensors", "model.layers.67.mlp.gate_proj.weight": "model-00051-of-00067.safetensors", "model.layers.67.mlp.up_proj.weight": "model-00051-of-00067.safetensors", "model.layers.67.post_attention_layernorm.weight": "model-00051-of-00067.safetensors", "model.layers.67.self_attn.k_proj.bias": "model-00051-of-00067.safetensors", "model.layers.67.self_attn.k_proj.weight": "model-00051-of-00067.safetensors", "model.layers.67.self_attn.o_proj.weight": "model-00051-of-00067.safetensors", "model.layers.67.self_attn.q_proj.bias": "model-00051-of-00067.safetensors", "model.layers.67.self_attn.q_proj.weight": "model-00051-of-00067.safetensors", "model.layers.67.self_attn.v_proj.bias": "model-00051-of-00067.safetensors", "model.layers.67.self_attn.v_proj.weight": "model-00051-of-00067.safetensors", "model.layers.68.input_layernorm.weight": "model-00051-of-00067.safetensors", "model.layers.68.mlp.down_proj.weight": "model-00051-of-00067.safetensors", "model.layers.68.mlp.gate_proj.weight": "model-00052-of-00067.safetensors", "model.layers.68.mlp.up_proj.weight": "model-00052-of-00067.safetensors", "model.layers.68.post_attention_layernorm.weight": "model-00052-of-00067.safetensors", "model.layers.68.self_attn.k_proj.bias": "model-00052-of-00067.safetensors", "model.layers.68.self_attn.k_proj.weight": "model-00052-of-00067.safetensors", "model.layers.68.self_attn.o_proj.weight": "model-00052-of-00067.safetensors", "model.layers.68.self_attn.q_proj.bias": "model-00052-of-00067.safetensors", "model.layers.68.self_attn.q_proj.weight": "model-00052-of-00067.safetensors", "model.layers.68.self_attn.v_proj.bias": "model-00052-of-00067.safetensors", "model.layers.68.self_attn.v_proj.weight": "model-00052-of-00067.safetensors", "model.layers.69.input_layernorm.weight": "model-00052-of-00067.safetensors", "model.layers.69.mlp.down_proj.weight": "model-00052-of-00067.safetensors", "model.layers.69.mlp.gate_proj.weight": "model-00052-of-00067.safetensors", "model.layers.69.mlp.up_proj.weight": "model-00053-of-00067.safetensors", "model.layers.69.post_attention_layernorm.weight": "model-00053-of-00067.safetensors", "model.layers.69.self_attn.k_proj.bias": "model-00053-of-00067.safetensors", "model.layers.69.self_attn.k_proj.weight": "model-00053-of-00067.safetensors", "model.layers.69.self_attn.o_proj.weight": "model-00053-of-00067.safetensors", "model.layers.69.self_attn.q_proj.bias": "model-00053-of-00067.safetensors", "model.layers.69.self_attn.q_proj.weight": "model-00053-of-00067.safetensors", "model.layers.69.self_attn.v_proj.bias": "model-00053-of-00067.safetensors", "model.layers.69.self_attn.v_proj.weight": "model-00053-of-00067.safetensors", "model.layers.7.input_layernorm.weight": "model-00053-of-00067.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00053-of-00067.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00053-of-00067.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00053-of-00067.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00053-of-00067.safetensors", "model.layers.7.self_attn.k_proj.bias": "model-00053-of-00067.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00053-of-00067.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00053-of-00067.safetensors", "model.layers.7.self_attn.q_proj.bias": "model-00053-of-00067.safetensors", "model.layers.7.self_attn.q_proj.weight": 
"model-00054-of-00067.safetensors", "model.layers.7.self_attn.v_proj.bias": "model-00054-of-00067.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00054-of-00067.safetensors", "model.layers.70.input_layernorm.weight": "model-00054-of-00067.safetensors", "model.layers.70.mlp.down_proj.weight": "model-00054-of-00067.safetensors", "model.layers.70.mlp.gate_proj.weight": "model-00054-of-00067.safetensors", "model.layers.70.mlp.up_proj.weight": "model-00054-of-00067.safetensors", "model.layers.70.post_attention_layernorm.weight": "model-00054-of-00067.safetensors", "model.layers.70.self_attn.k_proj.bias": "model-00054-of-00067.safetensors", "model.layers.70.self_attn.k_proj.weight": "model-00054-of-00067.safetensors", "model.layers.70.self_attn.o_proj.weight": "model-00054-of-00067.safetensors", "model.layers.70.self_attn.q_proj.bias": "model-00054-of-00067.safetensors", "model.layers.70.self_attn.q_proj.weight": "model-00054-of-00067.safetensors", "model.layers.70.self_attn.v_proj.bias": "model-00054-of-00067.safetensors", "model.layers.70.self_attn.v_proj.weight": "model-00054-of-00067.safetensors", "model.layers.71.input_layernorm.weight": "model-00054-of-00067.safetensors", "model.layers.71.mlp.down_proj.weight": "model-00054-of-00067.safetensors", "model.layers.71.mlp.gate_proj.weight": "model-00055-of-00067.safetensors", "model.layers.71.mlp.up_proj.weight": "model-00055-of-00067.safetensors", "model.layers.71.post_attention_layernorm.weight": "model-00055-of-00067.safetensors", "model.layers.71.self_attn.k_proj.bias": "model-00055-of-00067.safetensors", "model.layers.71.self_attn.k_proj.weight": "model-00055-of-00067.safetensors", "model.layers.71.self_attn.o_proj.weight": "model-00055-of-00067.safetensors", "model.layers.71.self_attn.q_proj.bias": "model-00055-of-00067.safetensors", "model.layers.71.self_attn.q_proj.weight": "model-00055-of-00067.safetensors", "model.layers.71.self_attn.v_proj.bias": "model-00055-of-00067.safetensors", "model.layers.71.self_attn.v_proj.weight": "model-00055-of-00067.safetensors", "model.layers.72.input_layernorm.weight": "model-00055-of-00067.safetensors", "model.layers.72.mlp.down_proj.weight": "model-00055-of-00067.safetensors", "model.layers.72.mlp.gate_proj.weight": "model-00055-of-00067.safetensors", "model.layers.72.mlp.up_proj.weight": "model-00056-of-00067.safetensors", "model.layers.72.post_attention_layernorm.weight": "model-00056-of-00067.safetensors", "model.layers.72.self_attn.k_proj.bias": "model-00056-of-00067.safetensors", "model.layers.72.self_attn.k_proj.weight": "model-00056-of-00067.safetensors", "model.layers.72.self_attn.o_proj.weight": "model-00056-of-00067.safetensors", "model.layers.72.self_attn.q_proj.bias": "model-00056-of-00067.safetensors", "model.layers.72.self_attn.q_proj.weight": "model-00056-of-00067.safetensors", "model.layers.72.self_attn.v_proj.bias": "model-00056-of-00067.safetensors", "model.layers.72.self_attn.v_proj.weight": "model-00056-of-00067.safetensors", "model.layers.73.input_layernorm.weight": "model-00056-of-00067.safetensors", "model.layers.73.mlp.down_proj.weight": "model-00056-of-00067.safetensors", "model.layers.73.mlp.gate_proj.weight": "model-00056-of-00067.safetensors", "model.layers.73.mlp.up_proj.weight": "model-00056-of-00067.safetensors", "model.layers.73.post_attention_layernorm.weight": "model-00056-of-00067.safetensors", "model.layers.73.self_attn.k_proj.bias": "model-00056-of-00067.safetensors", "model.layers.73.self_attn.k_proj.weight": "model-00056-of-00067.safetensors", 
"model.layers.73.self_attn.o_proj.weight": "model-00056-of-00067.safetensors", "model.layers.73.self_attn.q_proj.bias": "model-00056-of-00067.safetensors", "model.layers.73.self_attn.q_proj.weight": "model-00057-of-00067.safetensors", "model.layers.73.self_attn.v_proj.bias": "model-00057-of-00067.safetensors", "model.layers.73.self_attn.v_proj.weight": "model-00057-of-00067.safetensors", "model.layers.74.input_layernorm.weight": "model-00057-of-00067.safetensors", "model.layers.74.mlp.down_proj.weight": "model-00057-of-00067.safetensors", "model.layers.74.mlp.gate_proj.weight": "model-00057-of-00067.safetensors", "model.layers.74.mlp.up_proj.weight": "model-00057-of-00067.safetensors", "model.layers.74.post_attention_layernorm.weight": "model-00057-of-00067.safetensors", "model.layers.74.self_attn.k_proj.bias": "model-00057-of-00067.safetensors", "model.layers.74.self_attn.k_proj.weight": "model-00057-of-00067.safetensors", "model.layers.74.self_attn.o_proj.weight": "model-00057-of-00067.safetensors", "model.layers.74.self_attn.q_proj.bias": "model-00057-of-00067.safetensors", "model.layers.74.self_attn.q_proj.weight": "model-00057-of-00067.safetensors", "model.layers.74.self_attn.v_proj.bias": "model-00057-of-00067.safetensors", "model.layers.74.self_attn.v_proj.weight": "model-00057-of-00067.safetensors", "model.layers.75.input_layernorm.weight": "model-00057-of-00067.safetensors", "model.layers.75.mlp.down_proj.weight": "model-00057-of-00067.safetensors", "model.layers.75.mlp.gate_proj.weight": "model-00058-of-00067.safetensors", "model.layers.75.mlp.up_proj.weight": "model-00058-of-00067.safetensors", "model.layers.75.post_attention_layernorm.weight": "model-00058-of-00067.safetensors", "model.layers.75.self_attn.k_proj.bias": "model-00058-of-00067.safetensors", "model.layers.75.self_attn.k_proj.weight": "model-00058-of-00067.safetensors", "model.layers.75.self_attn.o_proj.weight": "model-00058-of-00067.safetensors", "model.layers.75.self_attn.q_proj.bias": "model-00058-of-00067.safetensors", "model.layers.75.self_attn.q_proj.weight": "model-00058-of-00067.safetensors", "model.layers.75.self_attn.v_proj.bias": "model-00058-of-00067.safetensors", "model.layers.75.self_attn.v_proj.weight": "model-00058-of-00067.safetensors", "model.layers.76.input_layernorm.weight": "model-00058-of-00067.safetensors", "model.layers.76.mlp.down_proj.weight": "model-00058-of-00067.safetensors", "model.layers.76.mlp.gate_proj.weight": "model-00058-of-00067.safetensors", "model.layers.76.mlp.up_proj.weight": "model-00059-of-00067.safetensors", "model.layers.76.post_attention_layernorm.weight": "model-00059-of-00067.safetensors", "model.layers.76.self_attn.k_proj.bias": "model-00059-of-00067.safetensors", "model.layers.76.self_attn.k_proj.weight": "model-00059-of-00067.safetensors", "model.layers.76.self_attn.o_proj.weight": "model-00059-of-00067.safetensors", "model.layers.76.self_attn.q_proj.bias": "model-00059-of-00067.safetensors", "model.layers.76.self_attn.q_proj.weight": "model-00059-of-00067.safetensors", "model.layers.76.self_attn.v_proj.bias": "model-00059-of-00067.safetensors", "model.layers.76.self_attn.v_proj.weight": "model-00059-of-00067.safetensors", "model.layers.77.input_layernorm.weight": "model-00059-of-00067.safetensors", "model.layers.77.mlp.down_proj.weight": "model-00059-of-00067.safetensors", "model.layers.77.mlp.gate_proj.weight": "model-00059-of-00067.safetensors", "model.layers.77.mlp.up_proj.weight": "model-00059-of-00067.safetensors", 
"model.layers.77.post_attention_layernorm.weight": "model-00059-of-00067.safetensors", "model.layers.77.self_attn.k_proj.bias": "model-00059-of-00067.safetensors", "model.layers.77.self_attn.k_proj.weight": "model-00059-of-00067.safetensors", "model.layers.77.self_attn.o_proj.weight": "model-00059-of-00067.safetensors", "model.layers.77.self_attn.q_proj.bias": "model-00059-of-00067.safetensors", "model.layers.77.self_attn.q_proj.weight": "model-00060-of-00067.safetensors", "model.layers.77.self_attn.v_proj.bias": "model-00060-of-00067.safetensors", "model.layers.77.self_attn.v_proj.weight": "model-00060-of-00067.safetensors", "model.layers.78.input_layernorm.weight": "model-00060-of-00067.safetensors", "model.layers.78.mlp.down_proj.weight": "model-00060-of-00067.safetensors", "model.layers.78.mlp.gate_proj.weight": "model-00060-of-00067.safetensors", "model.layers.78.mlp.up_proj.weight": "model-00060-of-00067.safetensors", "model.layers.78.post_attention_layernorm.weight": "model-00060-of-00067.safetensors", "model.layers.78.self_attn.k_proj.bias": "model-00060-of-00067.safetensors", "model.layers.78.self_attn.k_proj.weight": "model-00060-of-00067.safetensors", "model.layers.78.self_attn.o_proj.weight": "model-00060-of-00067.safetensors", "model.layers.78.self_attn.q_proj.bias": "model-00060-of-00067.safetensors", "model.layers.78.self_attn.q_proj.weight": "model-00060-of-00067.safetensors", "model.layers.78.self_attn.v_proj.bias": "model-00060-of-00067.safetensors", "model.layers.78.self_attn.v_proj.weight": "model-00060-of-00067.safetensors", "model.layers.79.input_layernorm.weight": "model-00060-of-00067.safetensors", "model.layers.79.mlp.down_proj.weight": "model-00060-of-00067.safetensors", "model.layers.79.mlp.gate_proj.weight": "model-00061-of-00067.safetensors", "model.layers.79.mlp.up_proj.weight": "model-00061-of-00067.safetensors", "model.layers.79.post_attention_layernorm.weight": "model-00061-of-00067.safetensors", "model.layers.79.self_attn.k_proj.bias": "model-00061-of-00067.safetensors", "model.layers.79.self_attn.k_proj.weight": "model-00061-of-00067.safetensors", "model.layers.79.self_attn.o_proj.weight": "model-00061-of-00067.safetensors", "model.layers.79.self_attn.q_proj.bias": "model-00061-of-00067.safetensors", "model.layers.79.self_attn.q_proj.weight": "model-00061-of-00067.safetensors", "model.layers.79.self_attn.v_proj.bias": "model-00061-of-00067.safetensors", "model.layers.79.self_attn.v_proj.weight": "model-00061-of-00067.safetensors", "model.layers.8.input_layernorm.weight": "model-00061-of-00067.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00061-of-00067.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00061-of-00067.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00062-of-00067.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00062-of-00067.safetensors", "model.layers.8.self_attn.k_proj.bias": "model-00062-of-00067.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00062-of-00067.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00062-of-00067.safetensors", "model.layers.8.self_attn.q_proj.bias": "model-00062-of-00067.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00062-of-00067.safetensors", "model.layers.8.self_attn.v_proj.bias": "model-00062-of-00067.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00062-of-00067.safetensors", "model.layers.80.input_layernorm.weight": "model-00062-of-00067.safetensors", "model.layers.80.mlp.down_proj.weight": 
"model-00062-of-00067.safetensors", "model.layers.80.mlp.gate_proj.weight": "model-00062-of-00067.safetensors", "model.layers.80.mlp.up_proj.weight": "model-00062-of-00067.safetensors", "model.layers.80.post_attention_layernorm.weight": "model-00062-of-00067.safetensors", "model.layers.80.self_attn.k_proj.bias": "model-00062-of-00067.safetensors", "model.layers.80.self_attn.k_proj.weight": "model-00062-of-00067.safetensors", "model.layers.80.self_attn.o_proj.weight": "model-00062-of-00067.safetensors", "model.layers.80.self_attn.q_proj.bias": "model-00062-of-00067.safetensors", "model.layers.80.self_attn.q_proj.weight": "model-00063-of-00067.safetensors", "model.layers.80.self_attn.v_proj.bias": "model-00063-of-00067.safetensors", "model.layers.80.self_attn.v_proj.weight": "model-00063-of-00067.safetensors", "model.layers.81.input_layernorm.weight": "model-00063-of-00067.safetensors", "model.layers.81.mlp.down_proj.weight": "model-00063-of-00067.safetensors", "model.layers.81.mlp.gate_proj.weight": "model-00063-of-00067.safetensors", "model.layers.81.mlp.up_proj.weight": "model-00063-of-00067.safetensors", "model.layers.81.post_attention_layernorm.weight": "model-00063-of-00067.safetensors", "model.layers.81.self_attn.k_proj.bias": "model-00063-of-00067.safetensors", "model.layers.81.self_attn.k_proj.weight": "model-00063-of-00067.safetensors", "model.layers.81.self_attn.o_proj.weight": "model-00063-of-00067.safetensors", "model.layers.81.self_attn.q_proj.bias": "model-00063-of-00067.safetensors", "model.layers.81.self_attn.q_proj.weight": "model-00063-of-00067.safetensors", "model.layers.81.self_attn.v_proj.bias": "model-00063-of-00067.safetensors", "model.layers.81.self_attn.v_proj.weight": "model-00063-of-00067.safetensors", "model.layers.82.input_layernorm.weight": "model-00063-of-00067.safetensors", "model.layers.82.mlp.down_proj.weight": "model-00063-of-00067.safetensors", "model.layers.82.mlp.gate_proj.weight": "model-00064-of-00067.safetensors", "model.layers.82.mlp.up_proj.weight": "model-00064-of-00067.safetensors", "model.layers.82.post_attention_layernorm.weight": "model-00064-of-00067.safetensors", "model.layers.82.self_attn.k_proj.bias": "model-00064-of-00067.safetensors", "model.layers.82.self_attn.k_proj.weight": "model-00064-of-00067.safetensors", "model.layers.82.self_attn.o_proj.weight": "model-00064-of-00067.safetensors", "model.layers.82.self_attn.q_proj.bias": "model-00064-of-00067.safetensors", "model.layers.82.self_attn.q_proj.weight": "model-00064-of-00067.safetensors", "model.layers.82.self_attn.v_proj.bias": "model-00064-of-00067.safetensors", "model.layers.82.self_attn.v_proj.weight": "model-00064-of-00067.safetensors", "model.layers.83.input_layernorm.weight": "model-00064-of-00067.safetensors", "model.layers.83.mlp.down_proj.weight": "model-00064-of-00067.safetensors", "model.layers.83.mlp.gate_proj.weight": "model-00064-of-00067.safetensors", "model.layers.83.mlp.up_proj.weight": "model-00065-of-00067.safetensors", "model.layers.83.post_attention_layernorm.weight": "model-00065-of-00067.safetensors", "model.layers.83.self_attn.k_proj.bias": "model-00065-of-00067.safetensors", "model.layers.83.self_attn.k_proj.weight": "model-00065-of-00067.safetensors", "model.layers.83.self_attn.o_proj.weight": "model-00065-of-00067.safetensors", "model.layers.83.self_attn.q_proj.bias": "model-00065-of-00067.safetensors", "model.layers.83.self_attn.q_proj.weight": "model-00065-of-00067.safetensors", "model.layers.83.self_attn.v_proj.bias": "model-00065-of-00067.safetensors", 
"model.layers.83.self_attn.v_proj.weight": "model-00065-of-00067.safetensors", "model.layers.84.input_layernorm.weight": "model-00065-of-00067.safetensors", "model.layers.84.mlp.down_proj.weight": "model-00065-of-00067.safetensors", "model.layers.84.mlp.gate_proj.weight": "model-00065-of-00067.safetensors", "model.layers.84.mlp.up_proj.weight": "model-00065-of-00067.safetensors", "model.layers.84.post_attention_layernorm.weight": "model-00065-of-00067.safetensors", "model.layers.84.self_attn.k_proj.bias": "model-00065-of-00067.safetensors", "model.layers.84.self_attn.k_proj.weight": "model-00065-of-00067.safetensors", "model.layers.84.self_attn.o_proj.weight": "model-00065-of-00067.safetensors", "model.layers.84.self_attn.q_proj.bias": "model-00065-of-00067.safetensors", "model.layers.84.self_attn.q_proj.weight": "model-00066-of-00067.safetensors", "model.layers.84.self_attn.v_proj.bias": "model-00066-of-00067.safetensors", "model.layers.84.self_attn.v_proj.weight": "model-00066-of-00067.safetensors", "model.layers.85.input_layernorm.weight": "model-00066-of-00067.safetensors", "model.layers.85.mlp.down_proj.weight": "model-00066-of-00067.safetensors", "model.layers.85.mlp.gate_proj.weight": "model-00066-of-00067.safetensors", "model.layers.85.mlp.up_proj.weight": "model-00066-of-00067.safetensors", "model.layers.85.post_attention_layernorm.weight": "model-00066-of-00067.safetensors", "model.layers.85.self_attn.k_proj.bias": "model-00066-of-00067.safetensors", "model.layers.85.self_attn.k_proj.weight": "model-00066-of-00067.safetensors", "model.layers.85.self_attn.o_proj.weight": "model-00066-of-00067.safetensors", "model.layers.85.self_attn.q_proj.bias": "model-00066-of-00067.safetensors", "model.layers.85.self_attn.q_proj.weight": "model-00066-of-00067.safetensors", "model.layers.85.self_attn.v_proj.bias": "model-00066-of-00067.safetensors", "model.layers.85.self_attn.v_proj.weight": "model-00066-of-00067.safetensors", "model.layers.9.input_layernorm.weight": "model-00066-of-00067.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00066-of-00067.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00067-of-00067.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00067-of-00067.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00067-of-00067.safetensors", "model.layers.9.self_attn.k_proj.bias": "model-00067-of-00067.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00067-of-00067.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00067-of-00067.safetensors", "model.layers.9.self_attn.q_proj.bias": "model-00067-of-00067.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00067-of-00067.safetensors", "model.layers.9.self_attn.v_proj.bias": "model-00067-of-00067.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00067-of-00067.safetensors", "model.norm.weight": "model-00067-of-00067.safetensors"}}
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
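These definitions are picked up automatically when the tokenizer is loaded. A minimal sketch, assuming the repo id from this model card:

```python
from transformers import AutoTokenizer

# Sketch only: special_tokens_map.json determines the eos/pad tokens below.
tok = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-3.2-instruct-78b")

print(tok.eos_token)                   # <|im_end|>
print(tok.pad_token)                   # <|endoftext|>
print(tok.additional_special_tokens)   # ['<|im_start|>', '<|im_end|>']
```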
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
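The `chat_template` above is the standard ChatML format. A minimal sketch of rendering a prompt with it (repo id taken from this model card; message contents are illustrative):

```python
from transformers import AutoTokenizer

# Sketch only: apply the ChatML chat_template defined in tokenizer_config.json.
tok = AutoTokenizer.from_pretrained("MaziyarPanahi/calme-3.2-instruct-78b")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```

With `add_generation_prompt=True`, the template appends the opening `<|im_start|>assistant` turn so generation continues as the assistant, matching the template's final `{% if add_generation_prompt %}` branch.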
vocab.json ADDED
The diff for this file is too large to render. See raw diff