oodeh committed on
Commit 6e7ce2d · verified · 1 parent: 54105e2

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +7 -0
  2. checkpoint-120/README.md +202 -0
  3. checkpoint-120/adapter_config.json +34 -0
  4. checkpoint-120/added_tokens.json +24 -0
  5. checkpoint-120/merges.txt +0 -0
  6. checkpoint-120/special_tokens_map.json +31 -0
  7. checkpoint-120/tokenizer.json +3 -0
  8. checkpoint-120/tokenizer_config.json +209 -0
  9. checkpoint-120/trainer_state.json +993 -0
  10. checkpoint-180/README.md +202 -0
  11. checkpoint-180/adapter_config.json +34 -0
  12. checkpoint-180/added_tokens.json +24 -0
  13. checkpoint-180/merges.txt +0 -0
  14. checkpoint-180/special_tokens_map.json +31 -0
  15. checkpoint-180/tokenizer_config.json +209 -0
  16. checkpoint-240/README.md +202 -0
  17. checkpoint-240/adapter_config.json +34 -0
  18. checkpoint-240/added_tokens.json +24 -0
  19. checkpoint-240/trainer_state.json +1953 -0
  20. checkpoint-300/README.md +202 -0
  21. checkpoint-300/adapter_config.json +34 -0
  22. checkpoint-300/added_tokens.json +24 -0
  23. checkpoint-300/special_tokens_map.json +31 -0
  24. checkpoint-300/tokenizer_config.json +209 -0
  25. checkpoint-300/vocab.json +0 -0
  26. checkpoint-360/adapter_config.json +34 -0
  27. checkpoint-360/added_tokens.json +24 -0
  28. checkpoint-360/merges.txt +0 -0
  29. checkpoint-360/special_tokens_map.json +31 -0
  30. checkpoint-360/tokenizer_config.json +209 -0
  31. checkpoint-360/trainer_state.json +2913 -0
  32. checkpoint-360/vocab.json +0 -0
  33. checkpoint-420/adapter_config.json +34 -0
  34. checkpoint-420/merges.txt +0 -0
  35. checkpoint-420/special_tokens_map.json +31 -0
  36. checkpoint-420/tokenizer_config.json +209 -0
  37. checkpoint-420/trainer_state.json +3393 -0
  38. checkpoint-420/vocab.json +0 -0
  39. checkpoint-480/adapter_config.json +34 -0
  40. checkpoint-480/added_tokens.json +24 -0
  41. checkpoint-480/trainer_state.json +0 -0
  42. checkpoint-480/vocab.json +0 -0
  43. checkpoint-540/merges.txt +0 -0
  44. checkpoint-540/tokenizer_config.json +209 -0
  45. checkpoint-540/vocab.json +0 -0
  46. checkpoint-60/README.md +202 -0
  47. checkpoint-60/adapter_config.json +34 -0
  48. checkpoint-60/added_tokens.json +24 -0
  49. checkpoint-60/merges.txt +0 -0
  50. checkpoint-60/rng_state_1.pth +3 -0
.gitattributes CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ checkpoint-660/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-720/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-780/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-960/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-840/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoint-120/tokenizer.json filter=lfs diff=lfs merge=lfs -text
checkpoint-120/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen2.5-Coder-14B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.12.0
checkpoint-120/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-Coder-14B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "k_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
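
Note: the adapter_config.json above is a standard PEFT LoRA configuration — rank-16 adapters with lora_alpha 32 and 0.1 dropout over all seven attention and MLP projections of Qwen/Qwen2.5-Coder-14B-Instruct. A minimal loading sketch follows, assuming the checkpoint directory (including an adapter weights file, which this truncated 50-file view does not show) has been downloaded locally; the local path is illustrative.

```python
# Minimal sketch: attach this LoRA checkpoint to its base model with PEFT.
# "checkpoint-120" is an assumed local download of this directory; an
# adapter weights file next to adapter_config.json is assumed present.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-Coder-14B-Instruct",  # base_model_name_or_path above
    torch_dtype="auto",
    device_map="auto",
)
# PeftModel reads adapter_config.json and injects r=16, alpha=32 LoRA
# layers into q/k/v/o_proj and gate/up/down_proj, as listed above.
model = PeftModel.from_pretrained(base, "checkpoint-120")
tokenizer = AutoTokenizer.from_pretrained("checkpoint-120")
```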
checkpoint-120/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
checkpoint-120/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-120/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-120/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
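
Note: checkpoint-120/tokenizer.json is stored through Git LFS — which is exactly what the new .gitattributes rules earlier in this diff arrange — so the three lines above are the LFS pointer file (spec version, SHA-256 object ID, and size in bytes), not the ~11 MB tokenizer itself. A small sketch of reading such a pointer; the local path is hypothetical, and a real checkout replaces the pointer with the full file when `git lfs pull` runs.

```python
# Minimal sketch: parse a Git LFS pointer file like the three lines above.
# Requires Python 3.9+ for str.removeprefix.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            # Each pointer line is "<key> <value>".
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

info = parse_lfs_pointer("checkpoint-120/tokenizer.json")  # hypothetical path
assert info["size_bytes"] == 11421896
```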
checkpoint-120/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 17500,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
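
Note: the chat_template field above is the Qwen ChatML template — it wraps every turn in <|im_start|>role ... <|im_end|>, falls back to the default Qwen system prompt when none is given, and serializes tool calls as JSON inside <tool_call> tags. A sketch of rendering a prompt through it with transformers; the checkpoint path and message are illustrative only.

```python
# Sketch: render a conversation through the chat_template above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-120")  # assumed local path
messages = [
    {"role": "user", "content": "Write a Python function that reverses a string."},
]
# apply_chat_template executes the Jinja template: default Qwen system
# prompt, <|im_start|>/<|im_end|> turn markers, then a generation prompt.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Expected shape of the output:
# <|im_start|>system
# You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Write a Python function that reverses a string.<|im_end|>
# <|im_start|>assistant
```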
checkpoint-120/trainer_state.json ADDED
@@ -0,0 +1,993 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.980269989615784,
+   "eval_steps": 500,
+   "global_step": 120,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.016614745586708203, "grad_norm": 0.050998032093048096, "learning_rate": 4.999991432639962e-05, "loss": 0.5487, "num_input_tokens_seen": 70408, "step": 1},
+     {"epoch": 0.033229491173416406, "grad_norm": 0.049370743334293365, "learning_rate": 4.999965730618567e-05, "loss": 0.4981, "num_input_tokens_seen": 139640, "step": 2},
+     {"epoch": 0.04984423676012461, "grad_norm": 0.05077400803565979, "learning_rate": 4.9999228941119745e-05, "loss": 0.5505, "num_input_tokens_seen": 223656, "step": 3},
+     {"epoch": 0.06645898234683281, "grad_norm": 0.04397282376885414, "learning_rate": 4.999862923413781e-05, "loss": 0.504, "num_input_tokens_seen": 300688, "step": 4},
+     {"epoch": 0.08307372793354102, "grad_norm": 0.05225864797830582, "learning_rate": 4.999785818935018e-05, "loss": 0.4925, "num_input_tokens_seen": 366368, "step": 5},
+     {"epoch": 0.09968847352024922, "grad_norm": 0.049482282251119614, "learning_rate": 4.999691581204152e-05, "loss": 0.4771, "num_input_tokens_seen": 445808, "step": 6},
+     {"epoch": 0.11630321910695743, "grad_norm": 0.05594080314040184, "learning_rate": 4.9995802108670775e-05, "loss": 0.4986, "num_input_tokens_seen": 522800, "step": 7},
+     {"epoch": 0.13291796469366562, "grad_norm": 0.051852282136678696, "learning_rate": 4.999451708687114e-05, "loss": 0.5171, "num_input_tokens_seen": 599608, "step": 8},
+     {"epoch": 0.14953271028037382, "grad_norm": 0.045517683029174805, "learning_rate": 4.9993060755450015e-05, "loss": 0.5669, "num_input_tokens_seen": 681424, "step": 9},
+     {"epoch": 0.16614745586708204, "grad_norm": 0.044325754046440125, "learning_rate": 4.999143312438893e-05, "loss": 0.4218, "num_input_tokens_seen": 756744, "step": 10},
+     {"epoch": 0.18276220145379024, "grad_norm": 0.04328459873795509, "learning_rate": 4.998963420484349e-05, "loss": 0.434, "num_input_tokens_seen": 842576, "step": 11},
+     {"epoch": 0.19937694704049844, "grad_norm": 0.04725787043571472, "learning_rate": 4.998766400914329e-05, "loss": 0.4287, "num_input_tokens_seen": 917232, "step": 12},
+     {"epoch": 0.21599169262720663, "grad_norm": 0.03806879371404648, "learning_rate": 4.9985522550791825e-05, "loss": 0.3454, "num_input_tokens_seen": 1006800, "step": 13},
+     {"epoch": 0.23260643821391486, "grad_norm": 0.05201176926493645, "learning_rate": 4.998320984446641e-05, "loss": 0.436, "num_input_tokens_seen": 1085824, "step": 14},
+     {"epoch": 0.24922118380062305, "grad_norm": 0.047955628484487534, "learning_rate": 4.9980725906018074e-05, "loss": 0.4625, "num_input_tokens_seen": 1164160, "step": 15},
+     {"epoch": 0.26583592938733125, "grad_norm": 0.05529098957777023, "learning_rate": 4.997807075247146e-05, "loss": 0.5035, "num_input_tokens_seen": 1242264, "step": 16},
+     {"epoch": 0.2824506749740395, "grad_norm": 0.04751162976026535, "learning_rate": 4.997524440202469e-05, "loss": 0.4354, "num_input_tokens_seen": 1325904, "step": 17},
+     {"epoch": 0.29906542056074764, "grad_norm": 0.06726882606744766, "learning_rate": 4.9972246874049254e-05, "loss": 0.5439, "num_input_tokens_seen": 1385632, "step": 18},
+     {"epoch": 0.31568016614745587, "grad_norm": 0.05245920270681381, "learning_rate": 4.996907818908987e-05, "loss": 0.3727, "num_input_tokens_seen": 1470632, "step": 19},
+     {"epoch": 0.3322949117341641, "grad_norm": 0.05745376646518707, "learning_rate": 4.996573836886435e-05, "loss": 0.4894, "num_input_tokens_seen": 1547536, "step": 20},
+     {"epoch": 0.34890965732087226, "grad_norm": 0.056607529520988464, "learning_rate": 4.9962227436263453e-05, "loss": 0.3846, "num_input_tokens_seen": 1615528, "step": 21},
+     {"epoch": 0.3655244029075805, "grad_norm": 0.06150667741894722, "learning_rate": 4.995854541535071e-05, "loss": 0.4362, "num_input_tokens_seen": 1694352, "step": 22},
+     {"epoch": 0.3821391484942887, "grad_norm": 0.056484442204236984, "learning_rate": 4.9954692331362294e-05, "loss": 0.4438, "num_input_tokens_seen": 1753776, "step": 23},
+     {"epoch": 0.3987538940809969, "grad_norm": 0.0704159140586853, "learning_rate": 4.995066821070679e-05, "loss": 0.4496, "num_input_tokens_seen": 1809048, "step": 24},
+     {"epoch": 0.4153686396677051, "grad_norm": 0.06202029809355736, "learning_rate": 4.994647308096509e-05, "loss": 0.5096, "num_input_tokens_seen": 1884264, "step": 25},
+     {"epoch": 0.43198338525441327, "grad_norm": 0.04237145930528641, "learning_rate": 4.994210697089014e-05, "loss": 0.3722, "num_input_tokens_seen": 1981704, "step": 26},
+     {"epoch": 0.4485981308411215, "grad_norm": 0.06920398026704788, "learning_rate": 4.9937569910406756e-05, "loss": 0.4103, "num_input_tokens_seen": 2044144, "step": 27},
+     {"epoch": 0.4652128764278297, "grad_norm": 0.062432270497083664, "learning_rate": 4.9932861930611454e-05, "loss": 0.357, "num_input_tokens_seen": 2107584, "step": 28},
+     {"epoch": 0.4818276220145379, "grad_norm": 0.06791180372238159, "learning_rate": 4.9927983063772196e-05, "loss": 0.3889, "num_input_tokens_seen": 2169248, "step": 29},
+     {"epoch": 0.4984423676012461, "grad_norm": 0.07219590991735458, "learning_rate": 4.99229333433282e-05, "loss": 0.3543, "num_input_tokens_seen": 2230344, "step": 30},
+     {"epoch": 0.5150571131879543, "grad_norm": 0.0647474005818367, "learning_rate": 4.9917712803889674e-05, "loss": 0.3453, "num_input_tokens_seen": 2302368, "step": 31},
+     {"epoch": 0.5316718587746625, "grad_norm": 0.07434642314910889, "learning_rate": 4.991232148123761e-05, "loss": 0.435, "num_input_tokens_seen": 2369984, "step": 32},
+     {"epoch": 0.5482866043613707, "grad_norm": 0.05302443355321884, "learning_rate": 4.990675941232353e-05, "loss": 0.3981, "num_input_tokens_seen": 2453032, "step": 33},
+     {"epoch": 0.564901349948079, "grad_norm": 0.053745292127132416, "learning_rate": 4.990102663526924e-05, "loss": 0.3755, "num_input_tokens_seen": 2527464, "step": 34},
+     {"epoch": 0.5815160955347871, "grad_norm": 0.06717613339424133, "learning_rate": 4.989512318936655e-05, "loss": 0.3699, "num_input_tokens_seen": 2597032, "step": 35},
+     {"epoch": 0.5981308411214953, "grad_norm": 0.071847103536129, "learning_rate": 4.9889049115077005e-05, "loss": 0.3705, "num_input_tokens_seen": 2671704, "step": 36},
+     {"epoch": 0.6147455867082036, "grad_norm": 0.0460306741297245, "learning_rate": 4.988280445403164e-05, "loss": 0.3797, "num_input_tokens_seen": 2767640, "step": 37},
+     {"epoch": 0.6313603322949117, "grad_norm": 0.053273387253284454, "learning_rate": 4.987638924903067e-05, "loss": 0.3799, "num_input_tokens_seen": 2843720, "step": 38},
+     {"epoch": 0.6479750778816199, "grad_norm": 0.05600422993302345, "learning_rate": 4.9869803544043166e-05, "loss": 0.2866, "num_input_tokens_seen": 2921472, "step": 39},
+     {"epoch": 0.6645898234683282, "grad_norm": 0.06414052098989487, "learning_rate": 4.9863047384206835e-05, "loss": 0.4115, "num_input_tokens_seen": 2998400, "step": 40},
+     {"epoch": 0.6812045690550363, "grad_norm": 0.09214208275079727, "learning_rate": 4.985612081582764e-05, "loss": 0.3804, "num_input_tokens_seen": 3059648, "step": 41},
+     {"epoch": 0.6978193146417445, "grad_norm": 0.0555964931845665, "learning_rate": 4.98490238863795e-05, "loss": 0.3121, "num_input_tokens_seen": 3140184, "step": 42},
+     {"epoch": 0.7144340602284528, "grad_norm": 0.06256969273090363, "learning_rate": 4.984175664450397e-05, "loss": 0.3271, "num_input_tokens_seen": 3207184, "step": 43},
+     {"epoch": 0.731048805815161, "grad_norm": 0.0543232187628746, "learning_rate": 4.983431914000991e-05, "loss": 0.364, "num_input_tokens_seen": 3292344, "step": 44},
+     {"epoch": 0.7476635514018691, "grad_norm": 0.06077824532985687, "learning_rate": 4.982671142387316e-05, "loss": 0.3894, "num_input_tokens_seen": 3365384, "step": 45},
+     {"epoch": 0.7642782969885774, "grad_norm": 0.06091070920228958, "learning_rate": 4.981893354823614e-05, "loss": 0.3354, "num_input_tokens_seen": 3440720, "step": 46},
+     {"epoch": 0.7808930425752856, "grad_norm": 0.054153311997652054, "learning_rate": 4.9810985566407544e-05, "loss": 0.3058, "num_input_tokens_seen": 3533576, "step": 47},
+     {"epoch": 0.7975077881619937, "grad_norm": 0.06662417948246002, "learning_rate": 4.980286753286195e-05, "loss": 0.4658, "num_input_tokens_seen": 3599744, "step": 48},
+     {"epoch": 0.814122533748702, "grad_norm": 0.05790851265192032, "learning_rate": 4.979457950323945e-05, "loss": 0.3647, "num_input_tokens_seen": 3689520, "step": 49},
+     {"epoch": 0.8307372793354102, "grad_norm": 0.10742159187793732, "learning_rate": 4.9786121534345265e-05, "loss": 0.343, "num_input_tokens_seen": 3751808, "step": 50},
+     {"epoch": 0.8473520249221184, "grad_norm": 0.05565556138753891, "learning_rate": 4.9777493684149375e-05, "loss": 0.3317, "num_input_tokens_seen": 3839096, "step": 51},
+     {"epoch": 0.8639667705088265, "grad_norm": 0.05752381682395935, "learning_rate": 4.976869601178609e-05, "loss": 0.38, "num_input_tokens_seen": 3919824, "step": 52},
+     {"epoch": 0.8805815160955348, "grad_norm": 0.06406434625387192, "learning_rate": 4.975972857755369e-05, "loss": 0.2676, "num_input_tokens_seen": 3989312, "step": 53},
+     {"epoch": 0.897196261682243, "grad_norm": 0.0653691440820694, "learning_rate": 4.975059144291394e-05, "loss": 0.3516, "num_input_tokens_seen": 4060528, "step": 54},
+     {"epoch": 0.9138110072689511, "grad_norm": 0.06272953748703003, "learning_rate": 4.974128467049176e-05, "loss": 0.3004, "num_input_tokens_seen": 4129368, "step": 55},
+     {"epoch": 0.9304257528556594, "grad_norm": 0.08054930716753006, "learning_rate": 4.9731808324074717e-05, "loss": 0.3009, "num_input_tokens_seen": 4175208, "step": 56},
+     {"epoch": 0.9470404984423676, "grad_norm": 0.07523038238286972, "learning_rate": 4.972216246861262e-05, "loss": 0.2814, "num_input_tokens_seen": 4218096, "step": 57},
+     {"epoch": 0.9636552440290758, "grad_norm": 0.07347433269023895, "learning_rate": 4.971234717021709e-05, "loss": 0.3321, "num_input_tokens_seen": 4275968, "step": 58},
+     {"epoch": 0.980269989615784, "grad_norm": 0.05830248445272446, "learning_rate": 4.9702362496161085e-05, "loss": 0.2881, "num_input_tokens_seen": 4346616, "step": 59},
+     {"epoch": 0.9968847352024922, "grad_norm": 0.061629410833120346, "learning_rate": 4.9692208514878444e-05, "loss": 0.2993, "num_input_tokens_seen": 4425064, "step": 60},
+     {"epoch": 1.0, "grad_norm": 0.13380740582942963, "learning_rate": 4.968188529596342e-05, "loss": 0.2511, "num_input_tokens_seen": 4435328, "step": 61},
+     {"epoch": 1.0166147455867083, "grad_norm": 0.0726238414645195, "learning_rate": 4.9671392910170185e-05, "loss": 0.3127, "num_input_tokens_seen": 4500104, "step": 62},
+     {"epoch": 1.0332294911734163, "grad_norm": 0.05980083718895912, "learning_rate": 4.966073142941239e-05, "loss": 0.3601, "num_input_tokens_seen": 4581976, "step": 63},
+     {"epoch": 1.0498442367601246, "grad_norm": 0.06445376574993134, "learning_rate": 4.964990092676263e-05, "loss": 0.3049, "num_input_tokens_seen": 4652160, "step": 64},
+     {"epoch": 1.066458982346833, "grad_norm": 0.07824505120515823, "learning_rate": 4.9638901476451946e-05, "loss": 0.3099, "num_input_tokens_seen": 4709368, "step": 65},
+     {"epoch": 1.083073727933541, "grad_norm": 0.058268457651138306, "learning_rate": 4.962773315386935e-05, "loss": 0.3273, "num_input_tokens_seen": 4798256, "step": 66},
+     {"epoch": 1.0996884735202492, "grad_norm": 0.07069691270589828, "learning_rate": 4.961639603556127e-05, "loss": 0.282, "num_input_tokens_seen": 4859200, "step": 67},
+     {"epoch": 1.1163032191069575, "grad_norm": 0.0775996670126915, "learning_rate": 4.960489019923105e-05, "loss": 0.3642, "num_input_tokens_seen": 4925992, "step": 68},
+     {"epoch": 1.1329179646936656, "grad_norm": 0.07044171541929245, "learning_rate": 4.9593215723738404e-05, "loss": 0.2896, "num_input_tokens_seen": 4998808, "step": 69},
+     {"epoch": 1.1495327102803738, "grad_norm": 0.05971802771091461, "learning_rate": 4.958137268909887e-05, "loss": 0.2578, "num_input_tokens_seen": 5089672, "step": 70},
+     {"epoch": 1.1661474558670821, "grad_norm": 0.07145556062459946, "learning_rate": 4.9569361176483286e-05, "loss": 0.3243, "num_input_tokens_seen": 5166744, "step": 71},
+     {"epoch": 1.1827622014537902, "grad_norm": 0.07455787807703018, "learning_rate": 4.9557181268217227e-05, "loss": 0.3949, "num_input_tokens_seen": 5228264, "step": 72},
+     {"epoch": 1.1993769470404985, "grad_norm": 0.055582575500011444, "learning_rate": 4.9544833047780394e-05, "loss": 0.2877, "num_input_tokens_seen": 5338224, "step": 73},
+     {"epoch": 1.2159916926272065, "grad_norm": 0.07675391435623169, "learning_rate": 4.9532316599806124e-05, "loss": 0.3152, "num_input_tokens_seen": 5399848, "step": 74},
+     {"epoch": 1.2326064382139148, "grad_norm": 0.08048644661903381, "learning_rate": 4.951963201008076e-05, "loss": 0.2976, "num_input_tokens_seen": 5468624, "step": 75},
+     {"epoch": 1.249221183800623, "grad_norm": 0.07579060643911362, "learning_rate": 4.9506779365543046e-05, "loss": 0.2982, "num_input_tokens_seen": 5536776, "step": 76},
+     {"epoch": 1.2658359293873311, "grad_norm": 0.07828006893396378, "learning_rate": 4.949375875428357e-05, "loss": 0.3272, "num_input_tokens_seen": 5609296, "step": 77},
+     {"epoch": 1.2824506749740394, "grad_norm": 0.08079098165035248, "learning_rate": 4.9480570265544144e-05, "loss": 0.2768, "num_input_tokens_seen": 5663824, "step": 78},
+     {"epoch": 1.2990654205607477, "grad_norm": 0.07579358667135239, "learning_rate": 4.94672139897172e-05, "loss": 0.318, "num_input_tokens_seen": 5742032, "step": 79},
+     {"epoch": 1.3156801661474558, "grad_norm": 0.07588379085063934, "learning_rate": 4.9453690018345144e-05, "loss": 0.3007, "num_input_tokens_seen": 5816864, "step": 80},
+     {"epoch": 1.332294911734164, "grad_norm": 0.08709035068750381, "learning_rate": 4.943999844411977e-05, "loss": 0.2797, "num_input_tokens_seen": 5881624, "step": 81},
+     {"epoch": 1.3489096573208723, "grad_norm": 0.05975884944200516, "learning_rate": 4.94261393608816e-05, "loss": 0.2591, "num_input_tokens_seen": 5970272, "step": 82},
+     {"epoch": 1.3655244029075804, "grad_norm": 0.07372818142175674, "learning_rate": 4.941211286361922e-05, "loss": 0.2687, "num_input_tokens_seen": 6058752, "step": 83},
+     {"epoch": 1.3821391484942886, "grad_norm": 0.09071576595306396, "learning_rate": 4.939791904846869e-05, "loss": 0.2979, "num_input_tokens_seen": 6120064, "step": 84},
+     {"epoch": 1.398753894080997, "grad_norm": 0.0849960595369339, "learning_rate": 4.938355801271282e-05, "loss": 0.2927, "num_input_tokens_seen": 6182072, "step": 85},
+     {"epoch": 1.415368639667705, "grad_norm": 0.08258760720491409, "learning_rate": 4.936902985478055e-05, "loss": 0.295, "num_input_tokens_seen": 6269680, "step": 86},
+     {"epoch": 1.4319833852544133, "grad_norm": 0.0851503536105156, "learning_rate": 4.935433467424624e-05, "loss": 0.2925, "num_input_tokens_seen": 6347424, "step": 87},
+     {"epoch": 1.4485981308411215, "grad_norm": 0.08852345496416092, "learning_rate": 4.933947257182901e-05, "loss": 0.3153, "num_input_tokens_seen": 6412584, "step": 88},
+     {"epoch": 1.4652128764278296, "grad_norm": 0.08184897154569626, "learning_rate": 4.932444364939205e-05, "loss": 0.292, "num_input_tokens_seen": 6482728, "step": 89},
+     {"epoch": 1.4818276220145379, "grad_norm": 0.08270515501499176, "learning_rate": 4.9309248009941914e-05, "loss": 0.3472, "num_input_tokens_seen": 6562104, "step": 90},
+     {"epoch": 1.4984423676012462, "grad_norm": 0.07407747954130173, "learning_rate": 4.929388575762782e-05, "loss": 0.2995, "num_input_tokens_seen": 6656552, "step": 91},
+     {"epoch": 1.5150571131879542, "grad_norm": 0.08710360527038574, "learning_rate": 4.9278356997740904e-05, "loss": 0.2549, "num_input_tokens_seen": 6714184, "step": 92},
+     {"epoch": 1.5316718587746625, "grad_norm": 0.0773790255188942, "learning_rate": 4.9262661836713564e-05, "loss": 0.2814, "num_input_tokens_seen": 6793552, "step": 93},
+     {"epoch": 1.5482866043613708, "grad_norm": 0.1002134457230568, "learning_rate": 4.924680038211867e-05, "loss": 0.2876, "num_input_tokens_seen": 6865256, "step": 94},
+     {"epoch": 1.5649013499480788, "grad_norm": 0.09670394659042358, "learning_rate": 4.9230772742668866e-05, "loss": 0.2846, "num_input_tokens_seen": 6931152, "step": 95},
+     {"epoch": 1.5815160955347871, "grad_norm": 0.08910100907087326, "learning_rate": 4.9214579028215776e-05, "loss": 0.2944, "num_input_tokens_seen": 6998408, "step": 96},
+     {"epoch": 1.5981308411214954, "grad_norm": 0.09202459454536438, "learning_rate": 4.919821934974933e-05, "loss": 0.251, "num_input_tokens_seen": 7053008, "step": 97},
+     {"epoch": 1.6147455867082035, "grad_norm": 0.10218881815671921, "learning_rate": 4.918169381939692e-05, "loss": 0.2851, "num_input_tokens_seen": 7106440, "step": 98},
+     {"epoch": 1.6313603322949117, "grad_norm": 0.09290914982557297, "learning_rate": 4.916500255042268e-05, "loss": 0.2959, "num_input_tokens_seen": 7167032, "step": 99},
+     {"epoch": 1.64797507788162, "grad_norm": 0.07791033387184143, "learning_rate": 4.914814565722671e-05, "loss": 0.2481, "num_input_tokens_seen": 7245720, "step": 100},
+     {"epoch": 1.664589823468328, "grad_norm": 0.08885534107685089, "learning_rate": 4.913112325534426e-05, "loss": 0.3168, "num_input_tokens_seen": 7326320, "step": 101},
+     {"epoch": 1.6812045690550363, "grad_norm": 0.08569750934839249, "learning_rate": 4.9113935461444955e-05, "loss": 0.2805, "num_input_tokens_seen": 7442232, "step": 102},
+     {"epoch": 1.6978193146417446, "grad_norm": 0.1112508773803711, "learning_rate": 4.9096582393332025e-05, "loss": 0.2675, "num_input_tokens_seen": 7502496, "step": 103},
+     {"epoch": 1.7144340602284527, "grad_norm": 0.09654372185468674, "learning_rate": 4.907906416994146e-05, "loss": 0.3038, "num_input_tokens_seen": 7566496, "step": 104},
+     {"epoch": 1.731048805815161, "grad_norm": 0.10022995620965958, "learning_rate": 4.906138091134118e-05, "loss": 0.3639, "num_input_tokens_seen": 7629056, "step": 105},
+     {"epoch": 1.7476635514018692, "grad_norm": 0.08336564153432846, "learning_rate": 4.9043532738730284e-05, "loss": 0.2944, "num_input_tokens_seen": 7706096, "step": 106},
+     {"epoch": 1.7642782969885773, "grad_norm": 0.08539658784866333, "learning_rate": 4.9025519774438136e-05, "loss": 0.2392, "num_input_tokens_seen": 7780072, "step": 107},
+     {"epoch": 1.7808930425752856, "grad_norm": 0.09139693528413773, "learning_rate": 4.900734214192358e-05, "loss": 0.2685, "num_input_tokens_seen": 7857712, "step": 108},
+     {"epoch": 1.7975077881619939, "grad_norm": 0.1043916717171669, "learning_rate": 4.898899996577407e-05, "loss": 0.2513, "num_input_tokens_seen": 7916832, "step": 109},
+     {"epoch": 1.814122533748702, "grad_norm": 0.09203662723302841, "learning_rate": 4.8970493371704826e-05, "loss": 0.2974, "num_input_tokens_seen": 7993056, "step": 110},
+     {"epoch": 1.8307372793354102, "grad_norm": 0.09319474548101425, "learning_rate": 4.8951822486557986e-05, "loss": 0.3096, "num_input_tokens_seen": 8090056, "step": 111},
+     {"epoch": 1.8473520249221185, "grad_norm": 0.10193445533514023, "learning_rate": 4.893298743830168e-05, "loss": 0.2633, "num_input_tokens_seen": 8164808, "step": 112},
+     {"epoch": 1.8639667705088265, "grad_norm": 0.11407948285341263, "learning_rate": 4.891398835602925e-05, "loss": 0.2584, "num_input_tokens_seen": 8223568, "step": 113},
+     {"epoch": 1.8805815160955348, "grad_norm": 0.11977085471153259, "learning_rate": 4.8894825369958255e-05, "loss": 0.2619, "num_input_tokens_seen": 8276160, "step": 114},
+     {"epoch": 1.897196261682243, "grad_norm": 0.10925433784723282, "learning_rate": 4.8875498611429674e-05, "loss": 0.2762, "num_input_tokens_seen": 8354904, "step": 115},
+     {"epoch": 1.9138110072689511, "grad_norm": 0.09673939645290375, "learning_rate": 4.8856008212906925e-05, "loss": 0.3152, "num_input_tokens_seen": 8442584, "step": 116},
+     {"epoch": 1.9304257528556594, "grad_norm": 0.10827789455652237, "learning_rate": 4.8836354307975026e-05, "loss": 0.2759, "num_input_tokens_seen": 8506688, "step": 117},
+     {"epoch": 1.9470404984423677, "grad_norm": 0.08390220254659653, "learning_rate": 4.881653703133966e-05, "loss": 0.2192, "num_input_tokens_seen": 8610712, "step": 118},
+     {"epoch": 1.9636552440290758, "grad_norm": 0.09252211451530457, "learning_rate": 4.87965565188262e-05, "loss": 0.2618, "num_input_tokens_seen": 8692624, "step": 119},
+     {"epoch": 1.980269989615784, "grad_norm": 0.1107102632522583, "learning_rate": 4.877641290737884e-05, "loss": 0.2666, "num_input_tokens_seen": 8772208, "step": 120}
+   ],
+   "logging_steps": 1.0,
+   "max_steps": 1200,
+   "num_input_tokens_seen": 8772208,
+   "num_train_epochs": 20,
+   "save_steps": 60,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 7.400381371991982e+17,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
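
Note: because logging_steps is 1.0, log_history above carries one record per optimizer step, so the run's training-loss curve (roughly 0.55 falling to 0.27 across the first two epochs of a 1200-step, 20-epoch schedule) can be read straight out of this file. A minimal parsing sketch; the local path is illustrative and matplotlib is an assumed extra dependency.

```python
# Sketch: extract the per-step loss curve from trainer_state.json.
import json

with open("checkpoint-120/trainer_state.json") as f:  # assumed local path
    state = json.load(f)

# Each log_history record here has a "loss"; the guard keeps the code safe
# for later checkpoints that may append eval-only records.
steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]
print(f"{state['global_step']} steps logged, final loss {losses[-1]}")

import matplotlib.pyplot as plt

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("loss_curve.png")
```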
checkpoint-180/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen2.5-Coder-14B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.12.0
checkpoint-180/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-Coder-14B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "k_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
checkpoint-180/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
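
added_tokens.json pins the vocabulary IDs of the Qwen special and FIM tokens. A quick sanity check (not part of the repository; the path is illustrative) that a tokenizer loaded from the checkpoint reproduces these IDs:

```python
# Sketch: confirm a few of the token-to-ID mappings listed above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-180")  # illustrative local path
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_tokens_to_ids("<tool_call>") == 151657
```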
checkpoint-180/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-180/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-180/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 17500,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
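
tokenizer_config.json carries the ChatML-style chat template, right-side padding, and a model_max_length of 17500. A short sketch (the local path is illustrative) of rendering a conversation through that template:

```python
# Sketch: apply the chat template shipped with the checkpoint's tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-180")  # illustrative local path
messages = [
    {"role": "user", "content": "Write a function that reverses a string."},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # <|im_start|>system ... <|im_start|>assistant
```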
checkpoint-240/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen2.5-Coder-14B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.12.0
checkpoint-240/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2.5-Coder-14B-Instruct",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "v_proj",
+ "q_proj",
+ "o_proj",
+ "k_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
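
The checkpoint-240 adapter uses the same LoRA configuration as checkpoint-180. For export or latency-sensitive inference, one option (an assumption about a possible workflow, not something stated in this commit) is to fold the adapter into the base weights with PEFT's merge_and_unload; paths below are illustrative:

```python
# Sketch: merge the LoRA weights into the base model and save the result.
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-Coder-14B-Instruct", torch_dtype="auto"
)
merged = PeftModel.from_pretrained(base, "checkpoint-240").merge_and_unload()
merged.save_pretrained("qwen2.5-coder-14b-merged")  # hypothetical output directory
```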
checkpoint-240/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
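
The trainer_state.json that follows logs one entry per optimizer step (epoch, grad_norm, learning_rate, loss, num_input_tokens_seen, step). A minimal sketch (the file path is illustrative) for pulling the loss curve out of such a log:

```python
# Sketch: read the per-step training log from a checkpoint's trainer_state.json.
import json

with open("checkpoint-240/trainer_state.json") as f:
    state = json.load(f)

entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]
print(f"{len(entries)} logged steps; final loss {losses[-1]:.4f}")
```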
checkpoint-240/trainer_state.json ADDED
@@ -0,0 +1,1953 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.9470404984423677,
5
+ "eval_steps": 500,
6
+ "global_step": 240,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.016614745586708203,
13
+ "grad_norm": 0.050998032093048096,
14
+ "learning_rate": 4.999991432639962e-05,
15
+ "loss": 0.5487,
16
+ "num_input_tokens_seen": 70408,
17
+ "step": 1
18
+ },
19
+ {
20
+ "epoch": 0.033229491173416406,
21
+ "grad_norm": 0.049370743334293365,
22
+ "learning_rate": 4.999965730618567e-05,
23
+ "loss": 0.4981,
24
+ "num_input_tokens_seen": 139640,
25
+ "step": 2
26
+ },
27
+ {
28
+ "epoch": 0.04984423676012461,
29
+ "grad_norm": 0.05077400803565979,
30
+ "learning_rate": 4.9999228941119745e-05,
31
+ "loss": 0.5505,
32
+ "num_input_tokens_seen": 223656,
33
+ "step": 3
34
+ },
35
+ {
36
+ "epoch": 0.06645898234683281,
37
+ "grad_norm": 0.04397282376885414,
38
+ "learning_rate": 4.999862923413781e-05,
39
+ "loss": 0.504,
40
+ "num_input_tokens_seen": 300688,
41
+ "step": 4
42
+ },
43
+ {
44
+ "epoch": 0.08307372793354102,
45
+ "grad_norm": 0.05225864797830582,
46
+ "learning_rate": 4.999785818935018e-05,
47
+ "loss": 0.4925,
48
+ "num_input_tokens_seen": 366368,
49
+ "step": 5
50
+ },
51
+ {
52
+ "epoch": 0.09968847352024922,
53
+ "grad_norm": 0.049482282251119614,
54
+ "learning_rate": 4.999691581204152e-05,
55
+ "loss": 0.4771,
56
+ "num_input_tokens_seen": 445808,
57
+ "step": 6
58
+ },
59
+ {
60
+ "epoch": 0.11630321910695743,
61
+ "grad_norm": 0.05594080314040184,
62
+ "learning_rate": 4.9995802108670775e-05,
63
+ "loss": 0.4986,
64
+ "num_input_tokens_seen": 522800,
65
+ "step": 7
66
+ },
67
+ {
68
+ "epoch": 0.13291796469366562,
69
+ "grad_norm": 0.051852282136678696,
70
+ "learning_rate": 4.999451708687114e-05,
71
+ "loss": 0.5171,
72
+ "num_input_tokens_seen": 599608,
73
+ "step": 8
74
+ },
75
+ {
76
+ "epoch": 0.14953271028037382,
77
+ "grad_norm": 0.045517683029174805,
78
+ "learning_rate": 4.9993060755450015e-05,
79
+ "loss": 0.5669,
80
+ "num_input_tokens_seen": 681424,
81
+ "step": 9
82
+ },
83
+ {
84
+ "epoch": 0.16614745586708204,
85
+ "grad_norm": 0.044325754046440125,
86
+ "learning_rate": 4.999143312438893e-05,
87
+ "loss": 0.4218,
88
+ "num_input_tokens_seen": 756744,
89
+ "step": 10
90
+ },
91
+ {
92
+ "epoch": 0.18276220145379024,
93
+ "grad_norm": 0.04328459873795509,
94
+ "learning_rate": 4.998963420484349e-05,
95
+ "loss": 0.434,
96
+ "num_input_tokens_seen": 842576,
97
+ "step": 11
98
+ },
99
+ {
100
+ "epoch": 0.19937694704049844,
101
+ "grad_norm": 0.04725787043571472,
102
+ "learning_rate": 4.998766400914329e-05,
103
+ "loss": 0.4287,
104
+ "num_input_tokens_seen": 917232,
105
+ "step": 12
106
+ },
107
+ {
108
+ "epoch": 0.21599169262720663,
109
+ "grad_norm": 0.03806879371404648,
110
+ "learning_rate": 4.9985522550791825e-05,
111
+ "loss": 0.3454,
112
+ "num_input_tokens_seen": 1006800,
113
+ "step": 13
114
+ },
115
+ {
116
+ "epoch": 0.23260643821391486,
117
+ "grad_norm": 0.05201176926493645,
118
+ "learning_rate": 4.998320984446641e-05,
119
+ "loss": 0.436,
120
+ "num_input_tokens_seen": 1085824,
121
+ "step": 14
122
+ },
123
+ {
124
+ "epoch": 0.24922118380062305,
125
+ "grad_norm": 0.047955628484487534,
126
+ "learning_rate": 4.9980725906018074e-05,
127
+ "loss": 0.4625,
128
+ "num_input_tokens_seen": 1164160,
129
+ "step": 15
130
+ },
131
+ {
132
+ "epoch": 0.26583592938733125,
133
+ "grad_norm": 0.05529098957777023,
134
+ "learning_rate": 4.997807075247146e-05,
135
+ "loss": 0.5035,
136
+ "num_input_tokens_seen": 1242264,
137
+ "step": 16
138
+ },
139
+ {
140
+ "epoch": 0.2824506749740395,
141
+ "grad_norm": 0.04751162976026535,
142
+ "learning_rate": 4.997524440202469e-05,
143
+ "loss": 0.4354,
144
+ "num_input_tokens_seen": 1325904,
145
+ "step": 17
146
+ },
147
+ {
148
+ "epoch": 0.29906542056074764,
149
+ "grad_norm": 0.06726882606744766,
150
+ "learning_rate": 4.9972246874049254e-05,
151
+ "loss": 0.5439,
152
+ "num_input_tokens_seen": 1385632,
153
+ "step": 18
154
+ },
155
+ {
156
+ "epoch": 0.31568016614745587,
157
+ "grad_norm": 0.05245920270681381,
158
+ "learning_rate": 4.996907818908987e-05,
159
+ "loss": 0.3727,
160
+ "num_input_tokens_seen": 1470632,
161
+ "step": 19
162
+ },
163
+ {
164
+ "epoch": 0.3322949117341641,
165
+ "grad_norm": 0.05745376646518707,
166
+ "learning_rate": 4.996573836886435e-05,
167
+ "loss": 0.4894,
168
+ "num_input_tokens_seen": 1547536,
169
+ "step": 20
170
+ },
171
+ {
172
+ "epoch": 0.34890965732087226,
173
+ "grad_norm": 0.056607529520988464,
174
+ "learning_rate": 4.9962227436263453e-05,
175
+ "loss": 0.3846,
176
+ "num_input_tokens_seen": 1615528,
177
+ "step": 21
178
+ },
179
+ {
180
+ "epoch": 0.3655244029075805,
181
+ "grad_norm": 0.06150667741894722,
182
+ "learning_rate": 4.995854541535071e-05,
183
+ "loss": 0.4362,
184
+ "num_input_tokens_seen": 1694352,
185
+ "step": 22
186
+ },
187
+ {
188
+ "epoch": 0.3821391484942887,
189
+ "grad_norm": 0.056484442204236984,
190
+ "learning_rate": 4.9954692331362294e-05,
191
+ "loss": 0.4438,
192
+ "num_input_tokens_seen": 1753776,
193
+ "step": 23
194
+ },
195
+ {
196
+ "epoch": 0.3987538940809969,
197
+ "grad_norm": 0.0704159140586853,
198
+ "learning_rate": 4.995066821070679e-05,
199
+ "loss": 0.4496,
200
+ "num_input_tokens_seen": 1809048,
201
+ "step": 24
202
+ },
203
+ {
204
+ "epoch": 0.4153686396677051,
205
+ "grad_norm": 0.06202029809355736,
206
+ "learning_rate": 4.994647308096509e-05,
207
+ "loss": 0.5096,
208
+ "num_input_tokens_seen": 1884264,
209
+ "step": 25
210
+ },
211
+ {
212
+ "epoch": 0.43198338525441327,
213
+ "grad_norm": 0.04237145930528641,
214
+ "learning_rate": 4.994210697089014e-05,
215
+ "loss": 0.3722,
216
+ "num_input_tokens_seen": 1981704,
217
+ "step": 26
218
+ },
219
+ {
220
+ "epoch": 0.4485981308411215,
221
+ "grad_norm": 0.06920398026704788,
222
+ "learning_rate": 4.9937569910406756e-05,
223
+ "loss": 0.4103,
224
+ "num_input_tokens_seen": 2044144,
225
+ "step": 27
226
+ },
227
+ {
228
+ "epoch": 0.4652128764278297,
229
+ "grad_norm": 0.062432270497083664,
230
+ "learning_rate": 4.9932861930611454e-05,
231
+ "loss": 0.357,
232
+ "num_input_tokens_seen": 2107584,
233
+ "step": 28
234
+ },
235
+ {
236
+ "epoch": 0.4818276220145379,
237
+ "grad_norm": 0.06791180372238159,
238
+ "learning_rate": 4.9927983063772196e-05,
239
+ "loss": 0.3889,
240
+ "num_input_tokens_seen": 2169248,
241
+ "step": 29
242
+ },
243
+ {
244
+ "epoch": 0.4984423676012461,
245
+ "grad_norm": 0.07219590991735458,
246
+ "learning_rate": 4.99229333433282e-05,
247
+ "loss": 0.3543,
248
+ "num_input_tokens_seen": 2230344,
249
+ "step": 30
250
+ },
251
+ {
252
+ "epoch": 0.5150571131879543,
253
+ "grad_norm": 0.0647474005818367,
254
+ "learning_rate": 4.9917712803889674e-05,
255
+ "loss": 0.3453,
256
+ "num_input_tokens_seen": 2302368,
257
+ "step": 31
258
+ },
259
+ {
260
+ "epoch": 0.5316718587746625,
261
+ "grad_norm": 0.07434642314910889,
262
+ "learning_rate": 4.991232148123761e-05,
263
+ "loss": 0.435,
264
+ "num_input_tokens_seen": 2369984,
265
+ "step": 32
266
+ },
267
+ {
268
+ "epoch": 0.5482866043613707,
269
+ "grad_norm": 0.05302443355321884,
270
+ "learning_rate": 4.990675941232353e-05,
271
+ "loss": 0.3981,
272
+ "num_input_tokens_seen": 2453032,
273
+ "step": 33
274
+ },
275
+ {
276
+ "epoch": 0.564901349948079,
277
+ "grad_norm": 0.053745292127132416,
278
+ "learning_rate": 4.990102663526924e-05,
279
+ "loss": 0.3755,
280
+ "num_input_tokens_seen": 2527464,
281
+ "step": 34
282
+ },
283
+ {
284
+ "epoch": 0.5815160955347871,
285
+ "grad_norm": 0.06717613339424133,
286
+ "learning_rate": 4.989512318936655e-05,
287
+ "loss": 0.3699,
288
+ "num_input_tokens_seen": 2597032,
289
+ "step": 35
290
+ },
291
+ {
292
+ "epoch": 0.5981308411214953,
293
+ "grad_norm": 0.071847103536129,
294
+ "learning_rate": 4.9889049115077005e-05,
295
+ "loss": 0.3705,
296
+ "num_input_tokens_seen": 2671704,
297
+ "step": 36
298
+ },
299
+ {
300
+ "epoch": 0.6147455867082036,
301
+ "grad_norm": 0.0460306741297245,
302
+ "learning_rate": 4.988280445403164e-05,
303
+ "loss": 0.3797,
304
+ "num_input_tokens_seen": 2767640,
305
+ "step": 37
306
+ },
307
+ {
308
+ "epoch": 0.6313603322949117,
309
+ "grad_norm": 0.053273387253284454,
310
+ "learning_rate": 4.987638924903067e-05,
311
+ "loss": 0.3799,
312
+ "num_input_tokens_seen": 2843720,
313
+ "step": 38
314
+ },
315
+ {
316
+ "epoch": 0.6479750778816199,
317
+ "grad_norm": 0.05600422993302345,
318
+ "learning_rate": 4.9869803544043166e-05,
319
+ "loss": 0.2866,
320
+ "num_input_tokens_seen": 2921472,
321
+ "step": 39
322
+ },
323
+ {
324
+ "epoch": 0.6645898234683282,
325
+ "grad_norm": 0.06414052098989487,
326
+ "learning_rate": 4.9863047384206835e-05,
327
+ "loss": 0.4115,
328
+ "num_input_tokens_seen": 2998400,
329
+ "step": 40
330
+ },
331
+ {
332
+ "epoch": 0.6812045690550363,
333
+ "grad_norm": 0.09214208275079727,
334
+ "learning_rate": 4.985612081582764e-05,
335
+ "loss": 0.3804,
336
+ "num_input_tokens_seen": 3059648,
337
+ "step": 41
338
+ },
339
+ {
340
+ "epoch": 0.6978193146417445,
341
+ "grad_norm": 0.0555964931845665,
342
+ "learning_rate": 4.98490238863795e-05,
343
+ "loss": 0.3121,
344
+ "num_input_tokens_seen": 3140184,
345
+ "step": 42
346
+ },
347
+ {
348
+ "epoch": 0.7144340602284528,
349
+ "grad_norm": 0.06256969273090363,
350
+ "learning_rate": 4.984175664450397e-05,
351
+ "loss": 0.3271,
352
+ "num_input_tokens_seen": 3207184,
353
+ "step": 43
354
+ },
355
+ {
356
+ "epoch": 0.731048805815161,
357
+ "grad_norm": 0.0543232187628746,
358
+ "learning_rate": 4.983431914000991e-05,
359
+ "loss": 0.364,
360
+ "num_input_tokens_seen": 3292344,
361
+ "step": 44
362
+ },
363
+ {
364
+ "epoch": 0.7476635514018691,
365
+ "grad_norm": 0.06077824532985687,
366
+ "learning_rate": 4.982671142387316e-05,
367
+ "loss": 0.3894,
368
+ "num_input_tokens_seen": 3365384,
369
+ "step": 45
370
+ },
371
+ {
372
+ "epoch": 0.7642782969885774,
373
+ "grad_norm": 0.06091070920228958,
374
+ "learning_rate": 4.981893354823614e-05,
375
+ "loss": 0.3354,
376
+ "num_input_tokens_seen": 3440720,
377
+ "step": 46
378
+ },
379
+ {
380
+ "epoch": 0.7808930425752856,
381
+ "grad_norm": 0.054153311997652054,
382
+ "learning_rate": 4.9810985566407544e-05,
383
+ "loss": 0.3058,
384
+ "num_input_tokens_seen": 3533576,
385
+ "step": 47
386
+ },
387
+ {
388
+ "epoch": 0.7975077881619937,
389
+ "grad_norm": 0.06662417948246002,
390
+ "learning_rate": 4.980286753286195e-05,
391
+ "loss": 0.4658,
392
+ "num_input_tokens_seen": 3599744,
393
+ "step": 48
394
+ },
395
+ {
396
+ "epoch": 0.814122533748702,
397
+ "grad_norm": 0.05790851265192032,
398
+ "learning_rate": 4.979457950323945e-05,
399
+ "loss": 0.3647,
400
+ "num_input_tokens_seen": 3689520,
401
+ "step": 49
402
+ },
403
+ {
404
+ "epoch": 0.8307372793354102,
405
+ "grad_norm": 0.10742159187793732,
406
+ "learning_rate": 4.9786121534345265e-05,
407
+ "loss": 0.343,
408
+ "num_input_tokens_seen": 3751808,
409
+ "step": 50
410
+ },
411
+ {
412
+ "epoch": 0.8473520249221184,
413
+ "grad_norm": 0.05565556138753891,
414
+ "learning_rate": 4.9777493684149375e-05,
415
+ "loss": 0.3317,
416
+ "num_input_tokens_seen": 3839096,
417
+ "step": 51
418
+ },
419
+ {
420
+ "epoch": 0.8639667705088265,
421
+ "grad_norm": 0.05752381682395935,
422
+ "learning_rate": 4.976869601178609e-05,
423
+ "loss": 0.38,
424
+ "num_input_tokens_seen": 3919824,
425
+ "step": 52
426
+ },
427
+ {
428
+ "epoch": 0.8805815160955348,
429
+ "grad_norm": 0.06406434625387192,
430
+ "learning_rate": 4.975972857755369e-05,
431
+ "loss": 0.2676,
432
+ "num_input_tokens_seen": 3989312,
433
+ "step": 53
434
+ },
435
+ {
436
+ "epoch": 0.897196261682243,
437
+ "grad_norm": 0.0653691440820694,
438
+ "learning_rate": 4.975059144291394e-05,
439
+ "loss": 0.3516,
440
+ "num_input_tokens_seen": 4060528,
441
+ "step": 54
442
+ },
443
+ {
444
+ "epoch": 0.9138110072689511,
445
+ "grad_norm": 0.06272953748703003,
446
+ "learning_rate": 4.974128467049176e-05,
447
+ "loss": 0.3004,
448
+ "num_input_tokens_seen": 4129368,
449
+ "step": 55
450
+ },
451
+ {
452
+ "epoch": 0.9304257528556594,
453
+ "grad_norm": 0.08054930716753006,
454
+ "learning_rate": 4.9731808324074717e-05,
455
+ "loss": 0.3009,
456
+ "num_input_tokens_seen": 4175208,
457
+ "step": 56
458
+ },
459
+ {
460
+ "epoch": 0.9470404984423676,
461
+ "grad_norm": 0.07523038238286972,
462
+ "learning_rate": 4.972216246861262e-05,
463
+ "loss": 0.2814,
464
+ "num_input_tokens_seen": 4218096,
465
+ "step": 57
466
+ },
467
+ {
468
+ "epoch": 0.9636552440290758,
469
+ "grad_norm": 0.07347433269023895,
470
+ "learning_rate": 4.971234717021709e-05,
471
+ "loss": 0.3321,
472
+ "num_input_tokens_seen": 4275968,
473
+ "step": 58
474
+ },
475
+ {
476
+ "epoch": 0.980269989615784,
477
+ "grad_norm": 0.05830248445272446,
478
+ "learning_rate": 4.9702362496161085e-05,
479
+ "loss": 0.2881,
480
+ "num_input_tokens_seen": 4346616,
481
+ "step": 59
482
+ },
483
+ {
484
+ "epoch": 0.9968847352024922,
485
+ "grad_norm": 0.061629410833120346,
486
+ "learning_rate": 4.9692208514878444e-05,
487
+ "loss": 0.2993,
488
+ "num_input_tokens_seen": 4425064,
489
+ "step": 60
490
+ },
491
+ {
492
+ "epoch": 1.0,
493
+ "grad_norm": 0.13380740582942963,
494
+ "learning_rate": 4.968188529596342e-05,
495
+ "loss": 0.2511,
496
+ "num_input_tokens_seen": 4435328,
497
+ "step": 61
498
+ },
499
+ {
500
+ "epoch": 1.0166147455867083,
501
+ "grad_norm": 0.0726238414645195,
502
+ "learning_rate": 4.9671392910170185e-05,
503
+ "loss": 0.3127,
504
+ "num_input_tokens_seen": 4500104,
505
+ "step": 62
506
+ },
507
+ {
508
+ "epoch": 1.0332294911734163,
509
+ "grad_norm": 0.05980083718895912,
510
+ "learning_rate": 4.966073142941239e-05,
511
+ "loss": 0.3601,
512
+ "num_input_tokens_seen": 4581976,
513
+ "step": 63
514
+ },
515
+ {
516
+ "epoch": 1.0498442367601246,
517
+ "grad_norm": 0.06445376574993134,
518
+ "learning_rate": 4.964990092676263e-05,
519
+ "loss": 0.3049,
520
+ "num_input_tokens_seen": 4652160,
521
+ "step": 64
522
+ },
523
+ {
524
+ "epoch": 1.066458982346833,
525
+ "grad_norm": 0.07824505120515823,
526
+ "learning_rate": 4.9638901476451946e-05,
527
+ "loss": 0.3099,
528
+ "num_input_tokens_seen": 4709368,
529
+ "step": 65
530
+ },
531
+ {
532
+ "epoch": 1.083073727933541,
533
+ "grad_norm": 0.058268457651138306,
534
+ "learning_rate": 4.962773315386935e-05,
535
+ "loss": 0.3273,
536
+ "num_input_tokens_seen": 4798256,
537
+ "step": 66
538
+ },
539
+ {
540
+ "epoch": 1.0996884735202492,
541
+ "grad_norm": 0.07069691270589828,
542
+ "learning_rate": 4.961639603556127e-05,
543
+ "loss": 0.282,
544
+ "num_input_tokens_seen": 4859200,
545
+ "step": 67
546
+ },
547
+ {
548
+ "epoch": 1.1163032191069575,
549
+ "grad_norm": 0.0775996670126915,
550
+ "learning_rate": 4.960489019923105e-05,
551
+ "loss": 0.3642,
552
+ "num_input_tokens_seen": 4925992,
553
+ "step": 68
554
+ },
555
+ {
556
+ "epoch": 1.1329179646936656,
557
+ "grad_norm": 0.07044171541929245,
558
+ "learning_rate": 4.9593215723738404e-05,
559
+ "loss": 0.2896,
560
+ "num_input_tokens_seen": 4998808,
561
+ "step": 69
562
+ },
563
+ {
564
+ "epoch": 1.1495327102803738,
565
+ "grad_norm": 0.05971802771091461,
566
+ "learning_rate": 4.958137268909887e-05,
567
+ "loss": 0.2578,
568
+ "num_input_tokens_seen": 5089672,
569
+ "step": 70
570
+ },
571
+ {
572
+ "epoch": 1.1661474558670821,
573
+ "grad_norm": 0.07145556062459946,
574
+ "learning_rate": 4.9569361176483286e-05,
575
+ "loss": 0.3243,
576
+ "num_input_tokens_seen": 5166744,
577
+ "step": 71
578
+ },
579
+ {
580
+ "epoch": 1.1827622014537902,
581
+ "grad_norm": 0.07455787807703018,
582
+ "learning_rate": 4.9557181268217227e-05,
583
+ "loss": 0.3949,
584
+ "num_input_tokens_seen": 5228264,
585
+ "step": 72
586
+ },
587
+ {
588
+ "epoch": 1.1993769470404985,
589
+ "grad_norm": 0.055582575500011444,
590
+ "learning_rate": 4.9544833047780394e-05,
591
+ "loss": 0.2877,
592
+ "num_input_tokens_seen": 5338224,
593
+ "step": 73
594
+ },
595
+ {
596
+ "epoch": 1.2159916926272065,
597
+ "grad_norm": 0.07675391435623169,
598
+ "learning_rate": 4.9532316599806124e-05,
599
+ "loss": 0.3152,
600
+ "num_input_tokens_seen": 5399848,
601
+ "step": 74
602
+ },
603
+ {
604
+ "epoch": 1.2326064382139148,
605
+ "grad_norm": 0.08048644661903381,
606
+ "learning_rate": 4.951963201008076e-05,
607
+ "loss": 0.2976,
608
+ "num_input_tokens_seen": 5468624,
609
+ "step": 75
610
+ },
611
+ {
612
+ "epoch": 1.249221183800623,
613
+ "grad_norm": 0.07579060643911362,
614
+ "learning_rate": 4.9506779365543046e-05,
615
+ "loss": 0.2982,
616
+ "num_input_tokens_seen": 5536776,
617
+ "step": 76
618
+ },
619
+ {
620
+ "epoch": 1.2658359293873311,
621
+ "grad_norm": 0.07828006893396378,
622
+ "learning_rate": 4.949375875428357e-05,
623
+ "loss": 0.3272,
624
+ "num_input_tokens_seen": 5609296,
625
+ "step": 77
626
+ },
627
+ {
628
+ "epoch": 1.2824506749740394,
629
+ "grad_norm": 0.08079098165035248,
630
+ "learning_rate": 4.9480570265544144e-05,
631
+ "loss": 0.2768,
632
+ "num_input_tokens_seen": 5663824,
633
+ "step": 78
634
+ },
635
+ {
636
+ "epoch": 1.2990654205607477,
637
+ "grad_norm": 0.07579358667135239,
638
+ "learning_rate": 4.94672139897172e-05,
639
+ "loss": 0.318,
640
+ "num_input_tokens_seen": 5742032,
641
+ "step": 79
642
+ },
643
+ {
644
+ "epoch": 1.3156801661474558,
645
+ "grad_norm": 0.07588379085063934,
646
+ "learning_rate": 4.9453690018345144e-05,
647
+ "loss": 0.3007,
648
+ "num_input_tokens_seen": 5816864,
649
+ "step": 80
650
+ },
651
+ {
652
+ "epoch": 1.332294911734164,
653
+ "grad_norm": 0.08709035068750381,
654
+ "learning_rate": 4.943999844411977e-05,
655
+ "loss": 0.2797,
656
+ "num_input_tokens_seen": 5881624,
657
+ "step": 81
658
+ },
659
+ {
660
+ "epoch": 1.3489096573208723,
661
+ "grad_norm": 0.05975884944200516,
662
+ "learning_rate": 4.94261393608816e-05,
663
+ "loss": 0.2591,
664
+ "num_input_tokens_seen": 5970272,
665
+ "step": 82
666
+ },
667
+ {
668
+ "epoch": 1.3655244029075804,
669
+ "grad_norm": 0.07372818142175674,
670
+ "learning_rate": 4.941211286361922e-05,
671
+ "loss": 0.2687,
672
+ "num_input_tokens_seen": 6058752,
673
+ "step": 83
674
+ },
675
+ {
676
+ "epoch": 1.3821391484942886,
677
+ "grad_norm": 0.09071576595306396,
678
+ "learning_rate": 4.939791904846869e-05,
679
+ "loss": 0.2979,
680
+ "num_input_tokens_seen": 6120064,
681
+ "step": 84
682
+ },
683
+ {
684
+ "epoch": 1.398753894080997,
685
+ "grad_norm": 0.0849960595369339,
686
+ "learning_rate": 4.938355801271282e-05,
687
+ "loss": 0.2927,
688
+ "num_input_tokens_seen": 6182072,
689
+ "step": 85
690
+ },
691
+ {
692
+ "epoch": 1.415368639667705,
693
+ "grad_norm": 0.08258760720491409,
694
+ "learning_rate": 4.936902985478055e-05,
695
+ "loss": 0.295,
696
+ "num_input_tokens_seen": 6269680,
697
+ "step": 86
698
+ },
699
+ {
700
+ "epoch": 1.4319833852544133,
701
+ "grad_norm": 0.0851503536105156,
702
+ "learning_rate": 4.935433467424624e-05,
703
+ "loss": 0.2925,
704
+ "num_input_tokens_seen": 6347424,
705
+ "step": 87
706
+ },
707
+ {
708
+ "epoch": 1.4485981308411215,
709
+ "grad_norm": 0.08852345496416092,
710
+ "learning_rate": 4.933947257182901e-05,
711
+ "loss": 0.3153,
712
+ "num_input_tokens_seen": 6412584,
713
+ "step": 88
714
+ },
715
+ {
716
+ "epoch": 1.4652128764278296,
717
+ "grad_norm": 0.08184897154569626,
718
+ "learning_rate": 4.932444364939205e-05,
719
+ "loss": 0.292,
720
+ "num_input_tokens_seen": 6482728,
721
+ "step": 89
722
+ },
723
+ {
724
+ "epoch": 1.4818276220145379,
725
+ "grad_norm": 0.08270515501499176,
726
+ "learning_rate": 4.9309248009941914e-05,
727
+ "loss": 0.3472,
728
+ "num_input_tokens_seen": 6562104,
729
+ "step": 90
730
+ },
731
+ {
732
+ "epoch": 1.4984423676012462,
733
+ "grad_norm": 0.07407747954130173,
734
+ "learning_rate": 4.929388575762782e-05,
735
+ "loss": 0.2995,
736
+ "num_input_tokens_seen": 6656552,
737
+ "step": 91
738
+ },
739
+ {
740
+ "epoch": 1.5150571131879542,
741
+ "grad_norm": 0.08710360527038574,
742
+ "learning_rate": 4.9278356997740904e-05,
743
+ "loss": 0.2549,
744
+ "num_input_tokens_seen": 6714184,
745
+ "step": 92
746
+ },
747
+ {
748
+ "epoch": 1.5316718587746625,
749
+ "grad_norm": 0.0773790255188942,
750
+ "learning_rate": 4.9262661836713564e-05,
751
+ "loss": 0.2814,
752
+ "num_input_tokens_seen": 6793552,
753
+ "step": 93
754
+ },
755
+ {
756
+ "epoch": 1.5482866043613708,
757
+ "grad_norm": 0.1002134457230568,
758
+ "learning_rate": 4.924680038211867e-05,
759
+ "loss": 0.2876,
760
+ "num_input_tokens_seen": 6865256,
761
+ "step": 94
762
+ },
763
+ {
764
+ "epoch": 1.5649013499480788,
765
+ "grad_norm": 0.09670394659042358,
766
+ "learning_rate": 4.9230772742668866e-05,
767
+ "loss": 0.2846,
768
+ "num_input_tokens_seen": 6931152,
769
+ "step": 95
770
+ },
771
+ {
772
+ "epoch": 1.5815160955347871,
773
+ "grad_norm": 0.08910100907087326,
774
+ "learning_rate": 4.9214579028215776e-05,
775
+ "loss": 0.2944,
776
+ "num_input_tokens_seen": 6998408,
777
+ "step": 96
778
+ },
779
+ {
780
+ "epoch": 1.5981308411214954,
781
+ "grad_norm": 0.09202459454536438,
782
+ "learning_rate": 4.919821934974933e-05,
783
+ "loss": 0.251,
784
+ "num_input_tokens_seen": 7053008,
785
+ "step": 97
786
+ },
787
+ {
788
+ "epoch": 1.6147455867082035,
789
+ "grad_norm": 0.10218881815671921,
790
+ "learning_rate": 4.918169381939692e-05,
791
+ "loss": 0.2851,
792
+ "num_input_tokens_seen": 7106440,
793
+ "step": 98
794
+ },
795
+ {
796
+ "epoch": 1.6313603322949117,
797
+ "grad_norm": 0.09290914982557297,
798
+ "learning_rate": 4.916500255042268e-05,
799
+ "loss": 0.2959,
800
+ "num_input_tokens_seen": 7167032,
801
+ "step": 99
802
+ },
803
+ {
804
+ "epoch": 1.64797507788162,
805
+ "grad_norm": 0.07791033387184143,
806
+ "learning_rate": 4.914814565722671e-05,
807
+ "loss": 0.2481,
808
+ "num_input_tokens_seen": 7245720,
809
+ "step": 100
810
+ },
811
+ {
812
+ "epoch": 1.664589823468328,
813
+ "grad_norm": 0.08885534107685089,
814
+ "learning_rate": 4.913112325534426e-05,
815
+ "loss": 0.3168,
816
+ "num_input_tokens_seen": 7326320,
817
+ "step": 101
818
+ },
819
+ {
820
+ "epoch": 1.6812045690550363,
821
+ "grad_norm": 0.08569750934839249,
822
+ "learning_rate": 4.9113935461444955e-05,
823
+ "loss": 0.2805,
824
+ "num_input_tokens_seen": 7442232,
825
+ "step": 102
826
+ },
827
+ {
828
+ "epoch": 1.6978193146417446,
829
+ "grad_norm": 0.1112508773803711,
830
+ "learning_rate": 4.9096582393332025e-05,
831
+ "loss": 0.2675,
832
+ "num_input_tokens_seen": 7502496,
833
+ "step": 103
834
+ },
835
+ {
836
+ "epoch": 1.7144340602284527,
837
+ "grad_norm": 0.09654372185468674,
838
+ "learning_rate": 4.907906416994146e-05,
839
+ "loss": 0.3038,
840
+ "num_input_tokens_seen": 7566496,
841
+ "step": 104
842
+ },
843
+ {
844
+ "epoch": 1.731048805815161,
845
+ "grad_norm": 0.10022995620965958,
846
+ "learning_rate": 4.906138091134118e-05,
847
+ "loss": 0.3639,
848
+ "num_input_tokens_seen": 7629056,
849
+ "step": 105
850
+ },
851
+ {
852
+ "epoch": 1.7476635514018692,
853
+ "grad_norm": 0.08336564153432846,
854
+ "learning_rate": 4.9043532738730284e-05,
855
+ "loss": 0.2944,
856
+ "num_input_tokens_seen": 7706096,
857
+ "step": 106
858
+ },
859
+ {
860
+ "epoch": 1.7642782969885773,
861
+ "grad_norm": 0.08539658784866333,
862
+ "learning_rate": 4.9025519774438136e-05,
863
+ "loss": 0.2392,
864
+ "num_input_tokens_seen": 7780072,
865
+ "step": 107
866
+ },
867
+ {
868
+ "epoch": 1.7808930425752856,
869
+ "grad_norm": 0.09139693528413773,
870
+ "learning_rate": 4.900734214192358e-05,
871
+ "loss": 0.2685,
872
+ "num_input_tokens_seen": 7857712,
873
+ "step": 108
874
+ },
875
+ {
876
+ "epoch": 1.7975077881619939,
877
+ "grad_norm": 0.1043916717171669,
878
+ "learning_rate": 4.898899996577407e-05,
879
+ "loss": 0.2513,
880
+ "num_input_tokens_seen": 7916832,
881
+ "step": 109
882
+ },
883
+ {
884
+ "epoch": 1.814122533748702,
885
+ "grad_norm": 0.09203662723302841,
886
+ "learning_rate": 4.8970493371704826e-05,
887
+ "loss": 0.2974,
888
+ "num_input_tokens_seen": 7993056,
889
+ "step": 110
890
+ },
891
+ {
892
+ "epoch": 1.8307372793354102,
893
+ "grad_norm": 0.09319474548101425,
894
+ "learning_rate": 4.8951822486557986e-05,
895
+ "loss": 0.3096,
896
+ "num_input_tokens_seen": 8090056,
897
+ "step": 111
898
+ },
899
+ {
900
+ "epoch": 1.8473520249221185,
901
+ "grad_norm": 0.10193445533514023,
902
+ "learning_rate": 4.893298743830168e-05,
903
+ "loss": 0.2633,
904
+ "num_input_tokens_seen": 8164808,
905
+ "step": 112
906
+ },
907
+ {
908
+ "epoch": 1.8639667705088265,
909
+ "grad_norm": 0.11407948285341263,
910
+ "learning_rate": 4.891398835602925e-05,
911
+ "loss": 0.2584,
912
+ "num_input_tokens_seen": 8223568,
913
+ "step": 113
914
+ },
915
+ {
916
+ "epoch": 1.8805815160955348,
917
+ "grad_norm": 0.11977085471153259,
918
+ "learning_rate": 4.8894825369958255e-05,
919
+ "loss": 0.2619,
920
+ "num_input_tokens_seen": 8276160,
921
+ "step": 114
922
+ },
923
+ {
924
+ "epoch": 1.897196261682243,
925
+ "grad_norm": 0.10925433784723282,
926
+ "learning_rate": 4.8875498611429674e-05,
927
+ "loss": 0.2762,
928
+ "num_input_tokens_seen": 8354904,
929
+ "step": 115
930
+ },
931
+ {
932
+ "epoch": 1.9138110072689511,
933
+ "grad_norm": 0.09673939645290375,
934
+ "learning_rate": 4.8856008212906925e-05,
935
+ "loss": 0.3152,
936
+ "num_input_tokens_seen": 8442584,
937
+ "step": 116
938
+ },
939
+ {
940
+ "epoch": 1.9304257528556594,
941
+ "grad_norm": 0.10827789455652237,
942
+ "learning_rate": 4.8836354307975026e-05,
943
+ "loss": 0.2759,
944
+ "num_input_tokens_seen": 8506688,
945
+ "step": 117
946
+ },
947
+ {
948
+ "epoch": 1.9470404984423677,
949
+ "grad_norm": 0.08390220254659653,
950
+ "learning_rate": 4.881653703133966e-05,
951
+ "loss": 0.2192,
952
+ "num_input_tokens_seen": 8610712,
953
+ "step": 118
954
+ },
955
+ {
956
+ "epoch": 1.9636552440290758,
957
+ "grad_norm": 0.09252211451530457,
958
+ "learning_rate": 4.87965565188262e-05,
959
+ "loss": 0.2618,
960
+ "num_input_tokens_seen": 8692624,
961
+ "step": 119
962
+ },
963
+ {
964
+ "epoch": 1.980269989615784,
965
+ "grad_norm": 0.1107102632522583,
966
+ "learning_rate": 4.877641290737884e-05,
967
+ "loss": 0.2666,
968
+ "num_input_tokens_seen": 8772208,
969
+ "step": 120
970
+ },
971
+ {
972
+ "epoch": 1.9968847352024923,
973
+ "grad_norm": 0.0917077362537384,
974
+ "learning_rate": 4.8756106335059646e-05,
975
+ "loss": 0.253,
976
+ "num_input_tokens_seen": 8854904,
977
+ "step": 121
978
+ },
979
+ {
980
+ "epoch": 2.0,
981
+ "grad_norm": 0.2606711685657501,
982
+ "learning_rate": 4.87356369410476e-05,
983
+ "loss": 0.235,
984
+ "num_input_tokens_seen": 8872656,
985
+ "step": 122
986
+ },
987
+ {
988
+ "epoch": 2.016614745586708,
989
+ "grad_norm": 0.10363993793725967,
990
+ "learning_rate": 4.8715004865637614e-05,
991
+ "loss": 0.266,
992
+ "num_input_tokens_seen": 8946480,
993
+ "step": 123
994
+ },
995
+ {
996
+ "epoch": 2.0332294911734166,
997
+ "grad_norm": 0.09997844696044922,
998
+ "learning_rate": 4.869421025023965e-05,
999
+ "loss": 0.2696,
1000
+ "num_input_tokens_seen": 9023328,
1001
+ "step": 124
1002
+ },
1003
+ {
1004
+ "epoch": 2.0498442367601246,
1005
+ "grad_norm": 0.13349319994449615,
1006
+ "learning_rate": 4.867325323737765e-05,
1007
+ "loss": 0.2552,
1008
+ "num_input_tokens_seen": 9074320,
1009
+ "step": 125
1010
+ },
1011
+ {
1012
+ "epoch": 2.0664589823468327,
1013
+ "grad_norm": 0.11201464384794235,
1014
+ "learning_rate": 4.8652133970688636e-05,
1015
+ "loss": 0.2486,
1016
+ "num_input_tokens_seen": 9148784,
1017
+ "step": 126
1018
+ },
1019
+ {
1020
+ "epoch": 2.083073727933541,
1021
+ "grad_norm": 0.10193142294883728,
1022
+ "learning_rate": 4.8630852594921706e-05,
1023
+ "loss": 0.2814,
1024
+ "num_input_tokens_seen": 9246624,
1025
+ "step": 127
1026
+ },
1027
+ {
1028
+ "epoch": 2.0996884735202492,
1029
+ "grad_norm": 0.1305130422115326,
1030
+ "learning_rate": 4.860940925593703e-05,
1031
+ "loss": 0.304,
1032
+ "num_input_tokens_seen": 9328176,
1033
+ "step": 128
1034
+ },
1035
+ {
1036
+ "epoch": 2.1163032191069573,
1037
+ "grad_norm": 0.1137692779302597,
1038
+ "learning_rate": 4.8587804100704845e-05,
1039
+ "loss": 0.2427,
1040
+ "num_input_tokens_seen": 9388936,
1041
+ "step": 129
1042
+ },
1043
+ {
1044
+ "epoch": 2.132917964693666,
1045
+ "grad_norm": 0.12126237154006958,
1046
+ "learning_rate": 4.856603727730447e-05,
1047
+ "loss": 0.2485,
1048
+ "num_input_tokens_seen": 9461664,
1049
+ "step": 130
1050
+ },
1051
+ {
1052
+ "epoch": 2.149532710280374,
1053
+ "grad_norm": 0.11567176878452301,
1054
+ "learning_rate": 4.854410893492326e-05,
1055
+ "loss": 0.2628,
1056
+ "num_input_tokens_seen": 9535000,
1057
+ "step": 131
1058
+ },
1059
+ {
1060
+ "epoch": 2.166147455867082,
1061
+ "grad_norm": 0.1399552971124649,
1062
+ "learning_rate": 4.852201922385564e-05,
1063
+ "loss": 0.2518,
1064
+ "num_input_tokens_seen": 9600296,
1065
+ "step": 132
1066
+ },
1067
+ {
1068
+ "epoch": 2.1827622014537904,
1069
+ "grad_norm": 0.13912151753902435,
1070
+ "learning_rate": 4.8499768295502004e-05,
1071
+ "loss": 0.2429,
1072
+ "num_input_tokens_seen": 9686784,
1073
+ "step": 133
1074
+ },
1075
+ {
1076
+ "epoch": 2.1993769470404985,
1077
+ "grad_norm": 0.11130474507808685,
1078
+ "learning_rate": 4.847735630236773e-05,
1079
+ "loss": 0.2775,
1080
+ "num_input_tokens_seen": 9781112,
1081
+ "step": 134
1082
+ },
1083
+ {
1084
+ "epoch": 2.2159916926272065,
1085
+ "grad_norm": 0.12169156968593597,
1086
+ "learning_rate": 4.8454783398062106e-05,
1087
+ "loss": 0.2439,
1088
+ "num_input_tokens_seen": 9849528,
1089
+ "step": 135
1090
+ },
1091
+ {
1092
+ "epoch": 2.232606438213915,
1093
+ "grad_norm": 0.11766713112592697,
1094
+ "learning_rate": 4.843204973729729e-05,
1095
+ "loss": 0.2538,
1096
+ "num_input_tokens_seen": 9931080,
1097
+ "step": 136
1098
+ },
1099
+ {
1100
+ "epoch": 2.249221183800623,
1101
+ "grad_norm": 0.11854218691587448,
1102
+ "learning_rate": 4.840915547588725e-05,
1103
+ "loss": 0.2782,
1104
+ "num_input_tokens_seen": 10011176,
1105
+ "step": 137
1106
+ },
1107
+ {
1108
+ "epoch": 2.265835929387331,
1109
+ "grad_norm": 0.1340581178665161,
1110
+ "learning_rate": 4.838610077074669e-05,
1111
+ "loss": 0.248,
1112
+ "num_input_tokens_seen": 10084128,
1113
+ "step": 138
1114
+ },
1115
+ {
1116
+ "epoch": 2.2824506749740396,
1117
+ "grad_norm": 0.12075436115264893,
1118
+ "learning_rate": 4.836288577988996e-05,
1119
+ "loss": 0.2582,
1120
+ "num_input_tokens_seen": 10155536,
1121
+ "step": 139
1122
+ },
1123
+ {
1124
+ "epoch": 2.2990654205607477,
1125
+ "grad_norm": 0.10599923878908157,
1126
+ "learning_rate": 4.8339510662430046e-05,
1127
+ "loss": 0.2199,
1128
+ "num_input_tokens_seen": 10251160,
1129
+ "step": 140
1130
+ },
1131
+ {
1132
+ "epoch": 2.3156801661474558,
1133
+ "grad_norm": 0.1117846742272377,
1134
+ "learning_rate": 4.8315975578577355e-05,
1135
+ "loss": 0.2324,
1136
+ "num_input_tokens_seen": 10345864,
1137
+ "step": 141
1138
+ },
1139
+ {
1140
+ "epoch": 2.3322949117341643,
1141
+ "grad_norm": 0.13972057402133942,
1142
+ "learning_rate": 4.8292280689638725e-05,
1143
+ "loss": 0.4072,
1144
+ "num_input_tokens_seen": 10417616,
1145
+ "step": 142
1146
+ },
1147
+ {
1148
+ "epoch": 2.3489096573208723,
1149
+ "grad_norm": 0.13837860524654388,
1150
+ "learning_rate": 4.826842615801628e-05,
1151
+ "loss": 0.2607,
1152
+ "num_input_tokens_seen": 10481816,
1153
+ "step": 143
1154
+ },
1155
+ {
1156
+ "epoch": 2.3655244029075804,
1157
+ "grad_norm": 0.14040137827396393,
1158
+ "learning_rate": 4.8244412147206284e-05,
1159
+ "loss": 0.3094,
1160
+ "num_input_tokens_seen": 10562056,
1161
+ "step": 144
1162
+ },
1163
+ {
1164
+ "epoch": 2.382139148494289,
1165
+ "grad_norm": 0.1393299251794815,
1166
+ "learning_rate": 4.822023882179811e-05,
1167
+ "loss": 0.2407,
1168
+ "num_input_tokens_seen": 10612808,
1169
+ "step": 145
1170
+ },
1171
+ {
1172
+ "epoch": 2.398753894080997,
1173
+ "grad_norm": 0.13878698647022247,
1174
+ "learning_rate": 4.8195906347473e-05,
1175
+ "loss": 0.2481,
1176
+ "num_input_tokens_seen": 10682328,
1177
+ "step": 146
1178
+ },
1179
+ {
1180
+ "epoch": 2.415368639667705,
1181
+ "grad_norm": 0.10430227965116501,
1182
+ "learning_rate": 4.817141489100302e-05,
1183
+ "loss": 0.2528,
1184
+ "num_input_tokens_seen": 10771912,
1185
+ "step": 147
1186
+ },
1187
+ {
1188
+ "epoch": 2.431983385254413,
1189
+ "grad_norm": 0.12963703274726868,
1190
+ "learning_rate": 4.814676462024988e-05,
1191
+ "loss": 0.2739,
1192
+ "num_input_tokens_seen": 10842232,
1193
+ "step": 148
1194
+ },
1195
+ {
1196
+ "epoch": 2.4485981308411215,
1197
+ "grad_norm": 0.13274963200092316,
1198
+ "learning_rate": 4.8121955704163745e-05,
1199
+ "loss": 0.2407,
1200
+ "num_input_tokens_seen": 10902264,
1201
+ "step": 149
1202
+ },
1203
+ {
1204
+ "epoch": 2.4652128764278296,
1205
+ "grad_norm": 0.11079717427492142,
1206
+ "learning_rate": 4.8096988312782174e-05,
1207
+ "loss": 0.2142,
1208
+ "num_input_tokens_seen": 10992744,
1209
+ "step": 150
1210
+ },
1211
+ {
1212
+ "epoch": 2.4818276220145377,
1213
+ "grad_norm": 0.08429212868213654,
1214
+ "learning_rate": 4.8071862617228855e-05,
1215
+ "loss": 0.1428,
1216
+ "num_input_tokens_seen": 11090064,
1217
+ "step": 151
1218
+ },
1219
+ {
1220
+ "epoch": 2.498442367601246,
1221
+ "grad_norm": 0.12903761863708496,
1222
+ "learning_rate": 4.8046578789712515e-05,
1223
+ "loss": 0.2268,
1224
+ "num_input_tokens_seen": 11162864,
1225
+ "step": 152
1226
+ },
1227
+ {
1228
+ "epoch": 2.515057113187954,
1229
+ "grad_norm": 0.14638672769069672,
1230
+ "learning_rate": 4.8021137003525664e-05,
1231
+ "loss": 0.2388,
1232
+ "num_input_tokens_seen": 11224368,
1233
+ "step": 153
1234
+ },
1235
+ {
1236
+ "epoch": 2.5316718587746623,
1237
+ "grad_norm": 0.1372838169336319,
1238
+ "learning_rate": 4.7995537433043446e-05,
1239
+ "loss": 0.2588,
1240
+ "num_input_tokens_seen": 11291056,
1241
+ "step": 154
1242
+ },
1243
+ {
1244
+ "epoch": 2.5482866043613708,
1245
+ "grad_norm": 0.15665481984615326,
1246
+ "learning_rate": 4.796978025372246e-05,
1247
+ "loss": 0.2225,
1248
+ "num_input_tokens_seen": 11345464,
1249
+ "step": 155
1250
+ },
1251
+ {
1252
+ "epoch": 2.564901349948079,
1253
+ "grad_norm": 0.13234855234622955,
1254
+ "learning_rate": 4.794386564209953e-05,
1255
+ "loss": 0.275,
1256
+ "num_input_tokens_seen": 11418912,
1257
+ "step": 156
1258
+ },
1259
+ {
1260
+ "epoch": 2.581516095534787,
1261
+ "grad_norm": 0.13585953414440155,
1262
+ "learning_rate": 4.79177937757905e-05,
1263
+ "loss": 0.2407,
1264
+ "num_input_tokens_seen": 11491216,
1265
+ "step": 157
1266
+ },
1267
+ {
1268
+ "epoch": 2.5981308411214954,
1269
+ "grad_norm": 0.1423913538455963,
1270
+ "learning_rate": 4.7891564833489035e-05,
1271
+ "loss": 0.1971,
1272
+ "num_input_tokens_seen": 11558016,
1273
+ "step": 158
1274
+ },
1275
+ {
1276
+ "epoch": 2.6147455867082035,
1277
+ "grad_norm": 0.13013511896133423,
1278
+ "learning_rate": 4.7865178994965344e-05,
1279
+ "loss": 0.2362,
1280
+ "num_input_tokens_seen": 11630432,
1281
+ "step": 159
1282
+ },
1283
+ {
1284
+ "epoch": 2.6313603322949115,
1285
+ "grad_norm": 0.1587141752243042,
1286
+ "learning_rate": 4.783863644106502e-05,
1287
+ "loss": 0.2252,
1288
+ "num_input_tokens_seen": 11684624,
1289
+ "step": 160
1290
+ },
1291
+ {
1292
+ "epoch": 2.64797507788162,
1293
+ "grad_norm": 0.12592960894107819,
1294
+ "learning_rate": 4.781193735370777e-05,
1295
+ "loss": 0.2506,
1296
+ "num_input_tokens_seen": 11770232,
1297
+ "step": 161
1298
+ },
1299
+ {
1300
+ "epoch": 2.664589823468328,
1301
+ "grad_norm": 0.1583249419927597,
1302
+ "learning_rate": 4.7785081915886134e-05,
1303
+ "loss": 0.2352,
1304
+ "num_input_tokens_seen": 11828360,
1305
+ "step": 162
1306
+ },
1307
+ {
1308
+ "epoch": 2.681204569055036,
1309
+ "grad_norm": 0.14881783723831177,
1310
+ "learning_rate": 4.775807031166428e-05,
1311
+ "loss": 0.2308,
1312
+ "num_input_tokens_seen": 11915944,
1313
+ "step": 163
1314
+ },
1315
+ {
1316
+ "epoch": 2.6978193146417446,
1317
+ "grad_norm": 0.1607823222875595,
1318
+ "learning_rate": 4.773090272617672e-05,
1319
+ "loss": 0.2238,
1320
+ "num_input_tokens_seen": 11981792,
1321
+ "step": 164
1322
+ },
1323
+ {
1324
+ "epoch": 2.7144340602284527,
1325
+ "grad_norm": 0.13583113253116608,
1326
+ "learning_rate": 4.7703579345627035e-05,
1327
+ "loss": 0.3196,
1328
+ "num_input_tokens_seen": 12044024,
1329
+ "step": 165
1330
+ },
1331
+ {
1332
+ "epoch": 2.7310488058151607,
1333
+ "grad_norm": 0.19167298078536987,
1334
+ "learning_rate": 4.7676100357286624e-05,
1335
+ "loss": 0.2745,
1336
+ "num_input_tokens_seen": 12093424,
1337
+ "step": 166
1338
+ },
1339
+ {
1340
+ "epoch": 2.7476635514018692,
1341
+ "grad_norm": 0.130703404545784,
1342
+ "learning_rate": 4.76484659494934e-05,
1343
+ "loss": 0.2285,
1344
+ "num_input_tokens_seen": 12167792,
1345
+ "step": 167
1346
+ },
1347
+ {
1348
+ "epoch": 2.7642782969885773,
1349
+ "grad_norm": 0.14331185817718506,
1350
+ "learning_rate": 4.762067631165049e-05,
1351
+ "loss": 0.2506,
1352
+ "num_input_tokens_seen": 12233712,
1353
+ "step": 168
1354
+ },
1355
+ {
1356
+ "epoch": 2.7808930425752854,
1357
+ "grad_norm": 0.12700341641902924,
1358
+ "learning_rate": 4.7592731634224966e-05,
1359
+ "loss": 0.2052,
1360
+ "num_input_tokens_seen": 12310544,
1361
+ "step": 169
1362
+ },
1363
+ {
1364
+ "epoch": 2.797507788161994,
1365
+ "grad_norm": 0.15118420124053955,
1366
+ "learning_rate": 4.756463210874652e-05,
1367
+ "loss": 0.2309,
1368
+ "num_input_tokens_seen": 12400160,
1369
+ "step": 170
1370
+ },
1371
+ {
1372
+ "epoch": 2.814122533748702,
1373
+ "grad_norm": 0.14001020789146423,
1374
+ "learning_rate": 4.753637792780614e-05,
1375
+ "loss": 0.2544,
1376
+ "num_input_tokens_seen": 12480432,
1377
+ "step": 171
1378
+ },
1379
+ {
1380
+ "epoch": 2.83073727933541,
1381
+ "grad_norm": 0.12076311558485031,
1382
+ "learning_rate": 4.7507969285054845e-05,
1383
+ "loss": 0.2434,
1384
+ "num_input_tokens_seen": 12568064,
1385
+ "step": 172
1386
+ },
1387
+ {
1388
+ "epoch": 2.8473520249221185,
1389
+ "grad_norm": 0.16462342441082,
1390
+ "learning_rate": 4.7479406375202264e-05,
1391
+ "loss": 0.2417,
1392
+ "num_input_tokens_seen": 12647400,
1393
+ "step": 173
1394
+ },
1395
+ {
1396
+ "epoch": 2.8639667705088265,
1397
+ "grad_norm": 0.17294971644878387,
1398
+ "learning_rate": 4.745068939401539e-05,
1399
+ "loss": 0.2121,
1400
+ "num_input_tokens_seen": 12698208,
1401
+ "step": 174
1402
+ },
1403
+ {
1404
+ "epoch": 2.8805815160955346,
1405
+ "grad_norm": 0.16743803024291992,
1406
+ "learning_rate": 4.742181853831721e-05,
1407
+ "loss": 0.2238,
1408
+ "num_input_tokens_seen": 12758528,
1409
+ "step": 175
1410
+ },
1411
+ {
1412
+ "epoch": 2.897196261682243,
1413
+ "grad_norm": 0.14583320915699005,
1414
+ "learning_rate": 4.7392794005985326e-05,
1415
+ "loss": 0.2333,
1416
+ "num_input_tokens_seen": 12837264,
1417
+ "step": 176
1418
+ },
1419
+ {
1420
+ "epoch": 2.913811007268951,
1421
+ "grad_norm": 0.1509270817041397,
1422
+ "learning_rate": 4.7363615995950626e-05,
1423
+ "loss": 0.2179,
1424
+ "num_input_tokens_seen": 12902368,
1425
+ "step": 177
1426
+ },
1427
+ {
1428
+ "epoch": 2.930425752855659,
1429
+ "grad_norm": 0.12910738587379456,
1430
+ "learning_rate": 4.733428470819594e-05,
1431
+ "loss": 0.2144,
1432
+ "num_input_tokens_seen": 12974296,
1433
+ "step": 178
1434
+ },
1435
+ {
1436
+ "epoch": 2.9470404984423677,
1437
+ "grad_norm": 0.142000213265419,
1438
+ "learning_rate": 4.730480034375462e-05,
1439
+ "loss": 0.2413,
1440
+ "num_input_tokens_seen": 13057280,
1441
+ "step": 179
1442
+ },
1443
+ {
1444
+ "epoch": 2.9636552440290758,
1445
+ "grad_norm": 0.131468266248703,
1446
+ "learning_rate": 4.72751631047092e-05,
1447
+ "loss": 0.294,
1448
+ "num_input_tokens_seen": 13158232,
1449
+ "step": 180
1450
+ },
1451
+ {
1452
+ "epoch": 2.980269989615784,
1453
+ "grad_norm": 0.1529342085123062,
1454
+ "learning_rate": 4.7245373194189994e-05,
1455
+ "loss": 0.216,
1456
+ "num_input_tokens_seen": 13229840,
1457
+ "step": 181
1458
+ },
1459
+ {
1460
+ "epoch": 2.9968847352024923,
1461
+ "grad_norm": 0.1573815941810608,
1462
+ "learning_rate": 4.7215430816373726e-05,
1463
+ "loss": 0.2384,
1464
+ "num_input_tokens_seen": 13296520,
1465
+ "step": 182
1466
+ },
1467
+ {
1468
+ "epoch": 3.0,
1469
+ "grad_norm": 0.2532118558883667,
1470
+ "learning_rate": 4.718533617648209e-05,
1471
+ "loss": 0.1459,
1472
+ "num_input_tokens_seen": 13309672,
1473
+ "step": 183
1474
+ },
1475
+ {
1476
+ "epoch": 3.016614745586708,
1477
+ "grad_norm": 0.16963432729244232,
1478
+ "learning_rate": 4.715508948078037e-05,
1479
+ "loss": 0.1985,
1480
+ "num_input_tokens_seen": 13371544,
1481
+ "step": 184
1482
+ },
1483
+ {
1484
+ "epoch": 3.0332294911734166,
1485
+ "grad_norm": 0.18877384066581726,
1486
+ "learning_rate": 4.712469093657605e-05,
1487
+ "loss": 0.1856,
1488
+ "num_input_tokens_seen": 13432984,
1489
+ "step": 185
1490
+ },
1491
+ {
1492
+ "epoch": 3.0498442367601246,
1493
+ "grad_norm": 0.14922884106636047,
1494
+ "learning_rate": 4.709414075221734e-05,
1495
+ "loss": 0.2385,
1496
+ "num_input_tokens_seen": 13500016,
1497
+ "step": 186
1498
+ },
1499
+ {
1500
+ "epoch": 3.0664589823468327,
1501
+ "grad_norm": 0.2028326541185379,
1502
+ "learning_rate": 4.706343913709178e-05,
1503
+ "loss": 0.2227,
1504
+ "num_input_tokens_seen": 13579672,
1505
+ "step": 187
1506
+ },
1507
+ {
1508
+ "epoch": 3.083073727933541,
1509
+ "grad_norm": 0.19964616000652313,
1510
+ "learning_rate": 4.70325863016248e-05,
1511
+ "loss": 0.2045,
1512
+ "num_input_tokens_seen": 13630704,
1513
+ "step": 188
1514
+ },
1515
+ {
1516
+ "epoch": 3.0996884735202492,
1517
+ "grad_norm": 0.1594657599925995,
1518
+ "learning_rate": 4.7001582457278304e-05,
1519
+ "loss": 0.2648,
1520
+ "num_input_tokens_seen": 13695472,
1521
+ "step": 189
1522
+ },
1523
+ {
1524
+ "epoch": 3.1163032191069573,
1525
+ "grad_norm": 0.16952532529830933,
1526
+ "learning_rate": 4.697042781654913e-05,
1527
+ "loss": 0.22,
1528
+ "num_input_tokens_seen": 13767792,
1529
+ "step": 190
1530
+ },
1531
+ {
1532
+ "epoch": 3.132917964693666,
1533
+ "grad_norm": 0.16775831580162048,
1534
+ "learning_rate": 4.693912259296773e-05,
1535
+ "loss": 0.2667,
1536
+ "num_input_tokens_seen": 13857352,
1537
+ "step": 191
1538
+ },
1539
+ {
1540
+ "epoch": 3.149532710280374,
1541
+ "grad_norm": 0.15529580414295197,
1542
+ "learning_rate": 4.690766700109659e-05,
1543
+ "loss": 0.2154,
1544
+ "num_input_tokens_seen": 13939928,
1545
+ "step": 192
1546
+ },
1547
+ {
1548
+ "epoch": 3.166147455867082,
1549
+ "grad_norm": 0.1619848757982254,
1550
+ "learning_rate": 4.687606125652882e-05,
1551
+ "loss": 0.1963,
1552
+ "num_input_tokens_seen": 14017936,
1553
+ "step": 193
1554
+ },
1555
+ {
1556
+ "epoch": 3.1827622014537904,
1557
+ "grad_norm": 0.18066684901714325,
1558
+ "learning_rate": 4.684430557588664e-05,
1559
+ "loss": 0.1862,
1560
+ "num_input_tokens_seen": 14074176,
1561
+ "step": 194
1562
+ },
1563
+ {
1564
+ "epoch": 3.1993769470404985,
1565
+ "grad_norm": 0.16520777344703674,
1566
+ "learning_rate": 4.681240017681993e-05,
1567
+ "loss": 0.2576,
1568
+ "num_input_tokens_seen": 14167656,
1569
+ "step": 195
1570
+ },
1571
+ {
1572
+ "epoch": 3.2159916926272065,
1573
+ "grad_norm": 0.15385325253009796,
1574
+ "learning_rate": 4.678034527800474e-05,
1575
+ "loss": 0.1813,
1576
+ "num_input_tokens_seen": 14235800,
1577
+ "step": 196
1578
+ },
1579
+ {
1580
+ "epoch": 3.232606438213915,
1581
+ "grad_norm": 0.16897696256637573,
1582
+ "learning_rate": 4.674814109914174e-05,
1583
+ "loss": 0.1741,
1584
+ "num_input_tokens_seen": 14301272,
1585
+ "step": 197
1586
+ },
1587
+ {
1588
+ "epoch": 3.249221183800623,
1589
+ "grad_norm": 0.19556447863578796,
1590
+ "learning_rate": 4.671578786095478e-05,
1591
+ "loss": 0.2186,
1592
+ "num_input_tokens_seen": 14347352,
1593
+ "step": 198
1594
+ },
1595
+ {
1596
+ "epoch": 3.265835929387331,
1597
+ "grad_norm": 0.17333142459392548,
1598
+ "learning_rate": 4.668328578518933e-05,
1599
+ "loss": 0.2892,
1600
+ "num_input_tokens_seen": 14434600,
1601
+ "step": 199
1602
+ },
1603
+ {
1604
+ "epoch": 3.2824506749740396,
1605
+ "grad_norm": 0.20295488834381104,
1606
+ "learning_rate": 4.665063509461097e-05,
1607
+ "loss": 0.2014,
1608
+ "num_input_tokens_seen": 14484104,
1609
+ "step": 200
1610
+ },
1611
+ {
1612
+ "epoch": 3.2990654205607477,
1613
+ "grad_norm": 0.1597638726234436,
1614
+ "learning_rate": 4.661783601300388e-05,
1615
+ "loss": 0.2158,
1616
+ "num_input_tokens_seen": 14567152,
1617
+ "step": 201
1618
+ },
1619
+ {
1620
+ "epoch": 3.3156801661474558,
1621
+ "grad_norm": 0.19849488139152527,
1622
+ "learning_rate": 4.6584888765169296e-05,
1623
+ "loss": 0.2578,
1624
+ "num_input_tokens_seen": 14647040,
1625
+ "step": 202
1626
+ },
1627
+ {
1628
+ "epoch": 3.3322949117341643,
1629
+ "grad_norm": 0.1508200466632843,
1630
+ "learning_rate": 4.6551793576923964e-05,
1631
+ "loss": 0.2213,
1632
+ "num_input_tokens_seen": 14738216,
1633
+ "step": 203
1634
+ },
1635
+ {
1636
+ "epoch": 3.3489096573208723,
1637
+ "grad_norm": 0.1687687337398529,
1638
+ "learning_rate": 4.65185506750986e-05,
1639
+ "loss": 0.1828,
1640
+ "num_input_tokens_seen": 14811216,
1641
+ "step": 204
1642
+ },
1643
+ {
1644
+ "epoch": 3.3655244029075804,
1645
+ "grad_norm": 0.16587376594543457,
1646
+ "learning_rate": 4.648516028753632e-05,
1647
+ "loss": 0.1619,
1648
+ "num_input_tokens_seen": 14885992,
1649
+ "step": 205
1650
+ },
1651
+ {
1652
+ "epoch": 3.382139148494289,
1653
+ "grad_norm": 0.16600169241428375,
1654
+ "learning_rate": 4.645162264309112e-05,
1655
+ "loss": 0.2438,
1656
+ "num_input_tokens_seen": 14961984,
1657
+ "step": 206
1658
+ },
1659
+ {
1660
+ "epoch": 3.398753894080997,
1661
+ "grad_norm": 0.1877703070640564,
1662
+ "learning_rate": 4.6417937971626245e-05,
1663
+ "loss": 0.1771,
1664
+ "num_input_tokens_seen": 15021240,
1665
+ "step": 207
1666
+ },
1667
+ {
1668
+ "epoch": 3.415368639667705,
1669
+ "grad_norm": 0.20105206966400146,
1670
+ "learning_rate": 4.638410650401267e-05,
1671
+ "loss": 0.1742,
1672
+ "num_input_tokens_seen": 15092016,
1673
+ "step": 208
1674
+ },
1675
+ {
1676
+ "epoch": 3.431983385254413,
1677
+ "grad_norm": 0.12934140861034393,
1678
+ "learning_rate": 4.635012847212748e-05,
1679
+ "loss": 0.1725,
1680
+ "num_input_tokens_seen": 15198192,
1681
+ "step": 209
1682
+ },
1683
+ {
1684
+ "epoch": 3.4485981308411215,
1685
+ "grad_norm": 0.18388882279396057,
1686
+ "learning_rate": 4.6316004108852305e-05,
1687
+ "loss": 0.186,
1688
+ "num_input_tokens_seen": 15258432,
1689
+ "step": 210
1690
+ },
1691
+ {
1692
+ "epoch": 3.4652128764278296,
1693
+ "grad_norm": 0.1766858547925949,
1694
+ "learning_rate": 4.628173364807171e-05,
1695
+ "loss": 0.2166,
1696
+ "num_input_tokens_seen": 15329600,
1697
+ "step": 211
1698
+ },
1699
+ {
1700
+ "epoch": 3.4818276220145377,
1701
+ "grad_norm": 0.16214998066425323,
1702
+ "learning_rate": 4.6247317324671605e-05,
1703
+ "loss": 0.2038,
1704
+ "num_input_tokens_seen": 15407920,
1705
+ "step": 212
1706
+ },
1707
+ {
1708
+ "epoch": 3.498442367601246,
1709
+ "grad_norm": 0.16933797299861908,
1710
+ "learning_rate": 4.6212755374537596e-05,
1711
+ "loss": 0.2017,
1712
+ "num_input_tokens_seen": 15479640,
1713
+ "step": 213
1714
+ },
1715
+ {
1716
+ "epoch": 3.515057113187954,
1717
+ "grad_norm": 0.19472749531269073,
1718
+ "learning_rate": 4.617804803455344e-05,
1719
+ "loss": 0.2048,
1720
+ "num_input_tokens_seen": 15561960,
1721
+ "step": 214
1722
+ },
1723
+ {
1724
+ "epoch": 3.5316718587746623,
1725
+ "grad_norm": 0.33335182070732117,
1726
+ "learning_rate": 4.614319554259934e-05,
1727
+ "loss": 0.2358,
1728
+ "num_input_tokens_seen": 15641440,
1729
+ "step": 215
1730
+ },
1731
+ {
1732
+ "epoch": 3.5482866043613708,
1733
+ "grad_norm": 0.19587557017803192,
1734
+ "learning_rate": 4.610819813755038e-05,
1735
+ "loss": 0.2374,
1736
+ "num_input_tokens_seen": 15728872,
1737
+ "step": 216
1738
+ },
1739
+ {
1740
+ "epoch": 3.564901349948079,
1741
+ "grad_norm": 0.19063518941402435,
1742
+ "learning_rate": 4.607305605927487e-05,
1743
+ "loss": 0.1919,
1744
+ "num_input_tokens_seen": 15798112,
1745
+ "step": 217
1746
+ },
1747
+ {
1748
+ "epoch": 3.581516095534787,
1749
+ "grad_norm": 0.19598323106765747,
1750
+ "learning_rate": 4.6037769548632656e-05,
1751
+ "loss": 0.2583,
1752
+ "num_input_tokens_seen": 15865936,
1753
+ "step": 218
1754
+ },
1755
+ {
1756
+ "epoch": 3.5981308411214954,
1757
+ "grad_norm": 0.18066690862178802,
1758
+ "learning_rate": 4.600233884747355e-05,
1759
+ "loss": 0.2337,
1760
+ "num_input_tokens_seen": 15941368,
1761
+ "step": 219
1762
+ },
1763
+ {
1764
+ "epoch": 3.6147455867082035,
1765
+ "grad_norm": 0.16981899738311768,
1766
+ "learning_rate": 4.5966764198635606e-05,
1767
+ "loss": 0.1818,
1768
+ "num_input_tokens_seen": 16028208,
1769
+ "step": 220
1770
+ },
1771
+ {
1772
+ "epoch": 3.6313603322949115,
1773
+ "grad_norm": 0.180410236120224,
1774
+ "learning_rate": 4.5931045845943474e-05,
1775
+ "loss": 0.1646,
1776
+ "num_input_tokens_seen": 16104408,
1777
+ "step": 221
1778
+ },
1779
+ {
1780
+ "epoch": 3.64797507788162,
1781
+ "grad_norm": 0.19180680811405182,
1782
+ "learning_rate": 4.5895184034206765e-05,
1783
+ "loss": 0.3263,
1784
+ "num_input_tokens_seen": 16156800,
1785
+ "step": 222
1786
+ },
1787
+ {
1788
+ "epoch": 3.664589823468328,
1789
+ "grad_norm": 0.16119280457496643,
1790
+ "learning_rate": 4.585917900921829e-05,
1791
+ "loss": 0.2636,
1792
+ "num_input_tokens_seen": 16256712,
1793
+ "step": 223
1794
+ },
1795
+ {
1796
+ "epoch": 3.681204569055036,
1797
+ "grad_norm": 0.18559172749519348,
1798
+ "learning_rate": 4.5823031017752485e-05,
1799
+ "loss": 0.1759,
1800
+ "num_input_tokens_seen": 16330344,
1801
+ "step": 224
1802
+ },
1803
+ {
1804
+ "epoch": 3.6978193146417446,
1805
+ "grad_norm": 0.17767880856990814,
1806
+ "learning_rate": 4.5786740307563636e-05,
1807
+ "loss": 0.196,
1808
+ "num_input_tokens_seen": 16399792,
1809
+ "step": 225
1810
+ },
1811
+ {
1812
+ "epoch": 3.7144340602284527,
1813
+ "grad_norm": 0.17806987464427948,
1814
+ "learning_rate": 4.575030712738419e-05,
1815
+ "loss": 0.186,
1816
+ "num_input_tokens_seen": 16466368,
1817
+ "step": 226
1818
+ },
1819
+ {
1820
+ "epoch": 3.7310488058151607,
1821
+ "grad_norm": 0.1952792853116989,
1822
+ "learning_rate": 4.571373172692309e-05,
1823
+ "loss": 0.1789,
1824
+ "num_input_tokens_seen": 16530976,
1825
+ "step": 227
1826
+ },
1827
+ {
1828
+ "epoch": 3.7476635514018692,
1829
+ "grad_norm": 0.1774374544620514,
1830
+ "learning_rate": 4.567701435686404e-05,
1831
+ "loss": 0.1929,
1832
+ "num_input_tokens_seen": 16600216,
1833
+ "step": 228
1834
+ },
1835
+ {
1836
+ "epoch": 3.7642782969885773,
1837
+ "grad_norm": 0.18798600137233734,
1838
+ "learning_rate": 4.5640155268863796e-05,
1839
+ "loss": 0.2268,
1840
+ "num_input_tokens_seen": 16673192,
1841
+ "step": 229
1842
+ },
1843
+ {
1844
+ "epoch": 3.7808930425752854,
1845
+ "grad_norm": 0.2022520750761032,
1846
+ "learning_rate": 4.5603154715550386e-05,
1847
+ "loss": 0.1716,
1848
+ "num_input_tokens_seen": 16739912,
1849
+ "step": 230
1850
+ },
1851
+ {
1852
+ "epoch": 3.797507788161994,
1853
+ "grad_norm": 0.15170948207378387,
1854
+ "learning_rate": 4.55660129505215e-05,
1855
+ "loss": 0.1844,
1856
+ "num_input_tokens_seen": 16834632,
1857
+ "step": 231
1858
+ },
1859
+ {
1860
+ "epoch": 3.814122533748702,
1861
+ "grad_norm": 0.16655084490776062,
1862
+ "learning_rate": 4.5528730228342605e-05,
1863
+ "loss": 0.1899,
1864
+ "num_input_tokens_seen": 16914728,
1865
+ "step": 232
1866
+ },
1867
+ {
1868
+ "epoch": 3.83073727933541,
1869
+ "grad_norm": 0.19025221467018127,
1870
+ "learning_rate": 4.549130680454532e-05,
1871
+ "loss": 0.2214,
1872
+ "num_input_tokens_seen": 17014304,
1873
+ "step": 233
1874
+ },
1875
+ {
1876
+ "epoch": 3.8473520249221185,
1877
+ "grad_norm": 0.17126557230949402,
1878
+ "learning_rate": 4.545374293562559e-05,
1879
+ "loss": 0.2062,
1880
+ "num_input_tokens_seen": 17106664,
1881
+ "step": 234
1882
+ },
1883
+ {
1884
+ "epoch": 3.8639667705088265,
1885
+ "grad_norm": 0.16162410378456116,
1886
+ "learning_rate": 4.541603887904198e-05,
1887
+ "loss": 0.2016,
1888
+ "num_input_tokens_seen": 17193744,
1889
+ "step": 235
1890
+ },
1891
+ {
1892
+ "epoch": 3.8805815160955346,
1893
+ "grad_norm": 0.2067136913537979,
1894
+ "learning_rate": 4.537819489321386e-05,
1895
+ "loss": 0.1992,
1896
+ "num_input_tokens_seen": 17254656,
1897
+ "step": 236
1898
+ },
1899
+ {
1900
+ "epoch": 3.897196261682243,
1901
+ "grad_norm": 0.200433611869812,
1902
+ "learning_rate": 4.534021123751968e-05,
1903
+ "loss": 0.1961,
1904
+ "num_input_tokens_seen": 17325896,
1905
+ "step": 237
1906
+ },
1907
+ {
1908
+ "epoch": 3.913811007268951,
1909
+ "grad_norm": 0.2062034010887146,
1910
+ "learning_rate": 4.5302088172295156e-05,
1911
+ "loss": 0.2302,
1912
+ "num_input_tokens_seen": 17394424,
1913
+ "step": 238
1914
+ },
1915
+ {
1916
+ "epoch": 3.930425752855659,
1917
+ "grad_norm": 0.1928798407316208,
1918
+ "learning_rate": 4.526382595883152e-05,
1919
+ "loss": 0.1846,
1920
+ "num_input_tokens_seen": 17456352,
1921
+ "step": 239
1922
+ },
1923
+ {
1924
+ "epoch": 3.9470404984423677,
1925
+ "grad_norm": 0.2011859118938446,
1926
+ "learning_rate": 4.522542485937369e-05,
1927
+ "loss": 0.1879,
1928
+ "num_input_tokens_seen": 17519168,
1929
+ "step": 240
1930
+ }
1931
+ ],
1932
+ "logging_steps": 1.0,
1933
+ "max_steps": 1200,
1934
+ "num_input_tokens_seen": 17519168,
1935
+ "num_train_epochs": 20,
1936
+ "save_steps": 60,
1937
+ "stateful_callbacks": {
1938
+ "TrainerControl": {
1939
+ "args": {
1940
+ "should_epoch_stop": false,
1941
+ "should_evaluate": false,
1942
+ "should_log": false,
1943
+ "should_save": true,
1944
+ "should_training_stop": false
1945
+ },
1946
+ "attributes": {}
1947
+ }
1948
+ },
1949
+ "total_flos": 1.477946311264174e+18,
1950
+ "train_batch_size": 1,
1951
+ "trial_name": null,
1952
+ "trial_params": null
1953
+ }
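
The trainer_state.json above logs one entry per optimizer step ("logging_steps": 1.0), each carrying epoch, grad_norm, learning_rate, loss, and num_input_tokens_seen, plus run-level fields such as max_steps, save_steps, and total_flos. Below is a minimal sketch for summarizing such a log offline (the checkpoint path is an assumption; the field names are taken from the state above):

```python
import json

# Assumed path; every checkpoint-*/trainer_state.json in this repo has the same shape.
with open("checkpoint-240/trainer_state.json") as f:
    state = json.load(f)

entries = [e for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in entries]
best = min(losses)

print(f"logged steps : {len(entries)} / {state['max_steps']}")
print(f"epoch reached: {entries[-1]['epoch']:.2f}")
print(f"final loss   : {losses[-1]:.4f}")
print(f"min loss     : {best:.4f} at step {entries[losses.index(best)]['step']}")
print(f"tokens seen  : {state['num_input_tokens_seen']:,}")
```
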
checkpoint-300/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen2.5-Coder-14B-Instruct
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.12.0
checkpoint-300/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen2.5-Coder-14B-Instruct",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 32,
14
+ "lora_dropout": 0.1,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 16,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "gate_proj",
24
+ "down_proj",
25
+ "v_proj",
26
+ "q_proj",
27
+ "o_proj",
28
+ "k_proj",
29
+ "up_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
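
This adapter_config.json pins the adapter to its base model: a rank-16 LoRA ("r": 16, "lora_alpha": 32, dropout 0.1) over the q/k/v/o attention projections and the gate/up/down MLP projections of Qwen/Qwen2.5-Coder-14B-Instruct. A minimal loading sketch using the Transformers and PEFT APIs follows; the local checkpoint directory, dtype, and device placement are assumptions:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "Qwen/Qwen2.5-Coder-14B-Instruct"  # base_model_name_or_path above
ADAPTER = "checkpoint-300"                # assumed local path to this checkpoint

tokenizer = AutoTokenizer.from_pretrained(ADAPTER)  # the checkpoint ships its tokenizer files
base = AutoModelForCausalLM.from_pretrained(
    BASE,
    torch_dtype=torch.bfloat16,  # assumed dtype; device_map="auto" requires accelerate
    device_map="auto",
)
model = PeftModel.from_pretrained(base, ADAPTER)  # attaches the LoRA weights
model.eval()
```

If a standalone model is preferred, model.merge_and_unload() folds the LoRA deltas into the base weights.
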
checkpoint-300/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
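
added_tokens.json maps each Qwen special token to a fixed id at the top of the vocabulary (151643 through 151664). A quick round-trip check against the tokenizer shipped in the checkpoint; the directory name is again an assumption:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-300")  # assumed local path

# Ids come straight from added_tokens.json above.
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645

# eos/pad assignments come from the special_tokens_map.json below.
print(tok.eos_token, tok.pad_token)
```
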
checkpoint-300/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
checkpoint-300/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
199
+ "clean_up_tokenization_spaces": false,
200
+ "eos_token": "<|im_end|>",
201
+ "errors": "replace",
202
+ "extra_special_tokens": {},
203
+ "model_max_length": 17500,
204
+ "pad_token": "<|endoftext|>",
205
+ "padding_side": "right",
206
+ "split_special_tokens": false,
207
+ "tokenizer_class": "Qwen2Tokenizer",
208
+ "unk_token": null
209
+ }
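
The chat_template field embeds Qwen's ChatML format, including the <tools> function-calling blocks, so prompts should be rendered with apply_chat_template rather than concatenated by hand; note also "model_max_length": 17500 and right-side padding. A minimal sketch (the checkpoint path is an assumption; the messages are placeholders):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-300")  # assumed local path

messages = [
    {"role": "system", "content": "You are a coding assistant."},
    {"role": "user", "content": "Write a function that reverses a string."},
]

# Renders <|im_start|>/<|im_end|> turns and appends the assistant header,
# following the Jinja template stored in chat_template above.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```
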
checkpoint-300/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-360/adapter_config.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen2.5-Coder-14B-Instruct",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 32,
14
+ "lora_dropout": 0.1,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 16,
20
+ "rank_pattern": {},
21
+ "revision": null,
22
+ "target_modules": [
23
+ "gate_proj",
24
+ "down_proj",
25
+ "v_proj",
26
+ "q_proj",
27
+ "o_proj",
28
+ "k_proj",
29
+ "up_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
checkpoint-360/added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
checkpoint-360/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-360/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
checkpoint-360/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
199
+ "clean_up_tokenization_spaces": false,
200
+ "eos_token": "<|im_end|>",
201
+ "errors": "replace",
202
+ "extra_special_tokens": {},
203
+ "model_max_length": 17500,
204
+ "pad_token": "<|endoftext|>",
205
+ "padding_side": "right",
206
+ "split_special_tokens": false,
207
+ "tokenizer_class": "Qwen2Tokenizer",
208
+ "unk_token": null
209
+ }
checkpoint-360/trainer_state.json ADDED
@@ -0,0 +1,2913 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 5.913811007268951,
5
+ "eval_steps": 500,
6
+ "global_step": 360,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.016614745586708203,
13
+ "grad_norm": 0.050998032093048096,
14
+ "learning_rate": 4.999991432639962e-05,
15
+ "loss": 0.5487,
16
+ "num_input_tokens_seen": 70408,
17
+ "step": 1
18
+ },
19
+ {
20
+ "epoch": 0.033229491173416406,
21
+ "grad_norm": 0.049370743334293365,
22
+ "learning_rate": 4.999965730618567e-05,
23
+ "loss": 0.4981,
24
+ "num_input_tokens_seen": 139640,
25
+ "step": 2
26
+ },
27
+ {
28
+ "epoch": 0.04984423676012461,
29
+ "grad_norm": 0.05077400803565979,
30
+ "learning_rate": 4.9999228941119745e-05,
31
+ "loss": 0.5505,
32
+ "num_input_tokens_seen": 223656,
33
+ "step": 3
34
+ },
35
+ {
36
+ "epoch": 0.06645898234683281,
37
+ "grad_norm": 0.04397282376885414,
38
+ "learning_rate": 4.999862923413781e-05,
39
+ "loss": 0.504,
40
+ "num_input_tokens_seen": 300688,
41
+ "step": 4
42
+ },
43
+ {
44
+ "epoch": 0.08307372793354102,
45
+ "grad_norm": 0.05225864797830582,
46
+ "learning_rate": 4.999785818935018e-05,
47
+ "loss": 0.4925,
48
+ "num_input_tokens_seen": 366368,
49
+ "step": 5
50
+ },
51
+ {
52
+ "epoch": 0.09968847352024922,
53
+ "grad_norm": 0.049482282251119614,
54
+ "learning_rate": 4.999691581204152e-05,
55
+ "loss": 0.4771,
56
+ "num_input_tokens_seen": 445808,
57
+ "step": 6
58
+ },
59
+ {
60
+ "epoch": 0.11630321910695743,
61
+ "grad_norm": 0.05594080314040184,
62
+ "learning_rate": 4.9995802108670775e-05,
63
+ "loss": 0.4986,
64
+ "num_input_tokens_seen": 522800,
65
+ "step": 7
66
+ },
67
+ {
68
+ "epoch": 0.13291796469366562,
69
+ "grad_norm": 0.051852282136678696,
70
+ "learning_rate": 4.999451708687114e-05,
71
+ "loss": 0.5171,
72
+ "num_input_tokens_seen": 599608,
73
+ "step": 8
74
+ },
75
+ {
76
+ "epoch": 0.14953271028037382,
77
+ "grad_norm": 0.045517683029174805,
78
+ "learning_rate": 4.9993060755450015e-05,
79
+ "loss": 0.5669,
80
+ "num_input_tokens_seen": 681424,
81
+ "step": 9
82
+ },
83
+ {
84
+ "epoch": 0.16614745586708204,
85
+ "grad_norm": 0.044325754046440125,
86
+ "learning_rate": 4.999143312438893e-05,
87
+ "loss": 0.4218,
88
+ "num_input_tokens_seen": 756744,
89
+ "step": 10
90
+ },
91
+ {
92
+ "epoch": 0.18276220145379024,
93
+ "grad_norm": 0.04328459873795509,
94
+ "learning_rate": 4.998963420484349e-05,
95
+ "loss": 0.434,
96
+ "num_input_tokens_seen": 842576,
97
+ "step": 11
98
+ },
99
+ {
100
+ "epoch": 0.19937694704049844,
101
+ "grad_norm": 0.04725787043571472,
102
+ "learning_rate": 4.998766400914329e-05,
103
+ "loss": 0.4287,
104
+ "num_input_tokens_seen": 917232,
105
+ "step": 12
106
+ },
107
+ {
108
+ "epoch": 0.21599169262720663,
109
+ "grad_norm": 0.03806879371404648,
110
+ "learning_rate": 4.9985522550791825e-05,
111
+ "loss": 0.3454,
112
+ "num_input_tokens_seen": 1006800,
113
+ "step": 13
114
+ },
115
+ {
116
+ "epoch": 0.23260643821391486,
117
+ "grad_norm": 0.05201176926493645,
118
+ "learning_rate": 4.998320984446641e-05,
119
+ "loss": 0.436,
120
+ "num_input_tokens_seen": 1085824,
121
+ "step": 14
122
+ },
123
+ {
124
+ "epoch": 0.24922118380062305,
125
+ "grad_norm": 0.047955628484487534,
126
+ "learning_rate": 4.9980725906018074e-05,
127
+ "loss": 0.4625,
128
+ "num_input_tokens_seen": 1164160,
129
+ "step": 15
130
+ },
131
+ {
132
+ "epoch": 0.26583592938733125,
133
+ "grad_norm": 0.05529098957777023,
134
+ "learning_rate": 4.997807075247146e-05,
135
+ "loss": 0.5035,
136
+ "num_input_tokens_seen": 1242264,
137
+ "step": 16
138
+ },
139
+ {
140
+ "epoch": 0.2824506749740395,
141
+ "grad_norm": 0.04751162976026535,
142
+ "learning_rate": 4.997524440202469e-05,
143
+ "loss": 0.4354,
144
+ "num_input_tokens_seen": 1325904,
145
+ "step": 17
146
+ },
147
+ {
148
+ "epoch": 0.29906542056074764,
149
+ "grad_norm": 0.06726882606744766,
150
+ "learning_rate": 4.9972246874049254e-05,
151
+ "loss": 0.5439,
152
+ "num_input_tokens_seen": 1385632,
153
+ "step": 18
154
+ },
155
+ {
156
+ "epoch": 0.31568016614745587,
157
+ "grad_norm": 0.05245920270681381,
158
+ "learning_rate": 4.996907818908987e-05,
159
+ "loss": 0.3727,
160
+ "num_input_tokens_seen": 1470632,
161
+ "step": 19
162
+ },
163
+ {
164
+ "epoch": 0.3322949117341641,
165
+ "grad_norm": 0.05745376646518707,
166
+ "learning_rate": 4.996573836886435e-05,
167
+ "loss": 0.4894,
168
+ "num_input_tokens_seen": 1547536,
169
+ "step": 20
170
+ },
171
+ {
172
+ "epoch": 0.34890965732087226,
173
+ "grad_norm": 0.056607529520988464,
174
+ "learning_rate": 4.9962227436263453e-05,
175
+ "loss": 0.3846,
176
+ "num_input_tokens_seen": 1615528,
177
+ "step": 21
178
+ },
179
+ {
180
+ "epoch": 0.3655244029075805,
181
+ "grad_norm": 0.06150667741894722,
182
+ "learning_rate": 4.995854541535071e-05,
183
+ "loss": 0.4362,
184
+ "num_input_tokens_seen": 1694352,
185
+ "step": 22
186
+ },
187
+ {
188
+ "epoch": 0.3821391484942887,
189
+ "grad_norm": 0.056484442204236984,
190
+ "learning_rate": 4.9954692331362294e-05,
191
+ "loss": 0.4438,
192
+ "num_input_tokens_seen": 1753776,
193
+ "step": 23
194
+ },
195
+ {
196
+ "epoch": 0.3987538940809969,
197
+ "grad_norm": 0.0704159140586853,
198
+ "learning_rate": 4.995066821070679e-05,
199
+ "loss": 0.4496,
200
+ "num_input_tokens_seen": 1809048,
201
+ "step": 24
202
+ },
203
+ {
204
+ "epoch": 0.4153686396677051,
205
+ "grad_norm": 0.06202029809355736,
206
+ "learning_rate": 4.994647308096509e-05,
207
+ "loss": 0.5096,
208
+ "num_input_tokens_seen": 1884264,
209
+ "step": 25
210
+ },
211
+ {
212
+ "epoch": 0.43198338525441327,
213
+ "grad_norm": 0.04237145930528641,
214
+ "learning_rate": 4.994210697089014e-05,
215
+ "loss": 0.3722,
216
+ "num_input_tokens_seen": 1981704,
217
+ "step": 26
218
+ },
219
+ {
220
+ "epoch": 0.4485981308411215,
221
+ "grad_norm": 0.06920398026704788,
222
+ "learning_rate": 4.9937569910406756e-05,
223
+ "loss": 0.4103,
224
+ "num_input_tokens_seen": 2044144,
225
+ "step": 27
226
+ },
227
+ {
228
+ "epoch": 0.4652128764278297,
229
+ "grad_norm": 0.062432270497083664,
230
+ "learning_rate": 4.9932861930611454e-05,
231
+ "loss": 0.357,
232
+ "num_input_tokens_seen": 2107584,
233
+ "step": 28
234
+ },
235
+ {
236
+ "epoch": 0.4818276220145379,
237
+ "grad_norm": 0.06791180372238159,
238
+ "learning_rate": 4.9927983063772196e-05,
239
+ "loss": 0.3889,
240
+ "num_input_tokens_seen": 2169248,
241
+ "step": 29
242
+ },
243
+ {
244
+ "epoch": 0.4984423676012461,
245
+ "grad_norm": 0.07219590991735458,
246
+ "learning_rate": 4.99229333433282e-05,
247
+ "loss": 0.3543,
248
+ "num_input_tokens_seen": 2230344,
249
+ "step": 30
250
+ },
251
+ {
252
+ "epoch": 0.5150571131879543,
253
+ "grad_norm": 0.0647474005818367,
254
+ "learning_rate": 4.9917712803889674e-05,
255
+ "loss": 0.3453,
256
+ "num_input_tokens_seen": 2302368,
257
+ "step": 31
258
+ },
259
+ {
260
+ "epoch": 0.5316718587746625,
261
+ "grad_norm": 0.07434642314910889,
262
+ "learning_rate": 4.991232148123761e-05,
263
+ "loss": 0.435,
264
+ "num_input_tokens_seen": 2369984,
265
+ "step": 32
266
+ },
267
+ {
268
+ "epoch": 0.5482866043613707,
269
+ "grad_norm": 0.05302443355321884,
270
+ "learning_rate": 4.990675941232353e-05,
271
+ "loss": 0.3981,
272
+ "num_input_tokens_seen": 2453032,
273
+ "step": 33
274
+ },
275
+ {
276
+ "epoch": 0.564901349948079,
277
+ "grad_norm": 0.053745292127132416,
278
+ "learning_rate": 4.990102663526924e-05,
279
+ "loss": 0.3755,
280
+ "num_input_tokens_seen": 2527464,
281
+ "step": 34
282
+ },
283
+ {
284
+ "epoch": 0.5815160955347871,
285
+ "grad_norm": 0.06717613339424133,
286
+ "learning_rate": 4.989512318936655e-05,
287
+ "loss": 0.3699,
288
+ "num_input_tokens_seen": 2597032,
289
+ "step": 35
290
+ },
291
+ {
292
+ "epoch": 0.5981308411214953,
293
+ "grad_norm": 0.071847103536129,
294
+ "learning_rate": 4.9889049115077005e-05,
295
+ "loss": 0.3705,
296
+ "num_input_tokens_seen": 2671704,
297
+ "step": 36
298
+ },
299
+ {
300
+ "epoch": 0.6147455867082036,
301
+ "grad_norm": 0.0460306741297245,
302
+ "learning_rate": 4.988280445403164e-05,
303
+ "loss": 0.3797,
304
+ "num_input_tokens_seen": 2767640,
305
+ "step": 37
306
+ },
307
+ {
308
+ "epoch": 0.6313603322949117,
309
+ "grad_norm": 0.053273387253284454,
310
+ "learning_rate": 4.987638924903067e-05,
311
+ "loss": 0.3799,
312
+ "num_input_tokens_seen": 2843720,
313
+ "step": 38
314
+ },
315
+ {
316
+ "epoch": 0.6479750778816199,
317
+ "grad_norm": 0.05600422993302345,
318
+ "learning_rate": 4.9869803544043166e-05,
319
+ "loss": 0.2866,
320
+ "num_input_tokens_seen": 2921472,
321
+ "step": 39
322
+ },
323
+ {
324
+ "epoch": 0.6645898234683282,
325
+ "grad_norm": 0.06414052098989487,
326
+ "learning_rate": 4.9863047384206835e-05,
327
+ "loss": 0.4115,
328
+ "num_input_tokens_seen": 2998400,
329
+ "step": 40
330
+ },
331
+ {
332
+ "epoch": 0.6812045690550363,
333
+ "grad_norm": 0.09214208275079727,
334
+ "learning_rate": 4.985612081582764e-05,
335
+ "loss": 0.3804,
336
+ "num_input_tokens_seen": 3059648,
337
+ "step": 41
338
+ },
339
+ {
340
+ "epoch": 0.6978193146417445,
341
+ "grad_norm": 0.0555964931845665,
342
+ "learning_rate": 4.98490238863795e-05,
343
+ "loss": 0.3121,
344
+ "num_input_tokens_seen": 3140184,
345
+ "step": 42
346
+ },
347
+ {
348
+ "epoch": 0.7144340602284528,
349
+ "grad_norm": 0.06256969273090363,
350
+ "learning_rate": 4.984175664450397e-05,
351
+ "loss": 0.3271,
352
+ "num_input_tokens_seen": 3207184,
353
+ "step": 43
354
+ },
355
+ {
356
+ "epoch": 0.731048805815161,
357
+ "grad_norm": 0.0543232187628746,
358
+ "learning_rate": 4.983431914000991e-05,
359
+ "loss": 0.364,
360
+ "num_input_tokens_seen": 3292344,
361
+ "step": 44
362
+ },
363
+ {
364
+ "epoch": 0.7476635514018691,
365
+ "grad_norm": 0.06077824532985687,
366
+ "learning_rate": 4.982671142387316e-05,
367
+ "loss": 0.3894,
368
+ "num_input_tokens_seen": 3365384,
369
+ "step": 45
370
+ },
371
+ {
+ "epoch": 0.7642782969885774,
+ "grad_norm": 0.06091070920228958,
+ "learning_rate": 4.981893354823614e-05,
+ "loss": 0.3354,
+ "num_input_tokens_seen": 3440720,
+ "step": 46
+ },
+ {
+ "epoch": 0.7808930425752856,
+ "grad_norm": 0.054153311997652054,
+ "learning_rate": 4.9810985566407544e-05,
+ "loss": 0.3058,
+ "num_input_tokens_seen": 3533576,
+ "step": 47
+ },
+ {
+ "epoch": 0.7975077881619937,
+ "grad_norm": 0.06662417948246002,
+ "learning_rate": 4.980286753286195e-05,
+ "loss": 0.4658,
+ "num_input_tokens_seen": 3599744,
+ "step": 48
+ },
+ {
+ "epoch": 0.814122533748702,
+ "grad_norm": 0.05790851265192032,
+ "learning_rate": 4.979457950323945e-05,
+ "loss": 0.3647,
+ "num_input_tokens_seen": 3689520,
+ "step": 49
+ },
+ {
+ "epoch": 0.8307372793354102,
+ "grad_norm": 0.10742159187793732,
+ "learning_rate": 4.9786121534345265e-05,
+ "loss": 0.343,
+ "num_input_tokens_seen": 3751808,
+ "step": 50
+ },
+ {
+ "epoch": 0.8473520249221184,
+ "grad_norm": 0.05565556138753891,
+ "learning_rate": 4.9777493684149375e-05,
+ "loss": 0.3317,
+ "num_input_tokens_seen": 3839096,
+ "step": 51
+ },
+ {
+ "epoch": 0.8639667705088265,
+ "grad_norm": 0.05752381682395935,
+ "learning_rate": 4.976869601178609e-05,
+ "loss": 0.38,
+ "num_input_tokens_seen": 3919824,
+ "step": 52
+ },
+ {
+ "epoch": 0.8805815160955348,
+ "grad_norm": 0.06406434625387192,
+ "learning_rate": 4.975972857755369e-05,
+ "loss": 0.2676,
+ "num_input_tokens_seen": 3989312,
+ "step": 53
+ },
+ {
+ "epoch": 0.897196261682243,
+ "grad_norm": 0.0653691440820694,
+ "learning_rate": 4.975059144291394e-05,
+ "loss": 0.3516,
+ "num_input_tokens_seen": 4060528,
+ "step": 54
+ },
+ {
+ "epoch": 0.9138110072689511,
+ "grad_norm": 0.06272953748703003,
+ "learning_rate": 4.974128467049176e-05,
+ "loss": 0.3004,
+ "num_input_tokens_seen": 4129368,
+ "step": 55
+ },
+ {
+ "epoch": 0.9304257528556594,
+ "grad_norm": 0.08054930716753006,
+ "learning_rate": 4.9731808324074717e-05,
+ "loss": 0.3009,
+ "num_input_tokens_seen": 4175208,
+ "step": 56
+ },
+ {
+ "epoch": 0.9470404984423676,
+ "grad_norm": 0.07523038238286972,
+ "learning_rate": 4.972216246861262e-05,
+ "loss": 0.2814,
+ "num_input_tokens_seen": 4218096,
+ "step": 57
+ },
+ {
+ "epoch": 0.9636552440290758,
+ "grad_norm": 0.07347433269023895,
+ "learning_rate": 4.971234717021709e-05,
+ "loss": 0.3321,
+ "num_input_tokens_seen": 4275968,
+ "step": 58
+ },
+ {
+ "epoch": 0.980269989615784,
+ "grad_norm": 0.05830248445272446,
+ "learning_rate": 4.9702362496161085e-05,
+ "loss": 0.2881,
+ "num_input_tokens_seen": 4346616,
+ "step": 59
+ },
+ {
+ "epoch": 0.9968847352024922,
+ "grad_norm": 0.061629410833120346,
+ "learning_rate": 4.9692208514878444e-05,
+ "loss": 0.2993,
+ "num_input_tokens_seen": 4425064,
+ "step": 60
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.13380740582942963,
+ "learning_rate": 4.968188529596342e-05,
+ "loss": 0.2511,
+ "num_input_tokens_seen": 4435328,
+ "step": 61
+ },
+ {
+ "epoch": 1.0166147455867083,
+ "grad_norm": 0.0726238414645195,
+ "learning_rate": 4.9671392910170185e-05,
+ "loss": 0.3127,
+ "num_input_tokens_seen": 4500104,
+ "step": 62
+ },
+ {
+ "epoch": 1.0332294911734163,
+ "grad_norm": 0.05980083718895912,
+ "learning_rate": 4.966073142941239e-05,
+ "loss": 0.3601,
+ "num_input_tokens_seen": 4581976,
+ "step": 63
+ },
+ {
+ "epoch": 1.0498442367601246,
+ "grad_norm": 0.06445376574993134,
+ "learning_rate": 4.964990092676263e-05,
+ "loss": 0.3049,
+ "num_input_tokens_seen": 4652160,
+ "step": 64
+ },
+ {
+ "epoch": 1.066458982346833,
+ "grad_norm": 0.07824505120515823,
+ "learning_rate": 4.9638901476451946e-05,
+ "loss": 0.3099,
+ "num_input_tokens_seen": 4709368,
+ "step": 65
+ },
+ {
+ "epoch": 1.083073727933541,
+ "grad_norm": 0.058268457651138306,
+ "learning_rate": 4.962773315386935e-05,
+ "loss": 0.3273,
+ "num_input_tokens_seen": 4798256,
+ "step": 66
+ },
+ {
+ "epoch": 1.0996884735202492,
+ "grad_norm": 0.07069691270589828,
+ "learning_rate": 4.961639603556127e-05,
+ "loss": 0.282,
+ "num_input_tokens_seen": 4859200,
+ "step": 67
+ },
+ {
+ "epoch": 1.1163032191069575,
+ "grad_norm": 0.0775996670126915,
+ "learning_rate": 4.960489019923105e-05,
+ "loss": 0.3642,
+ "num_input_tokens_seen": 4925992,
+ "step": 68
+ },
+ {
+ "epoch": 1.1329179646936656,
+ "grad_norm": 0.07044171541929245,
+ "learning_rate": 4.9593215723738404e-05,
+ "loss": 0.2896,
+ "num_input_tokens_seen": 4998808,
+ "step": 69
+ },
+ {
+ "epoch": 1.1495327102803738,
+ "grad_norm": 0.05971802771091461,
+ "learning_rate": 4.958137268909887e-05,
+ "loss": 0.2578,
+ "num_input_tokens_seen": 5089672,
+ "step": 70
+ },
+ {
+ "epoch": 1.1661474558670821,
+ "grad_norm": 0.07145556062459946,
+ "learning_rate": 4.9569361176483286e-05,
+ "loss": 0.3243,
+ "num_input_tokens_seen": 5166744,
+ "step": 71
+ },
+ {
+ "epoch": 1.1827622014537902,
+ "grad_norm": 0.07455787807703018,
+ "learning_rate": 4.9557181268217227e-05,
+ "loss": 0.3949,
+ "num_input_tokens_seen": 5228264,
+ "step": 72
+ },
+ {
+ "epoch": 1.1993769470404985,
+ "grad_norm": 0.055582575500011444,
+ "learning_rate": 4.9544833047780394e-05,
+ "loss": 0.2877,
+ "num_input_tokens_seen": 5338224,
+ "step": 73
+ },
+ {
+ "epoch": 1.2159916926272065,
+ "grad_norm": 0.07675391435623169,
+ "learning_rate": 4.9532316599806124e-05,
+ "loss": 0.3152,
+ "num_input_tokens_seen": 5399848,
+ "step": 74
+ },
+ {
+ "epoch": 1.2326064382139148,
+ "grad_norm": 0.08048644661903381,
+ "learning_rate": 4.951963201008076e-05,
+ "loss": 0.2976,
+ "num_input_tokens_seen": 5468624,
+ "step": 75
+ },
+ {
+ "epoch": 1.249221183800623,
+ "grad_norm": 0.07579060643911362,
+ "learning_rate": 4.9506779365543046e-05,
+ "loss": 0.2982,
+ "num_input_tokens_seen": 5536776,
+ "step": 76
+ },
+ {
+ "epoch": 1.2658359293873311,
+ "grad_norm": 0.07828006893396378,
+ "learning_rate": 4.949375875428357e-05,
+ "loss": 0.3272,
+ "num_input_tokens_seen": 5609296,
+ "step": 77
+ },
+ {
+ "epoch": 1.2824506749740394,
+ "grad_norm": 0.08079098165035248,
+ "learning_rate": 4.9480570265544144e-05,
+ "loss": 0.2768,
+ "num_input_tokens_seen": 5663824,
+ "step": 78
+ },
+ {
+ "epoch": 1.2990654205607477,
+ "grad_norm": 0.07579358667135239,
+ "learning_rate": 4.94672139897172e-05,
+ "loss": 0.318,
+ "num_input_tokens_seen": 5742032,
+ "step": 79
+ },
+ {
+ "epoch": 1.3156801661474558,
+ "grad_norm": 0.07588379085063934,
+ "learning_rate": 4.9453690018345144e-05,
+ "loss": 0.3007,
+ "num_input_tokens_seen": 5816864,
+ "step": 80
+ },
+ {
+ "epoch": 1.332294911734164,
+ "grad_norm": 0.08709035068750381,
+ "learning_rate": 4.943999844411977e-05,
+ "loss": 0.2797,
+ "num_input_tokens_seen": 5881624,
+ "step": 81
+ },
+ {
+ "epoch": 1.3489096573208723,
+ "grad_norm": 0.05975884944200516,
+ "learning_rate": 4.94261393608816e-05,
+ "loss": 0.2591,
+ "num_input_tokens_seen": 5970272,
+ "step": 82
+ },
+ {
+ "epoch": 1.3655244029075804,
+ "grad_norm": 0.07372818142175674,
+ "learning_rate": 4.941211286361922e-05,
+ "loss": 0.2687,
+ "num_input_tokens_seen": 6058752,
+ "step": 83
+ },
+ {
+ "epoch": 1.3821391484942886,
+ "grad_norm": 0.09071576595306396,
+ "learning_rate": 4.939791904846869e-05,
+ "loss": 0.2979,
+ "num_input_tokens_seen": 6120064,
+ "step": 84
+ },
+ {
+ "epoch": 1.398753894080997,
+ "grad_norm": 0.0849960595369339,
+ "learning_rate": 4.938355801271282e-05,
+ "loss": 0.2927,
+ "num_input_tokens_seen": 6182072,
+ "step": 85
+ },
+ {
+ "epoch": 1.415368639667705,
+ "grad_norm": 0.08258760720491409,
+ "learning_rate": 4.936902985478055e-05,
+ "loss": 0.295,
+ "num_input_tokens_seen": 6269680,
+ "step": 86
+ },
+ {
+ "epoch": 1.4319833852544133,
+ "grad_norm": 0.0851503536105156,
+ "learning_rate": 4.935433467424624e-05,
+ "loss": 0.2925,
+ "num_input_tokens_seen": 6347424,
+ "step": 87
+ },
+ {
+ "epoch": 1.4485981308411215,
+ "grad_norm": 0.08852345496416092,
+ "learning_rate": 4.933947257182901e-05,
+ "loss": 0.3153,
+ "num_input_tokens_seen": 6412584,
+ "step": 88
+ },
+ {
+ "epoch": 1.4652128764278296,
+ "grad_norm": 0.08184897154569626,
+ "learning_rate": 4.932444364939205e-05,
+ "loss": 0.292,
+ "num_input_tokens_seen": 6482728,
+ "step": 89
+ },
+ {
+ "epoch": 1.4818276220145379,
+ "grad_norm": 0.08270515501499176,
+ "learning_rate": 4.9309248009941914e-05,
+ "loss": 0.3472,
+ "num_input_tokens_seen": 6562104,
+ "step": 90
+ },
+ {
+ "epoch": 1.4984423676012462,
+ "grad_norm": 0.07407747954130173,
+ "learning_rate": 4.929388575762782e-05,
+ "loss": 0.2995,
+ "num_input_tokens_seen": 6656552,
+ "step": 91
+ },
+ {
+ "epoch": 1.5150571131879542,
+ "grad_norm": 0.08710360527038574,
+ "learning_rate": 4.9278356997740904e-05,
+ "loss": 0.2549,
+ "num_input_tokens_seen": 6714184,
+ "step": 92
+ },
+ {
+ "epoch": 1.5316718587746625,
+ "grad_norm": 0.0773790255188942,
+ "learning_rate": 4.9262661836713564e-05,
+ "loss": 0.2814,
+ "num_input_tokens_seen": 6793552,
+ "step": 93
+ },
+ {
+ "epoch": 1.5482866043613708,
+ "grad_norm": 0.1002134457230568,
+ "learning_rate": 4.924680038211867e-05,
+ "loss": 0.2876,
+ "num_input_tokens_seen": 6865256,
+ "step": 94
+ },
+ {
+ "epoch": 1.5649013499480788,
+ "grad_norm": 0.09670394659042358,
+ "learning_rate": 4.9230772742668866e-05,
+ "loss": 0.2846,
+ "num_input_tokens_seen": 6931152,
+ "step": 95
+ },
+ {
+ "epoch": 1.5815160955347871,
+ "grad_norm": 0.08910100907087326,
+ "learning_rate": 4.9214579028215776e-05,
+ "loss": 0.2944,
+ "num_input_tokens_seen": 6998408,
+ "step": 96
+ },
+ {
+ "epoch": 1.5981308411214954,
+ "grad_norm": 0.09202459454536438,
+ "learning_rate": 4.919821934974933e-05,
+ "loss": 0.251,
+ "num_input_tokens_seen": 7053008,
+ "step": 97
+ },
+ {
+ "epoch": 1.6147455867082035,
+ "grad_norm": 0.10218881815671921,
+ "learning_rate": 4.918169381939692e-05,
+ "loss": 0.2851,
+ "num_input_tokens_seen": 7106440,
+ "step": 98
+ },
+ {
+ "epoch": 1.6313603322949117,
+ "grad_norm": 0.09290914982557297,
+ "learning_rate": 4.916500255042268e-05,
+ "loss": 0.2959,
+ "num_input_tokens_seen": 7167032,
+ "step": 99
+ },
+ {
+ "epoch": 1.64797507788162,
+ "grad_norm": 0.07791033387184143,
+ "learning_rate": 4.914814565722671e-05,
+ "loss": 0.2481,
+ "num_input_tokens_seen": 7245720,
+ "step": 100
+ },
+ {
+ "epoch": 1.664589823468328,
+ "grad_norm": 0.08885534107685089,
+ "learning_rate": 4.913112325534426e-05,
+ "loss": 0.3168,
+ "num_input_tokens_seen": 7326320,
+ "step": 101
+ },
+ {
+ "epoch": 1.6812045690550363,
+ "grad_norm": 0.08569750934839249,
+ "learning_rate": 4.9113935461444955e-05,
+ "loss": 0.2805,
+ "num_input_tokens_seen": 7442232,
+ "step": 102
+ },
+ {
+ "epoch": 1.6978193146417446,
+ "grad_norm": 0.1112508773803711,
+ "learning_rate": 4.9096582393332025e-05,
+ "loss": 0.2675,
+ "num_input_tokens_seen": 7502496,
+ "step": 103
+ },
+ {
+ "epoch": 1.7144340602284527,
+ "grad_norm": 0.09654372185468674,
+ "learning_rate": 4.907906416994146e-05,
+ "loss": 0.3038,
+ "num_input_tokens_seen": 7566496,
+ "step": 104
+ },
+ {
+ "epoch": 1.731048805815161,
+ "grad_norm": 0.10022995620965958,
+ "learning_rate": 4.906138091134118e-05,
+ "loss": 0.3639,
+ "num_input_tokens_seen": 7629056,
+ "step": 105
+ },
+ {
+ "epoch": 1.7476635514018692,
+ "grad_norm": 0.08336564153432846,
+ "learning_rate": 4.9043532738730284e-05,
+ "loss": 0.2944,
+ "num_input_tokens_seen": 7706096,
+ "step": 106
+ },
+ {
+ "epoch": 1.7642782969885773,
+ "grad_norm": 0.08539658784866333,
+ "learning_rate": 4.9025519774438136e-05,
+ "loss": 0.2392,
+ "num_input_tokens_seen": 7780072,
+ "step": 107
+ },
+ {
+ "epoch": 1.7808930425752856,
+ "grad_norm": 0.09139693528413773,
+ "learning_rate": 4.900734214192358e-05,
+ "loss": 0.2685,
+ "num_input_tokens_seen": 7857712,
+ "step": 108
+ },
+ {
+ "epoch": 1.7975077881619939,
+ "grad_norm": 0.1043916717171669,
+ "learning_rate": 4.898899996577407e-05,
+ "loss": 0.2513,
+ "num_input_tokens_seen": 7916832,
+ "step": 109
+ },
+ {
+ "epoch": 1.814122533748702,
+ "grad_norm": 0.09203662723302841,
+ "learning_rate": 4.8970493371704826e-05,
+ "loss": 0.2974,
+ "num_input_tokens_seen": 7993056,
+ "step": 110
+ },
+ {
+ "epoch": 1.8307372793354102,
+ "grad_norm": 0.09319474548101425,
+ "learning_rate": 4.8951822486557986e-05,
+ "loss": 0.3096,
+ "num_input_tokens_seen": 8090056,
+ "step": 111
+ },
+ {
+ "epoch": 1.8473520249221185,
+ "grad_norm": 0.10193445533514023,
+ "learning_rate": 4.893298743830168e-05,
+ "loss": 0.2633,
+ "num_input_tokens_seen": 8164808,
+ "step": 112
+ },
+ {
+ "epoch": 1.8639667705088265,
+ "grad_norm": 0.11407948285341263,
+ "learning_rate": 4.891398835602925e-05,
+ "loss": 0.2584,
+ "num_input_tokens_seen": 8223568,
+ "step": 113
+ },
+ {
+ "epoch": 1.8805815160955348,
+ "grad_norm": 0.11977085471153259,
+ "learning_rate": 4.8894825369958255e-05,
+ "loss": 0.2619,
+ "num_input_tokens_seen": 8276160,
+ "step": 114
+ },
+ {
+ "epoch": 1.897196261682243,
+ "grad_norm": 0.10925433784723282,
+ "learning_rate": 4.8875498611429674e-05,
+ "loss": 0.2762,
+ "num_input_tokens_seen": 8354904,
+ "step": 115
+ },
+ {
+ "epoch": 1.9138110072689511,
+ "grad_norm": 0.09673939645290375,
+ "learning_rate": 4.8856008212906925e-05,
+ "loss": 0.3152,
+ "num_input_tokens_seen": 8442584,
+ "step": 116
+ },
+ {
+ "epoch": 1.9304257528556594,
+ "grad_norm": 0.10827789455652237,
+ "learning_rate": 4.8836354307975026e-05,
+ "loss": 0.2759,
+ "num_input_tokens_seen": 8506688,
+ "step": 117
+ },
+ {
+ "epoch": 1.9470404984423677,
+ "grad_norm": 0.08390220254659653,
+ "learning_rate": 4.881653703133966e-05,
+ "loss": 0.2192,
+ "num_input_tokens_seen": 8610712,
+ "step": 118
+ },
+ {
+ "epoch": 1.9636552440290758,
+ "grad_norm": 0.09252211451530457,
+ "learning_rate": 4.87965565188262e-05,
+ "loss": 0.2618,
+ "num_input_tokens_seen": 8692624,
+ "step": 119
+ },
+ {
+ "epoch": 1.980269989615784,
+ "grad_norm": 0.1107102632522583,
+ "learning_rate": 4.877641290737884e-05,
+ "loss": 0.2666,
+ "num_input_tokens_seen": 8772208,
+ "step": 120
+ },
+ {
+ "epoch": 1.9968847352024923,
+ "grad_norm": 0.0917077362537384,
+ "learning_rate": 4.8756106335059646e-05,
+ "loss": 0.253,
+ "num_input_tokens_seen": 8854904,
+ "step": 121
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.2606711685657501,
+ "learning_rate": 4.87356369410476e-05,
+ "loss": 0.235,
+ "num_input_tokens_seen": 8872656,
+ "step": 122
+ },
+ {
+ "epoch": 2.016614745586708,
+ "grad_norm": 0.10363993793725967,
+ "learning_rate": 4.8715004865637614e-05,
+ "loss": 0.266,
+ "num_input_tokens_seen": 8946480,
+ "step": 123
+ },
+ {
+ "epoch": 2.0332294911734166,
+ "grad_norm": 0.09997844696044922,
+ "learning_rate": 4.869421025023965e-05,
+ "loss": 0.2696,
+ "num_input_tokens_seen": 9023328,
+ "step": 124
+ },
+ {
+ "epoch": 2.0498442367601246,
+ "grad_norm": 0.13349319994449615,
+ "learning_rate": 4.867325323737765e-05,
+ "loss": 0.2552,
+ "num_input_tokens_seen": 9074320,
+ "step": 125
+ },
+ {
+ "epoch": 2.0664589823468327,
+ "grad_norm": 0.11201464384794235,
+ "learning_rate": 4.8652133970688636e-05,
+ "loss": 0.2486,
+ "num_input_tokens_seen": 9148784,
+ "step": 126
+ },
+ {
+ "epoch": 2.083073727933541,
+ "grad_norm": 0.10193142294883728,
+ "learning_rate": 4.8630852594921706e-05,
+ "loss": 0.2814,
+ "num_input_tokens_seen": 9246624,
+ "step": 127
+ },
+ {
+ "epoch": 2.0996884735202492,
+ "grad_norm": 0.1305130422115326,
+ "learning_rate": 4.860940925593703e-05,
+ "loss": 0.304,
+ "num_input_tokens_seen": 9328176,
+ "step": 128
+ },
+ {
+ "epoch": 2.1163032191069573,
+ "grad_norm": 0.1137692779302597,
+ "learning_rate": 4.8587804100704845e-05,
+ "loss": 0.2427,
+ "num_input_tokens_seen": 9388936,
+ "step": 129
+ },
+ {
+ "epoch": 2.132917964693666,
+ "grad_norm": 0.12126237154006958,
+ "learning_rate": 4.856603727730447e-05,
+ "loss": 0.2485,
+ "num_input_tokens_seen": 9461664,
+ "step": 130
+ },
+ {
+ "epoch": 2.149532710280374,
+ "grad_norm": 0.11567176878452301,
+ "learning_rate": 4.854410893492326e-05,
+ "loss": 0.2628,
+ "num_input_tokens_seen": 9535000,
+ "step": 131
+ },
+ {
+ "epoch": 2.166147455867082,
+ "grad_norm": 0.1399552971124649,
+ "learning_rate": 4.852201922385564e-05,
+ "loss": 0.2518,
+ "num_input_tokens_seen": 9600296,
+ "step": 132
+ },
+ {
+ "epoch": 2.1827622014537904,
+ "grad_norm": 0.13912151753902435,
+ "learning_rate": 4.8499768295502004e-05,
+ "loss": 0.2429,
+ "num_input_tokens_seen": 9686784,
+ "step": 133
+ },
+ {
+ "epoch": 2.1993769470404985,
+ "grad_norm": 0.11130474507808685,
+ "learning_rate": 4.847735630236773e-05,
+ "loss": 0.2775,
+ "num_input_tokens_seen": 9781112,
+ "step": 134
+ },
+ {
+ "epoch": 2.2159916926272065,
+ "grad_norm": 0.12169156968593597,
+ "learning_rate": 4.8454783398062106e-05,
+ "loss": 0.2439,
+ "num_input_tokens_seen": 9849528,
+ "step": 135
+ },
+ {
+ "epoch": 2.232606438213915,
+ "grad_norm": 0.11766713112592697,
+ "learning_rate": 4.843204973729729e-05,
+ "loss": 0.2538,
+ "num_input_tokens_seen": 9931080,
+ "step": 136
+ },
+ {
+ "epoch": 2.249221183800623,
+ "grad_norm": 0.11854218691587448,
+ "learning_rate": 4.840915547588725e-05,
+ "loss": 0.2782,
+ "num_input_tokens_seen": 10011176,
+ "step": 137
+ },
+ {
+ "epoch": 2.265835929387331,
+ "grad_norm": 0.1340581178665161,
+ "learning_rate": 4.838610077074669e-05,
+ "loss": 0.248,
+ "num_input_tokens_seen": 10084128,
+ "step": 138
+ },
+ {
+ "epoch": 2.2824506749740396,
+ "grad_norm": 0.12075436115264893,
+ "learning_rate": 4.836288577988996e-05,
+ "loss": 0.2582,
+ "num_input_tokens_seen": 10155536,
+ "step": 139
+ },
+ {
+ "epoch": 2.2990654205607477,
+ "grad_norm": 0.10599923878908157,
+ "learning_rate": 4.8339510662430046e-05,
+ "loss": 0.2199,
+ "num_input_tokens_seen": 10251160,
+ "step": 140
+ },
+ {
+ "epoch": 2.3156801661474558,
+ "grad_norm": 0.1117846742272377,
+ "learning_rate": 4.8315975578577355e-05,
+ "loss": 0.2324,
+ "num_input_tokens_seen": 10345864,
+ "step": 141
+ },
+ {
+ "epoch": 2.3322949117341643,
+ "grad_norm": 0.13972057402133942,
+ "learning_rate": 4.8292280689638725e-05,
+ "loss": 0.4072,
+ "num_input_tokens_seen": 10417616,
+ "step": 142
+ },
+ {
+ "epoch": 2.3489096573208723,
+ "grad_norm": 0.13837860524654388,
+ "learning_rate": 4.826842615801628e-05,
+ "loss": 0.2607,
+ "num_input_tokens_seen": 10481816,
+ "step": 143
+ },
+ {
+ "epoch": 2.3655244029075804,
+ "grad_norm": 0.14040137827396393,
+ "learning_rate": 4.8244412147206284e-05,
+ "loss": 0.3094,
+ "num_input_tokens_seen": 10562056,
+ "step": 144
+ },
+ {
+ "epoch": 2.382139148494289,
+ "grad_norm": 0.1393299251794815,
+ "learning_rate": 4.822023882179811e-05,
+ "loss": 0.2407,
+ "num_input_tokens_seen": 10612808,
+ "step": 145
+ },
+ {
+ "epoch": 2.398753894080997,
+ "grad_norm": 0.13878698647022247,
+ "learning_rate": 4.8195906347473e-05,
+ "loss": 0.2481,
+ "num_input_tokens_seen": 10682328,
+ "step": 146
+ },
+ {
+ "epoch": 2.415368639667705,
+ "grad_norm": 0.10430227965116501,
+ "learning_rate": 4.817141489100302e-05,
+ "loss": 0.2528,
+ "num_input_tokens_seen": 10771912,
+ "step": 147
+ },
+ {
+ "epoch": 2.431983385254413,
+ "grad_norm": 0.12963703274726868,
+ "learning_rate": 4.814676462024988e-05,
+ "loss": 0.2739,
+ "num_input_tokens_seen": 10842232,
+ "step": 148
+ },
+ {
+ "epoch": 2.4485981308411215,
+ "grad_norm": 0.13274963200092316,
+ "learning_rate": 4.8121955704163745e-05,
+ "loss": 0.2407,
+ "num_input_tokens_seen": 10902264,
+ "step": 149
+ },
+ {
+ "epoch": 2.4652128764278296,
+ "grad_norm": 0.11079717427492142,
+ "learning_rate": 4.8096988312782174e-05,
+ "loss": 0.2142,
+ "num_input_tokens_seen": 10992744,
+ "step": 150
+ },
+ {
+ "epoch": 2.4818276220145377,
+ "grad_norm": 0.08429212868213654,
+ "learning_rate": 4.8071862617228855e-05,
+ "loss": 0.1428,
+ "num_input_tokens_seen": 11090064,
+ "step": 151
+ },
+ {
+ "epoch": 2.498442367601246,
+ "grad_norm": 0.12903761863708496,
+ "learning_rate": 4.8046578789712515e-05,
+ "loss": 0.2268,
+ "num_input_tokens_seen": 11162864,
+ "step": 152
+ },
+ {
+ "epoch": 2.515057113187954,
+ "grad_norm": 0.14638672769069672,
+ "learning_rate": 4.8021137003525664e-05,
+ "loss": 0.2388,
+ "num_input_tokens_seen": 11224368,
+ "step": 153
+ },
+ {
+ "epoch": 2.5316718587746623,
+ "grad_norm": 0.1372838169336319,
+ "learning_rate": 4.7995537433043446e-05,
+ "loss": 0.2588,
+ "num_input_tokens_seen": 11291056,
+ "step": 154
+ },
+ {
+ "epoch": 2.5482866043613708,
+ "grad_norm": 0.15665481984615326,
+ "learning_rate": 4.796978025372246e-05,
+ "loss": 0.2225,
+ "num_input_tokens_seen": 11345464,
+ "step": 155
+ },
+ {
+ "epoch": 2.564901349948079,
+ "grad_norm": 0.13234855234622955,
+ "learning_rate": 4.794386564209953e-05,
+ "loss": 0.275,
+ "num_input_tokens_seen": 11418912,
+ "step": 156
+ },
+ {
+ "epoch": 2.581516095534787,
+ "grad_norm": 0.13585953414440155,
+ "learning_rate": 4.79177937757905e-05,
+ "loss": 0.2407,
+ "num_input_tokens_seen": 11491216,
+ "step": 157
+ },
+ {
+ "epoch": 2.5981308411214954,
+ "grad_norm": 0.1423913538455963,
+ "learning_rate": 4.7891564833489035e-05,
+ "loss": 0.1971,
+ "num_input_tokens_seen": 11558016,
+ "step": 158
+ },
+ {
+ "epoch": 2.6147455867082035,
+ "grad_norm": 0.13013511896133423,
+ "learning_rate": 4.7865178994965344e-05,
+ "loss": 0.2362,
+ "num_input_tokens_seen": 11630432,
+ "step": 159
+ },
+ {
+ "epoch": 2.6313603322949115,
+ "grad_norm": 0.1587141752243042,
+ "learning_rate": 4.783863644106502e-05,
+ "loss": 0.2252,
+ "num_input_tokens_seen": 11684624,
+ "step": 160
+ },
+ {
+ "epoch": 2.64797507788162,
+ "grad_norm": 0.12592960894107819,
+ "learning_rate": 4.781193735370777e-05,
+ "loss": 0.2506,
+ "num_input_tokens_seen": 11770232,
+ "step": 161
+ },
+ {
+ "epoch": 2.664589823468328,
+ "grad_norm": 0.1583249419927597,
+ "learning_rate": 4.7785081915886134e-05,
+ "loss": 0.2352,
+ "num_input_tokens_seen": 11828360,
+ "step": 162
+ },
+ {
+ "epoch": 2.681204569055036,
+ "grad_norm": 0.14881783723831177,
+ "learning_rate": 4.775807031166428e-05,
+ "loss": 0.2308,
+ "num_input_tokens_seen": 11915944,
+ "step": 163
+ },
+ {
+ "epoch": 2.6978193146417446,
+ "grad_norm": 0.1607823222875595,
+ "learning_rate": 4.773090272617672e-05,
+ "loss": 0.2238,
+ "num_input_tokens_seen": 11981792,
+ "step": 164
+ },
+ {
+ "epoch": 2.7144340602284527,
+ "grad_norm": 0.13583113253116608,
+ "learning_rate": 4.7703579345627035e-05,
+ "loss": 0.3196,
+ "num_input_tokens_seen": 12044024,
+ "step": 165
+ },
+ {
+ "epoch": 2.7310488058151607,
+ "grad_norm": 0.19167298078536987,
+ "learning_rate": 4.7676100357286624e-05,
+ "loss": 0.2745,
+ "num_input_tokens_seen": 12093424,
+ "step": 166
+ },
+ {
+ "epoch": 2.7476635514018692,
+ "grad_norm": 0.130703404545784,
+ "learning_rate": 4.76484659494934e-05,
+ "loss": 0.2285,
+ "num_input_tokens_seen": 12167792,
+ "step": 167
+ },
+ {
+ "epoch": 2.7642782969885773,
+ "grad_norm": 0.14331185817718506,
+ "learning_rate": 4.762067631165049e-05,
+ "loss": 0.2506,
+ "num_input_tokens_seen": 12233712,
+ "step": 168
+ },
+ {
+ "epoch": 2.7808930425752854,
+ "grad_norm": 0.12700341641902924,
+ "learning_rate": 4.7592731634224966e-05,
+ "loss": 0.2052,
+ "num_input_tokens_seen": 12310544,
+ "step": 169
+ },
+ {
+ "epoch": 2.797507788161994,
+ "grad_norm": 0.15118420124053955,
+ "learning_rate": 4.756463210874652e-05,
+ "loss": 0.2309,
+ "num_input_tokens_seen": 12400160,
+ "step": 170
+ },
+ {
+ "epoch": 2.814122533748702,
+ "grad_norm": 0.14001020789146423,
+ "learning_rate": 4.753637792780614e-05,
+ "loss": 0.2544,
+ "num_input_tokens_seen": 12480432,
+ "step": 171
+ },
+ {
+ "epoch": 2.83073727933541,
+ "grad_norm": 0.12076311558485031,
+ "learning_rate": 4.7507969285054845e-05,
+ "loss": 0.2434,
+ "num_input_tokens_seen": 12568064,
+ "step": 172
+ },
+ {
+ "epoch": 2.8473520249221185,
+ "grad_norm": 0.16462342441082,
+ "learning_rate": 4.7479406375202264e-05,
+ "loss": 0.2417,
+ "num_input_tokens_seen": 12647400,
+ "step": 173
+ },
+ {
+ "epoch": 2.8639667705088265,
+ "grad_norm": 0.17294971644878387,
+ "learning_rate": 4.745068939401539e-05,
+ "loss": 0.2121,
+ "num_input_tokens_seen": 12698208,
+ "step": 174
+ },
+ {
+ "epoch": 2.8805815160955346,
+ "grad_norm": 0.16743803024291992,
+ "learning_rate": 4.742181853831721e-05,
+ "loss": 0.2238,
+ "num_input_tokens_seen": 12758528,
+ "step": 175
+ },
+ {
+ "epoch": 2.897196261682243,
+ "grad_norm": 0.14583320915699005,
+ "learning_rate": 4.7392794005985326e-05,
+ "loss": 0.2333,
+ "num_input_tokens_seen": 12837264,
+ "step": 176
+ },
+ {
+ "epoch": 2.913811007268951,
+ "grad_norm": 0.1509270817041397,
+ "learning_rate": 4.7363615995950626e-05,
+ "loss": 0.2179,
+ "num_input_tokens_seen": 12902368,
+ "step": 177
+ },
+ {
+ "epoch": 2.930425752855659,
+ "grad_norm": 0.12910738587379456,
+ "learning_rate": 4.733428470819594e-05,
+ "loss": 0.2144,
+ "num_input_tokens_seen": 12974296,
+ "step": 178
+ },
+ {
+ "epoch": 2.9470404984423677,
+ "grad_norm": 0.142000213265419,
+ "learning_rate": 4.730480034375462e-05,
+ "loss": 0.2413,
+ "num_input_tokens_seen": 13057280,
+ "step": 179
+ },
+ {
+ "epoch": 2.9636552440290758,
+ "grad_norm": 0.131468266248703,
+ "learning_rate": 4.72751631047092e-05,
+ "loss": 0.294,
+ "num_input_tokens_seen": 13158232,
+ "step": 180
+ },
+ {
+ "epoch": 2.980269989615784,
+ "grad_norm": 0.1529342085123062,
+ "learning_rate": 4.7245373194189994e-05,
+ "loss": 0.216,
+ "num_input_tokens_seen": 13229840,
+ "step": 181
+ },
+ {
+ "epoch": 2.9968847352024923,
+ "grad_norm": 0.1573815941810608,
+ "learning_rate": 4.7215430816373726e-05,
+ "loss": 0.2384,
+ "num_input_tokens_seen": 13296520,
+ "step": 182
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.2532118558883667,
+ "learning_rate": 4.718533617648209e-05,
+ "loss": 0.1459,
+ "num_input_tokens_seen": 13309672,
+ "step": 183
+ },
+ {
+ "epoch": 3.016614745586708,
+ "grad_norm": 0.16963432729244232,
+ "learning_rate": 4.715508948078037e-05,
+ "loss": 0.1985,
+ "num_input_tokens_seen": 13371544,
+ "step": 184
+ },
+ {
+ "epoch": 3.0332294911734166,
+ "grad_norm": 0.18877384066581726,
+ "learning_rate": 4.712469093657605e-05,
+ "loss": 0.1856,
+ "num_input_tokens_seen": 13432984,
+ "step": 185
+ },
+ {
+ "epoch": 3.0498442367601246,
+ "grad_norm": 0.14922884106636047,
+ "learning_rate": 4.709414075221734e-05,
+ "loss": 0.2385,
+ "num_input_tokens_seen": 13500016,
+ "step": 186
+ },
+ {
+ "epoch": 3.0664589823468327,
+ "grad_norm": 0.2028326541185379,
+ "learning_rate": 4.706343913709178e-05,
+ "loss": 0.2227,
+ "num_input_tokens_seen": 13579672,
+ "step": 187
+ },
+ {
+ "epoch": 3.083073727933541,
+ "grad_norm": 0.19964616000652313,
+ "learning_rate": 4.70325863016248e-05,
+ "loss": 0.2045,
+ "num_input_tokens_seen": 13630704,
+ "step": 188
+ },
+ {
+ "epoch": 3.0996884735202492,
+ "grad_norm": 0.1594657599925995,
+ "learning_rate": 4.7001582457278304e-05,
+ "loss": 0.2648,
+ "num_input_tokens_seen": 13695472,
+ "step": 189
+ },
+ {
+ "epoch": 3.1163032191069573,
+ "grad_norm": 0.16952532529830933,
+ "learning_rate": 4.697042781654913e-05,
+ "loss": 0.22,
+ "num_input_tokens_seen": 13767792,
+ "step": 190
+ },
+ {
+ "epoch": 3.132917964693666,
+ "grad_norm": 0.16775831580162048,
+ "learning_rate": 4.693912259296773e-05,
+ "loss": 0.2667,
+ "num_input_tokens_seen": 13857352,
+ "step": 191
+ },
+ {
+ "epoch": 3.149532710280374,
+ "grad_norm": 0.15529580414295197,
+ "learning_rate": 4.690766700109659e-05,
+ "loss": 0.2154,
+ "num_input_tokens_seen": 13939928,
+ "step": 192
+ },
+ {
+ "epoch": 3.166147455867082,
+ "grad_norm": 0.1619848757982254,
+ "learning_rate": 4.687606125652882e-05,
+ "loss": 0.1963,
+ "num_input_tokens_seen": 14017936,
+ "step": 193
+ },
+ {
+ "epoch": 3.1827622014537904,
+ "grad_norm": 0.18066684901714325,
+ "learning_rate": 4.684430557588664e-05,
+ "loss": 0.1862,
+ "num_input_tokens_seen": 14074176,
+ "step": 194
+ },
+ {
+ "epoch": 3.1993769470404985,
+ "grad_norm": 0.16520777344703674,
+ "learning_rate": 4.681240017681993e-05,
+ "loss": 0.2576,
+ "num_input_tokens_seen": 14167656,
+ "step": 195
+ },
+ {
+ "epoch": 3.2159916926272065,
+ "grad_norm": 0.15385325253009796,
+ "learning_rate": 4.678034527800474e-05,
+ "loss": 0.1813,
+ "num_input_tokens_seen": 14235800,
+ "step": 196
+ },
+ {
+ "epoch": 3.232606438213915,
+ "grad_norm": 0.16897696256637573,
+ "learning_rate": 4.674814109914174e-05,
+ "loss": 0.1741,
+ "num_input_tokens_seen": 14301272,
+ "step": 197
+ },
+ {
+ "epoch": 3.249221183800623,
+ "grad_norm": 0.19556447863578796,
+ "learning_rate": 4.671578786095478e-05,
+ "loss": 0.2186,
+ "num_input_tokens_seen": 14347352,
+ "step": 198
+ },
+ {
+ "epoch": 3.265835929387331,
+ "grad_norm": 0.17333142459392548,
+ "learning_rate": 4.668328578518933e-05,
+ "loss": 0.2892,
+ "num_input_tokens_seen": 14434600,
+ "step": 199
+ },
+ {
+ "epoch": 3.2824506749740396,
+ "grad_norm": 0.20295488834381104,
+ "learning_rate": 4.665063509461097e-05,
+ "loss": 0.2014,
+ "num_input_tokens_seen": 14484104,
+ "step": 200
+ },
+ {
+ "epoch": 3.2990654205607477,
+ "grad_norm": 0.1597638726234436,
+ "learning_rate": 4.661783601300388e-05,
+ "loss": 0.2158,
+ "num_input_tokens_seen": 14567152,
+ "step": 201
+ },
+ {
+ "epoch": 3.3156801661474558,
+ "grad_norm": 0.19849488139152527,
+ "learning_rate": 4.6584888765169296e-05,
+ "loss": 0.2578,
+ "num_input_tokens_seen": 14647040,
+ "step": 202
+ },
+ {
+ "epoch": 3.3322949117341643,
+ "grad_norm": 0.1508200466632843,
+ "learning_rate": 4.6551793576923964e-05,
+ "loss": 0.2213,
+ "num_input_tokens_seen": 14738216,
+ "step": 203
+ },
+ {
+ "epoch": 3.3489096573208723,
+ "grad_norm": 0.1687687337398529,
+ "learning_rate": 4.65185506750986e-05,
+ "loss": 0.1828,
+ "num_input_tokens_seen": 14811216,
+ "step": 204
+ },
+ {
+ "epoch": 3.3655244029075804,
+ "grad_norm": 0.16587376594543457,
+ "learning_rate": 4.648516028753632e-05,
+ "loss": 0.1619,
+ "num_input_tokens_seen": 14885992,
+ "step": 205
+ },
+ {
+ "epoch": 3.382139148494289,
+ "grad_norm": 0.16600169241428375,
+ "learning_rate": 4.645162264309112e-05,
+ "loss": 0.2438,
+ "num_input_tokens_seen": 14961984,
+ "step": 206
+ },
+ {
+ "epoch": 3.398753894080997,
+ "grad_norm": 0.1877703070640564,
+ "learning_rate": 4.6417937971626245e-05,
+ "loss": 0.1771,
+ "num_input_tokens_seen": 15021240,
+ "step": 207
+ },
+ {
+ "epoch": 3.415368639667705,
+ "grad_norm": 0.20105206966400146,
+ "learning_rate": 4.638410650401267e-05,
+ "loss": 0.1742,
+ "num_input_tokens_seen": 15092016,
+ "step": 208
+ },
+ {
+ "epoch": 3.431983385254413,
+ "grad_norm": 0.12934140861034393,
+ "learning_rate": 4.635012847212748e-05,
+ "loss": 0.1725,
+ "num_input_tokens_seen": 15198192,
+ "step": 209
+ },
+ {
+ "epoch": 3.4485981308411215,
+ "grad_norm": 0.18388882279396057,
+ "learning_rate": 4.6316004108852305e-05,
+ "loss": 0.186,
+ "num_input_tokens_seen": 15258432,
+ "step": 210
+ },
+ {
+ "epoch": 3.4652128764278296,
+ "grad_norm": 0.1766858547925949,
+ "learning_rate": 4.628173364807171e-05,
+ "loss": 0.2166,
+ "num_input_tokens_seen": 15329600,
+ "step": 211
+ },
+ {
+ "epoch": 3.4818276220145377,
+ "grad_norm": 0.16214998066425323,
+ "learning_rate": 4.6247317324671605e-05,
+ "loss": 0.2038,
+ "num_input_tokens_seen": 15407920,
+ "step": 212
+ },
+ {
+ "epoch": 3.498442367601246,
+ "grad_norm": 0.16933797299861908,
+ "learning_rate": 4.6212755374537596e-05,
+ "loss": 0.2017,
+ "num_input_tokens_seen": 15479640,
+ "step": 213
+ },
+ {
+ "epoch": 3.515057113187954,
+ "grad_norm": 0.19472749531269073,
+ "learning_rate": 4.617804803455344e-05,
+ "loss": 0.2048,
+ "num_input_tokens_seen": 15561960,
+ "step": 214
+ },
+ {
+ "epoch": 3.5316718587746623,
+ "grad_norm": 0.33335182070732117,
+ "learning_rate": 4.614319554259934e-05,
+ "loss": 0.2358,
+ "num_input_tokens_seen": 15641440,
+ "step": 215
+ },
+ {
+ "epoch": 3.5482866043613708,
+ "grad_norm": 0.19587557017803192,
+ "learning_rate": 4.610819813755038e-05,
+ "loss": 0.2374,
+ "num_input_tokens_seen": 15728872,
+ "step": 216
+ },
+ {
+ "epoch": 3.564901349948079,
+ "grad_norm": 0.19063518941402435,
+ "learning_rate": 4.607305605927487e-05,
+ "loss": 0.1919,
+ "num_input_tokens_seen": 15798112,
+ "step": 217
+ },
+ {
+ "epoch": 3.581516095534787,
+ "grad_norm": 0.19598323106765747,
+ "learning_rate": 4.6037769548632656e-05,
+ "loss": 0.2583,
+ "num_input_tokens_seen": 15865936,
+ "step": 218
+ },
+ {
+ "epoch": 3.5981308411214954,
+ "grad_norm": 0.18066690862178802,
+ "learning_rate": 4.600233884747355e-05,
+ "loss": 0.2337,
+ "num_input_tokens_seen": 15941368,
+ "step": 219
+ },
+ {
+ "epoch": 3.6147455867082035,
+ "grad_norm": 0.16981899738311768,
+ "learning_rate": 4.5966764198635606e-05,
+ "loss": 0.1818,
+ "num_input_tokens_seen": 16028208,
+ "step": 220
+ },
+ {
+ "epoch": 3.6313603322949115,
+ "grad_norm": 0.180410236120224,
+ "learning_rate": 4.5931045845943474e-05,
+ "loss": 0.1646,
+ "num_input_tokens_seen": 16104408,
+ "step": 221
+ },
+ {
+ "epoch": 3.64797507788162,
+ "grad_norm": 0.19180680811405182,
+ "learning_rate": 4.5895184034206765e-05,
+ "loss": 0.3263,
+ "num_input_tokens_seen": 16156800,
+ "step": 222
+ },
+ {
+ "epoch": 3.664589823468328,
+ "grad_norm": 0.16119280457496643,
+ "learning_rate": 4.585917900921829e-05,
+ "loss": 0.2636,
+ "num_input_tokens_seen": 16256712,
+ "step": 223
+ },
+ {
+ "epoch": 3.681204569055036,
+ "grad_norm": 0.18559172749519348,
+ "learning_rate": 4.5823031017752485e-05,
+ "loss": 0.1759,
+ "num_input_tokens_seen": 16330344,
+ "step": 224
+ },
+ {
+ "epoch": 3.6978193146417446,
+ "grad_norm": 0.17767880856990814,
+ "learning_rate": 4.5786740307563636e-05,
+ "loss": 0.196,
+ "num_input_tokens_seen": 16399792,
+ "step": 225
+ },
+ {
+ "epoch": 3.7144340602284527,
+ "grad_norm": 0.17806987464427948,
+ "learning_rate": 4.575030712738419e-05,
+ "loss": 0.186,
+ "num_input_tokens_seen": 16466368,
+ "step": 226
+ },
+ {
+ "epoch": 3.7310488058151607,
+ "grad_norm": 0.1952792853116989,
+ "learning_rate": 4.571373172692309e-05,
+ "loss": 0.1789,
+ "num_input_tokens_seen": 16530976,
+ "step": 227
+ },
+ {
+ "epoch": 3.7476635514018692,
+ "grad_norm": 0.1774374544620514,
+ "learning_rate": 4.567701435686404e-05,
+ "loss": 0.1929,
+ "num_input_tokens_seen": 16600216,
+ "step": 228
+ },
+ {
+ "epoch": 3.7642782969885773,
+ "grad_norm": 0.18798600137233734,
+ "learning_rate": 4.5640155268863796e-05,
+ "loss": 0.2268,
+ "num_input_tokens_seen": 16673192,
+ "step": 229
+ },
+ {
+ "epoch": 3.7808930425752854,
+ "grad_norm": 0.2022520750761032,
+ "learning_rate": 4.5603154715550386e-05,
+ "loss": 0.1716,
+ "num_input_tokens_seen": 16739912,
+ "step": 230
+ },
+ {
+ "epoch": 3.797507788161994,
+ "grad_norm": 0.15170948207378387,
+ "learning_rate": 4.55660129505215e-05,
+ "loss": 0.1844,
+ "num_input_tokens_seen": 16834632,
+ "step": 231
+ },
+ {
+ "epoch": 3.814122533748702,
+ "grad_norm": 0.16655084490776062,
+ "learning_rate": 4.5528730228342605e-05,
+ "loss": 0.1899,
+ "num_input_tokens_seen": 16914728,
+ "step": 232
+ },
+ {
+ "epoch": 3.83073727933541,
+ "grad_norm": 0.19025221467018127,
+ "learning_rate": 4.549130680454532e-05,
+ "loss": 0.2214,
+ "num_input_tokens_seen": 17014304,
+ "step": 233
+ },
+ {
+ "epoch": 3.8473520249221185,
+ "grad_norm": 0.17126557230949402,
+ "learning_rate": 4.545374293562559e-05,
+ "loss": 0.2062,
+ "num_input_tokens_seen": 17106664,
+ "step": 234
+ },
+ {
+ "epoch": 3.8639667705088265,
+ "grad_norm": 0.16162410378456116,
+ "learning_rate": 4.541603887904198e-05,
+ "loss": 0.2016,
+ "num_input_tokens_seen": 17193744,
+ "step": 235
+ },
+ {
+ "epoch": 3.8805815160955346,
+ "grad_norm": 0.2067136913537979,
+ "learning_rate": 4.537819489321386e-05,
+ "loss": 0.1992,
+ "num_input_tokens_seen": 17254656,
+ "step": 236
+ },
+ {
+ "epoch": 3.897196261682243,
+ "grad_norm": 0.200433611869812,
+ "learning_rate": 4.534021123751968e-05,
+ "loss": 0.1961,
+ "num_input_tokens_seen": 17325896,
+ "step": 237
+ },
+ {
+ "epoch": 3.913811007268951,
+ "grad_norm": 0.2062034010887146,
+ "learning_rate": 4.5302088172295156e-05,
+ "loss": 0.2302,
+ "num_input_tokens_seen": 17394424,
+ "step": 238
+ },
+ {
+ "epoch": 3.930425752855659,
+ "grad_norm": 0.1928798407316208,
+ "learning_rate": 4.526382595883152e-05,
+ "loss": 0.1846,
+ "num_input_tokens_seen": 17456352,
+ "step": 239
+ },
+ {
+ "epoch": 3.9470404984423677,
+ "grad_norm": 0.2011859118938446,
+ "learning_rate": 4.522542485937369e-05,
+ "loss": 0.1879,
+ "num_input_tokens_seen": 17519168,
+ "step": 240
+ },
+ {
+ "epoch": 3.9636552440290758,
+ "grad_norm": 0.20441657304763794,
+ "learning_rate": 4.51868851371185e-05,
+ "loss": 0.206,
+ "num_input_tokens_seen": 17585144,
+ "step": 241
+ },
+ {
+ "epoch": 3.980269989615784,
+ "grad_norm": 0.18314018845558167,
+ "learning_rate": 4.5148207056212896e-05,
+ "loss": 0.1676,
+ "num_input_tokens_seen": 17662024,
+ "step": 242
+ },
+ {
+ "epoch": 3.9968847352024923,
+ "grad_norm": 0.21530692279338837,
+ "learning_rate": 4.5109390881752114e-05,
+ "loss": 0.1961,
+ "num_input_tokens_seen": 17724360,
+ "step": 243
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 0.38912200927734375,
+ "learning_rate": 4.5070436879777865e-05,
+ "loss": 0.185,
+ "num_input_tokens_seen": 17746200,
+ "step": 244
+ },
+ {
+ "epoch": 4.0166147455867085,
+ "grad_norm": 0.15166164934635162,
+ "learning_rate": 4.503134531727652e-05,
+ "loss": 0.1674,
+ "num_input_tokens_seen": 17830760,
+ "step": 245
+ },
+ {
+ "epoch": 4.033229491173416,
+ "grad_norm": 0.1999833583831787,
+ "learning_rate": 4.499211646217727e-05,
+ "loss": 0.1739,
+ "num_input_tokens_seen": 17903840,
+ "step": 246
+ },
+ {
+ "epoch": 4.049844236760125,
+ "grad_norm": 0.2024000585079193,
+ "learning_rate": 4.495275058335029e-05,
+ "loss": 0.1753,
+ "num_input_tokens_seen": 17990448,
+ "step": 247
+ },
+ {
+ "epoch": 4.066458982346833,
+ "grad_norm": 0.22637376189231873,
+ "learning_rate": 4.491324795060491e-05,
+ "loss": 0.1896,
+ "num_input_tokens_seen": 18069520,
+ "step": 248
+ },
+ {
+ "epoch": 4.083073727933541,
+ "grad_norm": 0.24361123144626617,
+ "learning_rate": 4.487360883468775e-05,
+ "loss": 0.1688,
+ "num_input_tokens_seen": 18129128,
+ "step": 249
+ },
+ {
+ "epoch": 4.099688473520249,
+ "grad_norm": 0.21949416399002075,
+ "learning_rate": 4.4833833507280884e-05,
+ "loss": 0.1928,
+ "num_input_tokens_seen": 18202472,
+ "step": 250
+ },
+ {
+ "epoch": 4.116303219106958,
+ "grad_norm": 0.22039519250392914,
+ "learning_rate": 4.4793922240999933e-05,
+ "loss": 0.1737,
+ "num_input_tokens_seen": 18267232,
+ "step": 251
+ },
+ {
+ "epoch": 4.132917964693665,
+ "grad_norm": 0.23173294961452484,
+ "learning_rate": 4.4753875309392266e-05,
+ "loss": 0.1883,
+ "num_input_tokens_seen": 18325216,
+ "step": 252
+ },
+ {
+ "epoch": 4.149532710280374,
+ "grad_norm": 0.24100351333618164,
+ "learning_rate": 4.471369298693505e-05,
+ "loss": 0.2042,
+ "num_input_tokens_seen": 18406184,
+ "step": 253
+ },
+ {
+ "epoch": 4.166147455867082,
+ "grad_norm": 0.1888919323682785,
+ "learning_rate": 4.467337554903344e-05,
+ "loss": 0.1656,
+ "num_input_tokens_seen": 18481056,
+ "step": 254
+ },
+ {
+ "epoch": 4.18276220145379,
+ "grad_norm": 0.17849119007587433,
+ "learning_rate": 4.463292327201862e-05,
+ "loss": 0.1454,
+ "num_input_tokens_seen": 18554864,
+ "step": 255
+ },
+ {
+ "epoch": 4.1993769470404985,
+ "grad_norm": 0.24600732326507568,
+ "learning_rate": 4.4592336433146e-05,
+ "loss": 0.2039,
+ "num_input_tokens_seen": 18612120,
+ "step": 256
+ },
+ {
+ "epoch": 4.215991692627207,
+ "grad_norm": 0.23695628345012665,
+ "learning_rate": 4.4551615310593195e-05,
+ "loss": 0.2112,
+ "num_input_tokens_seen": 18710408,
+ "step": 257
+ },
+ {
+ "epoch": 4.232606438213915,
+ "grad_norm": 0.2511826753616333,
+ "learning_rate": 4.451076018345825e-05,
+ "loss": 0.1831,
+ "num_input_tokens_seen": 18769400,
+ "step": 258
+ },
+ {
+ "epoch": 4.249221183800623,
+ "grad_norm": 0.1971820890903473,
+ "learning_rate": 4.4469771331757604e-05,
+ "loss": 0.1722,
+ "num_input_tokens_seen": 18849704,
+ "step": 259
+ },
+ {
+ "epoch": 4.265835929387332,
+ "grad_norm": 0.23203876614570618,
+ "learning_rate": 4.442864903642428e-05,
+ "loss": 0.1981,
+ "num_input_tokens_seen": 18943328,
+ "step": 260
+ },
+ {
+ "epoch": 4.282450674974039,
+ "grad_norm": 0.23434185981750488,
+ "learning_rate": 4.4387393579305865e-05,
+ "loss": 0.2014,
+ "num_input_tokens_seen": 19022536,
+ "step": 261
+ },
+ {
+ "epoch": 4.299065420560748,
+ "grad_norm": 0.2373885214328766,
+ "learning_rate": 4.434600524316266e-05,
+ "loss": 0.167,
+ "num_input_tokens_seen": 19089200,
+ "step": 262
+ },
+ {
+ "epoch": 4.315680166147456,
+ "grad_norm": 0.20130722224712372,
+ "learning_rate": 4.430448431166567e-05,
+ "loss": 0.2747,
+ "num_input_tokens_seen": 19171216,
+ "step": 263
+ },
+ {
+ "epoch": 4.332294911734164,
+ "grad_norm": 0.17947593331336975,
+ "learning_rate": 4.426283106939474e-05,
+ "loss": 0.1508,
+ "num_input_tokens_seen": 19271872,
+ "step": 264
+ },
+ {
+ "epoch": 4.348909657320872,
+ "grad_norm": 0.23321041464805603,
+ "learning_rate": 4.4221045801836494e-05,
+ "loss": 0.2585,
+ "num_input_tokens_seen": 19342984,
+ "step": 265
+ },
+ {
+ "epoch": 4.365524402907581,
+ "grad_norm": 0.26581740379333496,
+ "learning_rate": 4.41791287953825e-05,
+ "loss": 0.1789,
+ "num_input_tokens_seen": 19391640,
+ "step": 266
+ },
+ {
+ "epoch": 4.382139148494288,
+ "grad_norm": 0.20715415477752686,
+ "learning_rate": 4.4137080337327205e-05,
+ "loss": 0.1953,
+ "num_input_tokens_seen": 19463232,
+ "step": 267
+ },
+ {
+ "epoch": 4.398753894080997,
+ "grad_norm": 0.20641866326332092,
+ "learning_rate": 4.4094900715866064e-05,
+ "loss": 0.1752,
+ "num_input_tokens_seen": 19523728,
+ "step": 268
+ },
+ {
+ "epoch": 4.415368639667705,
+ "grad_norm": 0.23343385756015778,
+ "learning_rate": 4.4052590220093446e-05,
+ "loss": 0.1904,
+ "num_input_tokens_seen": 19598960,
+ "step": 269
+ },
+ {
+ "epoch": 4.431983385254413,
+ "grad_norm": 0.20117436349391937,
+ "learning_rate": 4.401014914000078e-05,
+ "loss": 0.1801,
+ "num_input_tokens_seen": 19666136,
+ "step": 270
+ },
+ {
+ "epoch": 4.4485981308411215,
+ "grad_norm": 0.24009813368320465,
+ "learning_rate": 4.3967577766474455e-05,
+ "loss": 0.1798,
+ "num_input_tokens_seen": 19728600,
+ "step": 271
+ },
+ {
+ "epoch": 4.46521287642783,
+ "grad_norm": 0.2242031991481781,
+ "learning_rate": 4.3924876391293915e-05,
+ "loss": 0.2221,
+ "num_input_tokens_seen": 19801032,
+ "step": 272
+ },
+ {
+ "epoch": 4.481827622014538,
+ "grad_norm": 0.22890391945838928,
+ "learning_rate": 4.3882045307129594e-05,
+ "loss": 0.1906,
+ "num_input_tokens_seen": 19885496,
+ "step": 273
+ },
+ {
+ "epoch": 4.498442367601246,
+ "grad_norm": 0.21996937692165375,
+ "learning_rate": 4.383908480754095e-05,
+ "loss": 0.1775,
+ "num_input_tokens_seen": 19952072,
+ "step": 274
+ },
+ {
+ "epoch": 4.515057113187955,
+ "grad_norm": 0.1860388070344925,
+ "learning_rate": 4.379599518697444e-05,
+ "loss": 0.1593,
+ "num_input_tokens_seen": 20026536,
+ "step": 275
+ },
+ {
+ "epoch": 4.531671858774662,
+ "grad_norm": 0.20987707376480103,
+ "learning_rate": 4.375277674076149e-05,
+ "loss": 0.1409,
+ "num_input_tokens_seen": 20079112,
+ "step": 276
+ },
+ {
+ "epoch": 4.548286604361371,
+ "grad_norm": 0.21347324550151825,
+ "learning_rate": 4.3709429765116504e-05,
+ "loss": 0.2701,
+ "num_input_tokens_seen": 20144264,
+ "step": 277
+ },
+ {
+ "epoch": 4.564901349948079,
+ "grad_norm": 0.27563896775245667,
+ "learning_rate": 4.366595455713479e-05,
+ "loss": 0.1856,
+ "num_input_tokens_seen": 20207568,
+ "step": 278
+ },
+ {
+ "epoch": 4.581516095534787,
+ "grad_norm": 0.21850791573524475,
+ "learning_rate": 4.3622351414790554e-05,
+ "loss": 0.2204,
+ "num_input_tokens_seen": 20292376,
+ "step": 279
+ },
+ {
+ "epoch": 4.598130841121495,
+ "grad_norm": 0.19672711193561554,
+ "learning_rate": 4.357862063693486e-05,
+ "loss": 0.1397,
+ "num_input_tokens_seen": 20383048,
+ "step": 280
+ },
+ {
+ "epoch": 4.614745586708204,
+ "grad_norm": 0.23507343232631683,
+ "learning_rate": 4.353476252329356e-05,
+ "loss": 0.1655,
+ "num_input_tokens_seen": 20463376,
+ "step": 281
+ },
+ {
+ "epoch": 4.6313603322949115,
+ "grad_norm": 0.23900464177131653,
+ "learning_rate": 4.349077737446525e-05,
+ "loss": 0.1511,
+ "num_input_tokens_seen": 20537808,
+ "step": 282
+ },
+ {
+ "epoch": 4.64797507788162,
+ "grad_norm": 0.2231033891439438,
+ "learning_rate": 4.344666549191921e-05,
+ "loss": 0.1783,
+ "num_input_tokens_seen": 20605496,
+ "step": 283
+ },
+ {
+ "epoch": 4.6645898234683285,
+ "grad_norm": 0.20772908627986908,
+ "learning_rate": 4.3402427177993366e-05,
+ "loss": 0.2135,
+ "num_input_tokens_seen": 20692096,
+ "step": 284
+ },
+ {
+ "epoch": 4.681204569055036,
+ "grad_norm": 0.24927115440368652,
+ "learning_rate": 4.335806273589214e-05,
+ "loss": 0.1919,
+ "num_input_tokens_seen": 20762800,
+ "step": 285
+ },
+ {
+ "epoch": 4.697819314641745,
+ "grad_norm": 0.2112305462360382,
+ "learning_rate": 4.3313572469684474e-05,
+ "loss": 0.1546,
+ "num_input_tokens_seen": 20831584,
+ "step": 286
+ },
+ {
+ "epoch": 4.714434060228453,
+ "grad_norm": 0.22639551758766174,
+ "learning_rate": 4.326895668430166e-05,
+ "loss": 0.124,
+ "num_input_tokens_seen": 20897320,
+ "step": 287
+ },
+ {
+ "epoch": 4.731048805815161,
+ "grad_norm": 0.2295934110879898,
+ "learning_rate": 4.3224215685535294e-05,
+ "loss": 0.1639,
+ "num_input_tokens_seen": 20966136,
+ "step": 288
+ },
+ {
+ "epoch": 4.747663551401869,
+ "grad_norm": 0.2341577112674713,
+ "learning_rate": 4.317934978003517e-05,
+ "loss": 0.1584,
+ "num_input_tokens_seen": 21034800,
+ "step": 289
+ },
+ {
+ "epoch": 4.764278296988578,
+ "grad_norm": 0.2542404234409332,
+ "learning_rate": 4.313435927530719e-05,
+ "loss": 0.1918,
+ "num_input_tokens_seen": 21098672,
+ "step": 290
+ },
+ {
+ "epoch": 4.780893042575285,
+ "grad_norm": 0.23311223089694977,
+ "learning_rate": 4.3089244479711236e-05,
+ "loss": 0.1597,
+ "num_input_tokens_seen": 21177632,
+ "step": 291
+ },
+ {
+ "epoch": 4.797507788161994,
+ "grad_norm": 0.2642923593521118,
+ "learning_rate": 4.304400570245906e-05,
+ "loss": 0.1847,
+ "num_input_tokens_seen": 21240896,
+ "step": 292
+ },
+ {
+ "epoch": 4.814122533748702,
+ "grad_norm": 0.18841278553009033,
+ "learning_rate": 4.299864325361217e-05,
+ "loss": 0.1472,
+ "num_input_tokens_seen": 21322984,
+ "step": 293
+ },
+ {
+ "epoch": 4.83073727933541,
+ "grad_norm": 0.22440434992313385,
+ "learning_rate": 4.295315744407972e-05,
+ "loss": 0.1607,
+ "num_input_tokens_seen": 21389128,
+ "step": 294
+ },
+ {
+ "epoch": 4.8473520249221185,
+ "grad_norm": 0.22145289182662964,
+ "learning_rate": 4.290754858561637e-05,
+ "loss": 0.1851,
+ "num_input_tokens_seen": 21469912,
+ "step": 295
+ },
+ {
+ "epoch": 4.863966770508826,
+ "grad_norm": 0.22817087173461914,
+ "learning_rate": 4.2861816990820084e-05,
+ "loss": 0.1531,
+ "num_input_tokens_seen": 21540320,
+ "step": 296
+ },
+ {
+ "epoch": 4.880581516095535,
+ "grad_norm": 0.22014038264751434,
+ "learning_rate": 4.281596297313013e-05,
2383
+ "loss": 0.1815,
2384
+ "num_input_tokens_seen": 21626312,
2385
+ "step": 297
2386
+ },
2387
+ {
2388
+ "epoch": 4.897196261682243,
2389
+ "grad_norm": 0.2234148383140564,
2390
+ "learning_rate": 4.2769986846824815e-05,
2391
+ "loss": 0.1667,
2392
+ "num_input_tokens_seen": 21702792,
2393
+ "step": 298
2394
+ },
2395
+ {
2396
+ "epoch": 4.913811007268951,
2397
+ "grad_norm": 0.2851375341415405,
2398
+ "learning_rate": 4.272388892701934e-05,
2399
+ "loss": 0.1805,
2400
+ "num_input_tokens_seen": 21771880,
2401
+ "step": 299
2402
+ },
2403
+ {
2404
+ "epoch": 4.930425752855659,
2405
+ "grad_norm": 0.2221265286207199,
2406
+ "learning_rate": 4.267766952966369e-05,
2407
+ "loss": 0.1653,
2408
+ "num_input_tokens_seen": 21844024,
2409
+ "step": 300
2410
+ },
2411
+ {
2412
+ "epoch": 4.947040498442368,
2413
+ "grad_norm": 0.20688939094543457,
2414
+ "learning_rate": 4.2631328971540444e-05,
2415
+ "loss": 0.1654,
2416
+ "num_input_tokens_seen": 21925632,
2417
+ "step": 301
2418
+ },
2419
+ {
2420
+ "epoch": 4.963655244029075,
2421
+ "grad_norm": 0.2270977795124054,
2422
+ "learning_rate": 4.2584867570262597e-05,
2423
+ "loss": 0.1774,
2424
+ "num_input_tokens_seen": 21981952,
2425
+ "step": 302
2426
+ },
2427
+ {
2428
+ "epoch": 4.980269989615784,
2429
+ "grad_norm": 0.18652501702308655,
2430
+ "learning_rate": 4.25382856442714e-05,
2431
+ "loss": 0.1452,
2432
+ "num_input_tokens_seen": 22070440,
2433
+ "step": 303
2434
+ },
2435
+ {
2436
+ "epoch": 4.996884735202492,
2437
+ "grad_norm": 0.19792407751083374,
2438
+ "learning_rate": 4.249158351283414e-05,
2439
+ "loss": 0.1806,
2440
+ "num_input_tokens_seen": 22170184,
2441
+ "step": 304
2442
+ },
2443
+ {
2444
+ "epoch": 5.0,
2445
+ "grad_norm": 0.5933757424354553,
2446
+ "learning_rate": 4.244476149604201e-05,
2447
+ "loss": 0.2115,
2448
+ "num_input_tokens_seen": 22181856,
2449
+ "step": 305
2450
+ },
2451
+ {
2452
+ "epoch": 5.0166147455867085,
2453
+ "grad_norm": 0.22399979829788208,
2454
+ "learning_rate": 4.2397819914807856e-05,
2455
+ "loss": 0.1614,
2456
+ "num_input_tokens_seen": 22256808,
2457
+ "step": 306
2458
+ },
2459
+ {
2460
+ "epoch": 5.033229491173416,
2461
+ "grad_norm": 0.2595834732055664,
2462
+ "learning_rate": 4.2350759090864046e-05,
2463
+ "loss": 0.1838,
2464
+ "num_input_tokens_seen": 22325224,
2465
+ "step": 307
2466
+ },
2467
+ {
2468
+ "epoch": 5.049844236760125,
2469
+ "grad_norm": 0.188430517911911,
2470
+ "learning_rate": 4.230357934676017e-05,
2471
+ "loss": 0.1421,
2472
+ "num_input_tokens_seen": 22389624,
2473
+ "step": 308
2474
+ },
2475
+ {
2476
+ "epoch": 5.066458982346833,
2477
+ "grad_norm": 0.325431764125824,
2478
+ "learning_rate": 4.225628100586093e-05,
2479
+ "loss": 0.1852,
2480
+ "num_input_tokens_seen": 22463872,
2481
+ "step": 309
2482
+ },
2483
+ {
2484
+ "epoch": 5.083073727933541,
2485
+ "grad_norm": 0.27097174525260925,
2486
+ "learning_rate": 4.220886439234385e-05,
2487
+ "loss": 0.1526,
2488
+ "num_input_tokens_seen": 22515824,
2489
+ "step": 310
2490
+ },
2491
+ {
2492
+ "epoch": 5.099688473520249,
2493
+ "grad_norm": 0.21379193663597107,
2494
+ "learning_rate": 4.2161329831197095e-05,
2495
+ "loss": 0.151,
2496
+ "num_input_tokens_seen": 22602336,
2497
+ "step": 311
2498
+ },
2499
+ {
2500
+ "epoch": 5.116303219106958,
2501
+ "grad_norm": 0.24827998876571655,
2502
+ "learning_rate": 4.211367764821722e-05,
2503
+ "loss": 0.142,
2504
+ "num_input_tokens_seen": 22655176,
2505
+ "step": 312
2506
+ },
2507
+ {
2508
+ "epoch": 5.132917964693665,
2509
+ "grad_norm": 0.26246964931488037,
2510
+ "learning_rate": 4.2065908170006955e-05,
2511
+ "loss": 0.1589,
2512
+ "num_input_tokens_seen": 22728680,
2513
+ "step": 313
2514
+ },
2515
+ {
2516
+ "epoch": 5.149532710280374,
2517
+ "grad_norm": 0.24459198117256165,
2518
+ "learning_rate": 4.201802172397295e-05,
2519
+ "loss": 0.1435,
2520
+ "num_input_tokens_seen": 22806784,
2521
+ "step": 314
2522
+ },
2523
+ {
2524
+ "epoch": 5.166147455867082,
2525
+ "grad_norm": 0.26540517807006836,
2526
+ "learning_rate": 4.197001863832355e-05,
2527
+ "loss": 0.1447,
2528
+ "num_input_tokens_seen": 22880648,
2529
+ "step": 315
2530
+ },
2531
+ {
2532
+ "epoch": 5.18276220145379,
2533
+ "grad_norm": 0.25646644830703735,
2534
+ "learning_rate": 4.192189924206652e-05,
2535
+ "loss": 0.1418,
2536
+ "num_input_tokens_seen": 22953184,
2537
+ "step": 316
2538
+ },
2539
+ {
2540
+ "epoch": 5.1993769470404985,
2541
+ "grad_norm": 0.2358384132385254,
2542
+ "learning_rate": 4.187366386500683e-05,
2543
+ "loss": 0.1845,
2544
+ "num_input_tokens_seen": 23037392,
2545
+ "step": 317
2546
+ },
2547
+ {
2548
+ "epoch": 5.215991692627207,
2549
+ "grad_norm": 0.2270258218050003,
2550
+ "learning_rate": 4.182531283774434e-05,
2551
+ "loss": 0.2668,
2552
+ "num_input_tokens_seen": 23086552,
2553
+ "step": 318
2554
+ },
2555
+ {
2556
+ "epoch": 5.232606438213915,
2557
+ "grad_norm": 0.24396558105945587,
2558
+ "learning_rate": 4.177684649167158e-05,
2559
+ "loss": 0.1567,
2560
+ "num_input_tokens_seen": 23153152,
2561
+ "step": 319
2562
+ },
2563
+ {
2564
+ "epoch": 5.249221183800623,
2565
+ "grad_norm": 0.2542375922203064,
2566
+ "learning_rate": 4.172826515897146e-05,
2567
+ "loss": 0.1617,
2568
+ "num_input_tokens_seen": 23240928,
2569
+ "step": 320
2570
+ },
2571
+ {
2572
+ "epoch": 5.265835929387332,
2573
+ "grad_norm": 0.2268146276473999,
2574
+ "learning_rate": 4.1679569172614996e-05,
2575
+ "loss": 0.1573,
2576
+ "num_input_tokens_seen": 23325912,
2577
+ "step": 321
2578
+ },
2579
+ {
2580
+ "epoch": 5.282450674974039,
2581
+ "grad_norm": 0.26405712962150574,
2582
+ "learning_rate": 4.163075886635902e-05,
2583
+ "loss": 0.1738,
2584
+ "num_input_tokens_seen": 23401952,
2585
+ "step": 322
2586
+ },
2587
+ {
2588
+ "epoch": 5.299065420560748,
2589
+ "grad_norm": 0.2599943280220032,
2590
+ "learning_rate": 4.1581834574743915e-05,
2591
+ "loss": 0.1433,
2592
+ "num_input_tokens_seen": 23463760,
2593
+ "step": 323
2594
+ },
2595
+ {
2596
+ "epoch": 5.315680166147456,
2597
+ "grad_norm": 0.24650180339813232,
2598
+ "learning_rate": 4.1532796633091296e-05,
2599
+ "loss": 0.1511,
2600
+ "num_input_tokens_seen": 23535272,
2601
+ "step": 324
2602
+ },
2603
+ {
2604
+ "epoch": 5.332294911734164,
2605
+ "grad_norm": 0.22445832192897797,
2606
+ "learning_rate": 4.148364537750172e-05,
2607
+ "loss": 0.1296,
2608
+ "num_input_tokens_seen": 23607752,
2609
+ "step": 325
2610
+ },
2611
+ {
2612
+ "epoch": 5.348909657320872,
2613
+ "grad_norm": 0.20715995132923126,
2614
+ "learning_rate": 4.14343811448524e-05,
2615
+ "loss": 0.142,
2616
+ "num_input_tokens_seen": 23674872,
2617
+ "step": 326
2618
+ },
2619
+ {
2620
+ "epoch": 5.365524402907581,
2621
+ "grad_norm": 0.2707969546318054,
2622
+ "learning_rate": 4.138500427279485e-05,
2623
+ "loss": 0.1679,
2624
+ "num_input_tokens_seen": 23736384,
2625
+ "step": 327
2626
+ },
2627
+ {
2628
+ "epoch": 5.382139148494288,
2629
+ "grad_norm": 0.2017841637134552,
2630
+ "learning_rate": 4.133551509975264e-05,
2631
+ "loss": 0.1346,
2632
+ "num_input_tokens_seen": 23835000,
2633
+ "step": 328
2634
+ },
2635
+ {
2636
+ "epoch": 5.398753894080997,
2637
+ "grad_norm": 0.21116195619106293,
2638
+ "learning_rate": 4.128591396491901e-05,
2639
+ "loss": 0.1364,
2640
+ "num_input_tokens_seen": 23912552,
2641
+ "step": 329
2642
+ },
2643
+ {
2644
+ "epoch": 5.415368639667705,
2645
+ "grad_norm": 0.2331131547689438,
2646
+ "learning_rate": 4.123620120825459e-05,
2647
+ "loss": 0.1719,
2648
+ "num_input_tokens_seen": 23987368,
2649
+ "step": 330
2650
+ },
2651
+ {
2652
+ "epoch": 5.431983385254413,
2653
+ "grad_norm": 0.27115845680236816,
2654
+ "learning_rate": 4.118637717048506e-05,
2655
+ "loss": 0.1468,
2656
+ "num_input_tokens_seen": 24050848,
2657
+ "step": 331
2658
+ },
2659
+ {
2660
+ "epoch": 5.4485981308411215,
2661
+ "grad_norm": 0.21654783189296722,
2662
+ "learning_rate": 4.113644219309877e-05,
2663
+ "loss": 0.1418,
2664
+ "num_input_tokens_seen": 24146104,
2665
+ "step": 332
2666
+ },
2667
+ {
2668
+ "epoch": 5.46521287642783,
2669
+ "grad_norm": 0.2783348560333252,
2670
+ "learning_rate": 4.1086396618344476e-05,
2671
+ "loss": 0.1502,
2672
+ "num_input_tokens_seen": 24194184,
2673
+ "step": 333
2674
+ },
2675
+ {
2676
+ "epoch": 5.481827622014538,
2677
+ "grad_norm": 0.23255467414855957,
2678
+ "learning_rate": 4.1036240789228954e-05,
2679
+ "loss": 0.1571,
2680
+ "num_input_tokens_seen": 24275368,
2681
+ "step": 334
2682
+ },
2683
+ {
2684
+ "epoch": 5.498442367601246,
2685
+ "grad_norm": 0.2655453681945801,
2686
+ "learning_rate": 4.098597504951462e-05,
2687
+ "loss": 0.1607,
2688
+ "num_input_tokens_seen": 24329192,
2689
+ "step": 335
2690
+ },
2691
+ {
2692
+ "epoch": 5.515057113187955,
2693
+ "grad_norm": 0.23245719075202942,
2694
+ "learning_rate": 4.093559974371725e-05,
2695
+ "loss": 0.1453,
2696
+ "num_input_tokens_seen": 24426696,
2697
+ "step": 336
2698
+ },
2699
+ {
2700
+ "epoch": 5.531671858774662,
2701
+ "grad_norm": 0.20036327838897705,
2702
+ "learning_rate": 4.088511521710352e-05,
2703
+ "loss": 0.1411,
2704
+ "num_input_tokens_seen": 24514344,
2705
+ "step": 337
2706
+ },
2707
+ {
2708
+ "epoch": 5.548286604361371,
2709
+ "grad_norm": 0.32294195890426636,
2710
+ "learning_rate": 4.083452181568875e-05,
2711
+ "loss": 0.1467,
2712
+ "num_input_tokens_seen": 24584464,
2713
+ "step": 338
2714
+ },
2715
+ {
2716
+ "epoch": 5.564901349948079,
2717
+ "grad_norm": 0.23969624936580658,
2718
+ "learning_rate": 4.0783819886234445e-05,
2719
+ "loss": 0.1304,
2720
+ "num_input_tokens_seen": 24660600,
2721
+ "step": 339
2722
+ },
2723
+ {
2724
+ "epoch": 5.581516095534787,
2725
+ "grad_norm": 0.2539989948272705,
2726
+ "learning_rate": 4.073300977624594e-05,
2727
+ "loss": 0.1374,
2728
+ "num_input_tokens_seen": 24717088,
2729
+ "step": 340
2730
+ },
2731
+ {
2732
+ "epoch": 5.598130841121495,
2733
+ "grad_norm": 0.26608580350875854,
2734
+ "learning_rate": 4.068209183397004e-05,
2735
+ "loss": 0.1519,
2736
+ "num_input_tokens_seen": 24775352,
2737
+ "step": 341
2738
+ },
2739
+ {
2740
+ "epoch": 5.614745586708204,
2741
+ "grad_norm": 0.2161550372838974,
2742
+ "learning_rate": 4.063106640839264e-05,
2743
+ "loss": 0.1409,
2744
+ "num_input_tokens_seen": 24860072,
2745
+ "step": 342
2746
+ },
2747
+ {
2748
+ "epoch": 5.6313603322949115,
2749
+ "grad_norm": 0.22482600808143616,
2750
+ "learning_rate": 4.057993384923626e-05,
2751
+ "loss": 0.1393,
2752
+ "num_input_tokens_seen": 24947856,
2753
+ "step": 343
2754
+ },
2755
+ {
2756
+ "epoch": 5.64797507788162,
2757
+ "grad_norm": 0.2367829829454422,
2758
+ "learning_rate": 4.052869450695776e-05,
2759
+ "loss": 0.1539,
2760
+ "num_input_tokens_seen": 25024992,
2761
+ "step": 344
2762
+ },
2763
+ {
2764
+ "epoch": 5.6645898234683285,
2765
+ "grad_norm": 0.29229775071144104,
2766
+ "learning_rate": 4.047734873274586e-05,
2767
+ "loss": 0.1522,
2768
+ "num_input_tokens_seen": 25092248,
2769
+ "step": 345
2770
+ },
2771
+ {
2772
+ "epoch": 5.681204569055036,
2773
+ "grad_norm": 0.2589828670024872,
2774
+ "learning_rate": 4.042589687851872e-05,
2775
+ "loss": 0.1493,
2776
+ "num_input_tokens_seen": 25170496,
2777
+ "step": 346
2778
+ },
2779
+ {
2780
+ "epoch": 5.697819314641745,
2781
+ "grad_norm": 0.23003339767456055,
2782
+ "learning_rate": 4.037433929692161e-05,
2783
+ "loss": 0.1529,
2784
+ "num_input_tokens_seen": 25268720,
2785
+ "step": 347
2786
+ },
2787
+ {
2788
+ "epoch": 5.714434060228453,
2789
+ "grad_norm": 0.26932114362716675,
2790
+ "learning_rate": 4.0322676341324415e-05,
2791
+ "loss": 0.1499,
2792
+ "num_input_tokens_seen": 25332688,
2793
+ "step": 348
2794
+ },
2795
+ {
2796
+ "epoch": 5.731048805815161,
2797
+ "grad_norm": 0.27059391140937805,
2798
+ "learning_rate": 4.027090836581925e-05,
2799
+ "loss": 0.173,
2800
+ "num_input_tokens_seen": 25413904,
2801
+ "step": 349
2802
+ },
2803
+ {
2804
+ "epoch": 5.747663551401869,
2805
+ "grad_norm": 0.24265804886817932,
2806
+ "learning_rate": 4.021903572521802e-05,
2807
+ "loss": 0.1531,
2808
+ "num_input_tokens_seen": 25503720,
2809
+ "step": 350
2810
+ },
2811
+ {
2812
+ "epoch": 5.764278296988578,
2813
+ "grad_norm": 0.28688696026802063,
2814
+ "learning_rate": 4.0167058775049996e-05,
2815
+ "loss": 0.1615,
2816
+ "num_input_tokens_seen": 25568560,
2817
+ "step": 351
2818
+ },
2819
+ {
2820
+ "epoch": 5.780893042575285,
2821
+ "grad_norm": 0.26201075315475464,
2822
+ "learning_rate": 4.011497787155938e-05,
2823
+ "loss": 0.1452,
2824
+ "num_input_tokens_seen": 25635184,
2825
+ "step": 352
2826
+ },
2827
+ {
2828
+ "epoch": 5.797507788161994,
2829
+ "grad_norm": 0.22841767966747284,
2830
+ "learning_rate": 4.006279337170283e-05,
2831
+ "loss": 0.148,
2832
+ "num_input_tokens_seen": 25719768,
2833
+ "step": 353
2834
+ },
2835
+ {
2836
+ "epoch": 5.814122533748702,
2837
+ "grad_norm": 0.28246188163757324,
2838
+ "learning_rate": 4.0010505633147106e-05,
2839
+ "loss": 0.1446,
2840
+ "num_input_tokens_seen": 25795016,
2841
+ "step": 354
2842
+ },
2843
+ {
2844
+ "epoch": 5.83073727933541,
2845
+ "grad_norm": 0.2533949315547943,
2846
+ "learning_rate": 3.995811501426648e-05,
2847
+ "loss": 0.1297,
2848
+ "num_input_tokens_seen": 25863184,
2849
+ "step": 355
2850
+ },
2851
+ {
2852
+ "epoch": 5.8473520249221185,
2853
+ "grad_norm": 0.25117793679237366,
2854
+ "learning_rate": 3.99056218741404e-05,
2855
+ "loss": 0.1479,
2856
+ "num_input_tokens_seen": 25935752,
2857
+ "step": 356
2858
+ },
2859
+ {
2860
+ "epoch": 5.863966770508826,
2861
+ "grad_norm": 0.2759612202644348,
2862
+ "learning_rate": 3.985302657255097e-05,
2863
+ "loss": 0.1466,
2864
+ "num_input_tokens_seen": 25995760,
2865
+ "step": 357
2866
+ },
2867
+ {
2868
+ "epoch": 5.880581516095535,
2869
+ "grad_norm": 0.24218028783798218,
2870
+ "learning_rate": 3.980032946998049e-05,
2871
+ "loss": 0.1297,
2872
+ "num_input_tokens_seen": 26061240,
2873
+ "step": 358
2874
+ },
2875
+ {
2876
+ "epoch": 5.897196261682243,
2877
+ "grad_norm": 0.2628185749053955,
2878
+ "learning_rate": 3.974753092760901e-05,
2879
+ "loss": 0.1421,
2880
+ "num_input_tokens_seen": 26131024,
2881
+ "step": 359
2882
+ },
2883
+ {
2884
+ "epoch": 5.913811007268951,
2885
+ "grad_norm": 0.18742328882217407,
2886
+ "learning_rate": 3.969463130731183e-05,
2887
+ "loss": 0.1737,
2888
+ "num_input_tokens_seen": 26233672,
2889
+ "step": 360
2890
+ }
2891
+ ],
2892
+ "logging_steps": 1.0,
2893
+ "max_steps": 1200,
2894
+ "num_input_tokens_seen": 26233672,
2895
+ "num_train_epochs": 20,
2896
+ "save_steps": 60,
2897
+ "stateful_callbacks": {
2898
+ "TrainerControl": {
2899
+ "args": {
2900
+ "should_epoch_stop": false,
2901
+ "should_evaluate": false,
2902
+ "should_log": false,
2903
+ "should_save": true,
2904
+ "should_training_stop": false
2905
+ },
2906
+ "attributes": {}
2907
+ }
2908
+ },
2909
+ "total_flos": 2.2131164458254336e+18,
2910
+ "train_batch_size": 1,
2911
+ "trial_name": null,
2912
+ "trial_params": null
2913
+ }
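The block above is the tail of checkpoint-360/trainer_state.json: one log_history entry per optimizer step (logging_steps is 1.0), followed by run-level metadata. The logged learning_rate values appear to follow plain cosine decay with no warmup, lr(s) = 0.5 * lr_max * (1 + cos(pi * s / T)) with lr_max = 5e-5 and T = max_steps = 1200; at step 360 this gives ~3.9695e-05, matching the last entry. Below is a minimal sketch, not part of this commit and assuming only the fields visible above plus the Python standard library, of loading the file and checking that schedule:

import json
import math

# Load a saved HF Trainer state and pull out the per-step log entries.
with open("checkpoint-360/trainer_state.json") as f:
    state = json.load(f)

logs = [e for e in state["log_history"] if "loss" in e]

# Compare the logged rates against cosine decay from lr_max over max_steps.
lr_max, T = 5e-5, state["max_steps"]
for e in logs[-3:]:
    predicted = 0.5 * lr_max * (1 + math.cos(math.pi * e["step"] / T))
    print(e["step"], e["learning_rate"], predicted)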
checkpoint-360/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-420/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2.5-Coder-14B-Instruct",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "down_proj",
+ "v_proj",
+ "q_proj",
+ "o_proj",
+ "k_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
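This adapter_config.json describes a LoRA adapter over Qwen/Qwen2.5-Coder-14B-Instruct: rank r = 16 with lora_alpha = 32 (an effective scaling of alpha/r = 2), dropout 0.1, applied to all attention projections (q/k/v/o_proj) and the MLP (gate/up/down_proj). A minimal loading sketch with PEFT, assuming the adapter weights (adapter_model.safetensors) were uploaded alongside this config; this is illustrative usage, not code from the repo:

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the base model named in base_model_name_or_path above.
base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-Coder-14B-Instruct",
    torch_dtype="auto",
    device_map="auto",
)

# Attach the adapter; PeftModel reads adapter_config.json from this directory.
model = PeftModel.from_pretrained(base, "checkpoint-420")

# Optionally fold the rank-16 update into the base weights for inference.
model = model.merge_and_unload()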
checkpoint-420/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-420/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-420/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+ "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+ "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
+ },
+ "additional_special_tokens": ["<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>", "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>", "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 17500,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
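Together with special_tokens_map.json above, this tokenizer_config.json defines the standard Qwen2 ChatML setup: <|im_start|>/<|im_end|> message delimiters, <|im_end|> as EOS, <|endoftext|> as padding, right-side padding, and model_max_length capped at 17500 tokens for this fine-tune. A minimal usage sketch, assuming the checkpoint directory from this commit (illustrative only, not code from the repo):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-420")
messages = [
    {"role": "user", "content": "Write a function that reverses a string."},
]
# Renders the ChatML prompt defined by chat_template above and appends
# '<|im_start|>assistant\n' so the model continues as the assistant.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)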
checkpoint-420/trainer_state.json ADDED
@@ -0,0 +1,3393 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 6.897196261682243,
+ "eval_steps": 500,
+ "global_step": 420,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {"epoch": 0.016614745586708203, "grad_norm": 0.050998032093048096, "learning_rate": 4.999991432639962e-05, "loss": 0.5487, "num_input_tokens_seen": 70408, "step": 1},
+ {"epoch": 0.033229491173416406, "grad_norm": 0.049370743334293365, "learning_rate": 4.999965730618567e-05, "loss": 0.4981, "num_input_tokens_seen": 139640, "step": 2},
+ {"epoch": 0.04984423676012461, "grad_norm": 0.05077400803565979, "learning_rate": 4.9999228941119745e-05, "loss": 0.5505, "num_input_tokens_seen": 223656, "step": 3},
+ {"epoch": 0.06645898234683281, "grad_norm": 0.04397282376885414, "learning_rate": 4.999862923413781e-05, "loss": 0.504, "num_input_tokens_seen": 300688, "step": 4},
+ {"epoch": 0.08307372793354102, "grad_norm": 0.05225864797830582, "learning_rate": 4.999785818935018e-05, "loss": 0.4925, "num_input_tokens_seen": 366368, "step": 5},
+ {"epoch": 0.09968847352024922, "grad_norm": 0.049482282251119614, "learning_rate": 4.999691581204152e-05, "loss": 0.4771, "num_input_tokens_seen": 445808, "step": 6},
+ {"epoch": 0.11630321910695743, "grad_norm": 0.05594080314040184, "learning_rate": 4.9995802108670775e-05, "loss": 0.4986, "num_input_tokens_seen": 522800, "step": 7},
+ {"epoch": 0.13291796469366562, "grad_norm": 0.051852282136678696, "learning_rate": 4.999451708687114e-05, "loss": 0.5171, "num_input_tokens_seen": 599608, "step": 8},
+ {"epoch": 0.14953271028037382, "grad_norm": 0.045517683029174805, "learning_rate": 4.9993060755450015e-05, "loss": 0.5669, "num_input_tokens_seen": 681424, "step": 9},
+ {"epoch": 0.16614745586708204, "grad_norm": 0.044325754046440125, "learning_rate": 4.999143312438893e-05, "loss": 0.4218, "num_input_tokens_seen": 756744, "step": 10},
+ {"epoch": 0.18276220145379024, "grad_norm": 0.04328459873795509, "learning_rate": 4.998963420484349e-05, "loss": 0.434, "num_input_tokens_seen": 842576, "step": 11},
+ {"epoch": 0.19937694704049844, "grad_norm": 0.04725787043571472, "learning_rate": 4.998766400914329e-05, "loss": 0.4287, "num_input_tokens_seen": 917232, "step": 12},
+ {"epoch": 0.21599169262720663, "grad_norm": 0.03806879371404648, "learning_rate": 4.9985522550791825e-05, "loss": 0.3454, "num_input_tokens_seen": 1006800, "step": 13},
+ {"epoch": 0.23260643821391486, "grad_norm": 0.05201176926493645, "learning_rate": 4.998320984446641e-05, "loss": 0.436, "num_input_tokens_seen": 1085824, "step": 14},
+ {"epoch": 0.24922118380062305, "grad_norm": 0.047955628484487534, "learning_rate": 4.9980725906018074e-05, "loss": 0.4625, "num_input_tokens_seen": 1164160, "step": 15},
+ {"epoch": 0.26583592938733125, "grad_norm": 0.05529098957777023, "learning_rate": 4.997807075247146e-05, "loss": 0.5035, "num_input_tokens_seen": 1242264, "step": 16},
+ {"epoch": 0.2824506749740395, "grad_norm": 0.04751162976026535, "learning_rate": 4.997524440202469e-05, "loss": 0.4354, "num_input_tokens_seen": 1325904, "step": 17},
+ {"epoch": 0.29906542056074764, "grad_norm": 0.06726882606744766, "learning_rate": 4.9972246874049254e-05, "loss": 0.5439, "num_input_tokens_seen": 1385632, "step": 18},
+ {"epoch": 0.31568016614745587, "grad_norm": 0.05245920270681381, "learning_rate": 4.996907818908987e-05, "loss": 0.3727, "num_input_tokens_seen": 1470632, "step": 19},
+ {"epoch": 0.3322949117341641, "grad_norm": 0.05745376646518707, "learning_rate": 4.996573836886435e-05, "loss": 0.4894, "num_input_tokens_seen": 1547536, "step": 20},
+ {"epoch": 0.34890965732087226, "grad_norm": 0.056607529520988464, "learning_rate": 4.9962227436263453e-05, "loss": 0.3846, "num_input_tokens_seen": 1615528, "step": 21},
+ {"epoch": 0.3655244029075805, "grad_norm": 0.06150667741894722, "learning_rate": 4.995854541535071e-05, "loss": 0.4362, "num_input_tokens_seen": 1694352, "step": 22},
+ {"epoch": 0.3821391484942887, "grad_norm": 0.056484442204236984, "learning_rate": 4.9954692331362294e-05, "loss": 0.4438, "num_input_tokens_seen": 1753776, "step": 23},
+ {"epoch": 0.3987538940809969, "grad_norm": 0.0704159140586853, "learning_rate": 4.995066821070679e-05, "loss": 0.4496, "num_input_tokens_seen": 1809048, "step": 24},
+ {"epoch": 0.4153686396677051, "grad_norm": 0.06202029809355736, "learning_rate": 4.994647308096509e-05, "loss": 0.5096, "num_input_tokens_seen": 1884264, "step": 25},
+ {"epoch": 0.43198338525441327, "grad_norm": 0.04237145930528641, "learning_rate": 4.994210697089014e-05, "loss": 0.3722, "num_input_tokens_seen": 1981704, "step": 26},
+ {"epoch": 0.4485981308411215, "grad_norm": 0.06920398026704788, "learning_rate": 4.9937569910406756e-05, "loss": 0.4103, "num_input_tokens_seen": 2044144, "step": 27},
+ {"epoch": 0.4652128764278297, "grad_norm": 0.062432270497083664, "learning_rate": 4.9932861930611454e-05, "loss": 0.357, "num_input_tokens_seen": 2107584, "step": 28},
+ {"epoch": 0.4818276220145379, "grad_norm": 0.06791180372238159, "learning_rate": 4.9927983063772196e-05, "loss": 0.3889, "num_input_tokens_seen": 2169248, "step": 29},
+ {"epoch": 0.4984423676012461, "grad_norm": 0.07219590991735458, "learning_rate": 4.99229333433282e-05, "loss": 0.3543, "num_input_tokens_seen": 2230344, "step": 30},
+ {"epoch": 0.5150571131879543, "grad_norm": 0.0647474005818367, "learning_rate": 4.9917712803889674e-05, "loss": 0.3453, "num_input_tokens_seen": 2302368, "step": 31},
+ {"epoch": 0.5316718587746625, "grad_norm": 0.07434642314910889, "learning_rate": 4.991232148123761e-05, "loss": 0.435, "num_input_tokens_seen": 2369984, "step": 32},
+ {"epoch": 0.5482866043613707, "grad_norm": 0.05302443355321884, "learning_rate": 4.990675941232353e-05, "loss": 0.3981, "num_input_tokens_seen": 2453032, "step": 33},
+ {"epoch": 0.564901349948079, "grad_norm": 0.053745292127132416, "learning_rate": 4.990102663526924e-05, "loss": 0.3755, "num_input_tokens_seen": 2527464, "step": 34},
+ {"epoch": 0.5815160955347871, "grad_norm": 0.06717613339424133, "learning_rate": 4.989512318936655e-05, "loss": 0.3699, "num_input_tokens_seen": 2597032, "step": 35},
+ {"epoch": 0.5981308411214953, "grad_norm": 0.071847103536129, "learning_rate": 4.9889049115077005e-05, "loss": 0.3705, "num_input_tokens_seen": 2671704, "step": 36},
+ {"epoch": 0.6147455867082036, "grad_norm": 0.0460306741297245, "learning_rate": 4.988280445403164e-05, "loss": 0.3797, "num_input_tokens_seen": 2767640, "step": 37},
+ {"epoch": 0.6313603322949117, "grad_norm": 0.053273387253284454, "learning_rate": 4.987638924903067e-05, "loss": 0.3799, "num_input_tokens_seen": 2843720, "step": 38},
+ {"epoch": 0.6479750778816199, "grad_norm": 0.05600422993302345, "learning_rate": 4.9869803544043166e-05, "loss": 0.2866, "num_input_tokens_seen": 2921472, "step": 39},
+ {"epoch": 0.6645898234683282, "grad_norm": 0.06414052098989487, "learning_rate": 4.9863047384206835e-05, "loss": 0.4115, "num_input_tokens_seen": 2998400, "step": 40},
+ {"epoch": 0.6812045690550363, "grad_norm": 0.09214208275079727, "learning_rate": 4.985612081582764e-05, "loss": 0.3804, "num_input_tokens_seen": 3059648, "step": 41},
+ {"epoch": 0.6978193146417445, "grad_norm": 0.0555964931845665, "learning_rate": 4.98490238863795e-05, "loss": 0.3121, "num_input_tokens_seen": 3140184, "step": 42},
+ {"epoch": 0.7144340602284528, "grad_norm": 0.06256969273090363, "learning_rate": 4.984175664450397e-05, "loss": 0.3271, "num_input_tokens_seen": 3207184, "step": 43},
+ {"epoch": 0.731048805815161, "grad_norm": 0.0543232187628746, "learning_rate": 4.983431914000991e-05, "loss": 0.364, "num_input_tokens_seen": 3292344, "step": 44},
+ {"epoch": 0.7476635514018691, "grad_norm": 0.06077824532985687, "learning_rate": 4.982671142387316e-05, "loss": 0.3894, "num_input_tokens_seen": 3365384, "step": 45},
+ {"epoch": 0.7642782969885774, "grad_norm": 0.06091070920228958, "learning_rate": 4.981893354823614e-05, "loss": 0.3354, "num_input_tokens_seen": 3440720, "step": 46},
+ {"epoch": 0.7808930425752856, "grad_norm": 0.054153311997652054, "learning_rate": 4.9810985566407544e-05, "loss": 0.3058, "num_input_tokens_seen": 3533576, "step": 47},
+ {"epoch": 0.7975077881619937, "grad_norm": 0.06662417948246002, "learning_rate": 4.980286753286195e-05, "loss": 0.4658, "num_input_tokens_seen": 3599744, "step": 48},
+ {"epoch": 0.814122533748702, "grad_norm": 0.05790851265192032, "learning_rate": 4.979457950323945e-05, "loss": 0.3647, "num_input_tokens_seen": 3689520, "step": 49},
+ {"epoch": 0.8307372793354102, "grad_norm": 0.10742159187793732, "learning_rate": 4.9786121534345265e-05, "loss": 0.343, "num_input_tokens_seen": 3751808, "step": 50},
+ {"epoch": 0.8473520249221184, "grad_norm": 0.05565556138753891, "learning_rate": 4.9777493684149375e-05, "loss": 0.3317, "num_input_tokens_seen": 3839096, "step": 51},
+ {"epoch": 0.8639667705088265, "grad_norm": 0.05752381682395935, "learning_rate": 4.976869601178609e-05, "loss": 0.38, "num_input_tokens_seen": 3919824, "step": 52},
+ {"epoch": 0.8805815160955348, "grad_norm": 0.06406434625387192, "learning_rate": 4.975972857755369e-05, "loss": 0.2676, "num_input_tokens_seen": 3989312, "step": 53},
+ {"epoch": 0.897196261682243, "grad_norm": 0.0653691440820694, "learning_rate": 4.975059144291394e-05, "loss": 0.3516, "num_input_tokens_seen": 4060528, "step": 54},
+ {"epoch": 0.9138110072689511, "grad_norm": 0.06272953748703003, "learning_rate": 4.974128467049176e-05, "loss": 0.3004, "num_input_tokens_seen": 4129368, "step": 55},
+ {"epoch": 0.9304257528556594, "grad_norm": 0.08054930716753006, "learning_rate": 4.9731808324074717e-05, "loss": 0.3009, "num_input_tokens_seen": 4175208, "step": 56},
+ {"epoch": 0.9470404984423676, "grad_norm": 0.07523038238286972, "learning_rate": 4.972216246861262e-05, "loss": 0.2814, "num_input_tokens_seen": 4218096, "step": 57},
+ {"epoch": 0.9636552440290758, "grad_norm": 0.07347433269023895, "learning_rate": 4.971234717021709e-05, "loss": 0.3321, "num_input_tokens_seen": 4275968, "step": 58},
+ {"epoch": 0.980269989615784, "grad_norm": 0.05830248445272446, "learning_rate": 4.9702362496161085e-05, "loss": 0.2881, "num_input_tokens_seen": 4346616, "step": 59},
+ {"epoch": 0.9968847352024922, "grad_norm": 0.061629410833120346, "learning_rate": 4.9692208514878444e-05, "loss": 0.2993, "num_input_tokens_seen": 4425064, "step": 60},
+ {"epoch": 1.0, "grad_norm": 0.13380740582942963, "learning_rate": 4.968188529596342e-05, "loss": 0.2511, "num_input_tokens_seen": 4435328, "step": 61},
+ {"epoch": 1.0166147455867083, "grad_norm": 0.0726238414645195, "learning_rate": 4.9671392910170185e-05, "loss": 0.3127, "num_input_tokens_seen": 4500104, "step": 62},
+ {"epoch": 1.0332294911734163, "grad_norm": 0.05980083718895912, "learning_rate": 4.966073142941239e-05, "loss": 0.3601, "num_input_tokens_seen": 4581976, "step": 63},
+ {"epoch": 1.0498442367601246, "grad_norm": 0.06445376574993134, "learning_rate": 4.964990092676263e-05, "loss": 0.3049, "num_input_tokens_seen": 4652160, "step": 64},
+ {"epoch": 1.066458982346833, "grad_norm": 0.07824505120515823, "learning_rate": 4.9638901476451946e-05, "loss": 0.3099, "num_input_tokens_seen": 4709368, "step": 65},
+ {"epoch": 1.083073727933541, "grad_norm": 0.058268457651138306, "learning_rate": 4.962773315386935e-05, "loss": 0.3273, "num_input_tokens_seen": 4798256, "step": 66},
+ {"epoch": 1.0996884735202492, "grad_norm": 0.07069691270589828, "learning_rate": 4.961639603556127e-05, "loss": 0.282, "num_input_tokens_seen": 4859200, "step": 67},
+ {"epoch": 1.1163032191069575, "grad_norm": 0.0775996670126915, "learning_rate": 4.960489019923105e-05, "loss": 0.3642, "num_input_tokens_seen": 4925992, "step": 68},
+ {"epoch": 1.1329179646936656, "grad_norm": 0.07044171541929245, "learning_rate": 4.9593215723738404e-05, "loss": 0.2896, "num_input_tokens_seen": 4998808, "step": 69},
+ {"epoch": 1.1495327102803738, "grad_norm": 0.05971802771091461, "learning_rate": 4.958137268909887e-05, "loss": 0.2578, "num_input_tokens_seen": 5089672, "step": 70},
+ {"epoch": 1.1661474558670821, "grad_norm": 0.07145556062459946, "learning_rate": 4.9569361176483286e-05, "loss": 0.3243, "num_input_tokens_seen": 5166744, "step": 71},
+ {"epoch": 1.1827622014537902, "grad_norm": 0.07455787807703018, "learning_rate": 4.9557181268217227e-05, "loss": 0.3949, "num_input_tokens_seen": 5228264, "step": 72},
+ {"epoch": 1.1993769470404985, "grad_norm": 0.055582575500011444, "learning_rate": 4.9544833047780394e-05, "loss": 0.2877, "num_input_tokens_seen": 5338224, "step": 73},
+ {"epoch": 1.2159916926272065, "grad_norm": 0.07675391435623169, "learning_rate": 4.9532316599806124e-05, "loss": 0.3152, "num_input_tokens_seen": 5399848, "step": 74},
+ {"epoch": 1.2326064382139148, "grad_norm": 0.08048644661903381, "learning_rate": 4.951963201008076e-05, "loss": 0.2976, "num_input_tokens_seen": 5468624, "step": 75},
+ {"epoch": 1.249221183800623, "grad_norm": 0.07579060643911362, "learning_rate": 4.9506779365543046e-05, "loss": 0.2982, "num_input_tokens_seen": 5536776, "step": 76},
+ {"epoch": 1.2658359293873311, "grad_norm": 0.07828006893396378, "learning_rate": 4.949375875428357e-05, "loss": 0.3272, "num_input_tokens_seen": 5609296, "step": 77},
+ {"epoch": 1.2824506749740394, "grad_norm": 0.08079098165035248, "learning_rate": 4.9480570265544144e-05, "loss": 0.2768, "num_input_tokens_seen": 5663824, "step": 78},
+ {"epoch": 1.2990654205607477, "grad_norm": 0.07579358667135239, "learning_rate": 4.94672139897172e-05, "loss": 0.318, "num_input_tokens_seen": 5742032, "step": 79},
+ {"epoch": 1.3156801661474558, "grad_norm": 0.07588379085063934, "learning_rate": 4.9453690018345144e-05, "loss": 0.3007, "num_input_tokens_seen": 5816864, "step": 80},
+ {"epoch": 1.332294911734164, "grad_norm": 0.08709035068750381, "learning_rate": 4.943999844411977e-05, "loss": 0.2797, "num_input_tokens_seen": 5881624, "step": 81},
+ {"epoch": 1.3489096573208723, "grad_norm": 0.05975884944200516, "learning_rate": 4.94261393608816e-05, "loss": 0.2591, "num_input_tokens_seen": 5970272, "step": 82},
+ {"epoch": 1.3655244029075804, "grad_norm": 0.07372818142175674, "learning_rate": 4.941211286361922e-05, "loss": 0.2687, "num_input_tokens_seen": 6058752, "step": 83},
+ {"epoch": 1.3821391484942886, "grad_norm": 0.09071576595306396, "learning_rate": 4.939791904846869e-05, "loss": 0.2979, "num_input_tokens_seen": 6120064, "step": 84},
+ {"epoch": 1.398753894080997, "grad_norm": 0.0849960595369339, "learning_rate": 4.938355801271282e-05, "loss": 0.2927, "num_input_tokens_seen": 6182072, "step": 85},
+ {"epoch": 1.415368639667705, "grad_norm": 0.08258760720491409, "learning_rate": 4.936902985478055e-05, "loss": 0.295, "num_input_tokens_seen": 6269680, "step": 86},
+ {"epoch": 1.4319833852544133, "grad_norm": 0.0851503536105156, "learning_rate": 4.935433467424624e-05, "loss": 0.2925, "num_input_tokens_seen": 6347424, "step": 87},
+ {"epoch": 1.4485981308411215, "grad_norm": 0.08852345496416092, "learning_rate": 4.933947257182901e-05, "loss": 0.3153, "num_input_tokens_seen": 6412584, "step": 88},
+ {"epoch": 1.4652128764278296, "grad_norm": 0.08184897154569626, "learning_rate": 4.932444364939205e-05, "loss": 0.292, "num_input_tokens_seen": 6482728, "step": 89},
+ {"epoch": 1.4818276220145379, "grad_norm": 0.08270515501499176, "learning_rate": 4.9309248009941914e-05, "loss": 0.3472, "num_input_tokens_seen": 6562104, "step": 90},
+ {"epoch": 1.4984423676012462, "grad_norm": 0.07407747954130173, "learning_rate": 4.929388575762782e-05, "loss": 0.2995, "num_input_tokens_seen": 6656552, "step": 91},
+ {"epoch": 1.5150571131879542, "grad_norm": 0.08710360527038574, "learning_rate": 4.9278356997740904e-05, "loss": 0.2549, "num_input_tokens_seen": 6714184, "step": 92},
+ {"epoch": 1.5316718587746625, "grad_norm": 0.0773790255188942, "learning_rate": 4.9262661836713564e-05, "loss": 0.2814, "num_input_tokens_seen": 6793552, "step": 93},
+ {"epoch": 1.5482866043613708, "grad_norm": 0.1002134457230568, "learning_rate": 4.924680038211867e-05, "loss": 0.2876, "num_input_tokens_seen": 6865256, "step": 94},
+ {"epoch": 1.5649013499480788, "grad_norm": 0.09670394659042358, "learning_rate": 4.9230772742668866e-05, "loss": 0.2846, "num_input_tokens_seen": 6931152, "step": 95},
+ {"epoch": 1.5815160955347871, "grad_norm": 0.08910100907087326, "learning_rate": 4.9214579028215776e-05, "loss": 0.2944, "num_input_tokens_seen": 6998408, "step": 96},
+ {"epoch": 1.5981308411214954, "grad_norm": 0.09202459454536438, "learning_rate": 4.919821934974933e-05, "loss": 0.251, "num_input_tokens_seen": 7053008, "step": 97},
+ {"epoch": 1.6147455867082035, "grad_norm": 0.10218881815671921, "learning_rate": 4.918169381939692e-05, "loss": 0.2851, "num_input_tokens_seen": 7106440, "step": 98},
+ {"epoch": 1.6313603322949117, "grad_norm": 0.09290914982557297, "learning_rate": 4.916500255042268e-05, "loss": 0.2959, "num_input_tokens_seen": 7167032, "step": 99},
+ {"epoch": 1.64797507788162, "grad_norm": 0.07791033387184143, "learning_rate": 4.914814565722671e-05, "loss": 0.2481, "num_input_tokens_seen": 7245720, "step": 100},
+ {"epoch": 1.664589823468328, "grad_norm": 0.08885534107685089, "learning_rate": 4.913112325534426e-05, "loss": 0.3168, "num_input_tokens_seen": 7326320, "step": 101},
+ {"epoch": 1.6812045690550363, "grad_norm": 0.08569750934839249, "learning_rate": 4.9113935461444955e-05, "loss": 0.2805, "num_input_tokens_seen": 7442232, "step": 102},
+ {"epoch": 1.6978193146417446, "grad_norm": 0.1112508773803711, "learning_rate": 4.9096582393332025e-05, "loss": 0.2675, "num_input_tokens_seen": 7502496, "step": 103},
+ {"epoch": 1.7144340602284527, "grad_norm": 0.09654372185468674, "learning_rate": 4.907906416994146e-05, "loss": 0.3038, "num_input_tokens_seen": 7566496, "step": 104},
+ {"epoch": 1.731048805815161, "grad_norm": 0.10022995620965958, "learning_rate": 4.906138091134118e-05, "loss": 0.3639, "num_input_tokens_seen": 7629056, "step": 105},
+ {"epoch": 1.7476635514018692, "grad_norm": 0.08336564153432846, "learning_rate": 4.9043532738730284e-05, "loss": 0.2944, "num_input_tokens_seen": 7706096, "step": 106},
+ {"epoch": 1.7642782969885773, "grad_norm": 0.08539658784866333, "learning_rate": 4.9025519774438136e-05, "loss": 0.2392, "num_input_tokens_seen": 7780072, "step": 107},
+ {"epoch": 1.7808930425752856, "grad_norm": 0.09139693528413773, "learning_rate": 4.900734214192358e-05, "loss": 0.2685, "num_input_tokens_seen": 7857712, "step": 108},
+ {"epoch": 1.7975077881619939, "grad_norm": 0.1043916717171669, "learning_rate": 4.898899996577407e-05, "loss": 0.2513, "num_input_tokens_seen": 7916832, "step": 109},
+ {"epoch": 1.814122533748702, "grad_norm": 0.09203662723302841, "learning_rate": 4.8970493371704826e-05, "loss": 0.2974, "num_input_tokens_seen": 7993056, "step": 110},
+ {"epoch": 1.8307372793354102, "grad_norm": 0.09319474548101425, "learning_rate": 4.8951822486557986e-05, "loss": 0.3096, "num_input_tokens_seen": 8090056, "step": 111},
+ {"epoch": 1.8473520249221185, "grad_norm": 0.10193445533514023, "learning_rate": 4.893298743830168e-05, "loss": 0.2633, "num_input_tokens_seen": 8164808, "step": 112},
+ {"epoch": 1.8639667705088265, "grad_norm": 0.11407948285341263, "learning_rate": 4.891398835602925e-05, "loss": 0.2584, "num_input_tokens_seen": 8223568, "step": 113},
+ {"epoch": 1.8805815160955348, "grad_norm": 0.11977085471153259, "learning_rate": 4.8894825369958255e-05, "loss": 0.2619, "num_input_tokens_seen": 8276160, "step": 114},
+ {"epoch": 1.897196261682243, "grad_norm": 0.10925433784723282, "learning_rate": 4.8875498611429674e-05, "loss": 0.2762, "num_input_tokens_seen": 8354904, "step": 115},
+ {"epoch": 1.9138110072689511, "grad_norm": 0.09673939645290375, "learning_rate": 4.8856008212906925e-05, "loss": 0.3152, "num_input_tokens_seen": 8442584, "step": 116},
+ {"epoch": 1.9304257528556594, "grad_norm": 0.10827789455652237, "learning_rate": 4.8836354307975026e-05, "loss": 0.2759, "num_input_tokens_seen": 8506688, "step": 117},
947
+ {
948
+ "epoch": 1.9470404984423677,
949
+ "grad_norm": 0.08390220254659653,
950
+ "learning_rate": 4.881653703133966e-05,
951
+ "loss": 0.2192,
952
+ "num_input_tokens_seen": 8610712,
953
+ "step": 118
954
+ },
955
+ {
956
+ "epoch": 1.9636552440290758,
957
+ "grad_norm": 0.09252211451530457,
958
+ "learning_rate": 4.87965565188262e-05,
959
+ "loss": 0.2618,
960
+ "num_input_tokens_seen": 8692624,
961
+ "step": 119
962
+ },
963
+ {
964
+ "epoch": 1.980269989615784,
965
+ "grad_norm": 0.1107102632522583,
966
+ "learning_rate": 4.877641290737884e-05,
967
+ "loss": 0.2666,
968
+ "num_input_tokens_seen": 8772208,
969
+ "step": 120
970
+ },
971
+ {
972
+ "epoch": 1.9968847352024923,
973
+ "grad_norm": 0.0917077362537384,
974
+ "learning_rate": 4.8756106335059646e-05,
975
+ "loss": 0.253,
976
+ "num_input_tokens_seen": 8854904,
977
+ "step": 121
978
+ },
979
+ {
980
+ "epoch": 2.0,
981
+ "grad_norm": 0.2606711685657501,
982
+ "learning_rate": 4.87356369410476e-05,
983
+ "loss": 0.235,
984
+ "num_input_tokens_seen": 8872656,
985
+ "step": 122
986
+ },
987
+ {
988
+ "epoch": 2.016614745586708,
989
+ "grad_norm": 0.10363993793725967,
990
+ "learning_rate": 4.8715004865637614e-05,
991
+ "loss": 0.266,
992
+ "num_input_tokens_seen": 8946480,
993
+ "step": 123
994
+ },
995
+ {
996
+ "epoch": 2.0332294911734166,
997
+ "grad_norm": 0.09997844696044922,
998
+ "learning_rate": 4.869421025023965e-05,
999
+ "loss": 0.2696,
1000
+ "num_input_tokens_seen": 9023328,
1001
+ "step": 124
1002
+ },
1003
+ {
1004
+ "epoch": 2.0498442367601246,
1005
+ "grad_norm": 0.13349319994449615,
1006
+ "learning_rate": 4.867325323737765e-05,
1007
+ "loss": 0.2552,
1008
+ "num_input_tokens_seen": 9074320,
1009
+ "step": 125
1010
+ },
1011
+ {
1012
+ "epoch": 2.0664589823468327,
1013
+ "grad_norm": 0.11201464384794235,
1014
+ "learning_rate": 4.8652133970688636e-05,
1015
+ "loss": 0.2486,
1016
+ "num_input_tokens_seen": 9148784,
1017
+ "step": 126
1018
+ },
1019
+ {
1020
+ "epoch": 2.083073727933541,
1021
+ "grad_norm": 0.10193142294883728,
1022
+ "learning_rate": 4.8630852594921706e-05,
1023
+ "loss": 0.2814,
1024
+ "num_input_tokens_seen": 9246624,
1025
+ "step": 127
1026
+ },
1027
+ {
1028
+ "epoch": 2.0996884735202492,
1029
+ "grad_norm": 0.1305130422115326,
1030
+ "learning_rate": 4.860940925593703e-05,
1031
+ "loss": 0.304,
1032
+ "num_input_tokens_seen": 9328176,
1033
+ "step": 128
1034
+ },
1035
+ {
1036
+ "epoch": 2.1163032191069573,
1037
+ "grad_norm": 0.1137692779302597,
1038
+ "learning_rate": 4.8587804100704845e-05,
1039
+ "loss": 0.2427,
1040
+ "num_input_tokens_seen": 9388936,
1041
+ "step": 129
1042
+ },
1043
+ {
1044
+ "epoch": 2.132917964693666,
1045
+ "grad_norm": 0.12126237154006958,
1046
+ "learning_rate": 4.856603727730447e-05,
1047
+ "loss": 0.2485,
1048
+ "num_input_tokens_seen": 9461664,
1049
+ "step": 130
1050
+ },
1051
+ {
1052
+ "epoch": 2.149532710280374,
1053
+ "grad_norm": 0.11567176878452301,
1054
+ "learning_rate": 4.854410893492326e-05,
1055
+ "loss": 0.2628,
1056
+ "num_input_tokens_seen": 9535000,
1057
+ "step": 131
1058
+ },
1059
+ {
1060
+ "epoch": 2.166147455867082,
1061
+ "grad_norm": 0.1399552971124649,
1062
+ "learning_rate": 4.852201922385564e-05,
1063
+ "loss": 0.2518,
1064
+ "num_input_tokens_seen": 9600296,
1065
+ "step": 132
1066
+ },
1067
+ {
1068
+ "epoch": 2.1827622014537904,
1069
+ "grad_norm": 0.13912151753902435,
1070
+ "learning_rate": 4.8499768295502004e-05,
1071
+ "loss": 0.2429,
1072
+ "num_input_tokens_seen": 9686784,
1073
+ "step": 133
1074
+ },
1075
+ {
1076
+ "epoch": 2.1993769470404985,
1077
+ "grad_norm": 0.11130474507808685,
1078
+ "learning_rate": 4.847735630236773e-05,
1079
+ "loss": 0.2775,
1080
+ "num_input_tokens_seen": 9781112,
1081
+ "step": 134
1082
+ },
1083
+ {
1084
+ "epoch": 2.2159916926272065,
1085
+ "grad_norm": 0.12169156968593597,
1086
+ "learning_rate": 4.8454783398062106e-05,
1087
+ "loss": 0.2439,
1088
+ "num_input_tokens_seen": 9849528,
1089
+ "step": 135
1090
+ },
1091
+ {
1092
+ "epoch": 2.232606438213915,
1093
+ "grad_norm": 0.11766713112592697,
1094
+ "learning_rate": 4.843204973729729e-05,
1095
+ "loss": 0.2538,
1096
+ "num_input_tokens_seen": 9931080,
1097
+ "step": 136
1098
+ },
1099
+ {
1100
+ "epoch": 2.249221183800623,
1101
+ "grad_norm": 0.11854218691587448,
1102
+ "learning_rate": 4.840915547588725e-05,
1103
+ "loss": 0.2782,
1104
+ "num_input_tokens_seen": 10011176,
1105
+ "step": 137
1106
+ },
1107
+ {
1108
+ "epoch": 2.265835929387331,
1109
+ "grad_norm": 0.1340581178665161,
1110
+ "learning_rate": 4.838610077074669e-05,
1111
+ "loss": 0.248,
1112
+ "num_input_tokens_seen": 10084128,
1113
+ "step": 138
1114
+ },
1115
+ {
1116
+ "epoch": 2.2824506749740396,
1117
+ "grad_norm": 0.12075436115264893,
1118
+ "learning_rate": 4.836288577988996e-05,
1119
+ "loss": 0.2582,
1120
+ "num_input_tokens_seen": 10155536,
1121
+ "step": 139
1122
+ },
1123
+ {
1124
+ "epoch": 2.2990654205607477,
1125
+ "grad_norm": 0.10599923878908157,
1126
+ "learning_rate": 4.8339510662430046e-05,
1127
+ "loss": 0.2199,
1128
+ "num_input_tokens_seen": 10251160,
1129
+ "step": 140
1130
+ },
1131
+ {
1132
+ "epoch": 2.3156801661474558,
1133
+ "grad_norm": 0.1117846742272377,
1134
+ "learning_rate": 4.8315975578577355e-05,
1135
+ "loss": 0.2324,
1136
+ "num_input_tokens_seen": 10345864,
1137
+ "step": 141
1138
+ },
1139
+ {
1140
+ "epoch": 2.3322949117341643,
1141
+ "grad_norm": 0.13972057402133942,
1142
+ "learning_rate": 4.8292280689638725e-05,
1143
+ "loss": 0.4072,
1144
+ "num_input_tokens_seen": 10417616,
1145
+ "step": 142
1146
+ },
1147
+ {
1148
+ "epoch": 2.3489096573208723,
1149
+ "grad_norm": 0.13837860524654388,
1150
+ "learning_rate": 4.826842615801628e-05,
1151
+ "loss": 0.2607,
1152
+ "num_input_tokens_seen": 10481816,
1153
+ "step": 143
1154
+ },
1155
+ {
1156
+ "epoch": 2.3655244029075804,
1157
+ "grad_norm": 0.14040137827396393,
1158
+ "learning_rate": 4.8244412147206284e-05,
1159
+ "loss": 0.3094,
1160
+ "num_input_tokens_seen": 10562056,
1161
+ "step": 144
1162
+ },
1163
+ {
1164
+ "epoch": 2.382139148494289,
1165
+ "grad_norm": 0.1393299251794815,
1166
+ "learning_rate": 4.822023882179811e-05,
1167
+ "loss": 0.2407,
1168
+ "num_input_tokens_seen": 10612808,
1169
+ "step": 145
1170
+ },
1171
+ {
1172
+ "epoch": 2.398753894080997,
1173
+ "grad_norm": 0.13878698647022247,
1174
+ "learning_rate": 4.8195906347473e-05,
1175
+ "loss": 0.2481,
1176
+ "num_input_tokens_seen": 10682328,
1177
+ "step": 146
1178
+ },
1179
+ {
1180
+ "epoch": 2.415368639667705,
1181
+ "grad_norm": 0.10430227965116501,
1182
+ "learning_rate": 4.817141489100302e-05,
1183
+ "loss": 0.2528,
1184
+ "num_input_tokens_seen": 10771912,
1185
+ "step": 147
1186
+ },
1187
+ {
1188
+ "epoch": 2.431983385254413,
1189
+ "grad_norm": 0.12963703274726868,
1190
+ "learning_rate": 4.814676462024988e-05,
1191
+ "loss": 0.2739,
1192
+ "num_input_tokens_seen": 10842232,
1193
+ "step": 148
1194
+ },
1195
+ {
1196
+ "epoch": 2.4485981308411215,
1197
+ "grad_norm": 0.13274963200092316,
1198
+ "learning_rate": 4.8121955704163745e-05,
1199
+ "loss": 0.2407,
1200
+ "num_input_tokens_seen": 10902264,
1201
+ "step": 149
1202
+ },
1203
+ {
1204
+ "epoch": 2.4652128764278296,
1205
+ "grad_norm": 0.11079717427492142,
1206
+ "learning_rate": 4.8096988312782174e-05,
1207
+ "loss": 0.2142,
1208
+ "num_input_tokens_seen": 10992744,
1209
+ "step": 150
1210
+ },
1211
+ {
1212
+ "epoch": 2.4818276220145377,
1213
+ "grad_norm": 0.08429212868213654,
1214
+ "learning_rate": 4.8071862617228855e-05,
1215
+ "loss": 0.1428,
1216
+ "num_input_tokens_seen": 11090064,
1217
+ "step": 151
1218
+ },
1219
+ {
1220
+ "epoch": 2.498442367601246,
1221
+ "grad_norm": 0.12903761863708496,
1222
+ "learning_rate": 4.8046578789712515e-05,
1223
+ "loss": 0.2268,
1224
+ "num_input_tokens_seen": 11162864,
1225
+ "step": 152
1226
+ },
1227
+ {
1228
+ "epoch": 2.515057113187954,
1229
+ "grad_norm": 0.14638672769069672,
1230
+ "learning_rate": 4.8021137003525664e-05,
1231
+ "loss": 0.2388,
1232
+ "num_input_tokens_seen": 11224368,
1233
+ "step": 153
1234
+ },
1235
+ {
1236
+ "epoch": 2.5316718587746623,
1237
+ "grad_norm": 0.1372838169336319,
1238
+ "learning_rate": 4.7995537433043446e-05,
1239
+ "loss": 0.2588,
1240
+ "num_input_tokens_seen": 11291056,
1241
+ "step": 154
1242
+ },
1243
+ {
1244
+ "epoch": 2.5482866043613708,
1245
+ "grad_norm": 0.15665481984615326,
1246
+ "learning_rate": 4.796978025372246e-05,
1247
+ "loss": 0.2225,
1248
+ "num_input_tokens_seen": 11345464,
1249
+ "step": 155
1250
+ },
1251
+ {
1252
+ "epoch": 2.564901349948079,
1253
+ "grad_norm": 0.13234855234622955,
1254
+ "learning_rate": 4.794386564209953e-05,
1255
+ "loss": 0.275,
1256
+ "num_input_tokens_seen": 11418912,
1257
+ "step": 156
1258
+ },
1259
+ {
1260
+ "epoch": 2.581516095534787,
1261
+ "grad_norm": 0.13585953414440155,
1262
+ "learning_rate": 4.79177937757905e-05,
1263
+ "loss": 0.2407,
1264
+ "num_input_tokens_seen": 11491216,
1265
+ "step": 157
1266
+ },
1267
+ {
1268
+ "epoch": 2.5981308411214954,
1269
+ "grad_norm": 0.1423913538455963,
1270
+ "learning_rate": 4.7891564833489035e-05,
1271
+ "loss": 0.1971,
1272
+ "num_input_tokens_seen": 11558016,
1273
+ "step": 158
1274
+ },
1275
+ {
1276
+ "epoch": 2.6147455867082035,
1277
+ "grad_norm": 0.13013511896133423,
1278
+ "learning_rate": 4.7865178994965344e-05,
1279
+ "loss": 0.2362,
1280
+ "num_input_tokens_seen": 11630432,
1281
+ "step": 159
1282
+ },
1283
+ {
1284
+ "epoch": 2.6313603322949115,
1285
+ "grad_norm": 0.1587141752243042,
1286
+ "learning_rate": 4.783863644106502e-05,
1287
+ "loss": 0.2252,
1288
+ "num_input_tokens_seen": 11684624,
1289
+ "step": 160
1290
+ },
1291
+ {
1292
+ "epoch": 2.64797507788162,
1293
+ "grad_norm": 0.12592960894107819,
1294
+ "learning_rate": 4.781193735370777e-05,
1295
+ "loss": 0.2506,
1296
+ "num_input_tokens_seen": 11770232,
1297
+ "step": 161
1298
+ },
1299
+ {
1300
+ "epoch": 2.664589823468328,
1301
+ "grad_norm": 0.1583249419927597,
1302
+ "learning_rate": 4.7785081915886134e-05,
1303
+ "loss": 0.2352,
1304
+ "num_input_tokens_seen": 11828360,
1305
+ "step": 162
1306
+ },
1307
+ {
1308
+ "epoch": 2.681204569055036,
1309
+ "grad_norm": 0.14881783723831177,
1310
+ "learning_rate": 4.775807031166428e-05,
1311
+ "loss": 0.2308,
1312
+ "num_input_tokens_seen": 11915944,
1313
+ "step": 163
1314
+ },
1315
+ {
1316
+ "epoch": 2.6978193146417446,
1317
+ "grad_norm": 0.1607823222875595,
1318
+ "learning_rate": 4.773090272617672e-05,
1319
+ "loss": 0.2238,
1320
+ "num_input_tokens_seen": 11981792,
1321
+ "step": 164
1322
+ },
1323
+ {
1324
+ "epoch": 2.7144340602284527,
1325
+ "grad_norm": 0.13583113253116608,
1326
+ "learning_rate": 4.7703579345627035e-05,
1327
+ "loss": 0.3196,
1328
+ "num_input_tokens_seen": 12044024,
1329
+ "step": 165
1330
+ },
1331
+ {
1332
+ "epoch": 2.7310488058151607,
1333
+ "grad_norm": 0.19167298078536987,
1334
+ "learning_rate": 4.7676100357286624e-05,
1335
+ "loss": 0.2745,
1336
+ "num_input_tokens_seen": 12093424,
1337
+ "step": 166
1338
+ },
1339
+ {
1340
+ "epoch": 2.7476635514018692,
1341
+ "grad_norm": 0.130703404545784,
1342
+ "learning_rate": 4.76484659494934e-05,
1343
+ "loss": 0.2285,
1344
+ "num_input_tokens_seen": 12167792,
1345
+ "step": 167
1346
+ },
1347
+ {
1348
+ "epoch": 2.7642782969885773,
1349
+ "grad_norm": 0.14331185817718506,
1350
+ "learning_rate": 4.762067631165049e-05,
1351
+ "loss": 0.2506,
1352
+ "num_input_tokens_seen": 12233712,
1353
+ "step": 168
1354
+ },
1355
+ {
1356
+ "epoch": 2.7808930425752854,
1357
+ "grad_norm": 0.12700341641902924,
1358
+ "learning_rate": 4.7592731634224966e-05,
1359
+ "loss": 0.2052,
1360
+ "num_input_tokens_seen": 12310544,
1361
+ "step": 169
1362
+ },
1363
+ {
1364
+ "epoch": 2.797507788161994,
1365
+ "grad_norm": 0.15118420124053955,
1366
+ "learning_rate": 4.756463210874652e-05,
1367
+ "loss": 0.2309,
1368
+ "num_input_tokens_seen": 12400160,
1369
+ "step": 170
1370
+ },
1371
+ {
1372
+ "epoch": 2.814122533748702,
1373
+ "grad_norm": 0.14001020789146423,
1374
+ "learning_rate": 4.753637792780614e-05,
1375
+ "loss": 0.2544,
1376
+ "num_input_tokens_seen": 12480432,
1377
+ "step": 171
1378
+ },
1379
+ {
1380
+ "epoch": 2.83073727933541,
1381
+ "grad_norm": 0.12076311558485031,
1382
+ "learning_rate": 4.7507969285054845e-05,
1383
+ "loss": 0.2434,
1384
+ "num_input_tokens_seen": 12568064,
1385
+ "step": 172
1386
+ },
1387
+ {
1388
+ "epoch": 2.8473520249221185,
1389
+ "grad_norm": 0.16462342441082,
1390
+ "learning_rate": 4.7479406375202264e-05,
1391
+ "loss": 0.2417,
1392
+ "num_input_tokens_seen": 12647400,
1393
+ "step": 173
1394
+ },
1395
+ {
1396
+ "epoch": 2.8639667705088265,
1397
+ "grad_norm": 0.17294971644878387,
1398
+ "learning_rate": 4.745068939401539e-05,
1399
+ "loss": 0.2121,
1400
+ "num_input_tokens_seen": 12698208,
1401
+ "step": 174
1402
+ },
1403
+ {
1404
+ "epoch": 2.8805815160955346,
1405
+ "grad_norm": 0.16743803024291992,
1406
+ "learning_rate": 4.742181853831721e-05,
1407
+ "loss": 0.2238,
1408
+ "num_input_tokens_seen": 12758528,
1409
+ "step": 175
1410
+ },
1411
+ {
1412
+ "epoch": 2.897196261682243,
1413
+ "grad_norm": 0.14583320915699005,
1414
+ "learning_rate": 4.7392794005985326e-05,
1415
+ "loss": 0.2333,
1416
+ "num_input_tokens_seen": 12837264,
1417
+ "step": 176
1418
+ },
1419
+ {
1420
+ "epoch": 2.913811007268951,
1421
+ "grad_norm": 0.1509270817041397,
1422
+ "learning_rate": 4.7363615995950626e-05,
1423
+ "loss": 0.2179,
1424
+ "num_input_tokens_seen": 12902368,
1425
+ "step": 177
1426
+ },
1427
+ {
1428
+ "epoch": 2.930425752855659,
1429
+ "grad_norm": 0.12910738587379456,
1430
+ "learning_rate": 4.733428470819594e-05,
1431
+ "loss": 0.2144,
1432
+ "num_input_tokens_seen": 12974296,
1433
+ "step": 178
1434
+ },
1435
+ {
1436
+ "epoch": 2.9470404984423677,
1437
+ "grad_norm": 0.142000213265419,
1438
+ "learning_rate": 4.730480034375462e-05,
1439
+ "loss": 0.2413,
1440
+ "num_input_tokens_seen": 13057280,
1441
+ "step": 179
1442
+ },
1443
+ {
1444
+ "epoch": 2.9636552440290758,
1445
+ "grad_norm": 0.131468266248703,
1446
+ "learning_rate": 4.72751631047092e-05,
1447
+ "loss": 0.294,
1448
+ "num_input_tokens_seen": 13158232,
1449
+ "step": 180
1450
+ },
1451
+ {
1452
+ "epoch": 2.980269989615784,
1453
+ "grad_norm": 0.1529342085123062,
1454
+ "learning_rate": 4.7245373194189994e-05,
1455
+ "loss": 0.216,
1456
+ "num_input_tokens_seen": 13229840,
1457
+ "step": 181
1458
+ },
1459
+ {
1460
+ "epoch": 2.9968847352024923,
1461
+ "grad_norm": 0.1573815941810608,
1462
+ "learning_rate": 4.7215430816373726e-05,
1463
+ "loss": 0.2384,
1464
+ "num_input_tokens_seen": 13296520,
1465
+ "step": 182
1466
+ },
1467
+ {
1468
+ "epoch": 3.0,
1469
+ "grad_norm": 0.2532118558883667,
1470
+ "learning_rate": 4.718533617648209e-05,
1471
+ "loss": 0.1459,
1472
+ "num_input_tokens_seen": 13309672,
1473
+ "step": 183
1474
+ },
1475
+ {
1476
+ "epoch": 3.016614745586708,
1477
+ "grad_norm": 0.16963432729244232,
1478
+ "learning_rate": 4.715508948078037e-05,
1479
+ "loss": 0.1985,
1480
+ "num_input_tokens_seen": 13371544,
1481
+ "step": 184
1482
+ },
1483
+ {
1484
+ "epoch": 3.0332294911734166,
1485
+ "grad_norm": 0.18877384066581726,
1486
+ "learning_rate": 4.712469093657605e-05,
1487
+ "loss": 0.1856,
1488
+ "num_input_tokens_seen": 13432984,
1489
+ "step": 185
1490
+ },
1491
+ {
1492
+ "epoch": 3.0498442367601246,
1493
+ "grad_norm": 0.14922884106636047,
1494
+ "learning_rate": 4.709414075221734e-05,
1495
+ "loss": 0.2385,
1496
+ "num_input_tokens_seen": 13500016,
1497
+ "step": 186
1498
+ },
1499
+ {
1500
+ "epoch": 3.0664589823468327,
1501
+ "grad_norm": 0.2028326541185379,
1502
+ "learning_rate": 4.706343913709178e-05,
1503
+ "loss": 0.2227,
1504
+ "num_input_tokens_seen": 13579672,
1505
+ "step": 187
1506
+ },
1507
+ {
1508
+ "epoch": 3.083073727933541,
1509
+ "grad_norm": 0.19964616000652313,
1510
+ "learning_rate": 4.70325863016248e-05,
1511
+ "loss": 0.2045,
1512
+ "num_input_tokens_seen": 13630704,
1513
+ "step": 188
1514
+ },
1515
+ {
1516
+ "epoch": 3.0996884735202492,
1517
+ "grad_norm": 0.1594657599925995,
1518
+ "learning_rate": 4.7001582457278304e-05,
1519
+ "loss": 0.2648,
1520
+ "num_input_tokens_seen": 13695472,
1521
+ "step": 189
1522
+ },
1523
+ {
1524
+ "epoch": 3.1163032191069573,
1525
+ "grad_norm": 0.16952532529830933,
1526
+ "learning_rate": 4.697042781654913e-05,
1527
+ "loss": 0.22,
1528
+ "num_input_tokens_seen": 13767792,
1529
+ "step": 190
1530
+ },
1531
+ {
1532
+ "epoch": 3.132917964693666,
1533
+ "grad_norm": 0.16775831580162048,
1534
+ "learning_rate": 4.693912259296773e-05,
1535
+ "loss": 0.2667,
1536
+ "num_input_tokens_seen": 13857352,
1537
+ "step": 191
1538
+ },
1539
+ {
1540
+ "epoch": 3.149532710280374,
1541
+ "grad_norm": 0.15529580414295197,
1542
+ "learning_rate": 4.690766700109659e-05,
1543
+ "loss": 0.2154,
1544
+ "num_input_tokens_seen": 13939928,
1545
+ "step": 192
1546
+ },
1547
+ {
1548
+ "epoch": 3.166147455867082,
1549
+ "grad_norm": 0.1619848757982254,
1550
+ "learning_rate": 4.687606125652882e-05,
1551
+ "loss": 0.1963,
1552
+ "num_input_tokens_seen": 14017936,
1553
+ "step": 193
1554
+ },
1555
+ {
1556
+ "epoch": 3.1827622014537904,
1557
+ "grad_norm": 0.18066684901714325,
1558
+ "learning_rate": 4.684430557588664e-05,
1559
+ "loss": 0.1862,
1560
+ "num_input_tokens_seen": 14074176,
1561
+ "step": 194
1562
+ },
1563
+ {
1564
+ "epoch": 3.1993769470404985,
1565
+ "grad_norm": 0.16520777344703674,
1566
+ "learning_rate": 4.681240017681993e-05,
1567
+ "loss": 0.2576,
1568
+ "num_input_tokens_seen": 14167656,
1569
+ "step": 195
1570
+ },
1571
+ {
1572
+ "epoch": 3.2159916926272065,
1573
+ "grad_norm": 0.15385325253009796,
1574
+ "learning_rate": 4.678034527800474e-05,
1575
+ "loss": 0.1813,
1576
+ "num_input_tokens_seen": 14235800,
1577
+ "step": 196
1578
+ },
1579
+ {
1580
+ "epoch": 3.232606438213915,
1581
+ "grad_norm": 0.16897696256637573,
1582
+ "learning_rate": 4.674814109914174e-05,
1583
+ "loss": 0.1741,
1584
+ "num_input_tokens_seen": 14301272,
1585
+ "step": 197
1586
+ },
1587
+ {
1588
+ "epoch": 3.249221183800623,
1589
+ "grad_norm": 0.19556447863578796,
1590
+ "learning_rate": 4.671578786095478e-05,
1591
+ "loss": 0.2186,
1592
+ "num_input_tokens_seen": 14347352,
1593
+ "step": 198
1594
+ },
1595
+ {
1596
+ "epoch": 3.265835929387331,
1597
+ "grad_norm": 0.17333142459392548,
1598
+ "learning_rate": 4.668328578518933e-05,
1599
+ "loss": 0.2892,
1600
+ "num_input_tokens_seen": 14434600,
1601
+ "step": 199
1602
+ },
1603
+ {
1604
+ "epoch": 3.2824506749740396,
1605
+ "grad_norm": 0.20295488834381104,
1606
+ "learning_rate": 4.665063509461097e-05,
1607
+ "loss": 0.2014,
1608
+ "num_input_tokens_seen": 14484104,
1609
+ "step": 200
1610
+ },
1611
+ {
1612
+ "epoch": 3.2990654205607477,
1613
+ "grad_norm": 0.1597638726234436,
1614
+ "learning_rate": 4.661783601300388e-05,
1615
+ "loss": 0.2158,
1616
+ "num_input_tokens_seen": 14567152,
1617
+ "step": 201
1618
+ },
1619
+ {
1620
+ "epoch": 3.3156801661474558,
1621
+ "grad_norm": 0.19849488139152527,
1622
+ "learning_rate": 4.6584888765169296e-05,
1623
+ "loss": 0.2578,
1624
+ "num_input_tokens_seen": 14647040,
1625
+ "step": 202
1626
+ },
1627
+ {
1628
+ "epoch": 3.3322949117341643,
1629
+ "grad_norm": 0.1508200466632843,
1630
+ "learning_rate": 4.6551793576923964e-05,
1631
+ "loss": 0.2213,
1632
+ "num_input_tokens_seen": 14738216,
1633
+ "step": 203
1634
+ },
1635
+ {
1636
+ "epoch": 3.3489096573208723,
1637
+ "grad_norm": 0.1687687337398529,
1638
+ "learning_rate": 4.65185506750986e-05,
1639
+ "loss": 0.1828,
1640
+ "num_input_tokens_seen": 14811216,
1641
+ "step": 204
1642
+ },
1643
+ {
1644
+ "epoch": 3.3655244029075804,
1645
+ "grad_norm": 0.16587376594543457,
1646
+ "learning_rate": 4.648516028753632e-05,
1647
+ "loss": 0.1619,
1648
+ "num_input_tokens_seen": 14885992,
1649
+ "step": 205
1650
+ },
1651
+ {
1652
+ "epoch": 3.382139148494289,
1653
+ "grad_norm": 0.16600169241428375,
1654
+ "learning_rate": 4.645162264309112e-05,
1655
+ "loss": 0.2438,
1656
+ "num_input_tokens_seen": 14961984,
1657
+ "step": 206
1658
+ },
1659
+ {
1660
+ "epoch": 3.398753894080997,
1661
+ "grad_norm": 0.1877703070640564,
1662
+ "learning_rate": 4.6417937971626245e-05,
1663
+ "loss": 0.1771,
1664
+ "num_input_tokens_seen": 15021240,
1665
+ "step": 207
1666
+ },
1667
+ {
1668
+ "epoch": 3.415368639667705,
1669
+ "grad_norm": 0.20105206966400146,
1670
+ "learning_rate": 4.638410650401267e-05,
1671
+ "loss": 0.1742,
1672
+ "num_input_tokens_seen": 15092016,
1673
+ "step": 208
1674
+ },
1675
+ {
1676
+ "epoch": 3.431983385254413,
1677
+ "grad_norm": 0.12934140861034393,
1678
+ "learning_rate": 4.635012847212748e-05,
1679
+ "loss": 0.1725,
1680
+ "num_input_tokens_seen": 15198192,
1681
+ "step": 209
1682
+ },
1683
+ {
1684
+ "epoch": 3.4485981308411215,
1685
+ "grad_norm": 0.18388882279396057,
1686
+ "learning_rate": 4.6316004108852305e-05,
1687
+ "loss": 0.186,
1688
+ "num_input_tokens_seen": 15258432,
1689
+ "step": 210
1690
+ },
1691
+ {
1692
+ "epoch": 3.4652128764278296,
1693
+ "grad_norm": 0.1766858547925949,
1694
+ "learning_rate": 4.628173364807171e-05,
1695
+ "loss": 0.2166,
1696
+ "num_input_tokens_seen": 15329600,
1697
+ "step": 211
1698
+ },
1699
+ {
1700
+ "epoch": 3.4818276220145377,
1701
+ "grad_norm": 0.16214998066425323,
1702
+ "learning_rate": 4.6247317324671605e-05,
1703
+ "loss": 0.2038,
1704
+ "num_input_tokens_seen": 15407920,
1705
+ "step": 212
1706
+ },
1707
+ {
1708
+ "epoch": 3.498442367601246,
1709
+ "grad_norm": 0.16933797299861908,
1710
+ "learning_rate": 4.6212755374537596e-05,
1711
+ "loss": 0.2017,
1712
+ "num_input_tokens_seen": 15479640,
1713
+ "step": 213
1714
+ },
1715
+ {
1716
+ "epoch": 3.515057113187954,
1717
+ "grad_norm": 0.19472749531269073,
1718
+ "learning_rate": 4.617804803455344e-05,
1719
+ "loss": 0.2048,
1720
+ "num_input_tokens_seen": 15561960,
1721
+ "step": 214
1722
+ },
1723
+ {
1724
+ "epoch": 3.5316718587746623,
1725
+ "grad_norm": 0.33335182070732117,
1726
+ "learning_rate": 4.614319554259934e-05,
1727
+ "loss": 0.2358,
1728
+ "num_input_tokens_seen": 15641440,
1729
+ "step": 215
1730
+ },
1731
+ {
1732
+ "epoch": 3.5482866043613708,
1733
+ "grad_norm": 0.19587557017803192,
1734
+ "learning_rate": 4.610819813755038e-05,
1735
+ "loss": 0.2374,
1736
+ "num_input_tokens_seen": 15728872,
1737
+ "step": 216
1738
+ },
1739
+ {
1740
+ "epoch": 3.564901349948079,
1741
+ "grad_norm": 0.19063518941402435,
1742
+ "learning_rate": 4.607305605927487e-05,
1743
+ "loss": 0.1919,
1744
+ "num_input_tokens_seen": 15798112,
1745
+ "step": 217
1746
+ },
1747
+ {
1748
+ "epoch": 3.581516095534787,
1749
+ "grad_norm": 0.19598323106765747,
1750
+ "learning_rate": 4.6037769548632656e-05,
1751
+ "loss": 0.2583,
1752
+ "num_input_tokens_seen": 15865936,
1753
+ "step": 218
1754
+ },
1755
+ {
1756
+ "epoch": 3.5981308411214954,
1757
+ "grad_norm": 0.18066690862178802,
1758
+ "learning_rate": 4.600233884747355e-05,
1759
+ "loss": 0.2337,
1760
+ "num_input_tokens_seen": 15941368,
1761
+ "step": 219
1762
+ },
1763
+ {
1764
+ "epoch": 3.6147455867082035,
1765
+ "grad_norm": 0.16981899738311768,
1766
+ "learning_rate": 4.5966764198635606e-05,
1767
+ "loss": 0.1818,
1768
+ "num_input_tokens_seen": 16028208,
1769
+ "step": 220
1770
+ },
1771
+ {
1772
+ "epoch": 3.6313603322949115,
1773
+ "grad_norm": 0.180410236120224,
1774
+ "learning_rate": 4.5931045845943474e-05,
1775
+ "loss": 0.1646,
1776
+ "num_input_tokens_seen": 16104408,
1777
+ "step": 221
1778
+ },
1779
+ {
1780
+ "epoch": 3.64797507788162,
1781
+ "grad_norm": 0.19180680811405182,
1782
+ "learning_rate": 4.5895184034206765e-05,
1783
+ "loss": 0.3263,
1784
+ "num_input_tokens_seen": 16156800,
1785
+ "step": 222
1786
+ },
1787
+ {
1788
+ "epoch": 3.664589823468328,
1789
+ "grad_norm": 0.16119280457496643,
1790
+ "learning_rate": 4.585917900921829e-05,
1791
+ "loss": 0.2636,
1792
+ "num_input_tokens_seen": 16256712,
1793
+ "step": 223
1794
+ },
1795
+ {
1796
+ "epoch": 3.681204569055036,
1797
+ "grad_norm": 0.18559172749519348,
1798
+ "learning_rate": 4.5823031017752485e-05,
1799
+ "loss": 0.1759,
1800
+ "num_input_tokens_seen": 16330344,
1801
+ "step": 224
1802
+ },
1803
+ {
1804
+ "epoch": 3.6978193146417446,
1805
+ "grad_norm": 0.17767880856990814,
1806
+ "learning_rate": 4.5786740307563636e-05,
1807
+ "loss": 0.196,
1808
+ "num_input_tokens_seen": 16399792,
1809
+ "step": 225
1810
+ },
1811
+ {
1812
+ "epoch": 3.7144340602284527,
1813
+ "grad_norm": 0.17806987464427948,
1814
+ "learning_rate": 4.575030712738419e-05,
1815
+ "loss": 0.186,
1816
+ "num_input_tokens_seen": 16466368,
1817
+ "step": 226
1818
+ },
1819
+ {
1820
+ "epoch": 3.7310488058151607,
1821
+ "grad_norm": 0.1952792853116989,
1822
+ "learning_rate": 4.571373172692309e-05,
1823
+ "loss": 0.1789,
1824
+ "num_input_tokens_seen": 16530976,
1825
+ "step": 227
1826
+ },
1827
+ {
1828
+ "epoch": 3.7476635514018692,
1829
+ "grad_norm": 0.1774374544620514,
1830
+ "learning_rate": 4.567701435686404e-05,
1831
+ "loss": 0.1929,
1832
+ "num_input_tokens_seen": 16600216,
1833
+ "step": 228
1834
+ },
1835
+ {
1836
+ "epoch": 3.7642782969885773,
1837
+ "grad_norm": 0.18798600137233734,
1838
+ "learning_rate": 4.5640155268863796e-05,
1839
+ "loss": 0.2268,
1840
+ "num_input_tokens_seen": 16673192,
1841
+ "step": 229
1842
+ },
1843
+ {
1844
+ "epoch": 3.7808930425752854,
1845
+ "grad_norm": 0.2022520750761032,
1846
+ "learning_rate": 4.5603154715550386e-05,
1847
+ "loss": 0.1716,
1848
+ "num_input_tokens_seen": 16739912,
1849
+ "step": 230
1850
+ },
1851
+ {
1852
+ "epoch": 3.797507788161994,
1853
+ "grad_norm": 0.15170948207378387,
1854
+ "learning_rate": 4.55660129505215e-05,
1855
+ "loss": 0.1844,
1856
+ "num_input_tokens_seen": 16834632,
1857
+ "step": 231
1858
+ },
1859
+ {
1860
+ "epoch": 3.814122533748702,
1861
+ "grad_norm": 0.16655084490776062,
1862
+ "learning_rate": 4.5528730228342605e-05,
1863
+ "loss": 0.1899,
1864
+ "num_input_tokens_seen": 16914728,
1865
+ "step": 232
1866
+ },
1867
+ {
1868
+ "epoch": 3.83073727933541,
1869
+ "grad_norm": 0.19025221467018127,
1870
+ "learning_rate": 4.549130680454532e-05,
1871
+ "loss": 0.2214,
1872
+ "num_input_tokens_seen": 17014304,
1873
+ "step": 233
1874
+ },
1875
+ {
1876
+ "epoch": 3.8473520249221185,
1877
+ "grad_norm": 0.17126557230949402,
1878
+ "learning_rate": 4.545374293562559e-05,
1879
+ "loss": 0.2062,
1880
+ "num_input_tokens_seen": 17106664,
1881
+ "step": 234
1882
+ },
1883
+ {
1884
+ "epoch": 3.8639667705088265,
1885
+ "grad_norm": 0.16162410378456116,
1886
+ "learning_rate": 4.541603887904198e-05,
1887
+ "loss": 0.2016,
1888
+ "num_input_tokens_seen": 17193744,
1889
+ "step": 235
1890
+ },
1891
+ {
1892
+ "epoch": 3.8805815160955346,
1893
+ "grad_norm": 0.2067136913537979,
1894
+ "learning_rate": 4.537819489321386e-05,
1895
+ "loss": 0.1992,
1896
+ "num_input_tokens_seen": 17254656,
1897
+ "step": 236
1898
+ },
1899
+ {
1900
+ "epoch": 3.897196261682243,
1901
+ "grad_norm": 0.200433611869812,
1902
+ "learning_rate": 4.534021123751968e-05,
1903
+ "loss": 0.1961,
1904
+ "num_input_tokens_seen": 17325896,
1905
+ "step": 237
1906
+ },
1907
+ {
1908
+ "epoch": 3.913811007268951,
1909
+ "grad_norm": 0.2062034010887146,
1910
+ "learning_rate": 4.5302088172295156e-05,
1911
+ "loss": 0.2302,
1912
+ "num_input_tokens_seen": 17394424,
1913
+ "step": 238
1914
+ },
1915
+ {
1916
+ "epoch": 3.930425752855659,
1917
+ "grad_norm": 0.1928798407316208,
1918
+ "learning_rate": 4.526382595883152e-05,
1919
+ "loss": 0.1846,
1920
+ "num_input_tokens_seen": 17456352,
1921
+ "step": 239
1922
+ },
1923
+ {
1924
+ "epoch": 3.9470404984423677,
1925
+ "grad_norm": 0.2011859118938446,
1926
+ "learning_rate": 4.522542485937369e-05,
1927
+ "loss": 0.1879,
1928
+ "num_input_tokens_seen": 17519168,
1929
+ "step": 240
1930
+ },
1931
+ {
1932
+ "epoch": 3.9636552440290758,
1933
+ "grad_norm": 0.20441657304763794,
1934
+ "learning_rate": 4.51868851371185e-05,
1935
+ "loss": 0.206,
1936
+ "num_input_tokens_seen": 17585144,
1937
+ "step": 241
1938
+ },
1939
+ {
1940
+ "epoch": 3.980269989615784,
1941
+ "grad_norm": 0.18314018845558167,
1942
+ "learning_rate": 4.5148207056212896e-05,
1943
+ "loss": 0.1676,
1944
+ "num_input_tokens_seen": 17662024,
1945
+ "step": 242
1946
+ },
1947
+ {
1948
+ "epoch": 3.9968847352024923,
1949
+ "grad_norm": 0.21530692279338837,
1950
+ "learning_rate": 4.5109390881752114e-05,
1951
+ "loss": 0.1961,
1952
+ "num_input_tokens_seen": 17724360,
1953
+ "step": 243
1954
+ },
1955
+ {
1956
+ "epoch": 4.0,
1957
+ "grad_norm": 0.38912200927734375,
1958
+ "learning_rate": 4.5070436879777865e-05,
1959
+ "loss": 0.185,
1960
+ "num_input_tokens_seen": 17746200,
1961
+ "step": 244
1962
+ },
1963
+ {
1964
+ "epoch": 4.0166147455867085,
1965
+ "grad_norm": 0.15166164934635162,
1966
+ "learning_rate": 4.503134531727652e-05,
1967
+ "loss": 0.1674,
1968
+ "num_input_tokens_seen": 17830760,
1969
+ "step": 245
1970
+ },
1971
+ {
1972
+ "epoch": 4.033229491173416,
1973
+ "grad_norm": 0.1999833583831787,
1974
+ "learning_rate": 4.499211646217727e-05,
1975
+ "loss": 0.1739,
1976
+ "num_input_tokens_seen": 17903840,
1977
+ "step": 246
1978
+ },
1979
+ {
1980
+ "epoch": 4.049844236760125,
1981
+ "grad_norm": 0.2024000585079193,
1982
+ "learning_rate": 4.495275058335029e-05,
1983
+ "loss": 0.1753,
1984
+ "num_input_tokens_seen": 17990448,
1985
+ "step": 247
1986
+ },
1987
+ {
1988
+ "epoch": 4.066458982346833,
1989
+ "grad_norm": 0.22637376189231873,
1990
+ "learning_rate": 4.491324795060491e-05,
1991
+ "loss": 0.1896,
1992
+ "num_input_tokens_seen": 18069520,
1993
+ "step": 248
1994
+ },
1995
+ {
1996
+ "epoch": 4.083073727933541,
1997
+ "grad_norm": 0.24361123144626617,
1998
+ "learning_rate": 4.487360883468775e-05,
1999
+ "loss": 0.1688,
2000
+ "num_input_tokens_seen": 18129128,
2001
+ "step": 249
2002
+ },
2003
+ {
2004
+ "epoch": 4.099688473520249,
2005
+ "grad_norm": 0.21949416399002075,
2006
+ "learning_rate": 4.4833833507280884e-05,
2007
+ "loss": 0.1928,
2008
+ "num_input_tokens_seen": 18202472,
2009
+ "step": 250
2010
+ },
2011
+ {
2012
+ "epoch": 4.116303219106958,
2013
+ "grad_norm": 0.22039519250392914,
2014
+ "learning_rate": 4.4793922240999933e-05,
2015
+ "loss": 0.1737,
2016
+ "num_input_tokens_seen": 18267232,
2017
+ "step": 251
2018
+ },
2019
+ {
2020
+ "epoch": 4.132917964693665,
2021
+ "grad_norm": 0.23173294961452484,
2022
+ "learning_rate": 4.4753875309392266e-05,
2023
+ "loss": 0.1883,
2024
+ "num_input_tokens_seen": 18325216,
2025
+ "step": 252
2026
+ },
2027
+ {
2028
+ "epoch": 4.149532710280374,
2029
+ "grad_norm": 0.24100351333618164,
2030
+ "learning_rate": 4.471369298693505e-05,
2031
+ "loss": 0.2042,
2032
+ "num_input_tokens_seen": 18406184,
2033
+ "step": 253
2034
+ },
2035
+ {
2036
+ "epoch": 4.166147455867082,
2037
+ "grad_norm": 0.1888919323682785,
2038
+ "learning_rate": 4.467337554903344e-05,
2039
+ "loss": 0.1656,
2040
+ "num_input_tokens_seen": 18481056,
2041
+ "step": 254
2042
+ },
2043
+ {
2044
+ "epoch": 4.18276220145379,
2045
+ "grad_norm": 0.17849119007587433,
2046
+ "learning_rate": 4.463292327201862e-05,
2047
+ "loss": 0.1454,
2048
+ "num_input_tokens_seen": 18554864,
2049
+ "step": 255
2050
+ },
2051
+ {
2052
+ "epoch": 4.1993769470404985,
2053
+ "grad_norm": 0.24600732326507568,
2054
+ "learning_rate": 4.4592336433146e-05,
2055
+ "loss": 0.2039,
2056
+ "num_input_tokens_seen": 18612120,
2057
+ "step": 256
2058
+ },
2059
+ {
2060
+ "epoch": 4.215991692627207,
2061
+ "grad_norm": 0.23695628345012665,
2062
+ "learning_rate": 4.4551615310593195e-05,
2063
+ "loss": 0.2112,
2064
+ "num_input_tokens_seen": 18710408,
2065
+ "step": 257
2066
+ },
2067
+ {
2068
+ "epoch": 4.232606438213915,
2069
+ "grad_norm": 0.2511826753616333,
2070
+ "learning_rate": 4.451076018345825e-05,
2071
+ "loss": 0.1831,
2072
+ "num_input_tokens_seen": 18769400,
2073
+ "step": 258
2074
+ },
2075
+ {
2076
+ "epoch": 4.249221183800623,
2077
+ "grad_norm": 0.1971820890903473,
2078
+ "learning_rate": 4.4469771331757604e-05,
2079
+ "loss": 0.1722,
2080
+ "num_input_tokens_seen": 18849704,
2081
+ "step": 259
2082
+ },
2083
+ {
2084
+ "epoch": 4.265835929387332,
2085
+ "grad_norm": 0.23203876614570618,
2086
+ "learning_rate": 4.442864903642428e-05,
2087
+ "loss": 0.1981,
2088
+ "num_input_tokens_seen": 18943328,
2089
+ "step": 260
2090
+ },
2091
+ {
2092
+ "epoch": 4.282450674974039,
2093
+ "grad_norm": 0.23434185981750488,
2094
+ "learning_rate": 4.4387393579305865e-05,
2095
+ "loss": 0.2014,
2096
+ "num_input_tokens_seen": 19022536,
2097
+ "step": 261
2098
+ },
2099
+ {
2100
+ "epoch": 4.299065420560748,
2101
+ "grad_norm": 0.2373885214328766,
2102
+ "learning_rate": 4.434600524316266e-05,
2103
+ "loss": 0.167,
2104
+ "num_input_tokens_seen": 19089200,
2105
+ "step": 262
2106
+ },
2107
+ {
2108
+ "epoch": 4.315680166147456,
2109
+ "grad_norm": 0.20130722224712372,
2110
+ "learning_rate": 4.430448431166567e-05,
2111
+ "loss": 0.2747,
2112
+ "num_input_tokens_seen": 19171216,
2113
+ "step": 263
2114
+ },
2115
+ {
2116
+ "epoch": 4.332294911734164,
2117
+ "grad_norm": 0.17947593331336975,
2118
+ "learning_rate": 4.426283106939474e-05,
2119
+ "loss": 0.1508,
2120
+ "num_input_tokens_seen": 19271872,
2121
+ "step": 264
2122
+ },
2123
+ {
2124
+ "epoch": 4.348909657320872,
2125
+ "grad_norm": 0.23321041464805603,
2126
+ "learning_rate": 4.4221045801836494e-05,
2127
+ "loss": 0.2585,
2128
+ "num_input_tokens_seen": 19342984,
2129
+ "step": 265
2130
+ },
2131
+ {
2132
+ "epoch": 4.365524402907581,
2133
+ "grad_norm": 0.26581740379333496,
2134
+ "learning_rate": 4.41791287953825e-05,
2135
+ "loss": 0.1789,
2136
+ "num_input_tokens_seen": 19391640,
2137
+ "step": 266
2138
+ },
2139
+ {
2140
+ "epoch": 4.382139148494288,
2141
+ "grad_norm": 0.20715415477752686,
2142
+ "learning_rate": 4.4137080337327205e-05,
2143
+ "loss": 0.1953,
2144
+ "num_input_tokens_seen": 19463232,
2145
+ "step": 267
2146
+ },
2147
+ {
2148
+ "epoch": 4.398753894080997,
2149
+ "grad_norm": 0.20641866326332092,
2150
+ "learning_rate": 4.4094900715866064e-05,
2151
+ "loss": 0.1752,
2152
+ "num_input_tokens_seen": 19523728,
2153
+ "step": 268
2154
+ },
2155
+ {
2156
+ "epoch": 4.415368639667705,
2157
+ "grad_norm": 0.23343385756015778,
2158
+ "learning_rate": 4.4052590220093446e-05,
2159
+ "loss": 0.1904,
2160
+ "num_input_tokens_seen": 19598960,
2161
+ "step": 269
2162
+ },
2163
+ {
2164
+ "epoch": 4.431983385254413,
2165
+ "grad_norm": 0.20117436349391937,
2166
+ "learning_rate": 4.401014914000078e-05,
2167
+ "loss": 0.1801,
2168
+ "num_input_tokens_seen": 19666136,
2169
+ "step": 270
2170
+ },
2171
+ {
2172
+ "epoch": 4.4485981308411215,
2173
+ "grad_norm": 0.24009813368320465,
2174
+ "learning_rate": 4.3967577766474455e-05,
2175
+ "loss": 0.1798,
2176
+ "num_input_tokens_seen": 19728600,
2177
+ "step": 271
2178
+ },
2179
+ {
2180
+ "epoch": 4.46521287642783,
2181
+ "grad_norm": 0.2242031991481781,
2182
+ "learning_rate": 4.3924876391293915e-05,
2183
+ "loss": 0.2221,
2184
+ "num_input_tokens_seen": 19801032,
2185
+ "step": 272
2186
+ },
2187
+ {
2188
+ "epoch": 4.481827622014538,
2189
+ "grad_norm": 0.22890391945838928,
2190
+ "learning_rate": 4.3882045307129594e-05,
2191
+ "loss": 0.1906,
2192
+ "num_input_tokens_seen": 19885496,
2193
+ "step": 273
2194
+ },
2195
+ {
2196
+ "epoch": 4.498442367601246,
2197
+ "grad_norm": 0.21996937692165375,
2198
+ "learning_rate": 4.383908480754095e-05,
2199
+ "loss": 0.1775,
2200
+ "num_input_tokens_seen": 19952072,
2201
+ "step": 274
2202
+ },
2203
+ {
2204
+ "epoch": 4.515057113187955,
2205
+ "grad_norm": 0.1860388070344925,
2206
+ "learning_rate": 4.379599518697444e-05,
2207
+ "loss": 0.1593,
2208
+ "num_input_tokens_seen": 20026536,
2209
+ "step": 275
2210
+ },
2211
+ {
2212
+ "epoch": 4.531671858774662,
2213
+ "grad_norm": 0.20987707376480103,
2214
+ "learning_rate": 4.375277674076149e-05,
2215
+ "loss": 0.1409,
2216
+ "num_input_tokens_seen": 20079112,
2217
+ "step": 276
2218
+ },
2219
+ {
2220
+ "epoch": 4.548286604361371,
2221
+ "grad_norm": 0.21347324550151825,
2222
+ "learning_rate": 4.3709429765116504e-05,
2223
+ "loss": 0.2701,
2224
+ "num_input_tokens_seen": 20144264,
2225
+ "step": 277
2226
+ },
2227
+ {
2228
+ "epoch": 4.564901349948079,
2229
+ "grad_norm": 0.27563896775245667,
2230
+ "learning_rate": 4.366595455713479e-05,
2231
+ "loss": 0.1856,
2232
+ "num_input_tokens_seen": 20207568,
2233
+ "step": 278
2234
+ },
2235
+ {
2236
+ "epoch": 4.581516095534787,
2237
+ "grad_norm": 0.21850791573524475,
2238
+ "learning_rate": 4.3622351414790554e-05,
2239
+ "loss": 0.2204,
2240
+ "num_input_tokens_seen": 20292376,
2241
+ "step": 279
2242
+ },
2243
+ {
2244
+ "epoch": 4.598130841121495,
2245
+ "grad_norm": 0.19672711193561554,
2246
+ "learning_rate": 4.357862063693486e-05,
2247
+ "loss": 0.1397,
2248
+ "num_input_tokens_seen": 20383048,
2249
+ "step": 280
2250
+ },
2251
+ {
2252
+ "epoch": 4.614745586708204,
2253
+ "grad_norm": 0.23507343232631683,
2254
+ "learning_rate": 4.353476252329356e-05,
2255
+ "loss": 0.1655,
2256
+ "num_input_tokens_seen": 20463376,
2257
+ "step": 281
2258
+ },
2259
+ {
2260
+ "epoch": 4.6313603322949115,
2261
+ "grad_norm": 0.23900464177131653,
2262
+ "learning_rate": 4.349077737446525e-05,
2263
+ "loss": 0.1511,
2264
+ "num_input_tokens_seen": 20537808,
2265
+ "step": 282
2266
+ },
2267
+ {
2268
+ "epoch": 4.64797507788162,
2269
+ "grad_norm": 0.2231033891439438,
2270
+ "learning_rate": 4.344666549191921e-05,
2271
+ "loss": 0.1783,
2272
+ "num_input_tokens_seen": 20605496,
2273
+ "step": 283
2274
+ },
2275
+ {
2276
+ "epoch": 4.6645898234683285,
2277
+ "grad_norm": 0.20772908627986908,
2278
+ "learning_rate": 4.3402427177993366e-05,
2279
+ "loss": 0.2135,
2280
+ "num_input_tokens_seen": 20692096,
2281
+ "step": 284
2282
+ },
2283
+ {
2284
+ "epoch": 4.681204569055036,
2285
+ "grad_norm": 0.24927115440368652,
2286
+ "learning_rate": 4.335806273589214e-05,
2287
+ "loss": 0.1919,
2288
+ "num_input_tokens_seen": 20762800,
2289
+ "step": 285
2290
+ },
2291
+ {
2292
+ "epoch": 4.697819314641745,
2293
+ "grad_norm": 0.2112305462360382,
2294
+ "learning_rate": 4.3313572469684474e-05,
2295
+ "loss": 0.1546,
2296
+ "num_input_tokens_seen": 20831584,
2297
+ "step": 286
2298
+ },
2299
+ {
2300
+ "epoch": 4.714434060228453,
2301
+ "grad_norm": 0.22639551758766174,
2302
+ "learning_rate": 4.326895668430166e-05,
2303
+ "loss": 0.124,
2304
+ "num_input_tokens_seen": 20897320,
2305
+ "step": 287
2306
+ },
2307
+ {
2308
+ "epoch": 4.731048805815161,
2309
+ "grad_norm": 0.2295934110879898,
2310
+ "learning_rate": 4.3224215685535294e-05,
2311
+ "loss": 0.1639,
2312
+ "num_input_tokens_seen": 20966136,
2313
+ "step": 288
2314
+ },
2315
+ {
2316
+ "epoch": 4.747663551401869,
2317
+ "grad_norm": 0.2341577112674713,
2318
+ "learning_rate": 4.317934978003517e-05,
2319
+ "loss": 0.1584,
2320
+ "num_input_tokens_seen": 21034800,
2321
+ "step": 289
2322
+ },
2323
+ {
2324
+ "epoch": 4.764278296988578,
2325
+ "grad_norm": 0.2542404234409332,
2326
+ "learning_rate": 4.313435927530719e-05,
2327
+ "loss": 0.1918,
2328
+ "num_input_tokens_seen": 21098672,
2329
+ "step": 290
2330
+ },
2331
+ {
2332
+ "epoch": 4.780893042575285,
2333
+ "grad_norm": 0.23311223089694977,
2334
+ "learning_rate": 4.3089244479711236e-05,
2335
+ "loss": 0.1597,
2336
+ "num_input_tokens_seen": 21177632,
2337
+ "step": 291
2338
+ },
2339
+ {
2340
+ "epoch": 4.797507788161994,
2341
+ "grad_norm": 0.2642923593521118,
2342
+ "learning_rate": 4.304400570245906e-05,
2343
+ "loss": 0.1847,
2344
+ "num_input_tokens_seen": 21240896,
2345
+ "step": 292
2346
+ },
2347
+ {
2348
+ "epoch": 4.814122533748702,
2349
+ "grad_norm": 0.18841278553009033,
2350
+ "learning_rate": 4.299864325361217e-05,
2351
+ "loss": 0.1472,
2352
+ "num_input_tokens_seen": 21322984,
2353
+ "step": 293
2354
+ },
2355
+ {
2356
+ "epoch": 4.83073727933541,
2357
+ "grad_norm": 0.22440434992313385,
2358
+ "learning_rate": 4.295315744407972e-05,
2359
+ "loss": 0.1607,
2360
+ "num_input_tokens_seen": 21389128,
2361
+ "step": 294
2362
+ },
2363
+ {
2364
+ "epoch": 4.8473520249221185,
2365
+ "grad_norm": 0.22145289182662964,
2366
+ "learning_rate": 4.290754858561637e-05,
2367
+ "loss": 0.1851,
2368
+ "num_input_tokens_seen": 21469912,
2369
+ "step": 295
2370
+ },
2371
+ {
2372
+ "epoch": 4.863966770508826,
2373
+ "grad_norm": 0.22817087173461914,
2374
+ "learning_rate": 4.2861816990820084e-05,
2375
+ "loss": 0.1531,
2376
+ "num_input_tokens_seen": 21540320,
2377
+ "step": 296
2378
+ },
2379
+ {
2380
+ "epoch": 4.880581516095535,
2381
+ "grad_norm": 0.22014038264751434,
2382
+ "learning_rate": 4.281596297313013e-05,
2383
+ "loss": 0.1815,
2384
+ "num_input_tokens_seen": 21626312,
2385
+ "step": 297
2386
+ },
2387
+ {
2388
+ "epoch": 4.897196261682243,
2389
+ "grad_norm": 0.2234148383140564,
2390
+ "learning_rate": 4.2769986846824815e-05,
2391
+ "loss": 0.1667,
2392
+ "num_input_tokens_seen": 21702792,
2393
+ "step": 298
2394
+ },
2395
+ {
2396
+ "epoch": 4.913811007268951,
2397
+ "grad_norm": 0.2851375341415405,
2398
+ "learning_rate": 4.272388892701934e-05,
2399
+ "loss": 0.1805,
2400
+ "num_input_tokens_seen": 21771880,
2401
+ "step": 299
2402
+ },
2403
+ {
2404
+ "epoch": 4.930425752855659,
2405
+ "grad_norm": 0.2221265286207199,
2406
+ "learning_rate": 4.267766952966369e-05,
2407
+ "loss": 0.1653,
2408
+ "num_input_tokens_seen": 21844024,
2409
+ "step": 300
2410
+ },
2411
+ {
2412
+ "epoch": 4.947040498442368,
2413
+ "grad_norm": 0.20688939094543457,
2414
+ "learning_rate": 4.2631328971540444e-05,
2415
+ "loss": 0.1654,
2416
+ "num_input_tokens_seen": 21925632,
2417
+ "step": 301
2418
+ },
2419
+ {
2420
+ "epoch": 4.963655244029075,
2421
+ "grad_norm": 0.2270977795124054,
2422
+ "learning_rate": 4.2584867570262597e-05,
2423
+ "loss": 0.1774,
2424
+ "num_input_tokens_seen": 21981952,
2425
+ "step": 302
2426
+ },
2427
+ {
2428
+ "epoch": 4.980269989615784,
2429
+ "grad_norm": 0.18652501702308655,
2430
+ "learning_rate": 4.25382856442714e-05,
2431
+ "loss": 0.1452,
2432
+ "num_input_tokens_seen": 22070440,
2433
+ "step": 303
2434
+ },
2435
+ {
2436
+ "epoch": 4.996884735202492,
2437
+ "grad_norm": 0.19792407751083374,
2438
+ "learning_rate": 4.249158351283414e-05,
2439
+ "loss": 0.1806,
2440
+ "num_input_tokens_seen": 22170184,
2441
+ "step": 304
2442
+ },
2443
+ {
2444
+ "epoch": 5.0,
2445
+ "grad_norm": 0.5933757424354553,
2446
+ "learning_rate": 4.244476149604201e-05,
2447
+ "loss": 0.2115,
2448
+ "num_input_tokens_seen": 22181856,
2449
+ "step": 305
2450
+ },
2451
+ {
2452
+ "epoch": 5.0166147455867085,
2453
+ "grad_norm": 0.22399979829788208,
2454
+ "learning_rate": 4.2397819914807856e-05,
2455
+ "loss": 0.1614,
2456
+ "num_input_tokens_seen": 22256808,
2457
+ "step": 306
2458
+ },
2459
+ {
2460
+ "epoch": 5.033229491173416,
2461
+ "grad_norm": 0.2595834732055664,
2462
+ "learning_rate": 4.2350759090864046e-05,
2463
+ "loss": 0.1838,
2464
+ "num_input_tokens_seen": 22325224,
2465
+ "step": 307
2466
+ },
2467
+ {
2468
+ "epoch": 5.049844236760125,
2469
+ "grad_norm": 0.188430517911911,
2470
+ "learning_rate": 4.230357934676017e-05,
2471
+ "loss": 0.1421,
2472
+ "num_input_tokens_seen": 22389624,
2473
+ "step": 308
2474
+ },
2475
+ {
2476
+ "epoch": 5.066458982346833,
2477
+ "grad_norm": 0.325431764125824,
2478
+ "learning_rate": 4.225628100586093e-05,
2479
+ "loss": 0.1852,
2480
+ "num_input_tokens_seen": 22463872,
2481
+ "step": 309
2482
+ },
2483
+ {
2484
+ "epoch": 5.083073727933541,
2485
+ "grad_norm": 0.27097174525260925,
2486
+ "learning_rate": 4.220886439234385e-05,
2487
+ "loss": 0.1526,
2488
+ "num_input_tokens_seen": 22515824,
2489
+ "step": 310
2490
+ },
2491
+ {
2492
+ "epoch": 5.099688473520249,
2493
+ "grad_norm": 0.21379193663597107,
2494
+ "learning_rate": 4.2161329831197095e-05,
2495
+ "loss": 0.151,
2496
+ "num_input_tokens_seen": 22602336,
2497
+ "step": 311
2498
+ },
2499
+ {
2500
+ "epoch": 5.116303219106958,
2501
+ "grad_norm": 0.24827998876571655,
2502
+ "learning_rate": 4.211367764821722e-05,
2503
+ "loss": 0.142,
2504
+ "num_input_tokens_seen": 22655176,
2505
+ "step": 312
2506
+ },
2507
+ {
2508
+ "epoch": 5.132917964693665,
2509
+ "grad_norm": 0.26246964931488037,
2510
+ "learning_rate": 4.2065908170006955e-05,
2511
+ "loss": 0.1589,
2512
+ "num_input_tokens_seen": 22728680,
2513
+ "step": 313
2514
+ },
2515
+ {
2516
+ "epoch": 5.149532710280374,
2517
+ "grad_norm": 0.24459198117256165,
2518
+ "learning_rate": 4.201802172397295e-05,
2519
+ "loss": 0.1435,
2520
+ "num_input_tokens_seen": 22806784,
2521
+ "step": 314
2522
+ },
2523
+ {
2524
+ "epoch": 5.166147455867082,
2525
+ "grad_norm": 0.26540517807006836,
2526
+ "learning_rate": 4.197001863832355e-05,
2527
+ "loss": 0.1447,
2528
+ "num_input_tokens_seen": 22880648,
2529
+ "step": 315
2530
+ },
2531
+ {
2532
+ "epoch": 5.18276220145379,
2533
+ "grad_norm": 0.25646644830703735,
2534
+ "learning_rate": 4.192189924206652e-05,
2535
+ "loss": 0.1418,
2536
+ "num_input_tokens_seen": 22953184,
2537
+ "step": 316
2538
+ },
2539
+ {
2540
+ "epoch": 5.1993769470404985,
2541
+ "grad_norm": 0.2358384132385254,
2542
+ "learning_rate": 4.187366386500683e-05,
2543
+ "loss": 0.1845,
2544
+ "num_input_tokens_seen": 23037392,
2545
+ "step": 317
2546
+ },
2547
+ {
2548
+ "epoch": 5.215991692627207,
2549
+ "grad_norm": 0.2270258218050003,
2550
+ "learning_rate": 4.182531283774434e-05,
2551
+ "loss": 0.2668,
2552
+ "num_input_tokens_seen": 23086552,
2553
+ "step": 318
2554
+ },
2555
+ {
2556
+ "epoch": 5.232606438213915,
2557
+ "grad_norm": 0.24396558105945587,
2558
+ "learning_rate": 4.177684649167158e-05,
2559
+ "loss": 0.1567,
2560
+ "num_input_tokens_seen": 23153152,
2561
+ "step": 319
2562
+ },
2563
+ {
2564
+ "epoch": 5.249221183800623,
2565
+ "grad_norm": 0.2542375922203064,
2566
+ "learning_rate": 4.172826515897146e-05,
2567
+ "loss": 0.1617,
2568
+ "num_input_tokens_seen": 23240928,
2569
+ "step": 320
2570
+ },
2571
+ {
2572
+ "epoch": 5.265835929387332,
2573
+ "grad_norm": 0.2268146276473999,
2574
+ "learning_rate": 4.1679569172614996e-05,
2575
+ "loss": 0.1573,
2576
+ "num_input_tokens_seen": 23325912,
2577
+ "step": 321
2578
+ },
2579
+ {
2580
+ "epoch": 5.282450674974039,
2581
+ "grad_norm": 0.26405712962150574,
2582
+ "learning_rate": 4.163075886635902e-05,
2583
+ "loss": 0.1738,
2584
+ "num_input_tokens_seen": 23401952,
2585
+ "step": 322
2586
+ },
2587
+ {
2588
+ "epoch": 5.299065420560748,
2589
+ "grad_norm": 0.2599943280220032,
2590
+ "learning_rate": 4.1581834574743915e-05,
2591
+ "loss": 0.1433,
2592
+ "num_input_tokens_seen": 23463760,
2593
+ "step": 323
2594
+ },
2595
+ {
2596
+ "epoch": 5.315680166147456,
2597
+ "grad_norm": 0.24650180339813232,
2598
+ "learning_rate": 4.1532796633091296e-05,
2599
+ "loss": 0.1511,
2600
+ "num_input_tokens_seen": 23535272,
2601
+ "step": 324
2602
+ },
2603
+ {
2604
+ "epoch": 5.332294911734164,
2605
+ "grad_norm": 0.22445832192897797,
2606
+ "learning_rate": 4.148364537750172e-05,
2607
+ "loss": 0.1296,
2608
+ "num_input_tokens_seen": 23607752,
2609
+ "step": 325
2610
+ },
2611
+ {
2612
+ "epoch": 5.348909657320872,
2613
+ "grad_norm": 0.20715995132923126,
2614
+ "learning_rate": 4.14343811448524e-05,
2615
+ "loss": 0.142,
2616
+ "num_input_tokens_seen": 23674872,
2617
+ "step": 326
2618
+ },
2619
+ {
2620
+ "epoch": 5.365524402907581,
2621
+ "grad_norm": 0.2707969546318054,
2622
+ "learning_rate": 4.138500427279485e-05,
2623
+ "loss": 0.1679,
2624
+ "num_input_tokens_seen": 23736384,
2625
+ "step": 327
2626
+ },
2627
+ {
2628
+ "epoch": 5.382139148494288,
2629
+ "grad_norm": 0.2017841637134552,
2630
+ "learning_rate": 4.133551509975264e-05,
2631
+ "loss": 0.1346,
2632
+ "num_input_tokens_seen": 23835000,
2633
+ "step": 328
2634
+ },
2635
+ {
2636
+ "epoch": 5.398753894080997,
2637
+ "grad_norm": 0.21116195619106293,
2638
+ "learning_rate": 4.128591396491901e-05,
2639
+ "loss": 0.1364,
2640
+ "num_input_tokens_seen": 23912552,
2641
+ "step": 329
2642
+ },
2643
+ {
2644
+ "epoch": 5.415368639667705,
2645
+ "grad_norm": 0.2331131547689438,
2646
+ "learning_rate": 4.123620120825459e-05,
2647
+ "loss": 0.1719,
2648
+ "num_input_tokens_seen": 23987368,
2649
+ "step": 330
2650
+ },
2651
+ {
2652
+ "epoch": 5.431983385254413,
2653
+ "grad_norm": 0.27115845680236816,
2654
+ "learning_rate": 4.118637717048506e-05,
2655
+ "loss": 0.1468,
2656
+ "num_input_tokens_seen": 24050848,
2657
+ "step": 331
2658
+ },
2659
+ {
2660
+ "epoch": 5.4485981308411215,
2661
+ "grad_norm": 0.21654783189296722,
2662
+ "learning_rate": 4.113644219309877e-05,
2663
+ "loss": 0.1418,
2664
+ "num_input_tokens_seen": 24146104,
2665
+ "step": 332
2666
+ },
2667
+ {
2668
+ "epoch": 5.46521287642783,
2669
+ "grad_norm": 0.2783348560333252,
2670
+ "learning_rate": 4.1086396618344476e-05,
2671
+ "loss": 0.1502,
2672
+ "num_input_tokens_seen": 24194184,
2673
+ "step": 333
2674
+ },
2675
+ {
2676
+ "epoch": 5.481827622014538,
2677
+ "grad_norm": 0.23255467414855957,
2678
+ "learning_rate": 4.1036240789228954e-05,
2679
+ "loss": 0.1571,
2680
+ "num_input_tokens_seen": 24275368,
2681
+ "step": 334
2682
+ },
2683
+ {
2684
+ "epoch": 5.498442367601246,
2685
+ "grad_norm": 0.2655453681945801,
2686
+ "learning_rate": 4.098597504951462e-05,
2687
+ "loss": 0.1607,
2688
+ "num_input_tokens_seen": 24329192,
2689
+ "step": 335
2690
+ },
2691
+ {
2692
+ "epoch": 5.515057113187955,
2693
+ "grad_norm": 0.23245719075202942,
2694
+ "learning_rate": 4.093559974371725e-05,
2695
+ "loss": 0.1453,
2696
+ "num_input_tokens_seen": 24426696,
2697
+ "step": 336
2698
+ },
2699
+ {
2700
+ "epoch": 5.531671858774662,
2701
+ "grad_norm": 0.20036327838897705,
2702
+ "learning_rate": 4.088511521710352e-05,
2703
+ "loss": 0.1411,
2704
+ "num_input_tokens_seen": 24514344,
2705
+ "step": 337
2706
+ },
2707
+ {
2708
+ "epoch": 5.548286604361371,
2709
+ "grad_norm": 0.32294195890426636,
2710
+ "learning_rate": 4.083452181568875e-05,
2711
+ "loss": 0.1467,
2712
+ "num_input_tokens_seen": 24584464,
2713
+ "step": 338
2714
+ },
2715
+ {
2716
+ "epoch": 5.564901349948079,
2717
+ "grad_norm": 0.23969624936580658,
2718
+ "learning_rate": 4.0783819886234445e-05,
2719
+ "loss": 0.1304,
2720
+ "num_input_tokens_seen": 24660600,
2721
+ "step": 339
2722
+ },
2723
+ {
2724
+ "epoch": 5.581516095534787,
2725
+ "grad_norm": 0.2539989948272705,
2726
+ "learning_rate": 4.073300977624594e-05,
2727
+ "loss": 0.1374,
2728
+ "num_input_tokens_seen": 24717088,
2729
+ "step": 340
2730
+ },
2731
+ {
2732
+ "epoch": 5.598130841121495,
2733
+ "grad_norm": 0.26608580350875854,
2734
+ "learning_rate": 4.068209183397004e-05,
2735
+ "loss": 0.1519,
2736
+ "num_input_tokens_seen": 24775352,
2737
+ "step": 341
2738
+ },
2739
+ {
2740
+ "epoch": 5.614745586708204,
2741
+ "grad_norm": 0.2161550372838974,
2742
+ "learning_rate": 4.063106640839264e-05,
2743
+ "loss": 0.1409,
2744
+ "num_input_tokens_seen": 24860072,
2745
+ "step": 342
2746
+ },
2747
+ {
2748
+ "epoch": 5.6313603322949115,
2749
+ "grad_norm": 0.22482600808143616,
2750
+ "learning_rate": 4.057993384923626e-05,
2751
+ "loss": 0.1393,
2752
+ "num_input_tokens_seen": 24947856,
2753
+ "step": 343
2754
+ },
2755
+ {
2756
+ "epoch": 5.64797507788162,
2757
+ "grad_norm": 0.2367829829454422,
2758
+ "learning_rate": 4.052869450695776e-05,
2759
+ "loss": 0.1539,
2760
+ "num_input_tokens_seen": 25024992,
2761
+ "step": 344
2762
+ },
2763
+ {
2764
+ "epoch": 5.6645898234683285,
2765
+ "grad_norm": 0.29229775071144104,
2766
+ "learning_rate": 4.047734873274586e-05,
2767
+ "loss": 0.1522,
2768
+ "num_input_tokens_seen": 25092248,
2769
+ "step": 345
2770
+ },
2771
+ {
2772
+ "epoch": 5.681204569055036,
2773
+ "grad_norm": 0.2589828670024872,
2774
+ "learning_rate": 4.042589687851872e-05,
2775
+ "loss": 0.1493,
2776
+ "num_input_tokens_seen": 25170496,
2777
+ "step": 346
2778
+ },
2779
+ {
2780
+ "epoch": 5.697819314641745,
2781
+ "grad_norm": 0.23003339767456055,
2782
+ "learning_rate": 4.037433929692161e-05,
2783
+ "loss": 0.1529,
2784
+ "num_input_tokens_seen": 25268720,
2785
+ "step": 347
2786
+ },
2787
+ {
2788
+ "epoch": 5.714434060228453,
2789
+ "grad_norm": 0.26932114362716675,
2790
+ "learning_rate": 4.0322676341324415e-05,
2791
+ "loss": 0.1499,
2792
+ "num_input_tokens_seen": 25332688,
2793
+ "step": 348
2794
+ },
2795
+ {
2796
+ "epoch": 5.731048805815161,
2797
+ "grad_norm": 0.27059391140937805,
2798
+ "learning_rate": 4.027090836581925e-05,
2799
+ "loss": 0.173,
2800
+ "num_input_tokens_seen": 25413904,
2801
+ "step": 349
2802
+ },
2803
+ {
2804
+ "epoch": 5.747663551401869,
2805
+ "grad_norm": 0.24265804886817932,
2806
+ "learning_rate": 4.021903572521802e-05,
2807
+ "loss": 0.1531,
2808
+ "num_input_tokens_seen": 25503720,
2809
+ "step": 350
2810
+ },
2811
+ {
2812
+ "epoch": 5.764278296988578,
2813
+ "grad_norm": 0.28688696026802063,
2814
+ "learning_rate": 4.0167058775049996e-05,
2815
+ "loss": 0.1615,
2816
+ "num_input_tokens_seen": 25568560,
2817
+ "step": 351
2818
+ },
2819
+ {
2820
+ "epoch": 5.780893042575285,
2821
+ "grad_norm": 0.26201075315475464,
2822
+ "learning_rate": 4.011497787155938e-05,
2823
+ "loss": 0.1452,
2824
+ "num_input_tokens_seen": 25635184,
2825
+ "step": 352
2826
+ },
2827
+ {
2828
+ "epoch": 5.797507788161994,
2829
+ "grad_norm": 0.22841767966747284,
2830
+ "learning_rate": 4.006279337170283e-05,
2831
+ "loss": 0.148,
2832
+ "num_input_tokens_seen": 25719768,
2833
+ "step": 353
2834
+ },
2835
+ {
2836
+ "epoch": 5.814122533748702,
2837
+ "grad_norm": 0.28246188163757324,
2838
+ "learning_rate": 4.0010505633147106e-05,
2839
+ "loss": 0.1446,
2840
+ "num_input_tokens_seen": 25795016,
2841
+ "step": 354
2842
+ },
2843
+ {
2844
+ "epoch": 5.83073727933541,
2845
+ "grad_norm": 0.2533949315547943,
2846
+ "learning_rate": 3.995811501426648e-05,
2847
+ "loss": 0.1297,
2848
+ "num_input_tokens_seen": 25863184,
2849
+ "step": 355
2850
+ },
2851
+ {
2852
+ "epoch": 5.8473520249221185,
2853
+ "grad_norm": 0.25117793679237366,
2854
+ "learning_rate": 3.99056218741404e-05,
2855
+ "loss": 0.1479,
2856
+ "num_input_tokens_seen": 25935752,
2857
+ "step": 356
2858
+ },
2859
+ {
2860
+ "epoch": 5.863966770508826,
2861
+ "grad_norm": 0.2759612202644348,
2862
+ "learning_rate": 3.985302657255097e-05,
2863
+ "loss": 0.1466,
2864
+ "num_input_tokens_seen": 25995760,
2865
+ "step": 357
2866
+ },
2867
+ {
2868
+ "epoch": 5.880581516095535,
2869
+ "grad_norm": 0.24218028783798218,
2870
+ "learning_rate": 3.980032946998049e-05,
2871
+ "loss": 0.1297,
2872
+ "num_input_tokens_seen": 26061240,
2873
+ "step": 358
2874
+ },
2875
+ {
2876
+ "epoch": 5.897196261682243,
2877
+ "grad_norm": 0.2628185749053955,
2878
+ "learning_rate": 3.974753092760901e-05,
2879
+ "loss": 0.1421,
2880
+ "num_input_tokens_seen": 26131024,
2881
+ "step": 359
2882
+ },
2883
+ {
2884
+ "epoch": 5.913811007268951,
2885
+ "grad_norm": 0.18742328882217407,
2886
+ "learning_rate": 3.969463130731183e-05,
2887
+ "loss": 0.1737,
2888
+ "num_input_tokens_seen": 26233672,
2889
+ "step": 360
2890
+ },
2891
+ {
2892
+ "epoch": 5.930425752855659,
2893
+ "grad_norm": 0.22187161445617676,
2894
+ "learning_rate": 3.964163097165702e-05,
2895
+ "loss": 0.1153,
2896
+ "num_input_tokens_seen": 26303488,
2897
+ "step": 361
2898
+ },
2899
+ {
2900
+ "epoch": 5.947040498442368,
2901
+ "grad_norm": 0.25965312123298645,
2902
+ "learning_rate": 3.958853028390294e-05,
2903
+ "loss": 0.1347,
2904
+ "num_input_tokens_seen": 26377768,
2905
+ "step": 362
2906
+ },
2907
+ {
2908
+ "epoch": 5.963655244029075,
2909
+ "grad_norm": 0.2957039475440979,
2910
+ "learning_rate": 3.953532960799577e-05,
2911
+ "loss": 0.2736,
2912
+ "num_input_tokens_seen": 26435984,
2913
+ "step": 363
2914
+ },
2915
+ {
2916
+ "epoch": 5.980269989615784,
2917
+ "grad_norm": 0.3063158392906189,
2918
+ "learning_rate": 3.948202930856697e-05,
2919
+ "loss": 0.1563,
2920
+ "num_input_tokens_seen": 26513960,
2921
+ "step": 364
2922
+ },
2923
+ {
2924
+ "epoch": 5.996884735202492,
2925
+ "grad_norm": 0.21576139330863953,
2926
+ "learning_rate": 3.942862975093085e-05,
2927
+ "loss": 0.1361,
2928
+ "num_input_tokens_seen": 26599064,
2929
+ "step": 365
2930
+ },
2931
+ {
2932
+ "epoch": 6.0,
2933
+ "grad_norm": 0.49579551815986633,
2934
+ "learning_rate": 3.937513130108197e-05,
2935
+ "loss": 0.1849,
2936
+ "num_input_tokens_seen": 26617264,
2937
+ "step": 366
2938
+ },
2939
+ {
2940
+ "epoch": 6.0166147455867085,
2941
+ "grad_norm": 0.25051239132881165,
2942
+ "learning_rate": 3.9321534325692726e-05,
2943
+ "loss": 0.1452,
2944
+ "num_input_tokens_seen": 26717024,
2945
+ "step": 367
2946
+ },
2947
+ {
2948
+ "epoch": 6.033229491173416,
2949
+ "grad_norm": 0.2842366099357605,
2950
+ "learning_rate": 3.92678391921108e-05,
2951
+ "loss": 0.1361,
2952
+ "num_input_tokens_seen": 26788120,
2953
+ "step": 368
2954
+ },
2955
+ {
2956
+ "epoch": 6.049844236760125,
2957
+ "grad_norm": 0.32458341121673584,
2958
+ "learning_rate": 3.92140462683566e-05,
2959
+ "loss": 0.1537,
2960
+ "num_input_tokens_seen": 26853760,
2961
+ "step": 369
2962
+ },
2963
+ {
2964
+ "epoch": 6.066458982346833,
2965
+ "grad_norm": 0.27452871203422546,
2966
+ "learning_rate": 3.916015592312082e-05,
2967
+ "loss": 0.1354,
2968
+ "num_input_tokens_seen": 26923848,
2969
+ "step": 370
2970
+ },
2971
+ {
2972
+ "epoch": 6.083073727933541,
2973
+ "grad_norm": 0.2542634904384613,
2974
+ "learning_rate": 3.9106168525761855e-05,
2975
+ "loss": 0.1233,
2976
+ "num_input_tokens_seen": 26976184,
2977
+ "step": 371
2978
+ },
2979
+ {
2980
+ "epoch": 6.099688473520249,
2981
+ "grad_norm": 0.2786627411842346,
2982
+ "learning_rate": 3.905208444630327e-05,
2983
+ "loss": 0.125,
2984
+ "num_input_tokens_seen": 27065712,
2985
+ "step": 372
2986
+ },
2987
+ {
2988
+ "epoch": 6.116303219106958,
2989
+ "grad_norm": 0.19119420647621155,
2990
+ "learning_rate": 3.899790405543129e-05,
2991
+ "loss": 0.1475,
2992
+ "num_input_tokens_seen": 27145472,
2993
+ "step": 373
2994
+ },
2995
+ {
2996
+ "epoch": 6.132917964693665,
2997
+ "grad_norm": 0.25163477659225464,
2998
+ "learning_rate": 3.894362772449226e-05,
2999
+ "loss": 0.1235,
3000
+ "num_input_tokens_seen": 27233904,
3001
+ "step": 374
3002
+ },
3003
+ {
3004
+ "epoch": 6.149532710280374,
3005
+ "grad_norm": 0.2418321669101715,
3006
+ "learning_rate": 3.888925582549006e-05,
3007
+ "loss": 0.1117,
3008
+ "num_input_tokens_seen": 27311512,
3009
+ "step": 375
3010
+ },
3011
+ {
3012
+ "epoch": 6.166147455867082,
3013
+ "grad_norm": 0.26851242780685425,
3014
+ "learning_rate": 3.883478873108361e-05,
3015
+ "loss": 0.1226,
3016
+ "num_input_tokens_seen": 27387400,
3017
+ "step": 376
3018
+ },
3019
+ {
3020
+ "epoch": 6.18276220145379,
3021
+ "grad_norm": 0.280127614736557,
3022
+ "learning_rate": 3.878022681458426e-05,
3023
+ "loss": 0.1295,
3024
+ "num_input_tokens_seen": 27461280,
3025
+ "step": 377
3026
+ },
3027
+ {
3028
+ "epoch": 6.1993769470404985,
3029
+ "grad_norm": 0.255588173866272,
3030
+ "learning_rate": 3.87255704499533e-05,
3031
+ "loss": 0.1197,
3032
+ "num_input_tokens_seen": 27556400,
3033
+ "step": 378
3034
+ },
3035
+ {
3036
+ "epoch": 6.215991692627207,
3037
+ "grad_norm": 0.2888232171535492,
3038
+ "learning_rate": 3.8670820011799315e-05,
3039
+ "loss": 0.1417,
3040
+ "num_input_tokens_seen": 27613664,
3041
+ "step": 379
3042
+ },
3043
+ {
3044
+ "epoch": 6.232606438213915,
3045
+ "grad_norm": 0.28014957904815674,
3046
+ "learning_rate": 3.861597587537568e-05,
3047
+ "loss": 0.1565,
3048
+ "num_input_tokens_seen": 27681024,
3049
+ "step": 380
3050
+ },
3051
+ {
3052
+ "epoch": 6.249221183800623,
3053
+ "grad_norm": 0.2911868095397949,
3054
+ "learning_rate": 3.856103841657797e-05,
3055
+ "loss": 0.1293,
3056
+ "num_input_tokens_seen": 27759536,
3057
+ "step": 381
3058
+ },
3059
+ {
3060
+ "epoch": 6.265835929387332,
3061
+ "grad_norm": 0.19534167647361755,
3062
+ "learning_rate": 3.850600801194138e-05,
3063
+ "loss": 0.0967,
3064
+ "num_input_tokens_seen": 27857288,
3065
+ "step": 382
3066
+ },
3067
+ {
3068
+ "epoch": 6.282450674974039,
3069
+ "grad_norm": 0.2868412733078003,
3070
+ "learning_rate": 3.8450885038638127e-05,
3071
+ "loss": 0.1395,
3072
+ "num_input_tokens_seen": 27940528,
3073
+ "step": 383
3074
+ },
3075
+ {
3076
+ "epoch": 6.299065420560748,
3077
+ "grad_norm": 0.3421030342578888,
3078
+ "learning_rate": 3.8395669874474915e-05,
3079
+ "loss": 0.1402,
3080
+ "num_input_tokens_seen": 28033824,
3081
+ "step": 384
3082
+ },
3083
+ {
3084
+ "epoch": 6.315680166147456,
3085
+ "grad_norm": 0.2819141447544098,
3086
+ "learning_rate": 3.834036289789029e-05,
3087
+ "loss": 0.1137,
3088
+ "num_input_tokens_seen": 28096192,
3089
+ "step": 385
3090
+ },
3091
+ {
3092
+ "epoch": 6.332294911734164,
3093
+ "grad_norm": 0.22595854103565216,
3094
+ "learning_rate": 3.828496448795207e-05,
3095
+ "loss": 0.1143,
3096
+ "num_input_tokens_seen": 28181256,
3097
+ "step": 386
3098
+ },
3099
+ {
3100
+ "epoch": 6.348909657320872,
3101
+ "grad_norm": 0.2450927495956421,
3102
+ "learning_rate": 3.822947502435477e-05,
3103
+ "loss": 0.121,
3104
+ "num_input_tokens_seen": 28245480,
3105
+ "step": 387
3106
+ },
3107
+ {
3108
+ "epoch": 6.365524402907581,
3109
+ "grad_norm": 0.2552040219306946,
3110
+ "learning_rate": 3.8173894887416945e-05,
3111
+ "loss": 0.1183,
3112
+ "num_input_tokens_seen": 28307200,
3113
+ "step": 388
3114
+ },
3115
+ {
3116
+ "epoch": 6.382139148494288,
3117
+ "grad_norm": 0.3974437415599823,
3118
+ "learning_rate": 3.811822445807863e-05,
3119
+ "loss": 0.1423,
3120
+ "num_input_tokens_seen": 28384640,
3121
+ "step": 389
3122
+ },
3123
+ {
3124
+ "epoch": 6.398753894080997,
3125
+ "grad_norm": 0.2750483453273773,
3126
+ "learning_rate": 3.8062464117898724e-05,
3127
+ "loss": 0.1068,
3128
+ "num_input_tokens_seen": 28447992,
3129
+ "step": 390
3130
+ },
3131
+ {
3132
+ "epoch": 6.415368639667705,
3133
+ "grad_norm": 0.3021928071975708,
3134
+ "learning_rate": 3.800661424905235e-05,
3135
+ "loss": 0.1233,
3136
+ "num_input_tokens_seen": 28513856,
3137
+ "step": 391
3138
+ },
3139
+ {
3140
+ "epoch": 6.431983385254413,
3141
+ "grad_norm": 0.4087376296520233,
3142
+ "learning_rate": 3.795067523432826e-05,
3143
+ "loss": 0.1354,
3144
+ "num_input_tokens_seen": 28596584,
3145
+ "step": 392
3146
+ },
3147
+ {
3148
+ "epoch": 6.4485981308411215,
3149
+ "grad_norm": 0.32668039202690125,
3150
+ "learning_rate": 3.789464745712619e-05,
3151
+ "loss": 0.1483,
3152
+ "num_input_tokens_seen": 28664560,
3153
+ "step": 393
3154
+ },
3155
+ {
3156
+ "epoch": 6.46521287642783,
3157
+ "grad_norm": 0.24187126755714417,
3158
+ "learning_rate": 3.7838531301454254e-05,
3159
+ "loss": 0.1225,
3160
+ "num_input_tokens_seen": 28739512,
3161
+ "step": 394
3162
+ },
3163
+ {
3164
+ "epoch": 6.481827622014538,
3165
+ "grad_norm": 0.2861412465572357,
3166
+ "learning_rate": 3.77823271519263e-05,
3167
+ "loss": 0.1318,
3168
+ "num_input_tokens_seen": 28831848,
3169
+ "step": 395
3170
+ },
3171
+ {
3172
+ "epoch": 6.498442367601246,
3173
+ "grad_norm": 0.30458521842956543,
3174
+ "learning_rate": 3.7726035393759285e-05,
3175
+ "loss": 0.1337,
3176
+ "num_input_tokens_seen": 28898408,
3177
+ "step": 396
3178
+ },
3179
+ {
3180
+ "epoch": 6.515057113187955,
3181
+ "grad_norm": 0.2644833028316498,
3182
+ "learning_rate": 3.76696564127706e-05,
3183
+ "loss": 0.1283,
3184
+ "num_input_tokens_seen": 28960224,
3185
+ "step": 397
3186
+ },
3187
+ {
3188
+ "epoch": 6.531671858774662,
3189
+ "grad_norm": 0.3309299349784851,
3190
+ "learning_rate": 3.761319059537548e-05,
3191
+ "loss": 0.1548,
3192
+ "num_input_tokens_seen": 29020568,
3193
+ "step": 398
3194
+ },
3195
+ {
3196
+ "epoch": 6.548286604361371,
3197
+ "grad_norm": 0.2788560092449188,
3198
+ "learning_rate": 3.755663832858432e-05,
3199
+ "loss": 0.1154,
3200
+ "num_input_tokens_seen": 29095448,
3201
+ "step": 399
3202
+ },
3203
+ {
3204
+ "epoch": 6.564901349948079,
3205
+ "grad_norm": 0.26450732350349426,
3206
+ "learning_rate": 3.7500000000000003e-05,
3207
+ "loss": 0.1276,
3208
+ "num_input_tokens_seen": 29186600,
3209
+ "step": 400
3210
+ },
3211
+ {
3212
+ "epoch": 6.581516095534787,
3213
+ "grad_norm": 0.2650800049304962,
3214
+ "learning_rate": 3.744327599781531e-05,
3215
+ "loss": 0.2184,
3216
+ "num_input_tokens_seen": 29258552,
3217
+ "step": 401
3218
+ },
3219
+ {
3220
+ "epoch": 6.598130841121495,
3221
+ "grad_norm": 0.2814129590988159,
3222
+ "learning_rate": 3.7386466710810194e-05,
3223
+ "loss": 0.1508,
3224
+ "num_input_tokens_seen": 29344848,
3225
+ "step": 402
3226
+ },
3227
+ {
3228
+ "epoch": 6.614745586708204,
3229
+ "grad_norm": 0.2940891981124878,
3230
+ "learning_rate": 3.7329572528349146e-05,
3231
+ "loss": 0.2035,
3232
+ "num_input_tokens_seen": 29410184,
3233
+ "step": 403
3234
+ },
3235
+ {
3236
+ "epoch": 6.6313603322949115,
3237
+ "grad_norm": 0.25937849283218384,
3238
+ "learning_rate": 3.727259384037852e-05,
3239
+ "loss": 0.1308,
3240
+ "num_input_tokens_seen": 29484928,
3241
+ "step": 404
3242
+ },
3243
+ {
3244
+ "epoch": 6.64797507788162,
3245
+ "grad_norm": 0.26631370186805725,
3246
+ "learning_rate": 3.721553103742388e-05,
3247
+ "loss": 0.1349,
3248
+ "num_input_tokens_seen": 29566432,
3249
+ "step": 405
3250
+ },
3251
+ {
3252
+ "epoch": 6.6645898234683285,
3253
+ "grad_norm": 0.2721930146217346,
3254
+ "learning_rate": 3.715838451058726e-05,
3255
+ "loss": 0.1246,
3256
+ "num_input_tokens_seen": 29634032,
3257
+ "step": 406
3258
+ },
3259
+ {
3260
+ "epoch": 6.681204569055036,
3261
+ "grad_norm": 0.3131358325481415,
3262
+ "learning_rate": 3.7101154651544584e-05,
3263
+ "loss": 0.142,
3264
+ "num_input_tokens_seen": 29681424,
3265
+ "step": 407
3266
+ },
3267
+ {
3268
+ "epoch": 6.697819314641745,
3269
+ "grad_norm": 0.266184002161026,
3270
+ "learning_rate": 3.704384185254288e-05,
3271
+ "loss": 0.118,
3272
+ "num_input_tokens_seen": 29762208,
3273
+ "step": 408
3274
+ },
3275
+ {
3276
+ "epoch": 6.714434060228453,
3277
+ "grad_norm": 0.29790574312210083,
3278
+ "learning_rate": 3.6986446506397666e-05,
3279
+ "loss": 0.1286,
3280
+ "num_input_tokens_seen": 29816280,
3281
+ "step": 409
3282
+ },
3283
+ {
3284
+ "epoch": 6.731048805815161,
3285
+ "grad_norm": 0.2710191309452057,
3286
+ "learning_rate": 3.692896900649021e-05,
3287
+ "loss": 0.1211,
3288
+ "num_input_tokens_seen": 29893040,
3289
+ "step": 410
3290
+ },
3291
+ {
3292
+ "epoch": 6.747663551401869,
3293
+ "grad_norm": 0.46238377690315247,
3294
+ "learning_rate": 3.6871409746764865e-05,
3295
+ "loss": 0.1324,
3296
+ "num_input_tokens_seen": 29971688,
3297
+ "step": 411
3298
+ },
3299
+ {
3300
+ "epoch": 6.764278296988578,
3301
+ "grad_norm": 0.24678252637386322,
3302
+ "learning_rate": 3.681376912172636e-05,
3303
+ "loss": 0.1175,
3304
+ "num_input_tokens_seen": 30051784,
3305
+ "step": 412
3306
+ },
3307
+ {
3308
+ "epoch": 6.780893042575285,
3309
+ "grad_norm": 0.2699359655380249,
3310
+ "learning_rate": 3.675604752643706e-05,
3311
+ "loss": 0.1279,
3312
+ "num_input_tokens_seen": 30146048,
3313
+ "step": 413
3314
+ },
3315
+ {
3316
+ "epoch": 6.797507788161994,
3317
+ "grad_norm": 0.3923390805721283,
3318
+ "learning_rate": 3.6698245356514335e-05,
3319
+ "loss": 0.1204,
3320
+ "num_input_tokens_seen": 30221296,
3321
+ "step": 414
3322
+ },
3323
+ {
3324
+ "epoch": 6.814122533748702,
3325
+ "grad_norm": 0.26230940222740173,
3326
+ "learning_rate": 3.6640363008127784e-05,
3327
+ "loss": 0.1311,
3328
+ "num_input_tokens_seen": 30287664,
3329
+ "step": 415
3330
+ },
3331
+ {
3332
+ "epoch": 6.83073727933541,
3333
+ "grad_norm": 0.31548890471458435,
3334
+ "learning_rate": 3.6582400877996546e-05,
3335
+ "loss": 0.1417,
3336
+ "num_input_tokens_seen": 30352816,
3337
+ "step": 416
3338
+ },
3339
+ {
3340
+ "epoch": 6.8473520249221185,
3341
+ "grad_norm": 0.25795799493789673,
3342
+ "learning_rate": 3.652435936338656e-05,
3343
+ "loss": 0.1316,
3344
+ "num_input_tokens_seen": 30439688,
3345
+ "step": 417
3346
+ },
3347
+ {
3348
+ "epoch": 6.863966770508826,
3349
+ "grad_norm": 0.3696773648262024,
3350
+ "learning_rate": 3.646623886210788e-05,
3351
+ "loss": 0.1422,
3352
+ "num_input_tokens_seen": 30506856,
3353
+ "step": 418
3354
+ },
3355
+ {
3356
+ "epoch": 6.880581516095535,
3357
+ "grad_norm": 0.28359490633010864,
3358
+ "learning_rate": 3.64080397725119e-05,
3359
+ "loss": 0.1148,
3360
+ "num_input_tokens_seen": 30565848,
3361
+ "step": 419
3362
+ },
3363
+ {
3364
+ "epoch": 6.897196261682243,
3365
+ "grad_norm": 0.3696165978908539,
3366
+ "learning_rate": 3.634976249348867e-05,
3367
+ "loss": 0.1472,
3368
+ "num_input_tokens_seen": 30633944,
3369
+ "step": 420
3370
+ }
3371
+ ],
3372
+ "logging_steps": 1.0,
3373
+ "max_steps": 1200,
3374
+ "num_input_tokens_seen": 30633944,
3375
+ "num_train_epochs": 20,
3376
+ "save_steps": 60,
3377
+ "stateful_callbacks": {
3378
+ "TrainerControl": {
3379
+ "args": {
3380
+ "should_epoch_stop": false,
3381
+ "should_evaluate": false,
3382
+ "should_log": false,
3383
+ "should_save": true,
3384
+ "should_training_stop": false
3385
+ },
3386
+ "attributes": {}
3387
+ }
3388
+ },
3389
+ "total_flos": 2.5843307538945147e+18,
3390
+ "train_batch_size": 1,
3391
+ "trial_name": null,
3392
+ "trial_params": null
3393
+ }
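The `trainer_state.json` written into each checkpoint is plain JSON, so the training log above can be inspected without any Hugging Face tooling. A minimal sketch, assuming a local copy of one checkpoint directory and the standard `transformers` Trainer state layout (the path is illustrative):

```python
import json

# Path is illustrative; any checkpoint-*/trainer_state.json from this repo works.
with open("checkpoint-420/trainer_state.json") as f:
    state = json.load(f)

# Each log entry carries epoch, grad_norm, learning_rate, loss,
# num_input_tokens_seen and step, as in the excerpt above.
losses = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(f"logged steps: {len(losses)} of max_steps={state['max_steps']}")
step, loss = losses[-1]
print(f"step {step}: loss={loss}, tokens seen={state['num_input_tokens_seen']}")
```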
checkpoint-420/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-480/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-Coder-14B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "k_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
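This is a standard PEFT LoRA adapter config (r=16, alpha=32, dropout 0.1 on all attention and MLP projections). A minimal loading sketch, assuming `transformers` and `peft` are installed and the checkpoint directory is local; the path and dtype are illustrative, not part of the repo:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# The base model comes from base_model_name_or_path in adapter_config.json.
base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-Coder-14B-Instruct",
    torch_dtype=torch.bfloat16,  # illustrative choice
    device_map="auto",
)
# PeftModel reads adapter_config.json and the adapter weights from the folder.
model = PeftModel.from_pretrained(base, "checkpoint-480")
tokenizer = AutoTokenizer.from_pretrained("checkpoint-480")
```

Since `inference_mode` is `true`, the adapter loads with its weights frozen; calling `model.merge_and_unload()` would fold the LoRA deltas into the base weights for export.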
checkpoint-480/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
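These IDs mirror the stock Qwen2.5 special-token vocabulary rather than new tokens added by the fine-tune. A quick sanity check after loading the tokenizer (path illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-480")
# The IDs should match the added_tokens.json mapping above.
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_tokens_to_ids("</tool_call>") == 151658
```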
checkpoint-480/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-480/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-540/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-540/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 17500,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
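The `chat_template` above is the stock Qwen2.5 ChatML template with tool-calling support, and `model_max_length` is set to 17500 for this run. A minimal rendering sketch using the standard `apply_chat_template` API (the checkpoint path is illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-540")
messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "Reverse a string in Python."},
]
# Renders <|im_start|>...<|im_end|> turns plus a trailing assistant header.
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```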
checkpoint-540/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-60/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen2.5-Coder-14B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.12.0
checkpoint-60/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-Coder-14B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "k_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
checkpoint-60/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
checkpoint-60/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-60/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e481e2c5f00842111d2e7b8a7a74708af234360fb1162f5d56df2a2fe7e117b8
+ size 14512
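The `.pth` files in this commit are tracked through Git LFS, so what sits in the repo is just this three-line pointer; the `oid` is the SHA-256 of the real blob. After `git lfs pull`, the payload can be checked against the pointer with stdlib tools (the path is illustrative):

```python
import hashlib

# Hash the fetched LFS object and compare it to the pointer's oid above.
with open("checkpoint-60/rng_state_1.pth", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

print(digest == "e481e2c5f00842111d2e7b8a7a74708af234360fb1162f5d56df2a2fe7e117b8")
```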