MictoNode committed
Commit bd4fad7 · verified · Parent: ae4d9ac

End of training

README.md CHANGED
@@ -41,7 +41,7 @@ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing
  - TRL: 0.15.2
  - Transformers: 4.51.3
  - Pytorch: 2.6.0
- - Datasets: 3.5.1
+ - Datasets: 3.6.0
  - Tokenizers: 0.21.1
 
  ## Citations
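
As a quick sanity check, the framework versions listed in the updated model card can be compared against a local environment. The sketch below is illustrative only and is not part of this commit; it assumes the five packages named in the hunk above are installed.

```python
# Illustrative sketch: compare installed package versions against the
# versions listed in the updated model card (not part of this commit).
import importlib.metadata as md

CARD_VERSIONS = {
    "trl": "0.15.2",
    "transformers": "4.51.3",
    "torch": "2.6.0",
    "datasets": "3.6.0",   # bumped from 3.5.1 in this commit
    "tokenizers": "0.21.1",
}

for package, listed in CARD_VERSIONS.items():
    installed = md.version(package)
    status = "ok" if installed == listed else "mismatch"
    print(f"{package}: installed {installed}, card lists {listed} ({status})")
```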
adapter_config.json CHANGED
@@ -24,13 +24,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
- "v_proj",
- "o_proj",
- "q_proj",
  "down_proj",
  "up_proj",
+ "k_proj",
+ "v_proj",
+ "q_proj",
  "gate_proj",
- "k_proj"
+ "o_proj"
  ],
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1b06d57c60e1f7f54ad1258d16af6eebc53214324b7f015d2565aeb42838181b
+ oid sha256:99c4ea0e40aef54a7b118b6a6c47f53c3a8df09d82a539d466dbf8dd1d7c4b6c
  size 73911112
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "total_flos": 0.0,
- "train_loss": 7.424532896038727e-07,
- "train_runtime": 344.1703,
- "train_samples": 14,
- "train_samples_per_second": 0.93,
- "train_steps_per_second": 0.058
+ "train_loss": 1.0554939626672422e-06,
+ "train_runtime": 1935.7748,
+ "train_samples": 20,
+ "train_samples_per_second": 0.207,
+ "train_steps_per_second": 0.013
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "total_flos": 0.0,
- "train_loss": 7.424532896038727e-07,
- "train_runtime": 344.1703,
- "train_samples": 14,
- "train_samples_per_second": 0.93,
- "train_steps_per_second": 0.058
+ "train_loss": 1.0554939626672422e-06,
+ "train_runtime": 1935.7748,
+ "train_samples": 20,
+ "train_samples_per_second": 0.207,
+ "train_steps_per_second": 0.013
  }
trainer_state.json CHANGED
@@ -4,215 +4,265 @@
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
- "global_step": 20,
+ "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "completion_length": 309.25,
- "epoch": 0.5714285714285714,
- "grad_norm": 0.5461550951004028,
- "kl": 0.0009220477732014842,
+ "completion_length": 353.03125,
+ "epoch": 0.4,
+ "grad_norm": 0.46145281195640564,
+ "kl": 0.0008386401459574699,
  "learning_rate": 5e-07,
  "loss": 0.0,
- "reward": 3.0049657989293337,
- "reward_std": 3.3861664421856403,
- "rewards/concensus_correctness_reward_func": 1.4184375147451647,
- "rewards/consensus_reward_func": 0.375,
+ "reward": 1.3044774606823921,
+ "reward_std": 1.3216868042945862,
+ "rewards/concensus_correctness_reward_func": 0.18774999678134918,
+ "rewards/consensus_reward_func": 0.4375,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.4375,
- "rewards/question_recreation_reward_func": 0.5458408743143082,
+ "rewards/final_correctness_reward_func": 0.1875,
+ "rewards/question_recreation_reward_func": 0.4195087403059006,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.22818750143051147,
+ "rewards/xmlcount_reward_func": 0.07221875246614218,
  "step": 2
  },
  {
- "completion_length": 309.3333333333333,
- "epoch": 1.0,
- "grad_norm": 0.4232158064842224,
- "kl": 0.0007590926543343812,
- "learning_rate": 4.864543104251586e-07,
+ "completion_length": 352.125,
+ "epoch": 0.8,
+ "grad_norm": 0.5013545155525208,
+ "kl": 0.0008077259626588784,
+ "learning_rate": 4.91481456572267e-07,
  "loss": 0.0,
- "reward": 2.918933798869451,
- "reward_std": 2.6660856008529663,
- "rewards/concensus_correctness_reward_func": 0.9195000032583872,
- "rewards/consensus_reward_func": 0.25,
+ "reward": 1.8831986337900162,
+ "reward_std": 1.584916140884161,
+ "rewards/concensus_correctness_reward_func": 0.3190000057220459,
+ "rewards/consensus_reward_func": 0.375,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.9166666666666666,
- "rewards/question_recreation_reward_func": 0.5955170492331187,
+ "rewards/final_correctness_reward_func": 0.4375,
+ "rewards/question_recreation_reward_func": 0.5861985795199871,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.23724999030431113,
+ "rewards/xmlcount_reward_func": 0.165500002913177,
  "step": 4
  },
  {
- "completion_length": 332.90625,
- "epoch": 1.5714285714285714,
- "grad_norm": 0.49632948637008667,
- "kl": 0.0008320739943883382,
- "learning_rate": 4.472851273490984e-07,
+ "completion_length": 275.28125,
+ "epoch": 1.2,
+ "grad_norm": 0.5912909507751465,
+ "kl": 0.0009123186537181027,
+ "learning_rate": 4.6650635094610966e-07,
  "loss": 0.0,
- "reward": 3.5371958827599883,
- "reward_std": 3.318536162376404,
- "rewards/concensus_correctness_reward_func": 1.9525625724927522,
- "rewards/consensus_reward_func": 0.375,
+ "reward": 1.5717561542987823,
+ "reward_std": 1.3975044712424278,
+ "rewards/concensus_correctness_reward_func": 0.0546875,
+ "rewards/consensus_reward_func": 0.4375,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.5,
- "rewards/question_recreation_reward_func": 0.5218521282076836,
- "rewards/soft_format_reward_func": 0.0,
- "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.18778124800883234,
+ "rewards/final_correctness_reward_func": 0.25,
+ "rewards/question_recreation_reward_func": 0.5156624000519514,
+ "rewards/soft_format_reward_func": 0.015625,
+ "rewards/strict_format_reward_func": 0.015625,
+ "rewards/xmlcount_reward_func": 0.2826562523841858,
  "step": 6
  },
  {
- "completion_length": 281.6666666666667,
- "epoch": 2.0,
- "grad_norm": 0.43755120038986206,
- "kl": 0.0006982047537652155,
- "learning_rate": 3.867370395306068e-07,
+ "completion_length": 289.4375,
+ "epoch": 1.6,
+ "grad_norm": 0.7356537580490112,
+ "kl": 0.0009301297905039974,
+ "learning_rate": 4.2677669529663686e-07,
  "loss": 0.0,
- "reward": 9.014580527941385,
- "reward_std": 6.108865926663081,
- "rewards/concensus_correctness_reward_func": 6.691999999806285,
- "rewards/consensus_reward_func": 0.75,
+ "reward": 2.088467765599489,
+ "reward_std": 2.1497373655438423,
+ "rewards/concensus_correctness_reward_func": 0.6559999994933605,
+ "rewards/consensus_reward_func": 0.1875,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.6666666666666666,
- "rewards/question_recreation_reward_func": 0.4938725878794988,
+ "rewards/final_correctness_reward_func": 0.375,
+ "rewards/question_recreation_reward_func": 0.4805927164852619,
  "rewards/soft_format_reward_func": 0.0,
- "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.41204166164000827,
+ "rewards/strict_format_reward_func": 0.015625,
+ "rewards/xmlcount_reward_func": 0.37375001423060894,
  "step": 8
  },
  {
- "completion_length": 303.46875,
- "epoch": 2.571428571428571,
- "grad_norm": 0.6612632870674133,
- "kl": 0.0009638653064030223,
- "learning_rate": 3.1137137178519977e-07,
+ "completion_length": 339.125,
+ "epoch": 2.0,
+ "grad_norm": 5.122199535369873,
+ "kl": 0.0009551418406772427,
+ "learning_rate": 3.75e-07,
  "loss": 0.0,
- "reward": 2.8641053661704063,
- "reward_std": 3.624991323798895,
- "rewards/concensus_correctness_reward_func": 1.875,
- "rewards/consensus_reward_func": 0.1875,
+ "reward": 1.356352262198925,
+ "reward_std": 0.8040661737322807,
+ "rewards/concensus_correctness_reward_func": 0.031312499195337296,
+ "rewards/consensus_reward_func": 0.125,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.125,
- "rewards/question_recreation_reward_func": 0.4982929490506649,
+ "rewards/final_correctness_reward_func": 0.5,
+ "rewards/question_recreation_reward_func": 0.4886647742241621,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.17831249348819256,
+ "rewards/xmlcount_reward_func": 0.21137499436736107,
  "step": 10
  },
  {
- "completion_length": 310.625,
- "epoch": 3.0,
- "grad_norm": 0.4002675414085388,
- "kl": 0.0008224714838434011,
- "learning_rate": 2.2935516363191693e-07,
+ "completion_length": 331.59375,
+ "epoch": 2.4,
+ "grad_norm": 0.6923710107803345,
+ "kl": 0.0008254477070295252,
+ "learning_rate": 3.147047612756302e-07,
  "loss": 0.0,
- "reward": 2.8802956144014993,
- "reward_std": 2.705232401688894,
- "rewards/concensus_correctness_reward_func": 0.9346666640291611,
- "rewards/consensus_reward_func": 0.3333333333333333,
+ "reward": 2.2641742639243603,
+ "reward_std": 2.478426046669483,
+ "rewards/concensus_correctness_reward_func": 0.7421875,
+ "rewards/consensus_reward_func": 0.25,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.9166666666666666,
- "rewards/question_recreation_reward_func": 0.41537899772326153,
+ "rewards/final_correctness_reward_func": 0.5625,
+ "rewards/question_recreation_reward_func": 0.4648617208003998,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.28025000045696896,
+ "rewards/xmlcount_reward_func": 0.2446250095963478,
  "step": 12
  },
  {
- "completion_length": 308.96875,
- "epoch": 3.571428571428571,
- "grad_norm": 0.5559430718421936,
- "kl": 0.0008217594295274466,
- "learning_rate": 1.4957614383675767e-07,
+ "completion_length": 269.21875,
+ "epoch": 2.8,
+ "grad_norm": 0.93145751953125,
+ "kl": 0.0008568289995309897,
+ "learning_rate": 2.5e-07,
  "loss": 0.0,
- "reward": 5.252937890589237,
- "reward_std": 4.048280119895935,
- "rewards/concensus_correctness_reward_func": 3.2305000035557896,
- "rewards/consensus_reward_func": 0.6875,
+ "reward": 1.4057147353887558,
+ "reward_std": 0.8676463589072227,
+ "rewards/concensus_correctness_reward_func": 0.10481250286102295,
+ "rewards/consensus_reward_func": 0.125,
  "rewards/cumulative_reward_2": 0.0,
  "rewards/final_correctness_reward_func": 0.4375,
- "rewards/question_recreation_reward_func": 0.6550942175090313,
+ "rewards/question_recreation_reward_func": 0.45208977349102497,
  "rewards/soft_format_reward_func": 0.0,
- "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.2423437489196658,
+ "rewards/strict_format_reward_func": 0.015625,
+ "rewards/xmlcount_reward_func": 0.2706875056028366,
  "step": 14
  },
  {
- "completion_length": 341.7916666666667,
- "epoch": 4.0,
- "grad_norm": 0.340472012758255,
- "kl": 0.0008785632477762798,
- "learning_rate": 8.067960709356478e-08,
+ "completion_length": 283.96875,
+ "epoch": 3.2,
+ "grad_norm": 29.920019149780273,
+ "kl": 0.003008850064361468,
+ "learning_rate": 1.8529523872436977e-07,
  "loss": 0.0,
- "reward": 3.29603590319554,
- "reward_std": 4.0770609776179,
- "rewards/concensus_correctness_reward_func": 1.7687500171984236,
- "rewards/consensus_reward_func": 0.3333333333333333,
+ "reward": 1.2449401542544365,
+ "reward_std": 0.9733890295028687,
+ "rewards/concensus_correctness_reward_func": 0.1639375016093254,
+ "rewards/consensus_reward_func": 0.1875,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.6666666666666666,
- "rewards/question_recreation_reward_func": 0.38282742351293564,
+ "rewards/final_correctness_reward_func": 0.0,
+ "rewards/question_recreation_reward_func": 0.47715888544917107,
  "rewards/soft_format_reward_func": 0.0,
- "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.14445833986004195,
+ "rewards/strict_format_reward_func": 0.03125,
+ "rewards/xmlcount_reward_func": 0.3850937522947788,
  "step": 16
  },
  {
- "completion_length": 315.71875,
- "epoch": 4.571428571428571,
- "grad_norm": 0.752004086971283,
- "kl": 0.0008350697899004444,
- "learning_rate": 3.013156219837776e-08,
+ "completion_length": 274.03125,
+ "epoch": 3.6,
+ "grad_norm": 0.6539471745491028,
+ "kl": 0.0008241815667133778,
+ "learning_rate": 1.2500000000000005e-07,
  "loss": 0.0,
- "reward": 2.703938379883766,
- "reward_std": 2.4889378771185875,
- "rewards/concensus_correctness_reward_func": 0.9213124964153394,
- "rewards/consensus_reward_func": 0.5,
+ "reward": 1.5772868506610394,
+ "reward_std": 1.0867214687168598,
+ "rewards/concensus_correctness_reward_func": 0.109375,
+ "rewards/consensus_reward_func": 0.125,
  "rewards/cumulative_reward_2": 0.0,
  "rewards/final_correctness_reward_func": 0.5625,
- "rewards/question_recreation_reward_func": 0.5067508853971958,
+ "rewards/question_recreation_reward_func": 0.3505368586629629,
  "rewards/soft_format_reward_func": 0.0,
- "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.21337500168010592,
+ "rewards/strict_format_reward_func": 0.015625,
+ "rewards/xmlcount_reward_func": 0.41424999153241515,
  "step": 18
  },
  {
- "completion_length": 289.0833333333333,
- "epoch": 5.0,
- "grad_norm": 0.3535054624080658,
- "kl": 0.0008812252393302819,
- "learning_rate": 3.4096741493194193e-09,
+ "completion_length": 296.59375,
+ "epoch": 4.0,
+ "grad_norm": 0.5222050547599792,
+ "kl": 0.0008954378354246728,
+ "learning_rate": 7.322330470336313e-08,
  "loss": 0.0,
- "reward": 1.0508066192269325,
- "reward_std": 0.8113361944754919,
- "rewards/concensus_correctness_reward_func": 0.019499999471008778,
- "rewards/consensus_reward_func": 0.16666666666666666,
+ "reward": 1.7973168343305588,
+ "reward_std": 1.314620640128851,
+ "rewards/concensus_correctness_reward_func": 0.07456249743700027,
+ "rewards/consensus_reward_func": 0.1875,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.16666666666666666,
- "rewards/question_recreation_reward_func": 0.3977649410565694,
+ "rewards/final_correctness_reward_func": 0.625,
+ "rewards/question_recreation_reward_func": 0.6552544049918652,
  "rewards/soft_format_reward_func": 0.0,
- "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.3002083438138167,
+ "rewards/strict_format_reward_func": 0.015625,
+ "rewards/xmlcount_reward_func": 0.2393750031478703,
  "step": 20
  },
  {
+ "completion_length": 270.375,
+ "epoch": 4.4,
+ "grad_norm": 0.41121411323547363,
+ "kl": 0.0008414066614932381,
+ "learning_rate": 3.349364905389032e-08,
+ "loss": 0.0,
+ "reward": 1.6854848377406597,
+ "reward_std": 0.9369991458952427,
+ "rewards/concensus_correctness_reward_func": 0.015625,
+ "rewards/consensus_reward_func": 0.25,
+ "rewards/cumulative_reward_2": 0.0,
+ "rewards/final_correctness_reward_func": 0.625,
+ "rewards/question_recreation_reward_func": 0.4195785839110613,
+ "rewards/soft_format_reward_func": 0.0,
+ "rewards/strict_format_reward_func": 0.0,
+ "rewards/xmlcount_reward_func": 0.3752812519669533,
+ "step": 22
+ },
+ {
+ "completion_length": 311.40625,
+ "epoch": 4.8,
+ "grad_norm": 0.4889400601387024,
+ "kl": 0.0010594725827104412,
+ "learning_rate": 8.518543427732949e-09,
+ "loss": 0.0,
+ "reward": 1.1538150012493134,
+ "reward_std": 0.8227499965578318,
+ "rewards/concensus_correctness_reward_func": 0.03099999949336052,
+ "rewards/consensus_reward_func": 0.375,
+ "rewards/cumulative_reward_2": 0.0,
+ "rewards/final_correctness_reward_func": 0.0625,
+ "rewards/question_recreation_reward_func": 0.4522524643689394,
+ "rewards/soft_format_reward_func": 0.0,
+ "rewards/strict_format_reward_func": 0.0,
+ "rewards/xmlcount_reward_func": 0.23306249594315886,
+ "step": 24
+ },
+ {
+ "completion_length": 318.4375,
  "epoch": 5.0,
- "step": 20,
+ "kl": 0.0008752793655730784,
+ "reward": 3.1140042692422867,
+ "reward_std": 3.7438226342201233,
+ "rewards/concensus_correctness_reward_func": 1.46875,
+ "rewards/consensus_reward_func": 0.125,
+ "rewards/cumulative_reward_2": 0.0,
+ "rewards/final_correctness_reward_func": 0.75,
+ "rewards/question_recreation_reward_func": 0.4615042470395565,
+ "rewards/soft_format_reward_func": 0.0,
+ "rewards/strict_format_reward_func": 0.0,
+ "rewards/xmlcount_reward_func": 0.3087499998509884,
+ "step": 25,
  "total_flos": 0.0,
- "train_loss": 7.424532896038727e-07,
- "train_runtime": 344.1703,
- "train_samples_per_second": 0.93,
- "train_steps_per_second": 0.058
+ "train_loss": 1.0554939626672422e-06,
+ "train_runtime": 1935.7748,
+ "train_samples_per_second": 0.207,
+ "train_steps_per_second": 0.013
  }
  ],
  "logging_steps": 2,
- "max_steps": 20,
+ "max_steps": 25,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 7,
+ "num_train_epochs": 5,
  "save_steps": 25,
  "stateful_callbacks": {
  "TrainerControl": {
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c91090e7d9ee3274d13ccbc81952e2cccb8091ff6c3aaa8b215a1ccce4c266b
+ oid sha256:c45d95aa6abf8391b5945b8c407039aaf2b430adfcaa8a3c388622de5efa1b67
  size 6008