maplekeng committed on
Commit f73739d · verified · 1 Parent(s): ed5b77b

End of training

all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "total_flos": 0.0,
- "train_loss": 2.8505885711638258e-05,
- "train_runtime": 6580.358,
- "train_samples": 6,
- "train_samples_per_second": 0.006,
- "train_steps_per_second": 0.002
+ "train_loss": 6.198883056640625e-07,
+ "train_runtime": 1226.5225,
+ "train_samples": 5,
+ "train_samples_per_second": 0.033,
+ "train_steps_per_second": 0.008
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cf61dd9734743863a69b2b540d36bd9b51536bf59f1333e13c0e52dd6e45c6bd
+ oid sha256:b17adb89ed44f71d4a01f77b6fd841a58ed9f3e4a0f1dc8880e91944f7b3b71d
  size 1976163472
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "total_flos": 0.0,
- "train_loss": 2.8505885711638258e-05,
- "train_runtime": 6580.358,
- "train_samples": 6,
- "train_samples_per_second": 0.006,
- "train_steps_per_second": 0.002
+ "train_loss": 6.198883056640625e-07,
+ "train_runtime": 1226.5225,
+ "train_samples": 5,
+ "train_samples_per_second": 0.033,
+ "train_steps_per_second": 0.008
  }
trainer_state.json CHANGED
@@ -2,7 +2,7 @@
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 3.3333333333333335,
+ "epoch": 3.4,
  "eval_steps": 500,
  "global_step": 10,
  "is_hyper_param_search": false,
@@ -10,114 +10,114 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "completion_length": 17.25,
- "epoch": 0.6666666666666666,
- "grad_norm": 48.05388641357422,
+ "completion_length": 176.875,
+ "epoch": 0.8,
+ "grad_norm": 7.9261298179626465,
  "kl": 0.0,
  "learning_rate": 5e-07,
- "loss": -0.0,
- "reward": 0.6133153163827956,
- "reward_std": 0.7537651844322681,
+ "loss": 0.0,
+ "reward": 0.06529645575210452,
+ "reward_std": 0.031127130147069693,
  "rewards/concensus_correctness_reward_func": 0.0,
  "rewards/consensus_reward_func": 0.0,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.5,
- "rewards/question_recreation_reward_func": 0.0705653433687985,
+ "rewards/final_correctness_reward_func": 0.0,
+ "rewards/question_recreation_reward_func": 0.06529645575210452,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.042750000953674316,
+ "rewards/xmlcount_reward_func": 0.0,
  "step": 2
  },
  {
- "completion_length": 45.25,
- "epoch": 1.3333333333333333,
- "grad_norm": 78.80774688720703,
- "kl": 0.020999361149733886,
+ "completion_length": 175.83333333333334,
+ "epoch": 1.4,
+ "grad_norm": 7.617096424102783,
+ "kl": 0.00035043573977115255,
  "learning_rate": 4.415111107797445e-07,
  "loss": 0.0,
- "reward": 0.09825393743813038,
- "reward_std": 0.02789358670997899,
+ "reward": 0.19371196130911508,
+ "reward_std": 0.08724394316474597,
  "rewards/concensus_correctness_reward_func": 0.0,
  "rewards/consensus_reward_func": 0.0,
  "rewards/cumulative_reward_2": 0.0,
  "rewards/final_correctness_reward_func": 0.0,
- "rewards/question_recreation_reward_func": 0.08175393752753735,
+ "rewards/question_recreation_reward_func": 0.19371196130911508,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.016499999910593033,
+ "rewards/xmlcount_reward_func": 0.0,
  "step": 4
  },
  {
- "completion_length": 42.625,
+ "completion_length": 217.66666666666666,
  "epoch": 2.0,
- "grad_norm": 0.004175778012722731,
- "kl": 0.049324687279295176,
+ "grad_norm": 5.332751750946045,
+ "kl": 0.000615184020716697,
  "learning_rate": 2.934120444167326e-07,
  "loss": 0.0,
- "reward": 0.9253544360399246,
- "reward_std": 0.40504125878214836,
+ "reward": 0.02679351655145486,
+ "reward_std": 0.009128622089823088,
  "rewards/concensus_correctness_reward_func": 0.0,
  "rewards/consensus_reward_func": 0.0,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.75,
- "rewards/question_recreation_reward_func": 0.05735448165796697,
+ "rewards/final_correctness_reward_func": 0.0,
+ "rewards/question_recreation_reward_func": 0.02679351655145486,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.11800000071525574,
+ "rewards/xmlcount_reward_func": 0.0,
  "step": 6
  },
  {
- "completion_length": 17.75,
- "epoch": 2.6666666666666665,
- "grad_norm": 0.003430261043831706,
- "kl": 0.04985775472596288,
+ "completion_length": 189.75,
+ "epoch": 2.8,
+ "grad_norm": 6.812089920043945,
+ "kl": 0.0007357565191341564,
  "learning_rate": 1.2500000000000005e-07,
  "loss": 0.0,
- "reward": 0.6951797232031822,
- "reward_std": 0.020487656816840172,
+ "reward": 0.06324641685932875,
+ "reward_std": 0.013577945763245225,
  "rewards/concensus_correctness_reward_func": 0.0,
  "rewards/consensus_reward_func": 0.0,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.5,
- "rewards/question_recreation_reward_func": 0.05430475785396993,
+ "rewards/final_correctness_reward_func": 0.0,
+ "rewards/question_recreation_reward_func": 0.06324641685932875,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.14087500050663948,
+ "rewards/xmlcount_reward_func": 0.0,
  "step": 8
  },
  {
- "completion_length": 20.0,
- "epoch": 3.3333333333333335,
- "grad_norm": 55.28184127807617,
- "kl": 0.02525496296584606,
+ "completion_length": 148.83333333333334,
+ "epoch": 3.4,
+ "grad_norm": 19.7174129486084,
+ "kl": 0.0018994022199573617,
  "learning_rate": 1.507684480352292e-08,
  "loss": 0.0,
- "reward": 0.39077210798859596,
- "reward_std": 0.3560393115039915,
+ "reward": 0.0978486401339372,
+ "reward_std": 0.02896895833934347,
  "rewards/concensus_correctness_reward_func": 0.0,
  "rewards/consensus_reward_func": 0.0,
  "rewards/cumulative_reward_2": 0.0,
- "rewards/final_correctness_reward_func": 0.25,
- "rewards/question_recreation_reward_func": 0.10927211865782738,
+ "rewards/final_correctness_reward_func": 0.0,
+ "rewards/question_recreation_reward_func": 0.0978486401339372,
  "rewards/soft_format_reward_func": 0.0,
  "rewards/strict_format_reward_func": 0.0,
- "rewards/xmlcount_reward_func": 0.03150000050663948,
+ "rewards/xmlcount_reward_func": 0.0,
  "step": 10
  },
  {
- "epoch": 3.3333333333333335,
+ "epoch": 3.4,
  "step": 10,
  "total_flos": 0.0,
- "train_loss": 2.8505885711638258e-05,
- "train_runtime": 6580.358,
- "train_samples_per_second": 0.006,
- "train_steps_per_second": 0.002
+ "train_loss": 6.198883056640625e-07,
+ "train_runtime": 1226.5225,
+ "train_samples_per_second": 0.033,
+ "train_steps_per_second": 0.008
  }
  ],
  "logging_steps": 2,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 4,
+ "num_train_epochs": 5,
  "save_steps": 10,
  "stateful_callbacks": {
  "TrainerControl": {
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:24a80b9404f4fffaedc1b8ca509e6ed1757684c5d9bfe58301d5f689c832b365
+ oid sha256:6a5811d4022ee43bf43a3d7e557e467d606190ea87357fe97015c06855109fba
  size 5880