hamedkharazmi committed (verified)
Commit 723cbf1 · 1 Parent(s): ca6c5ea

End of training

all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
 "total_flos": 0.0,
- "train_loss": 0.27013154824574787,
- "train_runtime": 1786.8594,
- "train_samples": 23,
- "train_samples_per_second": 0.013,
- "train_steps_per_second": 0.002
+ "train_loss": 0.06536803146203359,
+ "train_runtime": 3637.9329,
+ "train_samples": 30,
+ "train_samples_per_second": 0.007,
+ "train_steps_per_second": 0.001
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:042df9bab814993de332623fa5c40266568096ef8b903104ca3c676525f23d13
+ oid sha256:77c40427fbbd03f4aa28c342daabd9193951114485d54cc99709a32af1c7b81a
 size 1976163472
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
 "total_flos": 0.0,
- "train_loss": 0.27013154824574787,
- "train_runtime": 1786.8594,
- "train_samples": 23,
- "train_samples_per_second": 0.013,
- "train_steps_per_second": 0.002
+ "train_loss": 0.06536803146203359,
+ "train_runtime": 3637.9329,
+ "train_samples": 30,
+ "train_samples_per_second": 0.007,
+ "train_steps_per_second": 0.001
 }
trainer_state.json CHANGED
@@ -2,7 +2,7 @@
 "best_global_step": null,
 "best_metric": null,
 "best_model_checkpoint": null,
- "epoch": 0.5217391304347826,
+ "epoch": 0.4,
 "eval_steps": 500,
 "global_step": 3,
 "is_hyper_param_search": false,
@@ -15,37 +15,37 @@
 "clip_ratio/low_mean": 0.0,
 "clip_ratio/low_min": 0.0,
 "clip_ratio/region_mean": 0.0,
- "completions/clipped_ratio": 0.125,
- "completions/max_length": 759.0,
- "completions/max_terminated_length": 544.0,
- "completions/mean_length": 344.3125,
- "completions/mean_terminated_length": 246.29166412353516,
- "completions/min_length": 64.5,
- "completions/min_terminated_length": 64.5,
- "epoch": 0.34782608695652173,
- "grad_norm": 6.1356120109558105,
+ "completions/clipped_ratio": 0.0,
+ "completions/max_length": 605.5,
+ "completions/max_terminated_length": 605.5,
+ "completions/mean_length": 214.5625,
+ "completions/mean_terminated_length": 214.5625,
+ "completions/min_length": 9.0,
+ "completions/min_terminated_length": 9.0,
+ "epoch": 0.26666666666666666,
+ "grad_norm": 14.364421844482422,
 "kl": 0.0,
 "learning_rate": 5e-07,
- "loss": 0.1231,
- "num_tokens": 9605.0,
- "reward": 0.0872948095202446,
- "reward_std": 0.036231483216397464,
+ "loss": -0.0457,
+ "num_tokens": 7529.0,
+ "reward": 0.27101069688796997,
+ "reward_std": 0.3605286180973053,
 "rewards/concensus_correctness_reward_func/mean": 0.0,
 "rewards/concensus_correctness_reward_func/std": 0.0,
- "rewards/consensus_reward_func/mean": 0.0,
- "rewards/consensus_reward_func/std": 0.0,
+ "rewards/consensus_reward_func/mean": 0.125,
+ "rewards/consensus_reward_func/std": 0.3535533845424652,
 "rewards/cumulative_reward_2/mean": 0.0,
 "rewards/cumulative_reward_2/std": 0.0,
- "rewards/final_correctness_reward_func/mean": 0.0,
- "rewards/final_correctness_reward_func/std": 0.0,
- "rewards/question_recreation_reward_func/mean": 0.0872948169708252,
- "rewards/question_recreation_reward_func/std": 0.042022028006613255,
+ "rewards/final_correctness_reward_func/mean": 0.125,
+ "rewards/final_correctness_reward_func/std": 0.3535533845424652,
+ "rewards/question_recreation_reward_func/mean": 0.02001071721315384,
+ "rewards/question_recreation_reward_func/std": 0.009133741725236177,
 "rewards/soft_format_reward_func/mean": 0.0,
 "rewards/soft_format_reward_func/std": 0.0,
 "rewards/strict_format_reward_func/mean": 0.0,
 "rewards/strict_format_reward_func/std": 0.0,
- "rewards/xmlcount_reward_func/mean": 0.0,
- "rewards/xmlcount_reward_func/std": 0.0,
+ "rewards/xmlcount_reward_func/mean": 0.0010000000474974513,
+ "rewards/xmlcount_reward_func/std": 0.002828427357599139,
 "step": 2
 },
 {
@@ -55,27 +55,27 @@
 "clip_ratio/low_min": 0.0,
 "clip_ratio/region_mean": 0.0,
 "completions/clipped_ratio": 0.0,
- "completions/max_length": 369.0,
- "completions/max_terminated_length": 369.0,
- "completions/mean_length": 126.25,
- "completions/mean_terminated_length": 126.25,
- "completions/min_length": 2.0,
- "completions/min_terminated_length": 2.0,
- "epoch": 0.5217391304347826,
- "kl": 0.0009940905874827877,
- "num_tokens": 12663.0,
- "reward": 2.516864776611328,
- "reward_std": 3.5382814407348633,
- "rewards/concensus_correctness_reward_func/mean": 2.5,
- "rewards/concensus_correctness_reward_func/std": 7.071068286895752,
+ "completions/max_length": 321.0,
+ "completions/max_terminated_length": 321.0,
+ "completions/mean_length": 167.625,
+ "completions/mean_terminated_length": 167.625,
+ "completions/min_length": 19.0,
+ "completions/min_terminated_length": 19.0,
+ "epoch": 0.4,
+ "kl": 0.0005115475723869167,
+ "num_tokens": 10918.0,
+ "reward": 0.16452272236347198,
+ "reward_std": 0.054086603224277496,
+ "rewards/concensus_correctness_reward_func/mean": 0.0,
+ "rewards/concensus_correctness_reward_func/std": 0.0,
 "rewards/consensus_reward_func/mean": 0.0,
 "rewards/consensus_reward_func/std": 0.0,
 "rewards/cumulative_reward_2/mean": 0.0,
 "rewards/cumulative_reward_2/std": 0.0,
 "rewards/final_correctness_reward_func/mean": 0.0,
 "rewards/final_correctness_reward_func/std": 0.0,
- "rewards/question_recreation_reward_func/mean": 0.01686476171016693,
- "rewards/question_recreation_reward_func/std": 0.01196382101625204,
+ "rewards/question_recreation_reward_func/mean": 0.16452273726463318,
+ "rewards/question_recreation_reward_func/std": 0.05140342935919762,
 "rewards/soft_format_reward_func/mean": 0.0,
 "rewards/soft_format_reward_func/std": 0.0,
 "rewards/strict_format_reward_func/mean": 0.0,
@@ -84,15 +84,15 @@
 "rewards/xmlcount_reward_func/std": 0.0,
 "step": 3,
 "total_flos": 0.0,
- "train_loss": 0.27013154824574787,
- "train_runtime": 1786.8594,
- "train_samples_per_second": 0.013,
- "train_steps_per_second": 0.002
+ "train_loss": 0.06536803146203359,
+ "train_runtime": 3637.9329,
+ "train_samples_per_second": 0.007,
+ "train_steps_per_second": 0.001
 }
 ],
 "logging_steps": 2,
 "max_steps": 3,
- "num_input_tokens_seen": 12663,
+ "num_input_tokens_seen": 10918,
 "num_train_epochs": 1,
 "save_steps": 25,
 "stateful_callbacks": {
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:6a71230f32e732eea5fcb1a0593b7bc74ff3864c9b7b760cda833ba20848a0ed
+ oid sha256:0df0f354d248a121ff0bc1816ff97a871a1a1dfd3c63ddcf92aa745670c031c8
 size 6929