dimasik87 committed
Commit e1503c2 · verified · 1 Parent(s): b8fc50a

Training in progress, step 30, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca38a470f48b9a3eda7dc7d31dc9611e5e3c4256687c086fd038f95746814c4f
+oid sha256:28ad4f4743381dbee21215d037183c11e23b3e9cc27a404604d3378155d8bcec
 size 78480072
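
Each of the pointer diffs in this commit only swaps the Git LFS oid: the file size stays the same while the sha256 points at the newly uploaded blob. As a quick local check that a downloaded file matches the pointer recorded here, a minimal sketch follows; the sha256_of helper is illustrative (not part of any library), and the path and expected oid are taken from the adapter diff above.

# Minimal sketch: recompute the sha256 of a downloaded checkpoint file and
# compare it with the oid recorded in the LFS pointer from this commit.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "28ad4f4743381dbee21215d037183c11e23b3e9cc27a404604d3378155d8bcec"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("oid match" if actual == expected else "oid mismatch: " + actual)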
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8b771855df1b735aed9eb0c2907412b59c68ae217034424530593503ec720bd4
+oid sha256:bf3666ccf9fe2cf907b046c3da7e15f51a5e1fcb4c2c4b1df677205caa7efc28
 size 157104826
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3486605cbb0dc2a33c5840ff99d168358bcb336ad8600122970833d77d8451e2
+oid sha256:ac55e2d2ceaf564eb5fd392154c87119d856598defac5660ba8d151da131eb4d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c188a6a4749e6ca627bb6d536eb7443f499d5b1b88d98a78f9c713443e010d9c
+oid sha256:2def2cd24154d8cecbaa07c36ae27e5ebb9b7273a78abfea27aa67c480e4ae2b
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.5183616876602173,
-  "best_model_checkpoint": "miner_id_24/checkpoint-10",
-  "epoch": 0.032388663967611336,
+  "best_metric": 1.2018967866897583,
+  "best_model_checkpoint": "miner_id_24/checkpoint-30",
+  "epoch": 0.09716599190283401,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -52,6 +52,87 @@
       "eval_samples_per_second": 35.568,
       "eval_steps_per_second": 8.892,
       "step": 10
+    },
+    {
+      "epoch": 0.038866396761133605,
+      "grad_norm": 1.614790439605713,
+      "learning_rate": 0.000163742398974869,
+      "loss": 1.4998,
+      "step": 12
+    },
+    {
+      "epoch": 0.048582995951417005,
+      "grad_norm": 0.8673895001411438,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 1.4005,
+      "step": 15
+    },
+    {
+      "epoch": 0.048582995951417005,
+      "eval_loss": 1.308341145515442,
+      "eval_runtime": 7.1234,
+      "eval_samples_per_second": 36.5,
+      "eval_steps_per_second": 9.125,
+      "step": 15
+    },
+    {
+      "epoch": 0.058299595141700404,
+      "grad_norm": 0.9261519312858582,
+      "learning_rate": 9.372094804706867e-05,
+      "loss": 1.3996,
+      "step": 18
+    },
+    {
+      "epoch": 0.06477732793522267,
+      "eval_loss": 1.2375595569610596,
+      "eval_runtime": 7.2083,
+      "eval_samples_per_second": 36.07,
+      "eval_steps_per_second": 9.017,
+      "step": 20
+    },
+    {
+      "epoch": 0.0680161943319838,
+      "grad_norm": 0.745496928691864,
+      "learning_rate": 5.7422070843492734e-05,
+      "loss": 1.2622,
+      "step": 21
+    },
+    {
+      "epoch": 0.07773279352226721,
+      "grad_norm": 0.7845839262008667,
+      "learning_rate": 2.7103137257858868e-05,
+      "loss": 1.252,
+      "step": 24
+    },
+    {
+      "epoch": 0.08097165991902834,
+      "eval_loss": 1.2060513496398926,
+      "eval_runtime": 7.2123,
+      "eval_samples_per_second": 36.049,
+      "eval_steps_per_second": 9.012,
+      "step": 25
+    },
+    {
+      "epoch": 0.0874493927125506,
+      "grad_norm": 0.9540892839431763,
+      "learning_rate": 7.022351411174866e-06,
+      "loss": 1.2789,
+      "step": 27
+    },
+    {
+      "epoch": 0.09716599190283401,
+      "grad_norm": 0.8427168130874634,
+      "learning_rate": 0.0,
+      "loss": 1.2617,
+      "step": 30
+    },
+    {
+      "epoch": 0.09716599190283401,
+      "eval_loss": 1.2018967866897583,
+      "eval_runtime": 7.2958,
+      "eval_samples_per_second": 35.637,
+      "eval_steps_per_second": 8.909,
+      "step": 30
     }
   ],
   "logging_steps": 3,
@@ -75,12 +156,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 2607004665249792.0,
+  "total_flos": 6542106046758912.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null