vdos committed · verified
Commit c2bba07 · 1 Parent(s): d2986e5

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ead69c58462791e9b3b78649e3199d8362bfe863d885b05713f4f93206248f70
+oid sha256:5a21ac62dc05ecfe50d366197bcf226060053c4c6d70d32e4ffbdaa41cf6d511
 size 100690288
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e6e5cbf434d326825b6f2f5fea81711726224b549cb2294fae6ae309c76f07d1
+oid sha256:95e351282333d62e7084547a509e4d15bef761d4d443ce2134a41ce003cd149c
 size 201488570
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b65568bcafd41a616dec0332885bc21ab49fbb597d32c04eb043d5661fdccf1
+oid sha256:3877fb6bb8fb7a13455ce4c12ec1b2bae788e0ab73d742fe06aee03f905fbaff
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:821cd6b421da594373994386c7fa0163e3f798603925f08da2fe4b51dc81cc3e
+oid sha256:7fee770f8fa582bcd9e1bcd432c1163b1cbf45fc43c5b2d61509025565e2135d
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b49f68e7a7ff73567ecf125220724cc0569ce466b64f806107efe47d5408ca7
+oid sha256:31ba91e4225d0b16bad3d40ae3ba674f580f22ac6e7e2a9f222010bbf991d1a5
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26768d555d48908128af1a395a29cb28c1be24334007ecb3f9cac54142793141
+oid sha256:41db95822976490b7f7ac26131a4f2393755f6184626c8a73b04cdd6537aa7cc
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
+oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
 size 1064
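
Each checkpoint binary above is tracked with Git LFS, so the commit only rewrites small pointer files: the oid sha256 line records the hash of the new object and size its byte count. As a minimal illustrative sketch (not part of the commit), assuming the adapter weights have already been pulled into a local last-checkpoint/ directory, the downloaded file can be checked against its updated pointer:

import hashlib

# Hypothetical local path; oid and size are copied from the new pointer above.
BINARY_PATH = "last-checkpoint/adapter_model.safetensors"
EXPECTED_OID = "5a21ac62dc05ecfe50d366197bcf226060053c4c6d70d32e4ffbdaa41cf6d511"
EXPECTED_SIZE = 100690288

sha256 = hashlib.sha256()
total_bytes = 0
with open(BINARY_PATH, "rb") as f:
    # Hash in 1 MiB chunks so large checkpoints are not read into memory at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)
        total_bytes += len(chunk)

assert total_bytes == EXPECTED_SIZE, f"size mismatch: {total_bytes} bytes"
assert sha256.hexdigest() == EXPECTED_OID, "content does not match the LFS pointer oid"
print("adapter_model.safetensors matches its LFS pointer")
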
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.1853362321853638,
-  "best_model_checkpoint": "miner_id_24/checkpoint-25",
-  "epoch": 0.3147128245476003,
+  "best_metric": 0.9442797303199768,
+  "best_model_checkpoint": "miner_id_24/checkpoint-50",
+  "epoch": 0.6294256490952006,
   "eval_steps": 25,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -198,6 +198,189 @@
       "eval_samples_per_second": 39.307,
       "eval_steps_per_second": 4.913,
       "step": 25
+    },
+    {
+      "epoch": 0.32730133752950435,
+      "grad_norm": 32.60086441040039,
+      "learning_rate": 5e-05,
+      "loss": 1.6508,
+      "step": 26
+    },
+    {
+      "epoch": 0.33988985051140835,
+      "grad_norm": 29.659774780273438,
+      "learning_rate": 4.6729843538492847e-05,
+      "loss": 1.3073,
+      "step": 27
+    },
+    {
+      "epoch": 0.35247836349331235,
+      "grad_norm": 27.13357925415039,
+      "learning_rate": 4.347369038899744e-05,
+      "loss": 1.2718,
+      "step": 28
+    },
+    {
+      "epoch": 0.36506687647521635,
+      "grad_norm": 24.87796401977539,
+      "learning_rate": 4.0245483899193595e-05,
+      "loss": 1.2055,
+      "step": 29
+    },
+    {
+      "epoch": 0.3776553894571204,
+      "grad_norm": 24.10536003112793,
+      "learning_rate": 3.705904774487396e-05,
+      "loss": 1.16,
+      "step": 30
+    },
+    {
+      "epoch": 0.3902439024390244,
+      "grad_norm": 25.981910705566406,
+      "learning_rate": 3.392802673484193e-05,
+      "loss": 1.0399,
+      "step": 31
+    },
+    {
+      "epoch": 0.4028324154209284,
+      "grad_norm": 36.10255813598633,
+      "learning_rate": 3.086582838174551e-05,
+      "loss": 1.135,
+      "step": 32
+    },
+    {
+      "epoch": 0.4154209284028324,
+      "grad_norm": 40.39674758911133,
+      "learning_rate": 2.7885565489049946e-05,
+      "loss": 1.1585,
+      "step": 33
+    },
+    {
+      "epoch": 0.42800944138473646,
+      "grad_norm": 34.2160758972168,
+      "learning_rate": 2.500000000000001e-05,
+      "loss": 0.8405,
+      "step": 34
+    },
+    {
+      "epoch": 0.44059795436664045,
+      "grad_norm": 33.32962417602539,
+      "learning_rate": 2.2221488349019903e-05,
+      "loss": 0.797,
+      "step": 35
+    },
+    {
+      "epoch": 0.45318646734854445,
+      "grad_norm": 27.23453140258789,
+      "learning_rate": 1.9561928549563968e-05,
+      "loss": 0.7851,
+      "step": 36
+    },
+    {
+      "epoch": 0.46577498033044845,
+      "grad_norm": 31.11690330505371,
+      "learning_rate": 1.703270924499656e-05,
+      "loss": 0.9167,
+      "step": 37
+    },
+    {
+      "epoch": 0.4783634933123525,
+      "grad_norm": 31.77367401123047,
+      "learning_rate": 1.4644660940672627e-05,
+      "loss": 1.096,
+      "step": 38
+    },
+    {
+      "epoch": 0.4909520062942565,
+      "grad_norm": 31.28978729248047,
+      "learning_rate": 1.2408009626051137e-05,
+      "loss": 1.3097,
+      "step": 39
+    },
+    {
+      "epoch": 0.5035405192761605,
+      "grad_norm": 24.03819465637207,
+      "learning_rate": 1.0332332985438248e-05,
+      "loss": 1.0327,
+      "step": 40
+    },
+    {
+      "epoch": 0.5161290322580645,
+      "grad_norm": 24.845369338989258,
+      "learning_rate": 8.426519384872733e-06,
+      "loss": 1.0824,
+      "step": 41
+    },
+    {
+      "epoch": 0.5287175452399685,
+      "grad_norm": 24.798585891723633,
+      "learning_rate": 6.698729810778065e-06,
+      "loss": 1.058,
+      "step": 42
+    },
+    {
+      "epoch": 0.5413060582218725,
+      "grad_norm": 24.77889633178711,
+      "learning_rate": 5.156362923365588e-06,
+      "loss": 0.9896,
+      "step": 43
+    },
+    {
+      "epoch": 0.5538945712037766,
+      "grad_norm": 24.031038284301758,
+      "learning_rate": 3.8060233744356633e-06,
+      "loss": 0.8687,
+      "step": 44
+    },
+    {
+      "epoch": 0.5664830841856806,
+      "grad_norm": 24.104299545288086,
+      "learning_rate": 2.653493525244721e-06,
+      "loss": 0.9357,
+      "step": 45
+    },
+    {
+      "epoch": 0.5790715971675846,
+      "grad_norm": 24.142581939697266,
+      "learning_rate": 1.70370868554659e-06,
+      "loss": 0.8889,
+      "step": 46
+    },
+    {
+      "epoch": 0.5916601101494886,
+      "grad_norm": 23.086729049682617,
+      "learning_rate": 9.607359798384785e-07,
+      "loss": 0.6544,
+      "step": 47
+    },
+    {
+      "epoch": 0.6042486231313926,
+      "grad_norm": 24.28306770324707,
+      "learning_rate": 4.277569313094809e-07,
+      "loss": 0.7109,
+      "step": 48
+    },
+    {
+      "epoch": 0.6168371361132966,
+      "grad_norm": 25.321258544921875,
+      "learning_rate": 1.0705383806982606e-07,
+      "loss": 0.7226,
+      "step": 49
+    },
+    {
+      "epoch": 0.6294256490952006,
+      "grad_norm": 40.5628776550293,
+      "learning_rate": 0.0,
+      "loss": 0.7491,
+      "step": 50
+    },
+    {
+      "epoch": 0.6294256490952006,
+      "eval_loss": 0.9442797303199768,
+      "eval_runtime": 13.6356,
+      "eval_samples_per_second": 39.309,
+      "eval_steps_per_second": 4.914,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -221,12 +404,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 4.85137494048768e+16,
+  "total_flos": 9.70274988097536e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null