dzanbek committed (verified)
Commit 0a3b971 · 1 parent: f25a6c1

Training in progress, step 25, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -22,11 +22,11 @@
   "target_modules": [
     "q_proj",
     "v_proj",
+    "up_proj",
     "gate_proj",
-    "o_proj",
     "down_proj",
     "k_proj",
-    "up_proj"
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b8e993f0485cda622dddb2f6f917e1903133d32adc89bded7ed2f2473692a9b3
+oid sha256:5fa476385f0edacd75f946bfb108752a530a8192eae1a2ab489ff8a44b5abdb2
 size 335922386
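
The binary checkpoint files are stored as Git LFS pointers, so each of the remaining diffs only swaps the sha256 oid while the size stays the same. If the actual optimizer.pt has been pulled locally, a small sketch like the following (the local path is an assumption) can confirm it matches the new pointer:

# Hypothetical sketch: check a downloaded file against the LFS pointer above.
import hashlib
import os

path = "last-checkpoint/optimizer.pt"  # assumed local path after `git lfs pull`
expected_oid = "5fa476385f0edacd75f946bfb108752a530a8192eae1a2ab489ff8a44b5abdb2"
expected_size = 335922386

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_oid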
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d2e27231aa2cf1cf4750a2c3a3604b5e7e8f0980d09e1d668992b83032f6b59a
+oid sha256:71089d817d98b7b3af3db061a61fe9c3b14aae88ca7076d3a935bbfa590bd6d7
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:212837ccb433e5430b061dc107b19dc09e932e6cfb62a751187d0903b7b0d94e
+oid sha256:99fc9c0ec571f76cf9b6d1229601c5173899cd18104e487c5627f5f4c56c6e8a
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.17301038062283736,
+  "epoch": 0.05767012687427912,
   "eval_steps": 25,
-  "global_step": 75,
+  "global_step": 25,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -18,9 +18,9 @@
     {
       "epoch": 0.002306805074971165,
       "eval_loss": NaN,
-      "eval_runtime": 84.9747,
-      "eval_samples_per_second": 4.295,
-      "eval_steps_per_second": 2.154,
+      "eval_runtime": 85.0954,
+      "eval_samples_per_second": 4.289,
+      "eval_steps_per_second": 2.151,
       "step": 1
     },
     {
@@ -194,376 +194,10 @@
     {
       "epoch": 0.05767012687427912,
       "eval_loss": NaN,
-      "eval_runtime": 84.8887,
-      "eval_samples_per_second": 4.3,
-      "eval_steps_per_second": 2.156,
+      "eval_runtime": 85.0606,
+      "eval_samples_per_second": 4.291,
+      "eval_steps_per_second": 2.151,
       "step": 25
-    },
-    {
-      "epoch": 0.05997693194925029,
-      "grad_norm": NaN,
-      "learning_rate": 7.68649804173412e-05,
-      "loss": 0.0,
-      "step": 26
-    },
-    {
-      "epoch": 0.06228373702422145,
-      "grad_norm": NaN,
-      "learning_rate": 7.500000000000001e-05,
-      "loss": 0.0,
-      "step": 27
-    },
-    {
-      "epoch": 0.06459054209919261,
-      "grad_norm": NaN,
-      "learning_rate": 7.308743066175172e-05,
-      "loss": 0.0,
-      "step": 28
-    },
-    {
-      "epoch": 0.06689734717416378,
-      "grad_norm": NaN,
-      "learning_rate": 7.113091308703498e-05,
-      "loss": 0.0,
-      "step": 29
-    },
-    {
-      "epoch": 0.06920415224913495,
-      "grad_norm": NaN,
-      "learning_rate": 6.91341716182545e-05,
-      "loss": 0.0,
-      "step": 30
-    },
-    {
-      "epoch": 0.07151095732410612,
-      "grad_norm": NaN,
-      "learning_rate": 6.710100716628344e-05,
-      "loss": 0.0,
-      "step": 31
-    },
-    {
-      "epoch": 0.07381776239907728,
-      "grad_norm": NaN,
-      "learning_rate": 6.503528997521366e-05,
-      "loss": 0.0,
-      "step": 32
-    },
-    {
-      "epoch": 0.07612456747404844,
-      "grad_norm": NaN,
-      "learning_rate": 6.294095225512603e-05,
-      "loss": 0.0,
-      "step": 33
-    },
-    {
-      "epoch": 0.0784313725490196,
-      "grad_norm": NaN,
-      "learning_rate": 6.0821980696905146e-05,
-      "loss": 0.0,
-      "step": 34
-    },
-    {
-      "epoch": 0.08073817762399077,
-      "grad_norm": NaN,
-      "learning_rate": 5.868240888334653e-05,
-      "loss": 0.0,
-      "step": 35
-    },
-    {
-      "epoch": 0.08304498269896193,
-      "grad_norm": NaN,
-      "learning_rate": 5.6526309611002594e-05,
-      "loss": 0.0,
-      "step": 36
-    },
-    {
-      "epoch": 0.0853517877739331,
-      "grad_norm": NaN,
-      "learning_rate": 5.435778713738292e-05,
-      "loss": 0.0,
-      "step": 37
-    },
-    {
-      "epoch": 0.08765859284890427,
-      "grad_norm": NaN,
-      "learning_rate": 5.218096936826681e-05,
-      "loss": 0.0,
-      "step": 38
-    },
-    {
-      "epoch": 0.08996539792387544,
-      "grad_norm": NaN,
-      "learning_rate": 5e-05,
-      "loss": 0.0,
-      "step": 39
-    },
-    {
-      "epoch": 0.0922722029988466,
-      "grad_norm": NaN,
-      "learning_rate": 4.781903063173321e-05,
-      "loss": 0.0,
-      "step": 40
-    },
-    {
-      "epoch": 0.09457900807381776,
-      "grad_norm": NaN,
-      "learning_rate": 4.564221286261709e-05,
-      "loss": 0.0,
-      "step": 41
-    },
-    {
-      "epoch": 0.09688581314878893,
-      "grad_norm": NaN,
-      "learning_rate": 4.347369038899744e-05,
-      "loss": 0.0,
-      "step": 42
-    },
-    {
-      "epoch": 0.09919261822376009,
-      "grad_norm": NaN,
-      "learning_rate": 4.131759111665349e-05,
-      "loss": 0.0,
-      "step": 43
-    },
-    {
-      "epoch": 0.10149942329873125,
-      "grad_norm": NaN,
-      "learning_rate": 3.917801930309486e-05,
-      "loss": 0.0,
-      "step": 44
-    },
-    {
-      "epoch": 0.10380622837370242,
-      "grad_norm": NaN,
-      "learning_rate": 3.705904774487396e-05,
-      "loss": 0.0,
-      "step": 45
-    },
-    {
-      "epoch": 0.1061130334486736,
-      "grad_norm": NaN,
-      "learning_rate": 3.4964710024786354e-05,
-      "loss": 0.0,
-      "step": 46
-    },
-    {
-      "epoch": 0.10841983852364476,
-      "grad_norm": NaN,
-      "learning_rate": 3.289899283371657e-05,
-      "loss": 0.0,
-      "step": 47
-    },
-    {
-      "epoch": 0.11072664359861592,
-      "grad_norm": NaN,
-      "learning_rate": 3.086582838174551e-05,
-      "loss": 0.0,
-      "step": 48
-    },
-    {
-      "epoch": 0.11303344867358708,
-      "grad_norm": NaN,
-      "learning_rate": 2.886908691296504e-05,
-      "loss": 0.0,
-      "step": 49
-    },
-    {
-      "epoch": 0.11534025374855825,
-      "grad_norm": NaN,
-      "learning_rate": 2.6912569338248315e-05,
-      "loss": 0.0,
-      "step": 50
-    },
-    {
-      "epoch": 0.11534025374855825,
-      "eval_loss": NaN,
-      "eval_runtime": 85.025,
-      "eval_samples_per_second": 4.293,
-      "eval_steps_per_second": 2.152,
-      "step": 50
-    },
-    {
-      "epoch": 0.11764705882352941,
-      "grad_norm": NaN,
-      "learning_rate": 2.500000000000001e-05,
-      "loss": 0.0,
-      "step": 51
-    },
-    {
-      "epoch": 0.11995386389850057,
-      "grad_norm": NaN,
-      "learning_rate": 2.3135019582658802e-05,
-      "loss": 0.0,
-      "step": 52
-    },
-    {
-      "epoch": 0.12226066897347174,
-      "grad_norm": NaN,
-      "learning_rate": 2.132117818244771e-05,
-      "loss": 0.0,
-      "step": 53
-    },
-    {
-      "epoch": 0.1245674740484429,
-      "grad_norm": NaN,
-      "learning_rate": 1.9561928549563968e-05,
-      "loss": 0.0,
-      "step": 54
-    },
-    {
-      "epoch": 0.12687427912341406,
-      "grad_norm": NaN,
-      "learning_rate": 1.7860619515673033e-05,
-      "loss": 0.0,
-      "step": 55
-    },
-    {
-      "epoch": 0.12918108419838523,
-      "grad_norm": NaN,
-      "learning_rate": 1.622048961921699e-05,
-      "loss": 0.0,
-      "step": 56
-    },
-    {
-      "epoch": 0.1314878892733564,
-      "grad_norm": NaN,
-      "learning_rate": 1.4644660940672627e-05,
-      "loss": 0.0,
-      "step": 57
-    },
-    {
-      "epoch": 0.13379469434832755,
-      "grad_norm": NaN,
-      "learning_rate": 1.3136133159493802e-05,
-      "loss": 0.0,
-      "step": 58
-    },
-    {
-      "epoch": 0.13610149942329874,
-      "grad_norm": NaN,
-      "learning_rate": 1.1697777844051105e-05,
-      "loss": 0.0,
-      "step": 59
-    },
-    {
-      "epoch": 0.1384083044982699,
-      "grad_norm": NaN,
-      "learning_rate": 1.0332332985438248e-05,
-      "loss": 0.0,
-      "step": 60
-    },
-    {
-      "epoch": 0.14071510957324107,
-      "grad_norm": NaN,
-      "learning_rate": 9.042397785550405e-06,
-      "loss": 0.0,
-      "step": 61
-    },
-    {
-      "epoch": 0.14302191464821223,
-      "grad_norm": NaN,
-      "learning_rate": 7.830427709355725e-06,
-      "loss": 0.0,
-      "step": 62
-    },
-    {
-      "epoch": 0.1453287197231834,
-      "grad_norm": NaN,
-      "learning_rate": 6.698729810778065e-06,
-      "loss": 0.0,
-      "step": 63
-    },
-    {
-      "epoch": 0.14763552479815456,
-      "grad_norm": NaN,
-      "learning_rate": 5.649458341088915e-06,
-      "loss": 0.0,
-      "step": 64
-    },
-    {
-      "epoch": 0.14994232987312572,
-      "grad_norm": NaN,
-      "learning_rate": 4.684610648167503e-06,
-      "loss": 0.0,
-      "step": 65
-    },
-    {
-      "epoch": 0.1522491349480969,
-      "grad_norm": NaN,
-      "learning_rate": 3.8060233744356633e-06,
-      "loss": 0.0,
-      "step": 66
-    },
-    {
-      "epoch": 0.15455594002306805,
-      "grad_norm": NaN,
-      "learning_rate": 3.0153689607045845e-06,
-      "loss": 0.0,
-      "step": 67
-    },
-    {
-      "epoch": 0.1568627450980392,
-      "grad_norm": NaN,
-      "learning_rate": 2.314152462588659e-06,
-      "loss": 0.0,
-      "step": 68
-    },
-    {
-      "epoch": 0.15916955017301038,
-      "grad_norm": NaN,
-      "learning_rate": 1.70370868554659e-06,
-      "loss": 0.0,
-      "step": 69
-    },
-    {
-      "epoch": 0.16147635524798154,
-      "grad_norm": NaN,
-      "learning_rate": 1.1851996440033319e-06,
-      "loss": 0.0,
-      "step": 70
-    },
-    {
-      "epoch": 0.1637831603229527,
-      "grad_norm": NaN,
-      "learning_rate": 7.596123493895991e-07,
-      "loss": 0.0,
-      "step": 71
-    },
-    {
-      "epoch": 0.16608996539792387,
-      "grad_norm": NaN,
-      "learning_rate": 4.277569313094809e-07,
-      "loss": 0.0,
-      "step": 72
-    },
-    {
-      "epoch": 0.16839677047289503,
-      "grad_norm": NaN,
-      "learning_rate": 1.9026509541272275e-07,
-      "loss": 0.0,
-      "step": 73
-    },
-    {
-      "epoch": 0.1707035755478662,
-      "grad_norm": NaN,
-      "learning_rate": 4.7588920907110094e-08,
-      "loss": 0.0,
-      "step": 74
-    },
-    {
-      "epoch": 0.17301038062283736,
-      "grad_norm": NaN,
-      "learning_rate": 0.0,
-      "loss": 0.0,
-      "step": 75
-    },
-    {
-      "epoch": 0.17301038062283736,
-      "eval_loss": NaN,
-      "eval_runtime": 85.1091,
-      "eval_samples_per_second": 4.289,
-      "eval_steps_per_second": 2.15,
-      "step": 75
     }
   ],
   "logging_steps": 1,
@@ -578,12 +212,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": true
+        "should_training_stop": false
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.1165403617584742e+17,
+  "total_flos": 3.727983599891251e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:811911df4a93b0335d6380ceb9417cf2dbd4d2a294d8a95dd6af884484c7aced
+oid sha256:98df53abcf90011843630e80901851ea65527d38f1f5be44d42395b41dfc18ef
 size 6776