{
"best_metric": 0.9442797303199768,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.6294256490952006,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.012588512981904013,
"grad_norm": 21.16543960571289,
"learning_rate": 5e-05,
"loss": 2.3373,
"step": 1
},
{
"epoch": 0.012588512981904013,
"eval_loss": 2.5010251998901367,
"eval_runtime": 13.6534,
"eval_samples_per_second": 39.258,
"eval_steps_per_second": 4.907,
"step": 1
},
{
"epoch": 0.025177025963808025,
"grad_norm": 23.64405059814453,
"learning_rate": 0.0001,
"loss": 2.3002,
"step": 2
},
{
"epoch": 0.03776553894571204,
"grad_norm": 23.064193725585938,
"learning_rate": 9.989294616193017e-05,
"loss": 2.3989,
"step": 3
},
{
"epoch": 0.05035405192761605,
"grad_norm": 20.119773864746094,
"learning_rate": 9.957224306869053e-05,
"loss": 2.1218,
"step": 4
},
{
"epoch": 0.06294256490952006,
"grad_norm": 21.986080169677734,
"learning_rate": 9.903926402016153e-05,
"loss": 2.1021,
"step": 5
},
{
"epoch": 0.07553107789142408,
"grad_norm": 21.071178436279297,
"learning_rate": 9.829629131445342e-05,
"loss": 1.8549,
"step": 6
},
{
"epoch": 0.08811959087332809,
"grad_norm": 23.8582706451416,
"learning_rate": 9.73465064747553e-05,
"loss": 2.0362,
"step": 7
},
{
"epoch": 0.1007081038552321,
"grad_norm": 26.81045150756836,
"learning_rate": 9.619397662556435e-05,
"loss": 1.9506,
"step": 8
},
{
"epoch": 0.11329661683713611,
"grad_norm": 31.266883850097656,
"learning_rate": 9.484363707663442e-05,
"loss": 1.6144,
"step": 9
},
{
"epoch": 0.12588512981904013,
"grad_norm": 34.45520782470703,
"learning_rate": 9.330127018922194e-05,
"loss": 1.6564,
"step": 10
},
{
"epoch": 0.13847364280094415,
"grad_norm": 36.80929183959961,
"learning_rate": 9.157348061512727e-05,
"loss": 1.5002,
"step": 11
},
{
"epoch": 0.15106215578284815,
"grad_norm": 33.41875076293945,
"learning_rate": 8.966766701456177e-05,
"loss": 1.6248,
"step": 12
},
{
"epoch": 0.16365066876475218,
"grad_norm": 53.04654312133789,
"learning_rate": 8.759199037394887e-05,
"loss": 1.7668,
"step": 13
},
{
"epoch": 0.17623918174665618,
"grad_norm": 79.66429901123047,
"learning_rate": 8.535533905932738e-05,
"loss": 1.9672,
"step": 14
},
{
"epoch": 0.1888276947285602,
"grad_norm": 53.10544967651367,
"learning_rate": 8.296729075500344e-05,
"loss": 1.5469,
"step": 15
},
{
"epoch": 0.2014162077104642,
"grad_norm": 40.61369323730469,
"learning_rate": 8.043807145043604e-05,
"loss": 1.6699,
"step": 16
},
{
"epoch": 0.21400472069236823,
"grad_norm": 31.596616744995117,
"learning_rate": 7.777851165098012e-05,
"loss": 1.4594,
"step": 17
},
{
"epoch": 0.22659323367427223,
"grad_norm": 34.025482177734375,
"learning_rate": 7.500000000000001e-05,
"loss": 1.3662,
"step": 18
},
{
"epoch": 0.23918174665617625,
"grad_norm": 32.110267639160156,
"learning_rate": 7.211443451095007e-05,
"loss": 1.2301,
"step": 19
},
{
"epoch": 0.25177025963808025,
"grad_norm": 38.80237579345703,
"learning_rate": 6.91341716182545e-05,
"loss": 1.3337,
"step": 20
},
{
"epoch": 0.26435877261998425,
"grad_norm": 32.85317611694336,
"learning_rate": 6.607197326515808e-05,
"loss": 1.2655,
"step": 21
},
{
"epoch": 0.2769472856018883,
"grad_norm": 28.820865631103516,
"learning_rate": 6.294095225512603e-05,
"loss": 1.171,
"step": 22
},
{
"epoch": 0.2895357985837923,
"grad_norm": 32.6195182800293,
"learning_rate": 5.9754516100806423e-05,
"loss": 1.1392,
"step": 23
},
{
"epoch": 0.3021243115656963,
"grad_norm": 30.52219009399414,
"learning_rate": 5.6526309611002594e-05,
"loss": 1.1825,
"step": 24
},
{
"epoch": 0.3147128245476003,
"grad_norm": 36.73969650268555,
"learning_rate": 5.327015646150716e-05,
"loss": 1.0942,
"step": 25
},
{
"epoch": 0.3147128245476003,
"eval_loss": 1.1853362321853638,
"eval_runtime": 13.6363,
"eval_samples_per_second": 39.307,
"eval_steps_per_second": 4.913,
"step": 25
},
{
"epoch": 0.32730133752950435,
"grad_norm": 32.60086441040039,
"learning_rate": 5e-05,
"loss": 1.6508,
"step": 26
},
{
"epoch": 0.33988985051140835,
"grad_norm": 29.659774780273438,
"learning_rate": 4.6729843538492847e-05,
"loss": 1.3073,
"step": 27
},
{
"epoch": 0.35247836349331235,
"grad_norm": 27.13357925415039,
"learning_rate": 4.347369038899744e-05,
"loss": 1.2718,
"step": 28
},
{
"epoch": 0.36506687647521635,
"grad_norm": 24.87796401977539,
"learning_rate": 4.0245483899193595e-05,
"loss": 1.2055,
"step": 29
},
{
"epoch": 0.3776553894571204,
"grad_norm": 24.10536003112793,
"learning_rate": 3.705904774487396e-05,
"loss": 1.16,
"step": 30
},
{
"epoch": 0.3902439024390244,
"grad_norm": 25.981910705566406,
"learning_rate": 3.392802673484193e-05,
"loss": 1.0399,
"step": 31
},
{
"epoch": 0.4028324154209284,
"grad_norm": 36.10255813598633,
"learning_rate": 3.086582838174551e-05,
"loss": 1.135,
"step": 32
},
{
"epoch": 0.4154209284028324,
"grad_norm": 40.39674758911133,
"learning_rate": 2.7885565489049946e-05,
"loss": 1.1585,
"step": 33
},
{
"epoch": 0.42800944138473646,
"grad_norm": 34.2160758972168,
"learning_rate": 2.500000000000001e-05,
"loss": 0.8405,
"step": 34
},
{
"epoch": 0.44059795436664045,
"grad_norm": 33.32962417602539,
"learning_rate": 2.2221488349019903e-05,
"loss": 0.797,
"step": 35
},
{
"epoch": 0.45318646734854445,
"grad_norm": 27.23453140258789,
"learning_rate": 1.9561928549563968e-05,
"loss": 0.7851,
"step": 36
},
{
"epoch": 0.46577498033044845,
"grad_norm": 31.11690330505371,
"learning_rate": 1.703270924499656e-05,
"loss": 0.9167,
"step": 37
},
{
"epoch": 0.4783634933123525,
"grad_norm": 31.77367401123047,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.096,
"step": 38
},
{
"epoch": 0.4909520062942565,
"grad_norm": 31.28978729248047,
"learning_rate": 1.2408009626051137e-05,
"loss": 1.3097,
"step": 39
},
{
"epoch": 0.5035405192761605,
"grad_norm": 24.03819465637207,
"learning_rate": 1.0332332985438248e-05,
"loss": 1.0327,
"step": 40
},
{
"epoch": 0.5161290322580645,
"grad_norm": 24.845369338989258,
"learning_rate": 8.426519384872733e-06,
"loss": 1.0824,
"step": 41
},
{
"epoch": 0.5287175452399685,
"grad_norm": 24.798585891723633,
"learning_rate": 6.698729810778065e-06,
"loss": 1.058,
"step": 42
},
{
"epoch": 0.5413060582218725,
"grad_norm": 24.77889633178711,
"learning_rate": 5.156362923365588e-06,
"loss": 0.9896,
"step": 43
},
{
"epoch": 0.5538945712037766,
"grad_norm": 24.031038284301758,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.8687,
"step": 44
},
{
"epoch": 0.5664830841856806,
"grad_norm": 24.104299545288086,
"learning_rate": 2.653493525244721e-06,
"loss": 0.9357,
"step": 45
},
{
"epoch": 0.5790715971675846,
"grad_norm": 24.142581939697266,
"learning_rate": 1.70370868554659e-06,
"loss": 0.8889,
"step": 46
},
{
"epoch": 0.5916601101494886,
"grad_norm": 23.086729049682617,
"learning_rate": 9.607359798384785e-07,
"loss": 0.6544,
"step": 47
},
{
"epoch": 0.6042486231313926,
"grad_norm": 24.28306770324707,
"learning_rate": 4.277569313094809e-07,
"loss": 0.7109,
"step": 48
},
{
"epoch": 0.6168371361132966,
"grad_norm": 25.321258544921875,
"learning_rate": 1.0705383806982606e-07,
"loss": 0.7226,
"step": 49
},
{
"epoch": 0.6294256490952006,
"grad_norm": 40.5628776550293,
"learning_rate": 0.0,
"loss": 0.7491,
"step": 50
},
{
"epoch": 0.6294256490952006,
"eval_loss": 0.9442797303199768,
"eval_runtime": 13.6356,
"eval_samples_per_second": 39.309,
"eval_steps_per_second": 4.914,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.70274988097536e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}