{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.02008637139700713,
"eval_steps": 3,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008034548558802853,
"grad_norm": 2.1960723400115967,
"learning_rate": 2e-05,
"loss": 1.0697,
"step": 1
},
{
"epoch": 0.0008034548558802853,
"eval_loss": 1.163741111755371,
"eval_runtime": 27.8219,
"eval_samples_per_second": 37.704,
"eval_steps_per_second": 18.87,
"step": 1
},
{
"epoch": 0.0016069097117605705,
"grad_norm": 2.7917819023132324,
"learning_rate": 4e-05,
"loss": 1.2096,
"step": 2
},
{
"epoch": 0.0024103645676408557,
"grad_norm": 2.0014545917510986,
"learning_rate": 6e-05,
"loss": 1.0414,
"step": 3
},
{
"epoch": 0.0024103645676408557,
"eval_loss": 1.1070276498794556,
"eval_runtime": 27.3924,
"eval_samples_per_second": 38.295,
"eval_steps_per_second": 19.166,
"step": 3
},
{
"epoch": 0.003213819423521141,
"grad_norm": 2.172611951828003,
"learning_rate": 8e-05,
"loss": 1.1045,
"step": 4
},
{
"epoch": 0.004017274279401426,
"grad_norm": 1.4744445085525513,
"learning_rate": 0.0001,
"loss": 0.8924,
"step": 5
},
{
"epoch": 0.004820729135281711,
"grad_norm": 1.8950849771499634,
"learning_rate": 0.00012,
"loss": 0.9391,
"step": 6
},
{
"epoch": 0.004820729135281711,
"eval_loss": 0.8586330413818359,
"eval_runtime": 27.5044,
"eval_samples_per_second": 38.139,
"eval_steps_per_second": 19.088,
"step": 6
},
{
"epoch": 0.005624183991161997,
"grad_norm": 2.3339345455169678,
"learning_rate": 0.00014,
"loss": 0.8422,
"step": 7
},
{
"epoch": 0.006427638847042282,
"grad_norm": 1.636521816253662,
"learning_rate": 0.00016,
"loss": 0.6522,
"step": 8
},
{
"epoch": 0.0072310937029225674,
"grad_norm": 2.06076717376709,
"learning_rate": 0.00018,
"loss": 0.6197,
"step": 9
},
{
"epoch": 0.0072310937029225674,
"eval_loss": 0.521320641040802,
"eval_runtime": 28.7745,
"eval_samples_per_second": 36.456,
"eval_steps_per_second": 18.245,
"step": 9
},
{
"epoch": 0.008034548558802852,
"grad_norm": 1.8209742307662964,
"learning_rate": 0.0002,
"loss": 0.4981,
"step": 10
},
{
"epoch": 0.008838003414683137,
"grad_norm": 2.4247426986694336,
"learning_rate": 0.00019781476007338058,
"loss": 0.4462,
"step": 11
},
{
"epoch": 0.009641458270563423,
"grad_norm": 1.2692722082138062,
"learning_rate": 0.0001913545457642601,
"loss": 0.3823,
"step": 12
},
{
"epoch": 0.009641458270563423,
"eval_loss": 0.3399949073791504,
"eval_runtime": 27.4805,
"eval_samples_per_second": 38.173,
"eval_steps_per_second": 19.104,
"step": 12
},
{
"epoch": 0.010444913126443708,
"grad_norm": 0.9483970999717712,
"learning_rate": 0.00018090169943749476,
"loss": 0.3157,
"step": 13
},
{
"epoch": 0.011248367982323993,
"grad_norm": 1.1190989017486572,
"learning_rate": 0.00016691306063588583,
"loss": 0.3227,
"step": 14
},
{
"epoch": 0.012051822838204279,
"grad_norm": 1.410027265548706,
"learning_rate": 0.00015000000000000001,
"loss": 0.3077,
"step": 15
},
{
"epoch": 0.012051822838204279,
"eval_loss": 0.2781650722026825,
"eval_runtime": 27.5144,
"eval_samples_per_second": 38.126,
"eval_steps_per_second": 19.081,
"step": 15
},
{
"epoch": 0.012855277694084564,
"grad_norm": 1.0272347927093506,
"learning_rate": 0.00013090169943749476,
"loss": 0.2837,
"step": 16
},
{
"epoch": 0.01365873254996485,
"grad_norm": 1.063129186630249,
"learning_rate": 0.00011045284632676536,
"loss": 0.2678,
"step": 17
},
{
"epoch": 0.014462187405845135,
"grad_norm": 1.1358978748321533,
"learning_rate": 8.954715367323468e-05,
"loss": 0.265,
"step": 18
},
{
"epoch": 0.014462187405845135,
"eval_loss": 0.2543899118900299,
"eval_runtime": 27.4127,
"eval_samples_per_second": 38.267,
"eval_steps_per_second": 19.152,
"step": 18
},
{
"epoch": 0.015265642261725419,
"grad_norm": 1.0389295816421509,
"learning_rate": 6.909830056250527e-05,
"loss": 0.2804,
"step": 19
},
{
"epoch": 0.016069097117605704,
"grad_norm": 1.001739263534546,
"learning_rate": 5.000000000000002e-05,
"loss": 0.2773,
"step": 20
},
{
"epoch": 0.01687255197348599,
"grad_norm": 0.8247617483139038,
"learning_rate": 3.308693936411421e-05,
"loss": 0.2343,
"step": 21
},
{
"epoch": 0.01687255197348599,
"eval_loss": 0.2293698489665985,
"eval_runtime": 27.5151,
"eval_samples_per_second": 38.124,
"eval_steps_per_second": 19.08,
"step": 21
},
{
"epoch": 0.017676006829366275,
"grad_norm": 0.8856362700462341,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.236,
"step": 22
},
{
"epoch": 0.01847946168524656,
"grad_norm": 0.6518480777740479,
"learning_rate": 8.645454235739903e-06,
"loss": 0.2408,
"step": 23
},
{
"epoch": 0.019282916541126845,
"grad_norm": 0.777733325958252,
"learning_rate": 2.1852399266194314e-06,
"loss": 0.2459,
"step": 24
},
{
"epoch": 0.019282916541126845,
"eval_loss": 0.2238992303609848,
"eval_runtime": 27.5262,
"eval_samples_per_second": 38.109,
"eval_steps_per_second": 19.073,
"step": 24
},
{
"epoch": 0.02008637139700713,
"grad_norm": 0.6574348211288452,
"learning_rate": 0.0,
"loss": 0.2059,
"step": 25
}
],
"logging_steps": 1,
"max_steps": 25,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1802385752064000.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
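
For readers inspecting this checkpoint outside the Trainer, here is a minimal sketch of how the state above can be parsed. The local path is an assumption (not part of the file itself), and the split between training and evaluation records simply follows the keys visible in `log_history`: training entries carry `loss`, evaluation entries carry `eval_loss`, and both are keyed by `step`.

```python
import json

# A minimal sketch, assuming this checkpoint's trainer_state.json has been
# downloaded locally; the path below is an assumption, not taken from the log.
STATE_PATH = "last-checkpoint/trainer_state.json"

with open(STATE_PATH, "r", encoding="utf-8") as f:
    state = json.load(f)

# log_history mixes two record types: training logs (have "loss") and
# evaluation logs (have "eval_loss"), both keyed by "step".
train_logs = [r for r in state["log_history"] if "loss" in r]
eval_logs = [r for r in state["log_history"] if "eval_loss" in r]

print(f"global_step={state['global_step']}  max_steps={state['max_steps']}")
print(f"final train loss: {train_logs[-1]['loss']:.4f} at step {train_logs[-1]['step']}")
print(f"final eval loss:  {eval_logs[-1]['eval_loss']:.4f} at step {eval_logs[-1]['step']}")
```

Run against the state shown here, this would report a final training loss of 0.2059 at step 25 and a final eval loss of about 0.2239 at step 24, matching the last entries of `log_history`.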