tag-user/checkpoint-120/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.980269989615784,
"eval_steps": 500,
"global_step": 120,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08307372793354102,
"grad_norm": 0.5125107169151306,
"learning_rate": 2.0833333333333334e-06,
"loss": 3.5515,
"step": 5
},
{
"epoch": 0.16614745586708204,
"grad_norm": 0.4532736539840698,
"learning_rate": 4.166666666666667e-06,
"loss": 3.6807,
"step": 10
},
{
"epoch": 0.24922118380062305,
"grad_norm": 0.4483909606933594,
"learning_rate": 6.25e-06,
"loss": 3.5357,
"step": 15
},
{
"epoch": 0.3322949117341641,
"grad_norm": 0.5020395517349243,
"learning_rate": 8.333333333333334e-06,
"loss": 3.645,
"step": 20
},
{
"epoch": 0.4153686396677051,
"grad_norm": 0.8230155110359192,
"learning_rate": 1.0416666666666668e-05,
"loss": 3.5682,
"step": 25
},
{
"epoch": 0.4984423676012461,
"grad_norm": 1.3333394527435303,
"learning_rate": 1.25e-05,
"loss": 3.5155,
"step": 30
},
{
"epoch": 0.5815160955347871,
"grad_norm": 0.7118450999259949,
"learning_rate": 1.4583333333333335e-05,
"loss": 3.4724,
"step": 35
},
{
"epoch": 0.6645898234683282,
"grad_norm": 1.0282073020935059,
"learning_rate": 1.6666666666666667e-05,
"loss": 3.4205,
"step": 40
},
{
"epoch": 0.7476635514018691,
"grad_norm": 1.0468826293945312,
"learning_rate": 1.8750000000000002e-05,
"loss": 3.3295,
"step": 45
},
{
"epoch": 0.8307372793354102,
"grad_norm": 0.8730693459510803,
"learning_rate": 2.0833333333333336e-05,
"loss": 3.0831,
"step": 50
},
{
"epoch": 0.9138110072689511,
"grad_norm": 0.9377574324607849,
"learning_rate": 2.2916666666666667e-05,
"loss": 2.8412,
"step": 55
},
{
"epoch": 0.9968847352024922,
"grad_norm": 1.0129190683364868,
"learning_rate": 2.5e-05,
"loss": 2.3229,
"step": 60
},
{
"epoch": 1.066458982346833,
"grad_norm": 0.8808080554008484,
"learning_rate": 2.7083333333333332e-05,
"loss": 2.0489,
"step": 65
},
{
"epoch": 1.1495327102803738,
"grad_norm": 0.8092290759086609,
"learning_rate": 2.916666666666667e-05,
"loss": 1.9307,
"step": 70
},
{
"epoch": 1.2326064382139148,
"grad_norm": 0.8002105951309204,
"learning_rate": 3.125e-05,
"loss": 1.7402,
"step": 75
},
{
"epoch": 1.3156801661474558,
"grad_norm": 0.8581973314285278,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.6696,
"step": 80
},
{
"epoch": 1.398753894080997,
"grad_norm": 0.7443532347679138,
"learning_rate": 3.541666666666667e-05,
"loss": 1.5175,
"step": 85
},
{
"epoch": 1.4818276220145379,
"grad_norm": 0.9584633708000183,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.3968,
"step": 90
},
{
"epoch": 1.5649013499480788,
"grad_norm": 0.8683136105537415,
"learning_rate": 3.958333333333333e-05,
"loss": 1.299,
"step": 95
},
{
"epoch": 1.64797507788162,
"grad_norm": 0.7825261950492859,
"learning_rate": 4.166666666666667e-05,
"loss": 1.1271,
"step": 100
},
{
"epoch": 1.731048805815161,
"grad_norm": 0.654670000076294,
"learning_rate": 4.375e-05,
"loss": 1.14,
"step": 105
},
{
"epoch": 1.814122533748702,
"grad_norm": 0.7511588335037231,
"learning_rate": 4.5833333333333334e-05,
"loss": 1.1614,
"step": 110
},
{
"epoch": 1.897196261682243,
"grad_norm": 0.6596113443374634,
"learning_rate": 4.791666666666667e-05,
"loss": 1.0587,
"step": 115
},
{
"epoch": 1.980269989615784,
"grad_norm": 0.7166474461555481,
"learning_rate": 5e-05,
"loss": 1.0169,
"step": 120
}
],
"logging_steps": 5,
"max_steps": 1200,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 60,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5207292018425856.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}