{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9636552440290758,
  "eval_steps": 500,
  "global_step": 180,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08307372793354102,
      "grad_norm": 0.5125107169151306,
      "learning_rate": 2.0833333333333334e-06,
      "loss": 3.5515,
      "step": 5
    },
    {
      "epoch": 0.16614745586708204,
      "grad_norm": 0.4532736539840698,
      "learning_rate": 4.166666666666667e-06,
      "loss": 3.6807,
      "step": 10
    },
    {
      "epoch": 0.24922118380062305,
      "grad_norm": 0.4483909606933594,
      "learning_rate": 6.25e-06,
      "loss": 3.5357,
      "step": 15
    },
    {
      "epoch": 0.3322949117341641,
      "grad_norm": 0.5020395517349243,
      "learning_rate": 8.333333333333334e-06,
      "loss": 3.645,
      "step": 20
    },
    {
      "epoch": 0.4153686396677051,
      "grad_norm": 0.8230155110359192,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 3.5682,
      "step": 25
    },
    {
      "epoch": 0.4984423676012461,
      "grad_norm": 1.3333394527435303,
      "learning_rate": 1.25e-05,
      "loss": 3.5155,
      "step": 30
    },
    {
      "epoch": 0.5815160955347871,
      "grad_norm": 0.7118450999259949,
      "learning_rate": 1.4583333333333335e-05,
      "loss": 3.4724,
      "step": 35
    },
    {
      "epoch": 0.6645898234683282,
      "grad_norm": 1.0282073020935059,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 3.4205,
      "step": 40
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 1.0468826293945312,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 3.3295,
      "step": 45
    },
    {
      "epoch": 0.8307372793354102,
      "grad_norm": 0.8730693459510803,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 3.0831,
      "step": 50
    },
    {
      "epoch": 0.9138110072689511,
      "grad_norm": 0.9377574324607849,
      "learning_rate": 2.2916666666666667e-05,
      "loss": 2.8412,
      "step": 55
    },
    {
      "epoch": 0.9968847352024922,
      "grad_norm": 1.0129190683364868,
      "learning_rate": 2.5e-05,
      "loss": 2.3229,
      "step": 60
    },
    {
      "epoch": 1.066458982346833,
      "grad_norm": 0.8808080554008484,
      "learning_rate": 2.7083333333333332e-05,
      "loss": 2.0489,
      "step": 65
    },
    {
      "epoch": 1.1495327102803738,
      "grad_norm": 0.8092290759086609,
      "learning_rate": 2.916666666666667e-05,
      "loss": 1.9307,
      "step": 70
    },
    {
      "epoch": 1.2326064382139148,
      "grad_norm": 0.8002105951309204,
      "learning_rate": 3.125e-05,
      "loss": 1.7402,
      "step": 75
    },
    {
      "epoch": 1.3156801661474558,
      "grad_norm": 0.8581973314285278,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.6696,
      "step": 80
    },
    {
      "epoch": 1.398753894080997,
      "grad_norm": 0.7443532347679138,
      "learning_rate": 3.541666666666667e-05,
      "loss": 1.5175,
      "step": 85
    },
    {
      "epoch": 1.4818276220145379,
      "grad_norm": 0.9584633708000183,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.3968,
      "step": 90
    },
    {
      "epoch": 1.5649013499480788,
      "grad_norm": 0.8683136105537415,
      "learning_rate": 3.958333333333333e-05,
      "loss": 1.299,
      "step": 95
    },
    {
      "epoch": 1.64797507788162,
      "grad_norm": 0.7825261950492859,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.1271,
      "step": 100
    },
    {
      "epoch": 1.731048805815161,
      "grad_norm": 0.654670000076294,
      "learning_rate": 4.375e-05,
      "loss": 1.14,
      "step": 105
    },
    {
      "epoch": 1.814122533748702,
      "grad_norm": 0.7511588335037231,
      "learning_rate": 4.5833333333333334e-05,
      "loss": 1.1614,
      "step": 110
    },
    {
      "epoch": 1.897196261682243,
      "grad_norm": 0.6596113443374634,
      "learning_rate": 4.791666666666667e-05,
      "loss": 1.0587,
      "step": 115
    },
    {
      "epoch": 1.980269989615784,
      "grad_norm": 0.7166474461555481,
      "learning_rate": 5e-05,
      "loss": 1.0169,
      "step": 120
    },
    {
      "epoch": 2.0498442367601246,
      "grad_norm": 0.6620935797691345,
      "learning_rate": 4.999735579817769e-05,
      "loss": 0.985,
      "step": 125
    },
    {
      "epoch": 2.132917964693666,
      "grad_norm": 0.7846258878707886,
      "learning_rate": 4.998942375205502e-05,
      "loss": 0.9594,
      "step": 130
    },
    {
      "epoch": 2.2159916926272065,
      "grad_norm": 0.652454137802124,
      "learning_rate": 4.997620553954645e-05,
      "loss": 0.9322,
      "step": 135
    },
    {
      "epoch": 2.2990654205607477,
      "grad_norm": 0.7020059823989868,
      "learning_rate": 4.995770395678171e-05,
      "loss": 0.9242,
      "step": 140
    },
    {
      "epoch": 2.382139148494289,
      "grad_norm": 0.8148695826530457,
      "learning_rate": 4.993392291751431e-05,
      "loss": 0.8394,
      "step": 145
    },
    {
      "epoch": 2.4652128764278296,
      "grad_norm": 0.8813133835792542,
      "learning_rate": 4.990486745229364e-05,
      "loss": 0.992,
      "step": 150
    },
    {
      "epoch": 2.5482866043613708,
      "grad_norm": 0.7893730401992798,
      "learning_rate": 4.987054370740083e-05,
      "loss": 0.899,
      "step": 155
    },
    {
      "epoch": 2.6313603322949115,
      "grad_norm": 0.7719221711158752,
      "learning_rate": 4.983095894354858e-05,
      "loss": 0.9416,
      "step": 160
    },
    {
      "epoch": 2.7144340602284527,
      "grad_norm": 0.8439667820930481,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 0.8401,
      "step": 165
    },
    {
      "epoch": 2.797507788161994,
      "grad_norm": 0.833251953125,
      "learning_rate": 4.973604096452361e-05,
      "loss": 0.817,
      "step": 170
    },
    {
      "epoch": 2.8805815160955346,
      "grad_norm": 0.7526916265487671,
      "learning_rate": 4.9680727827934354e-05,
      "loss": 0.8191,
      "step": 175
    },
    {
      "epoch": 2.9636552440290758,
      "grad_norm": 1.0640058517456055,
      "learning_rate": 4.962019382530521e-05,
      "loss": 0.8607,
      "step": 180
    }
  ],
  "logging_steps": 5,
  "max_steps": 1200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 60,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7793188999790592.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}