{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.315789473684211,
  "eval_steps": 500,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 29.249841690063477,
      "learning_rate": 5.0000000000000004e-08,
      "loss": 2.3522,
      "step": 1
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 28.9044246673584,
      "learning_rate": 1.0000000000000001e-07,
      "loss": 2.3252,
      "step": 2
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 29.364099502563477,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 2.3516,
      "step": 3
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 29.600059509277344,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 2.3531,
      "step": 4
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 28.67375373840332,
      "learning_rate": 2.5000000000000004e-07,
      "loss": 2.2996,
      "step": 5
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 28.520565032958984,
      "learning_rate": 3.0000000000000004e-07,
      "loss": 2.261,
      "step": 6
    },
    {
      "epoch": 1.0,
      "grad_norm": 28.520565032958984,
      "learning_rate": 3.5000000000000004e-07,
      "loss": 2.4103,
      "step": 7
    },
    {
      "epoch": 1.1578947368421053,
      "grad_norm": 50.07780075073242,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 2.3406,
      "step": 8
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 27.654495239257812,
      "learning_rate": 4.5000000000000003e-07,
      "loss": 2.2567,
      "step": 9
    },
    {
      "epoch": 1.4736842105263157,
      "grad_norm": 29.456050872802734,
      "learning_rate": 5.000000000000001e-07,
      "loss": 2.2952,
      "step": 10
    },
    {
      "epoch": 1.631578947368421,
      "grad_norm": 29.20940399169922,
      "learning_rate": 5.5e-07,
      "loss": 2.2942,
      "step": 11
    },
    {
      "epoch": 1.7894736842105263,
      "grad_norm": 29.31597328186035,
      "learning_rate": 6.000000000000001e-07,
      "loss": 2.1754,
      "step": 12
    },
    {
      "epoch": 1.9473684210526314,
      "grad_norm": 28.444318771362305,
      "learning_rate": 6.5e-07,
      "loss": 2.1927,
      "step": 13
    },
    {
      "epoch": 2.0,
      "grad_norm": 28.444318771362305,
      "learning_rate": 7.000000000000001e-07,
      "loss": 2.1262,
      "step": 14
    },
    {
      "epoch": 2.1578947368421053,
      "grad_norm": 47.70970153808594,
      "learning_rate": 7.5e-07,
      "loss": 2.1372,
      "step": 15
    },
    {
      "epoch": 2.3157894736842106,
      "grad_norm": 27.695999145507812,
      "learning_rate": 8.000000000000001e-07,
      "loss": 2.0525,
      "step": 16
    },
    {
      "epoch": 2.473684210526316,
      "grad_norm": 27.79825210571289,
      "learning_rate": 8.500000000000001e-07,
      "loss": 2.0136,
      "step": 17
    },
    {
      "epoch": 2.6315789473684212,
      "grad_norm": 26.69138526916504,
      "learning_rate": 9.000000000000001e-07,
      "loss": 1.8875,
      "step": 18
    },
    {
      "epoch": 2.7894736842105265,
      "grad_norm": 26.22272300720215,
      "learning_rate": 9.500000000000001e-07,
      "loss": 1.7887,
      "step": 19
    },
    {
      "epoch": 2.9473684210526314,
      "grad_norm": 25.795120239257812,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.6733,
      "step": 20
    },
    {
      "epoch": 3.0,
      "grad_norm": 40.42267990112305,
      "learning_rate": 1.0500000000000001e-06,
      "loss": 1.6464,
      "step": 21
    },
    {
      "epoch": 3.1578947368421053,
      "grad_norm": 24.54705810546875,
      "learning_rate": 1.1e-06,
      "loss": 1.5408,
      "step": 22
    },
    {
      "epoch": 3.3157894736842106,
      "grad_norm": 24.196022033691406,
      "learning_rate": 1.1500000000000002e-06,
      "loss": 1.3596,
      "step": 23
    },
    {
      "epoch": 3.473684210526316,
      "grad_norm": 24.2416934967041,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 1.2173,
      "step": 24
    },
    {
      "epoch": 3.6315789473684212,
      "grad_norm": 24.014326095581055,
      "learning_rate": 1.25e-06,
      "loss": 1.0318,
      "step": 25
    },
    {
      "epoch": 3.7894736842105265,
      "grad_norm": 23.31243324279785,
      "learning_rate": 1.3e-06,
      "loss": 0.8277,
      "step": 26
    },
    {
      "epoch": 3.9473684210526314,
      "grad_norm": 20.742502212524414,
      "learning_rate": 1.3500000000000002e-06,
      "loss": 0.6301,
      "step": 27
    },
    {
      "epoch": 4.0,
      "grad_norm": 20.742502212524414,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.4823,
      "step": 28
    },
    {
      "epoch": 4.157894736842105,
      "grad_norm": 28.484569549560547,
      "learning_rate": 1.45e-06,
      "loss": 0.4436,
      "step": 29
    },
    {
      "epoch": 4.315789473684211,
      "grad_norm": 12.672039031982422,
      "learning_rate": 1.5e-06,
      "loss": 0.3486,
      "step": 30
    }
  ],
  "logging_steps": 1,
  "max_steps": 36,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 6,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7584034368286884e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}