{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9966329966329966,
  "eval_steps": 500,
  "global_step": 148,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04713804713804714,
      "grad_norm": NaN,
      "learning_rate": 2.1428571428571428e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.09427609427609428,
      "grad_norm": NaN,
      "learning_rate": 4.642857142857143e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.1414141414141414,
      "grad_norm": NaN,
      "learning_rate": 4.975670171853926e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.18855218855218855,
      "grad_norm": NaN,
      "learning_rate": 4.88646908061933e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 0.2356902356902357,
      "grad_norm": NaN,
      "learning_rate": 4.734081600808531e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.2828282828282828,
      "grad_norm": NaN,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.32996632996632996,
      "grad_norm": NaN,
      "learning_rate": 4.257452643564155e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.3771043771043771,
      "grad_norm": NaN,
      "learning_rate": 3.945830840419966e-05,
      "loss": 0.0,
      "step": 56
    },
    {
      "epoch": 0.42424242424242425,
      "grad_norm": NaN,
      "learning_rate": 3.5959278669726935e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.4713804713804714,
      "grad_norm": NaN,
      "learning_rate": 3.217008081777726e-05,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 0.5185185185185185,
      "grad_norm": NaN,
      "learning_rate": 2.8191041196514873e-05,
      "loss": 0.0,
      "step": 77
    },
    {
      "epoch": 0.5656565656565656,
      "grad_norm": NaN,
      "learning_rate": 2.4127512582437485e-05,
      "loss": 0.0,
      "step": 84
    },
    {
      "epoch": 0.6127946127946128,
      "grad_norm": NaN,
      "learning_rate": 2.0087084761679245e-05,
      "loss": 0.0,
      "step": 91
    },
    {
      "epoch": 0.6599326599326599,
      "grad_norm": NaN,
      "learning_rate": 1.617673588215328e-05,
      "loss": 0.0,
      "step": 98
    },
    {
      "epoch": 0.7070707070707071,
      "grad_norm": NaN,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.0,
      "step": 105
    },
    {
      "epoch": 0.7542087542087542,
      "grad_norm": NaN,
      "learning_rate": 9.154225815032242e-06,
      "loss": 0.0,
      "step": 112
    },
    {
      "epoch": 0.8013468013468014,
      "grad_norm": NaN,
      "learning_rate": 6.22799917546252e-06,
      "loss": 0.0,
      "step": 119
    },
    {
      "epoch": 0.8484848484848485,
      "grad_norm": NaN,
      "learning_rate": 3.798797596089351e-06,
      "loss": 0.0,
      "step": 126
    },
    {
      "epoch": 0.8956228956228957,
      "grad_norm": NaN,
      "learning_rate": 1.9309388911139426e-06,
      "loss": 0.0,
      "step": 133
    },
    {
      "epoch": 0.9427609427609428,
      "grad_norm": NaN,
      "learning_rate": 6.738782355044049e-07,
      "loss": 0.0,
      "step": 140
    },
    {
      "epoch": 0.98989898989899,
      "grad_norm": NaN,
      "learning_rate": 6.089874350439506e-08,
      "loss": 0.0,
      "step": 147
    }
  ],
  "logging_steps": 7,
  "max_steps": 148,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 74,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1296015690362880.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}