{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.912280701754386,
  "eval_steps": 100,
  "global_step": 84,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03508771929824561,
      "grad_norm": 13.274463118636966,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.227,
      "step": 1
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 13.067768872256629,
      "learning_rate": 1.996992941167792e-05,
      "loss": 1.2833,
      "step": 5
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 3.029737230058556,
      "learning_rate": 1.9633708786158803e-05,
      "loss": 0.9766,
      "step": 10
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 1.6344674238695178,
      "learning_rate": 1.8936326403234125e-05,
      "loss": 0.8109,
      "step": 15
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 1.440061411744547,
      "learning_rate": 1.7903926695187595e-05,
      "loss": 0.7554,
      "step": 20
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 1.37302749270618,
      "learning_rate": 1.657521368569064e-05,
      "loss": 0.7119,
      "step": 25
    },
    {
      "epoch": 1.0350877192982457,
      "grad_norm": 1.7984147567981632,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.6836,
      "step": 30
    },
    {
      "epoch": 1.2105263157894737,
      "grad_norm": 1.005885201758115,
      "learning_rate": 1.3237339420583213e-05,
      "loss": 0.6187,
      "step": 35
    },
    {
      "epoch": 1.3859649122807016,
      "grad_norm": 0.9917450064965392,
      "learning_rate": 1.1353312997501313e-05,
      "loss": 0.5963,
      "step": 40
    },
    {
      "epoch": 1.5614035087719298,
      "grad_norm": 0.9760187114551988,
      "learning_rate": 9.418551710895243e-06,
      "loss": 0.5995,
      "step": 45
    },
    {
      "epoch": 1.736842105263158,
      "grad_norm": 0.976366471591892,
      "learning_rate": 7.505588559420188e-06,
      "loss": 0.5917,
      "step": 50
    },
    {
      "epoch": 1.912280701754386,
      "grad_norm": 0.8757263720939512,
      "learning_rate": 5.686139343187468e-06,
      "loss": 0.5868,
      "step": 55
    },
    {
      "epoch": 2.0701754385964914,
      "grad_norm": 0.7705568313648676,
      "learning_rate": 4.028414082972141e-06,
      "loss": 0.554,
      "step": 60
    },
    {
      "epoch": 2.245614035087719,
      "grad_norm": 0.7551160537629242,
      "learning_rate": 2.594559868909956e-06,
      "loss": 0.5186,
      "step": 65
    },
    {
      "epoch": 2.4210526315789473,
      "grad_norm": 0.737482342547591,
      "learning_rate": 1.4383310046973365e-06,
      "loss": 0.5083,
      "step": 70
    },
    {
      "epoch": 2.5964912280701755,
      "grad_norm": 0.7345189094446426,
      "learning_rate": 6.030737921409169e-07,
      "loss": 0.5097,
      "step": 75
    },
    {
      "epoch": 2.7719298245614032,
      "grad_norm": 0.6830233810548365,
      "learning_rate": 1.201015052319099e-07,
      "loss": 0.5063,
      "step": 80
    },
    {
      "epoch": 2.912280701754386,
      "step": 84,
      "total_flos": 6.582526671847424e+16,
      "train_loss": 0.6669661885216123,
      "train_runtime": 2548.4796,
      "train_samples_per_second": 0.533,
      "train_steps_per_second": 0.033
    }
  ],
  "logging_steps": 5,
  "max_steps": 84,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.582526671847424e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}