{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.96,
  "eval_steps": 500,
  "global_step": 24,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "clip_ratio": 0.0,
      "completion_length": 532.6341304779053,
      "epoch": 0.32,
      "grad_norm": 0.9071540047756161,
      "kl": 0.0,
      "learning_rate": 3.3333333333333333e-06,
      "loss": -0.006,
      "num_tokens": 1244103.0,
      "reward": 0.581380233168602,
      "reward_std": 0.17345089931041002,
      "rewards/compiled_reward": 0.5813802070915699,
      "step": 2
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 592.2552280426025,
      "epoch": 0.64,
      "grad_norm": 1.1007144051464353,
      "kl": 0.009278297424316406,
      "learning_rate": 1e-05,
      "loss": 0.0884,
      "num_tokens": 2533075.0,
      "reward": 1.2021484710276127,
      "reward_std": 0.7038572877645493,
      "rewards/compiled_reward": 1.2021484412252903,
      "step": 4
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 489.96876335144043,
      "epoch": 0.96,
      "grad_norm": 0.9559919963618165,
      "kl": 0.06231689453125,
      "learning_rate": 9.777864028930705e-06,
      "loss": 0.0432,
      "num_tokens": 3786199.0,
      "reward": 1.9729818180203438,
      "reward_std": 0.8437984921038151,
      "rewards/compiled_reward": 1.9729817658662796,
      "step": 6
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 514.4049606323242,
      "epoch": 1.32,
      "grad_norm": 0.8994591128856398,
      "kl": 0.0983123779296875,
      "learning_rate": 9.131193871579975e-06,
      "loss": 0.062,
      "num_tokens": 5038070.0,
      "reward": 2.1230469569563866,
      "reward_std": 0.8294221684336662,
      "rewards/compiled_reward": 2.123046852648258,
      "step": 8
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 566.2734565734863,
      "epoch": 1.6400000000000001,
      "grad_norm": 0.7238876816698709,
      "kl": 0.1275634765625,
      "learning_rate": 8.117449009293668e-06,
      "loss": 0.0575,
      "num_tokens": 6317864.0,
      "reward": 2.7200521528720856,
      "reward_std": 0.5833112373948097,
      "rewards/compiled_reward": 2.7200520783662796,
      "step": 10
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 539.0247611999512,
      "epoch": 1.96,
      "grad_norm": 0.6748076119984399,
      "kl": 0.145477294921875,
      "learning_rate": 6.8267051218319766e-06,
      "loss": 0.0371,
      "num_tokens": 7565020.0,
      "reward": 2.750000074505806,
      "reward_std": 0.40718091931194067,
      "rewards/compiled_reward": 2.750000014901161,
      "step": 12
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 525.386739730835,
      "epoch": 2.32,
      "grad_norm": 12.587918069852307,
      "kl": 0.261199951171875,
      "learning_rate": 5.373650467932122e-06,
      "loss": 0.0467,
      "num_tokens": 8817637.0,
      "reward": 2.9186198711395264,
      "reward_std": 0.32051649037748575,
      "rewards/compiled_reward": 2.9186198115348816,
      "step": 14
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 517.9622592926025,
      "epoch": 2.64,
      "grad_norm": 0.7052158461678741,
      "kl": 0.16070556640625,
      "learning_rate": 3.887395330218429e-06,
      "loss": 0.0419,
      "num_tokens": 10051840.0,
      "reward": 2.9300131052732468,
      "reward_std": 0.4993608547374606,
      "rewards/compiled_reward": 2.9300130009651184,
      "step": 16
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 467.34377098083496,
      "epoch": 2.96,
      "grad_norm": 0.8429441508712391,
      "kl": 0.166748046875,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.0523,
      "num_tokens": 11267024.0,
      "reward": 2.7708334028720856,
      "reward_std": 0.7680677343159914,
      "rewards/compiled_reward": 2.7708333283662796,
      "step": 18
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 504.5286636352539,
      "epoch": 3.32,
      "grad_norm": 0.7525132694002632,
      "kl": 0.156494140625,
      "learning_rate": 1.3347406408508695e-06,
      "loss": 0.0645,
      "num_tokens": 12512150.0,
      "reward": 2.5833334028720856,
      "reward_std": 0.852101992815733,
      "rewards/compiled_reward": 2.583333343267441,
      "step": 20
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 489.8724060058594,
      "epoch": 3.64,
      "grad_norm": 0.7232325174102834,
      "kl": 0.17425537109375,
      "learning_rate": 4.951556604879049e-07,
      "loss": 0.058,
      "num_tokens": 13725860.0,
      "reward": 2.7301433235406876,
      "reward_std": 0.6854851758107543,
      "rewards/compiled_reward": 2.730143219232559,
      "step": 22
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 480.85418128967285,
      "epoch": 3.96,
      "grad_norm": 0.8329086910965117,
      "kl": 0.18341064453125,
      "learning_rate": 5.584586887435739e-08,
      "loss": 0.0553,
      "num_tokens": 14944987.0,
      "reward": 2.772135466337204,
      "reward_std": 0.7318777162581682,
      "rewards/compiled_reward": 2.7721354216337204,
      "step": 24
    }
  ],
  "logging_steps": 2,
  "max_steps": 24,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 30,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}