{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01954365564078761,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009771827820393806,
"grad_norm": 0.5417118072509766,
"learning_rate": 4.995602247740044e-05,
"loss": 1.378,
"step": 100
},
{
"epoch": 0.001954365564078761,
"grad_norm": 0.6493918895721436,
"learning_rate": 4.990715856340093e-05,
"loss": 1.3304,
"step": 200
},
{
"epoch": 0.0029315483461181415,
"grad_norm": 0.9062462449073792,
"learning_rate": 4.9858294649401425e-05,
"loss": 1.3284,
"step": 300
},
{
"epoch": 0.003908731128157522,
"grad_norm": 0.750052273273468,
"learning_rate": 4.9809430735401906e-05,
"loss": 1.3166,
"step": 400
},
{
"epoch": 0.004885913910196903,
"grad_norm": 0.6602022051811218,
"learning_rate": 4.97605668214024e-05,
"loss": 1.3166,
"step": 500
},
{
"epoch": 0.005863096692236283,
"grad_norm": 0.4193927049636841,
"learning_rate": 4.971170290740288e-05,
"loss": 1.3098,
"step": 600
},
{
"epoch": 0.006840279474275663,
"grad_norm": 0.6095415949821472,
"learning_rate": 4.966283899340338e-05,
"loss": 1.3103,
"step": 700
},
{
"epoch": 0.007817462256315045,
"grad_norm": 0.9943467378616333,
"learning_rate": 4.9613975079403865e-05,
"loss": 1.3096,
"step": 800
},
{
"epoch": 0.008794645038354424,
"grad_norm": 1.2263585329055786,
"learning_rate": 4.9565111165404346e-05,
"loss": 1.3067,
"step": 900
},
{
"epoch": 0.009771827820393805,
"grad_norm": 0.7198677659034729,
"learning_rate": 4.951624725140484e-05,
"loss": 1.3041,
"step": 1000
},
{
"epoch": 0.010749010602433185,
"grad_norm": 0.7370775938034058,
"learning_rate": 4.946738333740533e-05,
"loss": 1.302,
"step": 1100
},
{
"epoch": 0.011726193384472566,
"grad_norm": 0.5109437704086304,
"learning_rate": 4.941851942340582e-05,
"loss": 1.3089,
"step": 1200
},
{
"epoch": 0.012703376166511945,
"grad_norm": 0.1879555583000183,
"learning_rate": 4.9369655509406305e-05,
"loss": 1.3043,
"step": 1300
},
{
"epoch": 0.013680558948551327,
"grad_norm": 0.951046884059906,
"learning_rate": 4.932079159540679e-05,
"loss": 1.3098,
"step": 1400
},
{
"epoch": 0.014657741730590706,
"grad_norm": 0.2478829026222229,
"learning_rate": 4.927192768140728e-05,
"loss": 1.3026,
"step": 1500
},
{
"epoch": 0.01563492451263009,
"grad_norm": 0.5585843324661255,
"learning_rate": 4.9223063767407776e-05,
"loss": 1.3014,
"step": 1600
},
{
"epoch": 0.016612107294669467,
"grad_norm": 0.48532453179359436,
"learning_rate": 4.917419985340826e-05,
"loss": 1.2981,
"step": 1700
},
{
"epoch": 0.017589290076708848,
"grad_norm": 0.4233573079109192,
"learning_rate": 4.912533593940875e-05,
"loss": 1.2992,
"step": 1800
},
{
"epoch": 0.01856647285874823,
"grad_norm": 0.3272475600242615,
"learning_rate": 4.9076472025409234e-05,
"loss": 1.292,
"step": 1900
},
{
"epoch": 0.01954365564078761,
"grad_norm": 0.5299385786056519,
"learning_rate": 4.902760811140973e-05,
"loss": 1.2963,
"step": 2000
}
],
"logging_steps": 100,
"max_steps": 102335,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.508584634023936e+16,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}