GP-GPT/base/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9997659723847414,
"eval_steps": 500,
"global_step": 2136,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0468055230517201,
"grad_norm": 1.0054835081100464,
"learning_rate": 1.1533520599250935e-05,
"loss": 1.4107,
"step": 100
},
{
"epoch": 0.0936110461034402,
"grad_norm": 1.0428086519241333,
"learning_rate": 1.0967041198501872e-05,
"loss": 1.0774,
"step": 200
},
{
"epoch": 0.14041656915516032,
"grad_norm": 0.6874257326126099,
"learning_rate": 1.0400561797752808e-05,
"loss": 1.0431,
"step": 300
},
{
"epoch": 0.1872220922068804,
"grad_norm": 0.8036668300628662,
"learning_rate": 9.834082397003745e-06,
"loss": 1.0286,
"step": 400
},
{
"epoch": 0.23402761525860052,
"grad_norm": 0.7824677228927612,
"learning_rate": 9.26760299625468e-06,
"loss": 1.0153,
"step": 500
},
{
"epoch": 0.28083313831032064,
"grad_norm": 0.6508174538612366,
"learning_rate": 8.701123595505617e-06,
"loss": 1.0063,
"step": 600
},
{
"epoch": 0.32763866136204073,
"grad_norm": 0.7692238688468933,
"learning_rate": 8.134644194756553e-06,
"loss": 1.0015,
"step": 700
},
{
"epoch": 0.3744441844137608,
"grad_norm": 0.7233865261077881,
"learning_rate": 7.56816479400749e-06,
"loss": 0.9951,
"step": 800
},
{
"epoch": 0.4212497074654809,
"grad_norm": 0.7237682342529297,
"learning_rate": 7.0016853932584264e-06,
"loss": 0.9893,
"step": 900
},
{
"epoch": 0.46805523051720105,
"grad_norm": 0.7689889669418335,
"learning_rate": 6.435205992509363e-06,
"loss": 0.9864,
"step": 1000
},
{
"epoch": 0.5148607535689211,
"grad_norm": 0.7110843658447266,
"learning_rate": 5.868726591760299e-06,
"loss": 0.9783,
"step": 1100
},
{
"epoch": 0.5616662766206413,
"grad_norm": 0.8957160115242004,
"learning_rate": 5.3022471910112355e-06,
"loss": 0.9781,
"step": 1200
},
{
"epoch": 0.6084717996723613,
"grad_norm": 0.6917927861213684,
"learning_rate": 4.735767790262172e-06,
"loss": 0.9782,
"step": 1300
},
{
"epoch": 0.6552773227240815,
"grad_norm": 0.83516526222229,
"learning_rate": 4.169288389513108e-06,
"loss": 0.9715,
"step": 1400
},
{
"epoch": 0.7020828457758015,
"grad_norm": 0.6937043070793152,
"learning_rate": 3.602808988764045e-06,
"loss": 0.9682,
"step": 1500
},
{
"epoch": 0.7488883688275216,
"grad_norm": 0.6241235136985779,
"learning_rate": 3.0363295880149813e-06,
"loss": 0.9673,
"step": 1600
},
{
"epoch": 0.7956938918792418,
"grad_norm": 0.7788057327270508,
"learning_rate": 2.4698501872659176e-06,
"loss": 0.9646,
"step": 1700
},
{
"epoch": 0.8424994149309618,
"grad_norm": 0.8091927766799927,
"learning_rate": 1.9033707865168538e-06,
"loss": 0.9625,
"step": 1800
},
{
"epoch": 0.889304937982682,
"grad_norm": 0.7234342098236084,
"learning_rate": 1.3368913857677903e-06,
"loss": 0.9598,
"step": 1900
},
{
"epoch": 0.9361104610344021,
"grad_norm": 0.73792564868927,
"learning_rate": 7.704119850187266e-07,
"loss": 0.96,
"step": 2000
},
{
"epoch": 0.9829159840861221,
"grad_norm": 0.6532009243965149,
"learning_rate": 2.039325842696629e-07,
"loss": 0.9626,
"step": 2100
}
],
"logging_steps": 100,
"max_steps": 2136,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1092390198683107e+19,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
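
For reference, a minimal sketch of how this training log could be inspected offline, assuming the file has been downloaded locally as trainer_state.json; the filename and the plotting choices are illustrative and not part of the upload itself:

import json
import matplotlib.pyplot as plt

# Load the state exported by the Hugging Face Trainer (assumed local path).
with open("trainer_state.json") as f:
    state = json.load(f)

# Every entry in this particular log_history carries step, loss, and learning_rate.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]
lrs = [entry["learning_rate"] for entry in state["log_history"]]

# Plot the loss curve and the linear learning-rate decay over the single epoch.
fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True)
ax_loss.plot(steps, losses)
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("global step")
plt.tight_layout()
plt.savefig("training_curves.png")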