{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.7363353157745681,
"eval_steps": 354,
"global_step": 650,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05664117813650524,
"grad_norm": 0.7603653073310852,
"learning_rate": 0.0001978110599078341,
"loss": 0.9425,
"step": 50
},
{
"epoch": 0.11328235627301048,
"grad_norm": 0.6873273849487305,
"learning_rate": 0.00019205069124423964,
"loss": 0.6078,
"step": 100
},
{
"epoch": 0.16992353440951571,
"grad_norm": 0.6323167085647583,
"learning_rate": 0.00018629032258064517,
"loss": 0.6748,
"step": 150
},
{
"epoch": 0.22656471254602095,
"grad_norm": 1.0095610618591309,
"learning_rate": 0.0001805299539170507,
"loss": 0.6594,
"step": 200
},
{
"epoch": 0.2832058906825262,
"grad_norm": 0.5822212100028992,
"learning_rate": 0.00017476958525345623,
"loss": 0.6317,
"step": 250
},
{
"epoch": 0.33984706881903143,
"grad_norm": 0.8490907549858093,
"learning_rate": 0.00016900921658986176,
"loss": 0.5742,
"step": 300
},
{
"epoch": 0.3964882469555367,
"grad_norm": 0.6252707242965698,
"learning_rate": 0.0001632488479262673,
"loss": 0.5502,
"step": 350
},
{
"epoch": 0.4010195412064571,
"eval_loss": 0.6019027233123779,
"eval_runtime": 159.9351,
"eval_samples_per_second": 9.81,
"eval_steps_per_second": 2.457,
"step": 354
},
{
"epoch": 0.4531294250920419,
"grad_norm": 0.656812310218811,
"learning_rate": 0.00015748847926267282,
"loss": 0.5686,
"step": 400
},
{
"epoch": 0.5097706032285472,
"grad_norm": 0.7391073703765869,
"learning_rate": 0.00015172811059907835,
"loss": 0.5701,
"step": 450
},
{
"epoch": 0.5664117813650524,
"grad_norm": 0.9210707545280457,
"learning_rate": 0.00014596774193548388,
"loss": 0.6397,
"step": 500
},
{
"epoch": 0.6230529595015576,
"grad_norm": 0.8228403329849243,
"learning_rate": 0.00014020737327188939,
"loss": 0.5822,
"step": 550
},
{
"epoch": 0.6796941376380629,
"grad_norm": 0.716748833656311,
"learning_rate": 0.00013444700460829494,
"loss": 0.5881,
"step": 600
},
{
"epoch": 0.7363353157745681,
"grad_norm": 0.7144941091537476,
"learning_rate": 0.00012868663594470047,
"loss": 0.6032,
"step": 650
}
],
"logging_steps": 50,
"max_steps": 1766,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.185067752241357e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}