{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.20587427944002196,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01372495196266813,
      "grad_norm": 23.43617820739746,
      "learning_rate": 6.378600823045268e-07,
      "loss": 3.6307,
      "step": 100
    },
    {
      "epoch": 0.02744990392533626,
      "grad_norm": 27.224123001098633,
      "learning_rate": 1.3237311385459534e-06,
      "loss": 0.8845,
      "step": 200
    },
    {
      "epoch": 0.04117485588800439,
      "grad_norm": 22.287761688232422,
      "learning_rate": 2.00960219478738e-06,
      "loss": 0.5517,
      "step": 300
    },
    {
      "epoch": 0.05489980785067252,
      "grad_norm": 14.750782012939453,
      "learning_rate": 2.6954732510288067e-06,
      "loss": 0.3488,
      "step": 400
    },
    {
      "epoch": 0.06862475981334065,
      "grad_norm": 51.949310302734375,
      "learning_rate": 3.3813443072702336e-06,
      "loss": 0.3129,
      "step": 500
    },
    {
      "epoch": 0.08234971177600878,
      "grad_norm": 10.03300666809082,
      "learning_rate": 4.06721536351166e-06,
      "loss": 0.2679,
      "step": 600
    },
    {
      "epoch": 0.09607466373867692,
      "grad_norm": 26.423612594604492,
      "learning_rate": 4.753086419753087e-06,
      "loss": 0.2911,
      "step": 700
    },
    {
      "epoch": 0.10979961570134504,
      "grad_norm": 3.058533191680908,
      "learning_rate": 5.438957475994513e-06,
      "loss": 0.2801,
      "step": 800
    },
    {
      "epoch": 0.12352456766401318,
      "grad_norm": 18.2351016998291,
      "learning_rate": 6.12482853223594e-06,
      "loss": 0.2771,
      "step": 900
    },
    {
      "epoch": 0.1372495196266813,
      "grad_norm": 7.775646686553955,
      "learning_rate": 6.810699588477366e-06,
      "loss": 0.2446,
      "step": 1000
    },
    {
      "epoch": 0.15097447158934943,
      "grad_norm": 10.91329574584961,
      "learning_rate": 7.496570644718793e-06,
      "loss": 0.2736,
      "step": 1100
    },
    {
      "epoch": 0.16469942355201755,
      "grad_norm": 32.206260681152344,
      "learning_rate": 8.18244170096022e-06,
      "loss": 0.2983,
      "step": 1200
    },
    {
      "epoch": 0.1784243755146857,
      "grad_norm": 30.968856811523438,
      "learning_rate": 8.868312757201646e-06,
      "loss": 0.2933,
      "step": 1300
    },
    {
      "epoch": 0.19214932747735383,
      "grad_norm": 31.030960083007812,
      "learning_rate": 9.554183813443072e-06,
      "loss": 0.2826,
      "step": 1400
    },
    {
      "epoch": 0.20587427944002196,
      "grad_norm": 17.41258430480957,
      "learning_rate": 9.97331096538051e-06,
      "loss": 0.2527,
      "step": 1500
    }
  ],
  "logging_steps": 100,
  "max_steps": 14572,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}