{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.5568,
"eval_steps": 500,
"global_step": 800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"grad_norm": 0.17174428701400757,
"learning_rate": 0.0001,
"loss": 1.229,
"step": 50
},
{
"epoch": 0.32,
"grad_norm": 0.1649257093667984,
"learning_rate": 0.0002,
"loss": 1.0561,
"step": 100
},
{
"epoch": 0.48,
"grad_norm": 0.17078982293605804,
"learning_rate": 0.0001943566591422122,
"loss": 1.0143,
"step": 150
},
{
"epoch": 0.64,
"grad_norm": 0.17896169424057007,
"learning_rate": 0.0001887133182844244,
"loss": 0.9925,
"step": 200
},
{
"epoch": 0.8,
"grad_norm": 0.17166747152805328,
"learning_rate": 0.0001830699774266366,
"loss": 0.9619,
"step": 250
},
{
"epoch": 0.96,
"grad_norm": 0.1940702199935913,
"learning_rate": 0.00017742663656884877,
"loss": 0.9658,
"step": 300
},
{
"epoch": 1.1184,
"grad_norm": 0.1746826320886612,
"learning_rate": 0.00017178329571106095,
"loss": 0.9479,
"step": 350
},
{
"epoch": 1.2784,
"grad_norm": 0.18184725940227509,
"learning_rate": 0.00016613995485327313,
"loss": 0.9126,
"step": 400
},
{
"epoch": 1.4384000000000001,
"grad_norm": 0.18270964920520782,
"learning_rate": 0.00016049661399548536,
"loss": 0.9387,
"step": 450
},
{
"epoch": 1.5984,
"grad_norm": 0.17156830430030823,
"learning_rate": 0.00015485327313769753,
"loss": 0.9285,
"step": 500
},
{
"epoch": 1.7584,
"grad_norm": 0.15996217727661133,
"learning_rate": 0.0001492099322799097,
"loss": 0.9388,
"step": 550
},
{
"epoch": 1.9184,
"grad_norm": 0.1889728307723999,
"learning_rate": 0.0001435665914221219,
"loss": 0.9368,
"step": 600
},
{
"epoch": 2.0768,
"grad_norm": 0.1747490018606186,
"learning_rate": 0.0001379232505643341,
"loss": 0.9214,
"step": 650
},
{
"epoch": 2.2368,
"grad_norm": 0.1738065481185913,
"learning_rate": 0.0001322799097065463,
"loss": 0.9004,
"step": 700
},
{
"epoch": 2.3968,
"grad_norm": 0.17394229769706726,
"learning_rate": 0.00012663656884875847,
"loss": 0.9218,
"step": 750
},
{
"epoch": 2.5568,
"grad_norm": 0.20736579596996307,
"learning_rate": 0.00012099322799097066,
"loss": 0.8929,
"step": 800
}
],
"logging_steps": 50,
"max_steps": 1872,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.4867169473186816e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}