{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.8352,
"eval_steps": 500,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"grad_norm": 0.17174428701400757,
"learning_rate": 0.0001,
"loss": 1.229,
"step": 50
},
{
"epoch": 0.32,
"grad_norm": 0.1649257093667984,
"learning_rate": 0.0002,
"loss": 1.0561,
"step": 100
},
{
"epoch": 0.48,
"grad_norm": 0.17078982293605804,
"learning_rate": 0.0001943566591422122,
"loss": 1.0143,
"step": 150
},
{
"epoch": 0.64,
"grad_norm": 0.17896169424057007,
"learning_rate": 0.0001887133182844244,
"loss": 0.9925,
"step": 200
},
{
"epoch": 0.8,
"grad_norm": 0.17166747152805328,
"learning_rate": 0.0001830699774266366,
"loss": 0.9619,
"step": 250
},
{
"epoch": 0.96,
"grad_norm": 0.1940702199935913,
"learning_rate": 0.00017742663656884877,
"loss": 0.9658,
"step": 300
},
{
"epoch": 1.1184,
"grad_norm": 0.1746826320886612,
"learning_rate": 0.00017178329571106095,
"loss": 0.9479,
"step": 350
},
{
"epoch": 1.2784,
"grad_norm": 0.18184725940227509,
"learning_rate": 0.00016613995485327313,
"loss": 0.9126,
"step": 400
},
{
"epoch": 1.4384000000000001,
"grad_norm": 0.18270964920520782,
"learning_rate": 0.00016049661399548536,
"loss": 0.9387,
"step": 450
},
{
"epoch": 1.5984,
"grad_norm": 0.17156830430030823,
"learning_rate": 0.00015485327313769753,
"loss": 0.9285,
"step": 500
},
{
"epoch": 1.7584,
"grad_norm": 0.15996217727661133,
"learning_rate": 0.0001492099322799097,
"loss": 0.9388,
"step": 550
},
{
"epoch": 1.9184,
"grad_norm": 0.1889728307723999,
"learning_rate": 0.0001435665914221219,
"loss": 0.9368,
"step": 600
},
{
"epoch": 2.0768,
"grad_norm": 0.1747490018606186,
"learning_rate": 0.0001379232505643341,
"loss": 0.9214,
"step": 650
},
{
"epoch": 2.2368,
"grad_norm": 0.1738065481185913,
"learning_rate": 0.0001322799097065463,
"loss": 0.9004,
"step": 700
},
{
"epoch": 2.3968,
"grad_norm": 0.17394229769706726,
"learning_rate": 0.00012663656884875847,
"loss": 0.9218,
"step": 750
},
{
"epoch": 2.5568,
"grad_norm": 0.20736579596996307,
"learning_rate": 0.00012099322799097066,
"loss": 0.8929,
"step": 800
},
{
"epoch": 2.7168,
"grad_norm": 0.19318443536758423,
"learning_rate": 0.00011534988713318284,
"loss": 0.8941,
"step": 850
},
{
"epoch": 2.8768000000000002,
"grad_norm": 0.19400037825107574,
"learning_rate": 0.00010970654627539505,
"loss": 0.9114,
"step": 900
},
{
"epoch": 3.0352,
"grad_norm": 0.17894019186496735,
"learning_rate": 0.00010406320541760724,
"loss": 0.8913,
"step": 950
},
{
"epoch": 3.1952,
"grad_norm": 0.19137370586395264,
"learning_rate": 9.841986455981941e-05,
"loss": 0.8833,
"step": 1000
},
{
"epoch": 3.3552,
"grad_norm": 0.2128266543149948,
"learning_rate": 9.27765237020316e-05,
"loss": 0.8772,
"step": 1050
},
{
"epoch": 3.5152,
"grad_norm": 0.20723260939121246,
"learning_rate": 8.71331828442438e-05,
"loss": 0.8814,
"step": 1100
},
{
"epoch": 3.6752000000000002,
"grad_norm": 0.18090017139911652,
"learning_rate": 8.148984198645599e-05,
"loss": 0.8719,
"step": 1150
},
{
"epoch": 3.8352,
"grad_norm": 0.18422921001911163,
"learning_rate": 7.584650112866818e-05,
"loss": 0.8909,
"step": 1200
}
],
"logging_steps": 50,
"max_steps": 1872,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.7402190332532736e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}