{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.15630035449565,
"eval_steps": 100,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12890750886239125,
"grad_norm": 0.9527155866900715,
"learning_rate": 0.001,
"loss": 4.2951,
"step": 50
},
{
"epoch": 0.2578150177247825,
"grad_norm": 0.8613266657338488,
"learning_rate": 0.0009995777619868967,
"loss": 4.1174,
"step": 100
},
{
"epoch": 0.2578150177247825,
"eval_loss": 4.061758041381836,
"eval_runtime": 13.6791,
"eval_samples_per_second": 73.104,
"eval_steps_per_second": 2.339,
"step": 100
},
{
"epoch": 0.3867225265871737,
"grad_norm": 0.588963638591784,
"learning_rate": 0.000998311761801199,
"loss": 4.035,
"step": 150
},
{
"epoch": 0.515630035449565,
"grad_norm": 0.3000507476828121,
"learning_rate": 0.000996204139796873,
"loss": 3.9794,
"step": 200
},
{
"epoch": 0.515630035449565,
"eval_loss": 3.951843500137329,
"eval_runtime": 13.2341,
"eval_samples_per_second": 75.563,
"eval_steps_per_second": 2.418,
"step": 200
},
{
"epoch": 0.6445375443119562,
"grad_norm": 1.0882364902367292,
"learning_rate": 0.0009932584592096643,
"loss": 3.9453,
"step": 250
},
{
"epoch": 0.7734450531743474,
"grad_norm": 0.6363211565344357,
"learning_rate": 0.0009894797001329398,
"loss": 3.917,
"step": 300
},
{
"epoch": 0.7734450531743474,
"eval_loss": 3.8788869380950928,
"eval_runtime": 13.3005,
"eval_samples_per_second": 75.185,
"eval_steps_per_second": 2.406,
"step": 300
},
{
"epoch": 0.9023525620367386,
"grad_norm": 0.9910006063725697,
"learning_rate": 0.0009848742510981292,
"loss": 3.805,
"step": 350
},
{
"epoch": 1.03126007089913,
"grad_norm": 5.070998393490313,
"learning_rate": 0.0009794498982740008,
"loss": 3.7973,
"step": 400
},
{
"epoch": 1.03126007089913,
"eval_loss": 3.929041862487793,
"eval_runtime": 13.2112,
"eval_samples_per_second": 75.693,
"eval_steps_per_second": 2.422,
"step": 400
},
{
"epoch": 1.1601675797615212,
"grad_norm": 1.5111119419703132,
"learning_rate": 0.0009732158123030324,
"loss": 3.98,
"step": 450
},
{
"epoch": 1.2890750886239124,
"grad_norm": 2.9266272457246685,
"learning_rate": 0.0009661825327971316,
"loss": 3.9676,
"step": 500
},
{
"epoch": 1.2890750886239124,
"eval_loss": 3.992887258529663,
"eval_runtime": 13.2769,
"eval_samples_per_second": 75.319,
"eval_steps_per_second": 2.41,
"step": 500
},
{
"epoch": 1.4179825974863036,
"grad_norm": 1.9372695993602536,
"learning_rate": 0.0009583619505189177,
"loss": 4.0102,
"step": 550
},
{
"epoch": 1.5468901063486948,
"grad_norm": 1.1325425311995052,
"learning_rate": 0.0009497672872786908,
"loss": 4.0347,
"step": 600
},
{
"epoch": 1.5468901063486948,
"eval_loss": 4.0086283683776855,
"eval_runtime": 13.2747,
"eval_samples_per_second": 75.331,
"eval_steps_per_second": 2.411,
"step": 600
},
{
"epoch": 1.675797615211086,
"grad_norm": 1.0653229015184138,
"learning_rate": 0.0009404130735810749,
"loss": 4.015,
"step": 650
},
{
"epoch": 1.8047051240734773,
"grad_norm": 0.9867803701996282,
"learning_rate": 0.0009303151240591263,
"loss": 4.0545,
"step": 700
},
{
"epoch": 1.8047051240734773,
"eval_loss": 4.0230817794799805,
"eval_runtime": 13.2485,
"eval_samples_per_second": 75.48,
"eval_steps_per_second": 2.415,
"step": 700
},
{
"epoch": 1.9336126329358685,
"grad_norm": 1.3505264820000504,
"learning_rate": 0.0009194905107374401,
"loss": 4.0391,
"step": 750
},
{
"epoch": 2.06252014179826,
"grad_norm": 1.4563064030505048,
"learning_rate": 0.0009079575341694557,
"loss": 4.0552,
"step": 800
},
{
"epoch": 2.06252014179826,
"eval_loss": 4.040332317352295,
"eval_runtime": 13.2387,
"eval_samples_per_second": 75.536,
"eval_steps_per_second": 2.417,
"step": 800
},
{
"epoch": 2.191427650660651,
"grad_norm": 1.2688404260592696,
"learning_rate": 0.0008957356924977609,
"loss": 4.0285,
"step": 850
},
{
"epoch": 2.3203351595230424,
"grad_norm": 1.922527341942018,
"learning_rate": 0.0008828456484896984,
"loss": 4.0291,
"step": 900
},
{
"epoch": 2.3203351595230424,
"eval_loss": 4.0184197425842285,
"eval_runtime": 13.2837,
"eval_samples_per_second": 75.28,
"eval_steps_per_second": 2.409,
"step": 900
},
{
"epoch": 2.4492426683854336,
"grad_norm": 1.1751317607621916,
"learning_rate": 0.0008693091946040104,
"loss": 4.0198,
"step": 950
},
{
"epoch": 2.578150177247825,
"grad_norm": 1.4781189301871374,
"learning_rate": 0.000855149216147576,
"loss": 4.0389,
"step": 1000
},
{
"epoch": 2.578150177247825,
"eval_loss": 4.022523403167725,
"eval_runtime": 13.2647,
"eval_samples_per_second": 75.388,
"eval_steps_per_second": 2.412,
"step": 1000
},
{
"epoch": 2.707057686110216,
"grad_norm": 2.8481102078113363,
"learning_rate": 0.000840389652584536,
"loss": 4.0337,
"step": 1050
},
{
"epoch": 2.8359651949726072,
"grad_norm": 1.2842129496698058,
"learning_rate": 0.0008250554570632107,
"loss": 4.0293,
"step": 1100
},
{
"epoch": 2.8359651949726072,
"eval_loss": 4.018503189086914,
"eval_runtime": 13.2539,
"eval_samples_per_second": 75.45,
"eval_steps_per_second": 2.414,
"step": 1100
},
{
"epoch": 2.9648727038349985,
"grad_norm": 3.7613179214902197,
"learning_rate": 0.0008091725542292438,
"loss": 4.0283,
"step": 1150
},
{
"epoch": 3.0937802126973897,
"grad_norm": 2.6521101503823346,
"learning_rate": 0.000792767796396289,
"loss": 4.0347,
"step": 1200
},
{
"epoch": 3.0937802126973897,
"eval_loss": 4.021185398101807,
"eval_runtime": 13.2939,
"eval_samples_per_second": 75.222,
"eval_steps_per_second": 2.407,
"step": 1200
},
{
"epoch": 3.222687721559781,
"grad_norm": 1.7003701014793156,
"learning_rate": 0.0007758689181483412,
"loss": 4.0275,
"step": 1250
},
{
"epoch": 3.351595230422172,
"grad_norm": 1.3295380943734334,
"learning_rate": 0.000758504489450466,
"loss": 4.0208,
"step": 1300
},
{
"epoch": 3.351595230422172,
"eval_loss": 4.011188507080078,
"eval_runtime": 13.2452,
"eval_samples_per_second": 75.499,
"eval_steps_per_second": 2.416,
"step": 1300
},
{
"epoch": 3.4805027392845633,
"grad_norm": 1.3487659259714693,
"learning_rate": 0.0007407038673471959,
"loss": 4.0191,
"step": 1350
},
{
"epoch": 3.6094102481469545,
"grad_norm": 0.9861657777387535,
"learning_rate": 0.0007224971463302587,
"loss": 4.0204,
"step": 1400
},
{
"epoch": 3.6094102481469545,
"eval_loss": 4.0111846923828125,
"eval_runtime": 13.2369,
"eval_samples_per_second": 75.546,
"eval_steps_per_second": 2.417,
"step": 1400
},
{
"epoch": 3.7383177570093458,
"grad_norm": 1.0800491613545316,
"learning_rate": 0.0007039151074595432,
"loss": 4.024,
"step": 1450
},
{
"epoch": 3.867225265871737,
"grad_norm": 0.9644339387320809,
"learning_rate": 0.0006849891663233264,
"loss": 4.023,
"step": 1500
},
{
"epoch": 3.867225265871737,
"eval_loss": 4.018366813659668,
"eval_runtime": 13.2404,
"eval_samples_per_second": 75.526,
"eval_steps_per_second": 2.417,
"step": 1500
},
{
"epoch": 3.996132774734128,
"grad_norm": 0.8344627432775175,
"learning_rate": 0.0006657513199257385,
"loss": 4.0233,
"step": 1550
},
{
"epoch": 4.12504028359652,
"grad_norm": 1.4143353248285266,
"learning_rate": 0.0006462340925912611,
"loss": 4.0253,
"step": 1600
},
{
"epoch": 4.12504028359652,
"eval_loss": 4.0222063064575195,
"eval_runtime": 13.2332,
"eval_samples_per_second": 75.567,
"eval_steps_per_second": 2.418,
"step": 1600
},
{
"epoch": 4.253947792458911,
"grad_norm": 0.970349395404879,
"learning_rate": 0.0006264704809777159,
"loss": 4.02,
"step": 1650
},
{
"epoch": 4.382855301321302,
"grad_norm": 0.7142593470613441,
"learning_rate": 0.0006064938982907064,
"loss": 4.0186,
"step": 1700
},
{
"epoch": 4.382855301321302,
"eval_loss": 4.019730567932129,
"eval_runtime": 13.2783,
"eval_samples_per_second": 75.311,
"eval_steps_per_second": 2.41,
"step": 1700
},
{
"epoch": 4.5117628101836935,
"grad_norm": 1.088690826939789,
"learning_rate": 0.0005863381177938257,
"loss": 4.0203,
"step": 1750
},
{
"epoch": 4.640670319046085,
"grad_norm": 0.81043587990677,
"learning_rate": 0.0005660372157101351,
"loss": 4.0124,
"step": 1800
},
{
"epoch": 4.640670319046085,
"eval_loss": 4.00410795211792,
"eval_runtime": 13.2665,
"eval_samples_per_second": 75.378,
"eval_steps_per_second": 2.412,
"step": 1800
},
{
"epoch": 4.769577827908476,
"grad_norm": 1.218343880260603,
"learning_rate": 0.0005456255136114464,
"loss": 4.0095,
"step": 1850
},
{
"epoch": 4.898485336770867,
"grad_norm": 1.2906531132627939,
"learning_rate": 0.0005251375203928073,
"loss": 4.013,
"step": 1900
},
{
"epoch": 4.898485336770867,
"eval_loss": 4.00699520111084,
"eval_runtime": 13.2744,
"eval_samples_per_second": 75.333,
"eval_steps_per_second": 2.411,
"step": 1900
},
{
"epoch": 5.027392845633258,
"grad_norm": 1.0003413201127862,
"learning_rate": 0.0005046078739302906,
"loss": 4.0152,
"step": 1950
},
{
"epoch": 5.15630035449565,
"grad_norm": 0.7713832151052984,
"learning_rate": 0.00048407128252072126,
"loss": 4.0107,
"step": 2000
},
{
"epoch": 5.15630035449565,
"eval_loss": 4.002959728240967,
"eval_runtime": 13.3166,
"eval_samples_per_second": 75.094,
"eval_steps_per_second": 2.403,
"step": 2000
}
],
"logging_steps": 50,
"max_steps": 3870,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1778471009255424.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}