{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.028117037167208505,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 7.029259291802126e-05,
"eval_loss": 4.201883792877197,
"eval_runtime": 160.7718,
"eval_samples_per_second": 37.258,
"eval_steps_per_second": 18.629,
"step": 1
},
{
"epoch": 0.00035146296459010633,
"grad_norm": 6.725489616394043,
"learning_rate": 1.6666666666666667e-05,
"loss": 5.1733,
"step": 5
},
{
"epoch": 0.0007029259291802127,
"grad_norm": 5.495113372802734,
"learning_rate": 3.3333333333333335e-05,
"loss": 6.0821,
"step": 10
},
{
"epoch": 0.001054388893770319,
"grad_norm": 7.01501989364624,
"learning_rate": 5e-05,
"loss": 4.2642,
"step": 15
},
{
"epoch": 0.0014058518583604253,
"grad_norm": 5.596772193908691,
"learning_rate": 6.666666666666667e-05,
"loss": 3.5852,
"step": 20
},
{
"epoch": 0.0017573148229505316,
"grad_norm": 6.823464393615723,
"learning_rate": 8.333333333333334e-05,
"loss": 2.7004,
"step": 25
},
{
"epoch": 0.002108777787540638,
"grad_norm": 5.401830673217773,
"learning_rate": 0.0001,
"loss": 2.6182,
"step": 30
},
{
"epoch": 0.0024602407521307442,
"grad_norm": 6.206054210662842,
"learning_rate": 9.995494831023409e-05,
"loss": 2.2873,
"step": 35
},
{
"epoch": 0.0028117037167208507,
"grad_norm": 6.021923542022705,
"learning_rate": 9.981987442712633e-05,
"loss": 1.9107,
"step": 40
},
{
"epoch": 0.0031631666813109567,
"grad_norm": 5.26896333694458,
"learning_rate": 9.959502176294383e-05,
"loss": 2.1527,
"step": 45
},
{
"epoch": 0.003514629645901063,
"grad_norm": 4.976156234741211,
"learning_rate": 9.928079551738543e-05,
"loss": 2.0677,
"step": 50
},
{
"epoch": 0.0038660926104911696,
"grad_norm": 7.764614582061768,
"learning_rate": 9.887776194738432e-05,
"loss": 3.946,
"step": 55
},
{
"epoch": 0.004217555575081276,
"grad_norm": 7.109737396240234,
"learning_rate": 9.838664734667495e-05,
"loss": 4.1154,
"step": 60
},
{
"epoch": 0.0045690185396713824,
"grad_norm": 4.454560279846191,
"learning_rate": 9.780833673696254e-05,
"loss": 2.3855,
"step": 65
},
{
"epoch": 0.0049204815042614885,
"grad_norm": 4.77770471572876,
"learning_rate": 9.714387227305422e-05,
"loss": 2.2012,
"step": 70
},
{
"epoch": 0.0052719444688515945,
"grad_norm": 4.2080078125,
"learning_rate": 9.639445136482548e-05,
"loss": 2.1322,
"step": 75
},
{
"epoch": 0.005623407433441701,
"grad_norm": 4.303310394287109,
"learning_rate": 9.55614245194068e-05,
"loss": 1.982,
"step": 80
},
{
"epoch": 0.005974870398031807,
"grad_norm": 4.268455982208252,
"learning_rate": 9.464629290747842e-05,
"loss": 1.8789,
"step": 85
},
{
"epoch": 0.006326333362621913,
"grad_norm": 5.272060871124268,
"learning_rate": 9.365070565805941e-05,
"loss": 1.6805,
"step": 90
},
{
"epoch": 0.00667779632721202,
"grad_norm": 4.5109710693359375,
"learning_rate": 9.257645688666556e-05,
"loss": 1.6993,
"step": 95
},
{
"epoch": 0.007029259291802126,
"grad_norm": 4.6436872482299805,
"learning_rate": 9.142548246219212e-05,
"loss": 1.805,
"step": 100
},
{
"epoch": 0.007029259291802126,
"eval_loss": 2.2164463996887207,
"eval_runtime": 160.18,
"eval_samples_per_second": 37.395,
"eval_steps_per_second": 18.698,
"step": 100
},
{
"epoch": 0.007380722256392232,
"grad_norm": 7.175086498260498,
"learning_rate": 9.019985651834703e-05,
"loss": 3.1929,
"step": 105
},
{
"epoch": 0.007732185220982339,
"grad_norm": 6.154209136962891,
"learning_rate": 8.890178771592199e-05,
"loss": 3.6965,
"step": 110
},
{
"epoch": 0.008083648185572445,
"grad_norm": 5.077075958251953,
"learning_rate": 8.753361526263621e-05,
"loss": 2.2779,
"step": 115
},
{
"epoch": 0.008435111150162551,
"grad_norm": 4.827920913696289,
"learning_rate": 8.609780469772623e-05,
"loss": 2.007,
"step": 120
},
{
"epoch": 0.008786574114752657,
"grad_norm": 5.940175533294678,
"learning_rate": 8.459694344887732e-05,
"loss": 1.9976,
"step": 125
},
{
"epoch": 0.009138037079342765,
"grad_norm": 4.312155723571777,
"learning_rate": 8.303373616950408e-05,
"loss": 2.0399,
"step": 130
},
{
"epoch": 0.009489500043932871,
"grad_norm": 3.841646671295166,
"learning_rate": 8.141099986478212e-05,
"loss": 1.7846,
"step": 135
},
{
"epoch": 0.009840963008522977,
"grad_norm": 4.672375679016113,
"learning_rate": 7.973165881521434e-05,
"loss": 1.408,
"step": 140
},
{
"epoch": 0.010192425973113083,
"grad_norm": 4.594907760620117,
"learning_rate": 7.799873930687978e-05,
"loss": 1.3866,
"step": 145
},
{
"epoch": 0.010543888937703189,
"grad_norm": 4.282839775085449,
"learning_rate": 7.621536417786159e-05,
"loss": 1.8141,
"step": 150
},
{
"epoch": 0.010895351902293297,
"grad_norm": 7.275561809539795,
"learning_rate": 7.438474719068173e-05,
"loss": 2.9346,
"step": 155
},
{
"epoch": 0.011246814866883403,
"grad_norm": 5.895737171173096,
"learning_rate": 7.251018724088367e-05,
"loss": 3.389,
"step": 160
},
{
"epoch": 0.011598277831473509,
"grad_norm": 5.633843421936035,
"learning_rate": 7.059506241219965e-05,
"loss": 2.3134,
"step": 165
},
{
"epoch": 0.011949740796063615,
"grad_norm": 4.488674640655518,
"learning_rate": 6.864282388901544e-05,
"loss": 2.1643,
"step": 170
},
{
"epoch": 0.01230120376065372,
"grad_norm": 5.371987342834473,
"learning_rate": 6.665698973710288e-05,
"loss": 1.9552,
"step": 175
},
{
"epoch": 0.012652666725243827,
"grad_norm": 4.47937536239624,
"learning_rate": 6.464113856382752e-05,
"loss": 1.7325,
"step": 180
},
{
"epoch": 0.013004129689833934,
"grad_norm": 4.164087772369385,
"learning_rate": 6.259890306925627e-05,
"loss": 1.833,
"step": 185
},
{
"epoch": 0.01335559265442404,
"grad_norm": 2.899000644683838,
"learning_rate": 6.0533963499786314e-05,
"loss": 1.5539,
"step": 190
},
{
"epoch": 0.013707055619014146,
"grad_norm": 3.8446884155273438,
"learning_rate": 5.8450041016092464e-05,
"loss": 1.1038,
"step": 195
},
{
"epoch": 0.014058518583604252,
"grad_norm": 4.144617557525635,
"learning_rate": 5.6350890987343944e-05,
"loss": 1.7659,
"step": 200
},
{
"epoch": 0.014058518583604252,
"eval_loss": 1.9981021881103516,
"eval_runtime": 160.2469,
"eval_samples_per_second": 37.38,
"eval_steps_per_second": 18.69,
"step": 200
},
{
"epoch": 0.014409981548194358,
"grad_norm": 6.136613845825195,
"learning_rate": 5.4240296223775465e-05,
"loss": 2.7228,
"step": 205
},
{
"epoch": 0.014761444512784464,
"grad_norm": 6.069944381713867,
"learning_rate": 5.212206015980742e-05,
"loss": 3.1086,
"step": 210
},
{
"epoch": 0.015112907477374572,
"grad_norm": 4.702614784240723,
"learning_rate": 5e-05,
"loss": 2.7574,
"step": 215
},
{
"epoch": 0.015464370441964678,
"grad_norm": 4.951237201690674,
"learning_rate": 4.78779398401926e-05,
"loss": 1.9488,
"step": 220
},
{
"epoch": 0.015815833406554784,
"grad_norm": 5.850872993469238,
"learning_rate": 4.575970377622456e-05,
"loss": 1.877,
"step": 225
},
{
"epoch": 0.01616729637114489,
"grad_norm": 5.1090216636657715,
"learning_rate": 4.364910901265606e-05,
"loss": 1.875,
"step": 230
},
{
"epoch": 0.016518759335734996,
"grad_norm": 4.607788562774658,
"learning_rate": 4.1549958983907555e-05,
"loss": 1.6502,
"step": 235
},
{
"epoch": 0.016870222300325102,
"grad_norm": 4.216214179992676,
"learning_rate": 3.94660365002137e-05,
"loss": 1.5976,
"step": 240
},
{
"epoch": 0.01722168526491521,
"grad_norm": 4.849003314971924,
"learning_rate": 3.740109693074375e-05,
"loss": 1.0067,
"step": 245
},
{
"epoch": 0.017573148229505314,
"grad_norm": 4.999157428741455,
"learning_rate": 3.5358861436172485e-05,
"loss": 2.0552,
"step": 250
},
{
"epoch": 0.017924611194095424,
"grad_norm": 9.179941177368164,
"learning_rate": 3.334301026289712e-05,
"loss": 2.5788,
"step": 255
},
{
"epoch": 0.01827607415868553,
"grad_norm": 5.043307781219482,
"learning_rate": 3.135717611098458e-05,
"loss": 2.7554,
"step": 260
},
{
"epoch": 0.018627537123275636,
"grad_norm": 4.71703577041626,
"learning_rate": 2.9404937587800375e-05,
"loss": 2.1318,
"step": 265
},
{
"epoch": 0.018979000087865742,
"grad_norm": 3.761772632598877,
"learning_rate": 2.748981275911633e-05,
"loss": 2.1178,
"step": 270
},
{
"epoch": 0.019330463052455848,
"grad_norm": 4.184715270996094,
"learning_rate": 2.5615252809318284e-05,
"loss": 1.8148,
"step": 275
},
{
"epoch": 0.019681926017045954,
"grad_norm": 4.2669243812561035,
"learning_rate": 2.3784635822138424e-05,
"loss": 1.8792,
"step": 280
},
{
"epoch": 0.02003338898163606,
"grad_norm": 4.071264743804932,
"learning_rate": 2.2001260693120233e-05,
"loss": 1.4996,
"step": 285
},
{
"epoch": 0.020384851946226166,
"grad_norm": 4.989243984222412,
"learning_rate": 2.026834118478567e-05,
"loss": 1.2144,
"step": 290
},
{
"epoch": 0.020736314910816272,
"grad_norm": 5.321191310882568,
"learning_rate": 1.858900013521788e-05,
"loss": 1.1887,
"step": 295
},
{
"epoch": 0.021087777875406378,
"grad_norm": 5.177714824676514,
"learning_rate": 1.6966263830495936e-05,
"loss": 1.8524,
"step": 300
},
{
"epoch": 0.021087777875406378,
"eval_loss": 1.9191735982894897,
"eval_runtime": 160.2687,
"eval_samples_per_second": 37.375,
"eval_steps_per_second": 18.687,
"step": 300
},
{
"epoch": 0.021439240839996484,
"grad_norm": 5.646726131439209,
"learning_rate": 1.5403056551122697e-05,
"loss": 2.7197,
"step": 305
},
{
"epoch": 0.021790703804586593,
"grad_norm": 7.194861888885498,
"learning_rate": 1.3902195302273779e-05,
"loss": 3.0354,
"step": 310
},
{
"epoch": 0.0221421667691767,
"grad_norm": 4.582761764526367,
"learning_rate": 1.246638473736378e-05,
"loss": 2.1669,
"step": 315
},
{
"epoch": 0.022493629733766805,
"grad_norm": 4.967409610748291,
"learning_rate": 1.1098212284078036e-05,
"loss": 2.0142,
"step": 320
},
{
"epoch": 0.02284509269835691,
"grad_norm": 5.128559112548828,
"learning_rate": 9.800143481652979e-06,
"loss": 1.6949,
"step": 325
},
{
"epoch": 0.023196555662947017,
"grad_norm": 5.137016773223877,
"learning_rate": 8.574517537807897e-06,
"loss": 1.844,
"step": 330
},
{
"epoch": 0.023548018627537123,
"grad_norm": 3.744854211807251,
"learning_rate": 7.423543113334436e-06,
"loss": 1.4837,
"step": 335
},
{
"epoch": 0.02389948159212723,
"grad_norm": 3.0734193325042725,
"learning_rate": 6.349294341940593e-06,
"loss": 1.4371,
"step": 340
},
{
"epoch": 0.024250944556717335,
"grad_norm": 4.060451030731201,
"learning_rate": 5.353707092521582e-06,
"loss": 1.2239,
"step": 345
},
{
"epoch": 0.02460240752130744,
"grad_norm": 4.555269241333008,
"learning_rate": 4.43857548059321e-06,
"loss": 1.6619,
"step": 350
},
{
"epoch": 0.024953870485897547,
"grad_norm": 6.561709403991699,
"learning_rate": 3.605548635174533e-06,
"loss": 2.2723,
"step": 355
},
{
"epoch": 0.025305333450487653,
"grad_norm": 4.8083109855651855,
"learning_rate": 2.85612772694579e-06,
"loss": 2.2588,
"step": 360
},
{
"epoch": 0.025656796415077763,
"grad_norm": 5.575637340545654,
"learning_rate": 2.191663263037458e-06,
"loss": 2.3431,
"step": 365
},
{
"epoch": 0.02600825937966787,
"grad_norm": 4.456447601318359,
"learning_rate": 1.6133526533250565e-06,
"loss": 1.9024,
"step": 370
},
{
"epoch": 0.026359722344257975,
"grad_norm": 4.07964563369751,
"learning_rate": 1.1222380526156928e-06,
"loss": 1.8472,
"step": 375
},
{
"epoch": 0.02671118530884808,
"grad_norm": 4.813708305358887,
"learning_rate": 7.192044826145771e-07,
"loss": 1.8371,
"step": 380
},
{
"epoch": 0.027062648273438187,
"grad_norm": 5.605709075927734,
"learning_rate": 4.049782370561583e-07,
"loss": 1.6434,
"step": 385
},
{
"epoch": 0.027414111238028293,
"grad_norm": 2.7447404861450195,
"learning_rate": 1.8012557287367392e-07,
"loss": 1.3624,
"step": 390
},
{
"epoch": 0.0277655742026184,
"grad_norm": 4.5164408683776855,
"learning_rate": 4.5051689765929214e-08,
"loss": 1.1689,
"step": 395
},
{
"epoch": 0.028117037167208505,
"grad_norm": 4.672063827514648,
"learning_rate": 0.0,
"loss": 1.4677,
"step": 400
},
{
"epoch": 0.028117037167208505,
"eval_loss": 1.9015841484069824,
"eval_runtime": 160.4551,
"eval_samples_per_second": 37.331,
"eval_steps_per_second": 18.666,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8529856092438528.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}