{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 39288,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03817959682345754,
"grad_norm": 47.025718688964844,
"learning_rate": 4.936367338627571e-05,
"loss": 19.8956,
"step": 500
},
{
"epoch": 0.07635919364691508,
"grad_norm": 39.02047348022461,
"learning_rate": 4.872734677255142e-05,
"loss": 2.0594,
"step": 1000
},
{
"epoch": 0.11453879047037263,
"grad_norm": 82.10893249511719,
"learning_rate": 4.8091020158827126e-05,
"loss": 1.924,
"step": 1500
},
{
"epoch": 0.15271838729383017,
"grad_norm": 25.970558166503906,
"learning_rate": 4.7454693545102835e-05,
"loss": 1.6354,
"step": 2000
},
{
"epoch": 0.19089798411728773,
"grad_norm": 46.208919525146484,
"learning_rate": 4.6818366931378536e-05,
"loss": 1.5016,
"step": 2500
},
{
"epoch": 0.22907758094074526,
"grad_norm": 35.68703842163086,
"learning_rate": 4.6182040317654244e-05,
"loss": 1.5022,
"step": 3000
},
{
"epoch": 0.2672571777642028,
"grad_norm": 65.33502960205078,
"learning_rate": 4.554571370392995e-05,
"loss": 1.423,
"step": 3500
},
{
"epoch": 0.30543677458766033,
"grad_norm": 33.0444450378418,
"learning_rate": 4.490938709020566e-05,
"loss": 1.3971,
"step": 4000
},
{
"epoch": 0.3436163714111179,
"grad_norm": 18.407243728637695,
"learning_rate": 4.427306047648137e-05,
"loss": 1.308,
"step": 4500
},
{
"epoch": 0.38179596823457546,
"grad_norm": 62.20368194580078,
"learning_rate": 4.3636733862757076e-05,
"loss": 1.3365,
"step": 5000
},
{
"epoch": 0.41997556505803296,
"grad_norm": 18.440366744995117,
"learning_rate": 4.3000407249032784e-05,
"loss": 1.1873,
"step": 5500
},
{
"epoch": 0.4581551618814905,
"grad_norm": 21.170751571655273,
"learning_rate": 4.236408063530849e-05,
"loss": 1.212,
"step": 6000
},
{
"epoch": 0.4963347587049481,
"grad_norm": 38.78303146362305,
"learning_rate": 4.17277540215842e-05,
"loss": 1.2951,
"step": 6500
},
{
"epoch": 0.5345143555284056,
"grad_norm": 15.414962768554688,
"learning_rate": 4.109142740785991e-05,
"loss": 1.2291,
"step": 7000
},
{
"epoch": 0.5726939523518632,
"grad_norm": 17.313640594482422,
"learning_rate": 4.0455100794135616e-05,
"loss": 1.1234,
"step": 7500
},
{
"epoch": 0.6108735491753207,
"grad_norm": 24.39589500427246,
"learning_rate": 3.9818774180411324e-05,
"loss": 1.2496,
"step": 8000
},
{
"epoch": 0.6490531459987783,
"grad_norm": 29.411834716796875,
"learning_rate": 3.918244756668703e-05,
"loss": 1.1647,
"step": 8500
},
{
"epoch": 0.6872327428222358,
"grad_norm": 24.564817428588867,
"learning_rate": 3.854612095296274e-05,
"loss": 1.1065,
"step": 9000
},
{
"epoch": 0.7254123396456933,
"grad_norm": 48.59101104736328,
"learning_rate": 3.790979433923845e-05,
"loss": 1.1279,
"step": 9500
},
{
"epoch": 0.7635919364691509,
"grad_norm": 37.30824661254883,
"learning_rate": 3.7273467725514156e-05,
"loss": 1.0621,
"step": 10000
},
{
"epoch": 0.8017715332926084,
"grad_norm": 40.20986557006836,
"learning_rate": 3.6637141111789864e-05,
"loss": 1.1039,
"step": 10500
},
{
"epoch": 0.8399511301160659,
"grad_norm": 7.365723609924316,
"learning_rate": 3.600081449806557e-05,
"loss": 1.0561,
"step": 11000
},
{
"epoch": 0.8781307269395235,
"grad_norm": 10.872908592224121,
"learning_rate": 3.5364487884341273e-05,
"loss": 1.0061,
"step": 11500
},
{
"epoch": 0.916310323762981,
"grad_norm": 37.47850799560547,
"learning_rate": 3.472816127061698e-05,
"loss": 1.1034,
"step": 12000
},
{
"epoch": 0.9544899205864386,
"grad_norm": 59.97223663330078,
"learning_rate": 3.409183465689269e-05,
"loss": 1.058,
"step": 12500
},
{
"epoch": 0.9926695174098962,
"grad_norm": 17.347299575805664,
"learning_rate": 3.34555080431684e-05,
"loss": 0.9714,
"step": 13000
},
{
"epoch": 1.0308491142333538,
"grad_norm": 47.64847183227539,
"learning_rate": 3.2819181429444106e-05,
"loss": 0.9232,
"step": 13500
},
{
"epoch": 1.0690287110568113,
"grad_norm": 23.9332332611084,
"learning_rate": 3.2182854815719814e-05,
"loss": 0.8723,
"step": 14000
},
{
"epoch": 1.1072083078802688,
"grad_norm": 24.243032455444336,
"learning_rate": 3.154652820199552e-05,
"loss": 0.9376,
"step": 14500
},
{
"epoch": 1.1453879047037263,
"grad_norm": 18.394193649291992,
"learning_rate": 3.091020158827123e-05,
"loss": 0.9212,
"step": 15000
},
{
"epoch": 1.1835675015271838,
"grad_norm": 33.30715560913086,
"learning_rate": 3.027387497454694e-05,
"loss": 0.9196,
"step": 15500
},
{
"epoch": 1.2217470983506413,
"grad_norm": 116.27849578857422,
"learning_rate": 2.9637548360822642e-05,
"loss": 0.8325,
"step": 16000
},
{
"epoch": 1.2599266951740988,
"grad_norm": 33.555179595947266,
"learning_rate": 2.900122174709835e-05,
"loss": 0.8746,
"step": 16500
},
{
"epoch": 1.2981062919975566,
"grad_norm": 35.980411529541016,
"learning_rate": 2.836489513337406e-05,
"loss": 0.8561,
"step": 17000
},
{
"epoch": 1.336285888821014,
"grad_norm": 42.04365921020508,
"learning_rate": 2.7728568519649766e-05,
"loss": 0.867,
"step": 17500
},
{
"epoch": 1.3744654856444716,
"grad_norm": 43.86580276489258,
"learning_rate": 2.7092241905925474e-05,
"loss": 0.8331,
"step": 18000
},
{
"epoch": 1.412645082467929,
"grad_norm": 44.562686920166016,
"learning_rate": 2.6455915292201182e-05,
"loss": 0.8354,
"step": 18500
},
{
"epoch": 1.4508246792913866,
"grad_norm": 15.569896697998047,
"learning_rate": 2.581958867847689e-05,
"loss": 0.8514,
"step": 19000
},
{
"epoch": 1.4890042761148443,
"grad_norm": 58.31094741821289,
"learning_rate": 2.51832620647526e-05,
"loss": 0.8712,
"step": 19500
},
{
"epoch": 1.5271838729383018,
"grad_norm": 15.770406723022461,
"learning_rate": 2.4546935451028307e-05,
"loss": 0.8085,
"step": 20000
},
{
"epoch": 1.5653634697617593,
"grad_norm": 29.918487548828125,
"learning_rate": 2.3910608837304015e-05,
"loss": 0.7545,
"step": 20500
},
{
"epoch": 1.6035430665852168,
"grad_norm": 112.88292694091797,
"learning_rate": 2.3274282223579723e-05,
"loss": 0.8347,
"step": 21000
},
{
"epoch": 1.6417226634086743,
"grad_norm": 16.153079986572266,
"learning_rate": 2.2637955609855427e-05,
"loss": 0.8042,
"step": 21500
},
{
"epoch": 1.679902260232132,
"grad_norm": 17.334716796875,
"learning_rate": 2.2001628996131135e-05,
"loss": 0.8368,
"step": 22000
},
{
"epoch": 1.7180818570555894,
"grad_norm": 22.649688720703125,
"learning_rate": 2.1365302382406843e-05,
"loss": 0.7438,
"step": 22500
},
{
"epoch": 1.756261453879047,
"grad_norm": 23.706069946289062,
"learning_rate": 2.072897576868255e-05,
"loss": 0.8163,
"step": 23000
},
{
"epoch": 1.7944410507025046,
"grad_norm": 60.12598419189453,
"learning_rate": 2.0092649154958256e-05,
"loss": 0.8018,
"step": 23500
},
{
"epoch": 1.832620647525962,
"grad_norm": 88.19845581054688,
"learning_rate": 1.9456322541233964e-05,
"loss": 0.7776,
"step": 24000
},
{
"epoch": 1.8708002443494198,
"grad_norm": 18.326126098632812,
"learning_rate": 1.8819995927509672e-05,
"loss": 0.7694,
"step": 24500
},
{
"epoch": 1.9089798411728771,
"grad_norm": 33.15532684326172,
"learning_rate": 1.818366931378538e-05,
"loss": 0.7517,
"step": 25000
},
{
"epoch": 1.9471594379963348,
"grad_norm": 11.88496208190918,
"learning_rate": 1.7547342700061088e-05,
"loss": 0.7802,
"step": 25500
},
{
"epoch": 1.9853390348197923,
"grad_norm": 53.72198486328125,
"learning_rate": 1.6911016086336796e-05,
"loss": 0.7699,
"step": 26000
},
{
"epoch": 2.02351863164325,
"grad_norm": 13.519152641296387,
"learning_rate": 1.6274689472612504e-05,
"loss": 0.7362,
"step": 26500
},
{
"epoch": 2.0616982284667076,
"grad_norm": 21.352752685546875,
"learning_rate": 1.5638362858888212e-05,
"loss": 0.7043,
"step": 27000
},
{
"epoch": 2.099877825290165,
"grad_norm": 39.630210876464844,
"learning_rate": 1.500203624516392e-05,
"loss": 0.6899,
"step": 27500
},
{
"epoch": 2.1380574221136226,
"grad_norm": 15.605965614318848,
"learning_rate": 1.4365709631439625e-05,
"loss": 0.7241,
"step": 28000
},
{
"epoch": 2.17623701893708,
"grad_norm": 12.23674201965332,
"learning_rate": 1.3729383017715333e-05,
"loss": 0.6201,
"step": 28500
},
{
"epoch": 2.2144166157605376,
"grad_norm": 15.482099533081055,
"learning_rate": 1.309305640399104e-05,
"loss": 0.6966,
"step": 29000
},
{
"epoch": 2.2525962125839953,
"grad_norm": 25.2972469329834,
"learning_rate": 1.2456729790266749e-05,
"loss": 0.5878,
"step": 29500
},
{
"epoch": 2.2907758094074526,
"grad_norm": 18.335323333740234,
"learning_rate": 1.1820403176542457e-05,
"loss": 0.6468,
"step": 30000
},
{
"epoch": 2.3289554062309104,
"grad_norm": 44.75697326660156,
"learning_rate": 1.1184076562818163e-05,
"loss": 0.6888,
"step": 30500
},
{
"epoch": 2.3671350030543676,
"grad_norm": 9.713996887207031,
"learning_rate": 1.0547749949093871e-05,
"loss": 0.6584,
"step": 31000
},
{
"epoch": 2.4053145998778254,
"grad_norm": 19.04861831665039,
"learning_rate": 9.91142333536958e-06,
"loss": 0.6356,
"step": 31500
},
{
"epoch": 2.4434941967012827,
"grad_norm": 16.914947509765625,
"learning_rate": 9.275096721645286e-06,
"loss": 0.6188,
"step": 32000
},
{
"epoch": 2.4816737935247404,
"grad_norm": 25.262630462646484,
"learning_rate": 8.638770107920995e-06,
"loss": 0.6062,
"step": 32500
},
{
"epoch": 2.5198533903481977,
"grad_norm": 49.74958419799805,
"learning_rate": 8.002443494196702e-06,
"loss": 0.6554,
"step": 33000
},
{
"epoch": 2.5580329871716554,
"grad_norm": 35.74587631225586,
"learning_rate": 7.36611688047241e-06,
"loss": 0.6523,
"step": 33500
},
{
"epoch": 2.596212583995113,
"grad_norm": 22.55206298828125,
"learning_rate": 6.729790266748116e-06,
"loss": 0.6333,
"step": 34000
},
{
"epoch": 2.6343921808185704,
"grad_norm": 19.39708709716797,
"learning_rate": 6.093463653023825e-06,
"loss": 0.6269,
"step": 34500
},
{
"epoch": 2.672571777642028,
"grad_norm": 65.50653839111328,
"learning_rate": 5.457137039299532e-06,
"loss": 0.6825,
"step": 35000
},
{
"epoch": 2.7107513744654854,
"grad_norm": 10.603687286376953,
"learning_rate": 4.820810425575239e-06,
"loss": 0.6289,
"step": 35500
},
{
"epoch": 2.748930971288943,
"grad_norm": 12.743573188781738,
"learning_rate": 4.184483811850947e-06,
"loss": 0.6369,
"step": 36000
},
{
"epoch": 2.787110568112401,
"grad_norm": 8.957477569580078,
"learning_rate": 3.5481571981266544e-06,
"loss": 0.6095,
"step": 36500
},
{
"epoch": 2.825290164935858,
"grad_norm": 38.0220947265625,
"learning_rate": 2.911830584402362e-06,
"loss": 0.5497,
"step": 37000
},
{
"epoch": 2.863469761759316,
"grad_norm": 25.39042854309082,
"learning_rate": 2.27550397067807e-06,
"loss": 0.5781,
"step": 37500
},
{
"epoch": 2.901649358582773,
"grad_norm": 11.314273834228516,
"learning_rate": 1.6391773569537775e-06,
"loss": 0.6227,
"step": 38000
},
{
"epoch": 2.939828955406231,
"grad_norm": 38.86436080932617,
"learning_rate": 1.0028507432294848e-06,
"loss": 0.5966,
"step": 38500
},
{
"epoch": 2.9780085522296886,
"grad_norm": 11.880073547363281,
"learning_rate": 3.6652412950519244e-07,
"loss": 0.5901,
"step": 39000
},
{
"epoch": 3.0,
"step": 39288,
"total_flos": 5175762253906176.0,
"train_loss": 1.154250169620813,
"train_runtime": 1582.2568,
"train_samples_per_second": 198.63,
"train_steps_per_second": 24.83
}
],
"logging_steps": 500,
"max_steps": 39288,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5175762253906176.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}