{
  "best_global_step": 1053,
  "best_metric": 0.9728,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-1053",
  "epoch": 2.992181947405828,
  "eval_steps": 500,
  "global_step": 1053,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.028429282160625444,
      "grad_norm": 3.7549822330474854,
      "learning_rate": 4.716981132075472e-06,
      "loss": 2.3642,
      "step": 10
    },
    {
      "epoch": 0.05685856432125089,
      "grad_norm": 4.3167524337768555,
      "learning_rate": 9.433962264150944e-06,
      "loss": 2.2894,
      "step": 20
    },
    {
      "epoch": 0.08528784648187633,
      "grad_norm": 4.120803356170654,
      "learning_rate": 1.4150943396226415e-05,
      "loss": 2.1748,
      "step": 30
    },
    {
      "epoch": 0.11371712864250177,
      "grad_norm": 4.778192043304443,
      "learning_rate": 1.8867924528301888e-05,
      "loss": 1.9726,
      "step": 40
    },
    {
      "epoch": 0.14214641080312723,
      "grad_norm": 6.351480960845947,
      "learning_rate": 2.358490566037736e-05,
      "loss": 1.6153,
      "step": 50
    },
    {
      "epoch": 0.17057569296375266,
      "grad_norm": 8.011857986450195,
      "learning_rate": 2.830188679245283e-05,
      "loss": 1.3063,
      "step": 60
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 10.897187232971191,
      "learning_rate": 3.30188679245283e-05,
      "loss": 1.0459,
      "step": 70
    },
    {
      "epoch": 0.22743425728500355,
      "grad_norm": 11.66580581665039,
      "learning_rate": 3.7735849056603776e-05,
      "loss": 0.8935,
      "step": 80
    },
    {
      "epoch": 0.255863539445629,
      "grad_norm": 9.333466529846191,
      "learning_rate": 4.245283018867925e-05,
      "loss": 0.8089,
      "step": 90
    },
    {
      "epoch": 0.28429282160625446,
      "grad_norm": 10.605915069580078,
      "learning_rate": 4.716981132075472e-05,
      "loss": 0.7529,
      "step": 100
    },
    {
      "epoch": 0.31272210376687987,
      "grad_norm": 13.530115127563477,
      "learning_rate": 4.978880675818374e-05,
      "loss": 0.6793,
      "step": 110
    },
    {
      "epoch": 0.3411513859275053,
      "grad_norm": 16.129487991333008,
      "learning_rate": 4.9260823653643085e-05,
      "loss": 0.7098,
      "step": 120
    },
    {
      "epoch": 0.3695806680881308,
      "grad_norm": 10.853135108947754,
      "learning_rate": 4.8732840549102435e-05,
      "loss": 0.6768,
      "step": 130
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 15.441333770751953,
      "learning_rate": 4.820485744456177e-05,
      "loss": 0.6994,
      "step": 140
    },
    {
      "epoch": 0.42643923240938164,
      "grad_norm": 15.366798400878906,
      "learning_rate": 4.767687434002112e-05,
      "loss": 0.6091,
      "step": 150
    },
    {
      "epoch": 0.4548685145700071,
      "grad_norm": 9.057019233703613,
      "learning_rate": 4.7148891235480466e-05,
      "loss": 0.5447,
      "step": 160
    },
    {
      "epoch": 0.48329779673063256,
      "grad_norm": 11.785883903503418,
      "learning_rate": 4.662090813093981e-05,
      "loss": 0.5824,
      "step": 170
    },
    {
      "epoch": 0.511727078891258,
      "grad_norm": 10.493522644042969,
      "learning_rate": 4.609292502639916e-05,
      "loss": 0.527,
      "step": 180
    },
    {
      "epoch": 0.5401563610518835,
      "grad_norm": 10.710103034973145,
      "learning_rate": 4.55649419218585e-05,
      "loss": 0.5574,
      "step": 190
    },
    {
      "epoch": 0.5685856432125089,
      "grad_norm": 10.442607879638672,
      "learning_rate": 4.503695881731785e-05,
      "loss": 0.5617,
      "step": 200
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 8.90545654296875,
      "learning_rate": 4.45089757127772e-05,
      "loss": 0.5034,
      "step": 210
    },
    {
      "epoch": 0.6254442075337597,
      "grad_norm": 13.07169246673584,
      "learning_rate": 4.398099260823654e-05,
      "loss": 0.5332,
      "step": 220
    },
    {
      "epoch": 0.6538734896943852,
      "grad_norm": 10.371030807495117,
      "learning_rate": 4.3453009503695884e-05,
      "loss": 0.4784,
      "step": 230
    },
    {
      "epoch": 0.6823027718550106,
      "grad_norm": 9.855475425720215,
      "learning_rate": 4.292502639915523e-05,
      "loss": 0.5059,
      "step": 240
    },
    {
      "epoch": 0.7107320540156361,
      "grad_norm": 10.05436897277832,
      "learning_rate": 4.239704329461457e-05,
      "loss": 0.4563,
      "step": 250
    },
    {
      "epoch": 0.7391613361762616,
      "grad_norm": 10.342894554138184,
      "learning_rate": 4.186906019007392e-05,
      "loss": 0.4704,
      "step": 260
    },
    {
      "epoch": 0.767590618336887,
      "grad_norm": 9.998404502868652,
      "learning_rate": 4.1341077085533265e-05,
      "loss": 0.4532,
      "step": 270
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 9.41798210144043,
      "learning_rate": 4.081309398099261e-05,
      "loss": 0.4811,
      "step": 280
    },
    {
      "epoch": 0.8244491826581379,
      "grad_norm": 12.316587448120117,
      "learning_rate": 4.028511087645195e-05,
      "loss": 0.5105,
      "step": 290
    },
    {
      "epoch": 0.8528784648187633,
      "grad_norm": 13.802250862121582,
      "learning_rate": 3.97571277719113e-05,
      "loss": 0.5178,
      "step": 300
    },
    {
      "epoch": 0.8813077469793887,
      "grad_norm": 11.238789558410645,
      "learning_rate": 3.9229144667370646e-05,
      "loss": 0.4478,
      "step": 310
    },
    {
      "epoch": 0.9097370291400142,
      "grad_norm": 9.923989295959473,
      "learning_rate": 3.870116156282999e-05,
      "loss": 0.4973,
      "step": 320
    },
    {
      "epoch": 0.9381663113006397,
      "grad_norm": 10.5370512008667,
      "learning_rate": 3.817317845828934e-05,
      "loss": 0.4734,
      "step": 330
    },
    {
      "epoch": 0.9665955934612651,
      "grad_norm": 7.8266448974609375,
      "learning_rate": 3.764519535374868e-05,
      "loss": 0.4087,
      "step": 340
    },
    {
      "epoch": 0.9950248756218906,
      "grad_norm": 7.531087398529053,
      "learning_rate": 3.711721224920803e-05,
      "loss": 0.4642,
      "step": 350
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9532,
      "eval_loss": 0.13443532586097717,
      "eval_runtime": 30.1227,
      "eval_samples_per_second": 165.988,
      "eval_steps_per_second": 5.212,
      "step": 352
    },
    {
      "epoch": 1.0227434257285004,
      "grad_norm": 8.728900909423828,
      "learning_rate": 3.658922914466738e-05,
      "loss": 0.4293,
      "step": 360
    },
    {
      "epoch": 1.0511727078891258,
      "grad_norm": 7.730119705200195,
      "learning_rate": 3.6061246040126714e-05,
      "loss": 0.4392,
      "step": 370
    },
    {
      "epoch": 1.0796019900497513,
      "grad_norm": 13.757883071899414,
      "learning_rate": 3.5533262935586064e-05,
      "loss": 0.4193,
      "step": 380
    },
    {
      "epoch": 1.1080312722103767,
      "grad_norm": 9.790868759155273,
      "learning_rate": 3.500527983104541e-05,
      "loss": 0.4604,
      "step": 390
    },
    {
      "epoch": 1.136460554371002,
      "grad_norm": 9.005698204040527,
      "learning_rate": 3.447729672650475e-05,
      "loss": 0.4085,
      "step": 400
    },
    {
      "epoch": 1.1648898365316276,
      "grad_norm": 9.621892929077148,
      "learning_rate": 3.3949313621964095e-05,
      "loss": 0.4185,
      "step": 410
    },
    {
      "epoch": 1.193319118692253,
      "grad_norm": 8.165648460388184,
      "learning_rate": 3.3421330517423445e-05,
      "loss": 0.3593,
      "step": 420
    },
    {
      "epoch": 1.2217484008528785,
      "grad_norm": 9.228168487548828,
      "learning_rate": 3.289334741288279e-05,
      "loss": 0.3602,
      "step": 430
    },
    {
      "epoch": 1.2501776830135038,
      "grad_norm": 8.772619247436523,
      "learning_rate": 3.236536430834213e-05,
      "loss": 0.4727,
      "step": 440
    },
    {
      "epoch": 1.2786069651741294,
      "grad_norm": 10.873198509216309,
      "learning_rate": 3.183738120380148e-05,
      "loss": 0.4686,
      "step": 450
    },
    {
      "epoch": 1.3070362473347548,
      "grad_norm": 8.746286392211914,
      "learning_rate": 3.130939809926082e-05,
      "loss": 0.3997,
      "step": 460
    },
    {
      "epoch": 1.33546552949538,
      "grad_norm": 11.572668075561523,
      "learning_rate": 3.078141499472017e-05,
      "loss": 0.4251,
      "step": 470
    },
    {
      "epoch": 1.3638948116560057,
      "grad_norm": 7.098505973815918,
      "learning_rate": 3.0253431890179517e-05,
      "loss": 0.3824,
      "step": 480
    },
    {
      "epoch": 1.3923240938166312,
      "grad_norm": 11.592111587524414,
      "learning_rate": 2.972544878563886e-05,
      "loss": 0.4104,
      "step": 490
    },
    {
      "epoch": 1.4207533759772566,
      "grad_norm": 10.93486499786377,
      "learning_rate": 2.9197465681098207e-05,
      "loss": 0.3884,
      "step": 500
    },
    {
      "epoch": 1.449182658137882,
      "grad_norm": 9.60152816772461,
      "learning_rate": 2.8669482576557548e-05,
      "loss": 0.4238,
      "step": 510
    },
    {
      "epoch": 1.4776119402985075,
      "grad_norm": 10.507494926452637,
      "learning_rate": 2.8141499472016898e-05,
      "loss": 0.4234,
      "step": 520
    },
    {
      "epoch": 1.5060412224591329,
      "grad_norm": 8.032126426696777,
      "learning_rate": 2.7613516367476245e-05,
      "loss": 0.3762,
      "step": 530
    },
    {
      "epoch": 1.5344705046197582,
      "grad_norm": 8.108844757080078,
      "learning_rate": 2.7085533262935585e-05,
      "loss": 0.436,
      "step": 540
    },
    {
      "epoch": 1.5628997867803838,
      "grad_norm": 10.481697082519531,
      "learning_rate": 2.6557550158394935e-05,
      "loss": 0.3651,
      "step": 550
    },
    {
      "epoch": 1.5913290689410093,
      "grad_norm": 8.025532722473145,
      "learning_rate": 2.6029567053854276e-05,
      "loss": 0.4054,
      "step": 560
    },
    {
      "epoch": 1.6197583511016347,
      "grad_norm": 6.820096492767334,
      "learning_rate": 2.5501583949313622e-05,
      "loss": 0.4018,
      "step": 570
    },
    {
      "epoch": 1.64818763326226,
      "grad_norm": 9.175089836120605,
      "learning_rate": 2.497360084477297e-05,
      "loss": 0.4268,
      "step": 580
    },
    {
      "epoch": 1.6766169154228856,
      "grad_norm": 10.221317291259766,
      "learning_rate": 2.4445617740232313e-05,
      "loss": 0.3801,
      "step": 590
    },
    {
      "epoch": 1.7050461975835112,
      "grad_norm": 8.72017765045166,
      "learning_rate": 2.391763463569166e-05,
      "loss": 0.3949,
      "step": 600
    },
    {
      "epoch": 1.7334754797441365,
      "grad_norm": 8.293012619018555,
      "learning_rate": 2.3389651531151003e-05,
      "loss": 0.3985,
      "step": 610
    },
    {
      "epoch": 1.7619047619047619,
      "grad_norm": 6.780423641204834,
      "learning_rate": 2.286166842661035e-05,
      "loss": 0.3703,
      "step": 620
    },
    {
      "epoch": 1.7903340440653874,
      "grad_norm": 8.803313255310059,
      "learning_rate": 2.2333685322069694e-05,
      "loss": 0.3901,
      "step": 630
    },
    {
      "epoch": 1.8187633262260128,
      "grad_norm": 8.275494575500488,
      "learning_rate": 2.180570221752904e-05,
      "loss": 0.397,
      "step": 640
    },
    {
      "epoch": 1.8471926083866381,
      "grad_norm": 8.815279006958008,
      "learning_rate": 2.1277719112988384e-05,
      "loss": 0.4184,
      "step": 650
    },
    {
      "epoch": 1.8756218905472637,
      "grad_norm": 6.734531402587891,
      "learning_rate": 2.074973600844773e-05,
      "loss": 0.3941,
      "step": 660
    },
    {
      "epoch": 1.9040511727078893,
      "grad_norm": 6.036601543426514,
      "learning_rate": 2.0221752903907075e-05,
      "loss": 0.3907,
      "step": 670
    },
    {
      "epoch": 1.9324804548685146,
      "grad_norm": 9.15225601196289,
      "learning_rate": 1.9693769799366422e-05,
      "loss": 0.4078,
      "step": 680
    },
    {
      "epoch": 1.96090973702914,
      "grad_norm": 10.416858673095703,
      "learning_rate": 1.9165786694825765e-05,
      "loss": 0.3725,
      "step": 690
    },
    {
      "epoch": 1.9893390191897655,
      "grad_norm": 8.242061614990234,
      "learning_rate": 1.863780359028511e-05,
      "loss": 0.367,
      "step": 700
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9688,
      "eval_loss": 0.08839573711156845,
      "eval_runtime": 30.1589,
      "eval_samples_per_second": 165.789,
      "eval_steps_per_second": 5.206,
      "step": 704
    },
    {
      "epoch": 2.0170575692963753,
      "grad_norm": 8.470636367797852,
      "learning_rate": 1.810982048574446e-05,
      "loss": 0.3583,
      "step": 710
    },
    {
      "epoch": 2.045486851457001,
      "grad_norm": 11.136754035949707,
      "learning_rate": 1.7581837381203803e-05,
      "loss": 0.3168,
      "step": 720
    },
    {
      "epoch": 2.073916133617626,
      "grad_norm": 9.741412162780762,
      "learning_rate": 1.7053854276663146e-05,
      "loss": 0.3108,
      "step": 730
    },
    {
      "epoch": 2.1023454157782515,
      "grad_norm": 8.964203834533691,
      "learning_rate": 1.6525871172122493e-05,
      "loss": 0.3775,
      "step": 740
    },
    {
      "epoch": 2.130774697938877,
      "grad_norm": 8.144142150878906,
      "learning_rate": 1.5997888067581837e-05,
      "loss": 0.3406,
      "step": 750
    },
    {
      "epoch": 2.1592039800995027,
      "grad_norm": 6.743160724639893,
      "learning_rate": 1.5469904963041184e-05,
      "loss": 0.3383,
      "step": 760
    },
    {
      "epoch": 2.1876332622601278,
      "grad_norm": 9.67287826538086,
      "learning_rate": 1.4941921858500529e-05,
      "loss": 0.3196,
      "step": 770
    },
    {
      "epoch": 2.2160625444207533,
      "grad_norm": 9.108183860778809,
      "learning_rate": 1.4413938753959874e-05,
      "loss": 0.3598,
      "step": 780
    },
    {
      "epoch": 2.244491826581379,
      "grad_norm": 8.00169849395752,
      "learning_rate": 1.388595564941922e-05,
      "loss": 0.3518,
      "step": 790
    },
    {
      "epoch": 2.272921108742004,
      "grad_norm": 8.659334182739258,
      "learning_rate": 1.3357972544878563e-05,
      "loss": 0.2976,
      "step": 800
    },
    {
      "epoch": 2.3013503909026296,
      "grad_norm": 10.701913833618164,
      "learning_rate": 1.2829989440337912e-05,
      "loss": 0.3627,
      "step": 810
    },
    {
      "epoch": 2.329779673063255,
      "grad_norm": 11.164308547973633,
      "learning_rate": 1.2302006335797255e-05,
      "loss": 0.3649,
      "step": 820
    },
    {
      "epoch": 2.3582089552238807,
      "grad_norm": 8.511822700500488,
      "learning_rate": 1.17740232312566e-05,
      "loss": 0.3818,
      "step": 830
    },
    {
      "epoch": 2.386638237384506,
      "grad_norm": 8.833267211914062,
      "learning_rate": 1.1246040126715946e-05,
      "loss": 0.3465,
      "step": 840
    },
    {
      "epoch": 2.4150675195451314,
      "grad_norm": 7.075978755950928,
      "learning_rate": 1.0718057022175291e-05,
      "loss": 0.3173,
      "step": 850
    },
    {
      "epoch": 2.443496801705757,
      "grad_norm": 7.614409923553467,
      "learning_rate": 1.0190073917634636e-05,
      "loss": 0.3323,
      "step": 860
    },
    {
      "epoch": 2.471926083866382,
      "grad_norm": 12.305047035217285,
      "learning_rate": 9.662090813093982e-06,
      "loss": 0.3603,
      "step": 870
    },
    {
      "epoch": 2.5003553660270077,
      "grad_norm": 9.996665954589844,
      "learning_rate": 9.134107708553327e-06,
      "loss": 0.3105,
      "step": 880
    },
    {
      "epoch": 2.5287846481876333,
      "grad_norm": 10.400426864624023,
      "learning_rate": 8.606124604012672e-06,
      "loss": 0.3555,
      "step": 890
    },
    {
      "epoch": 2.557213930348259,
      "grad_norm": 11.872201919555664,
      "learning_rate": 8.078141499472017e-06,
      "loss": 0.3697,
      "step": 900
    },
    {
      "epoch": 2.5856432125088844,
      "grad_norm": 10.102994918823242,
      "learning_rate": 7.5501583949313625e-06,
      "loss": 0.3389,
      "step": 910
    },
    {
      "epoch": 2.6140724946695095,
      "grad_norm": 11.179094314575195,
      "learning_rate": 7.022175290390708e-06,
      "loss": 0.3466,
      "step": 920
    },
    {
      "epoch": 2.642501776830135,
      "grad_norm": 7.167893886566162,
      "learning_rate": 6.494192185850054e-06,
      "loss": 0.356,
      "step": 930
    },
    {
      "epoch": 2.67093105899076,
      "grad_norm": 10.360447883605957,
      "learning_rate": 5.966209081309398e-06,
      "loss": 0.3719,
      "step": 940
    },
    {
      "epoch": 2.699360341151386,
      "grad_norm": 8.36248779296875,
      "learning_rate": 5.438225976768744e-06,
      "loss": 0.3199,
      "step": 950
    },
    {
      "epoch": 2.7277896233120114,
      "grad_norm": 10.92849063873291,
      "learning_rate": 4.910242872228089e-06,
      "loss": 0.3278,
      "step": 960
    },
    {
      "epoch": 2.756218905472637,
      "grad_norm": 7.335627555847168,
      "learning_rate": 4.382259767687434e-06,
      "loss": 0.3017,
      "step": 970
    },
    {
      "epoch": 2.7846481876332625,
      "grad_norm": 8.422759056091309,
      "learning_rate": 3.854276663146779e-06,
      "loss": 0.2855,
      "step": 980
    },
    {
      "epoch": 2.8130774697938876,
      "grad_norm": 7.285062313079834,
      "learning_rate": 3.326293558606125e-06,
      "loss": 0.3194,
      "step": 990
    },
    {
      "epoch": 2.841506751954513,
      "grad_norm": 5.810029029846191,
      "learning_rate": 2.79831045406547e-06,
      "loss": 0.3056,
      "step": 1000
    },
    {
      "epoch": 2.8699360341151388,
      "grad_norm": 8.25550365447998,
      "learning_rate": 2.2703273495248154e-06,
      "loss": 0.2828,
      "step": 1010
    },
    {
      "epoch": 2.898365316275764,
      "grad_norm": 7.528290271759033,
      "learning_rate": 1.7423442449841606e-06,
      "loss": 0.3165,
      "step": 1020
    },
    {
      "epoch": 2.9267945984363894,
      "grad_norm": 8.811773300170898,
      "learning_rate": 1.2143611404435059e-06,
      "loss": 0.3423,
      "step": 1030
    },
    {
      "epoch": 2.955223880597015,
      "grad_norm": 8.768060684204102,
      "learning_rate": 6.863780359028511e-07,
      "loss": 0.3498,
      "step": 1040
    },
    {
      "epoch": 2.9836531627576406,
      "grad_norm": 10.878323554992676,
      "learning_rate": 1.5839493136219642e-07,
      "loss": 0.3387,
      "step": 1050
    },
    {
      "epoch": 2.992181947405828,
      "eval_accuracy": 0.9728,
      "eval_loss": 0.07574349641799927,
      "eval_runtime": 29.6661,
      "eval_samples_per_second": 168.543,
      "eval_steps_per_second": 5.292,
      "step": 1053
    },
    {
      "epoch": 2.992181947405828,
      "step": 1053,
      "total_flos": 3.3481540253275914e+18,
      "train_loss": 0.518996418711127,
      "train_runtime": 2019.6102,
      "train_samples_per_second": 66.845,
      "train_steps_per_second": 0.521
    }
  ],
  "logging_steps": 10,
  "max_steps": 1053,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.3481540253275914e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}