{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9908256880733946,
"eval_steps": 98,
"global_step": 972,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0010193679918450561,
"grad_norm": 24.08992576599121,
"learning_rate": 0.0,
"loss": 8.5798,
"step": 1
},
{
"epoch": 0.0020387359836901123,
"grad_norm": 22.663619995117188,
"learning_rate": 2.2624434389140275e-07,
"loss": 8.4222,
"step": 2
},
{
"epoch": 0.0030581039755351682,
"grad_norm": 25.377544403076172,
"learning_rate": 4.524886877828055e-07,
"loss": 9.392,
"step": 3
},
{
"epoch": 0.004077471967380225,
"grad_norm": 22.122257232666016,
"learning_rate": 6.787330316742082e-07,
"loss": 8.4193,
"step": 4
},
{
"epoch": 0.0050968399592252805,
"grad_norm": null,
"learning_rate": 9.04977375565611e-07,
"loss": 8.808,
"step": 5
},
{
"epoch": 0.0061162079510703364,
"grad_norm": 22.832090377807617,
"learning_rate": 9.04977375565611e-07,
"loss": 10.5002,
"step": 6
},
{
"epoch": 0.007135575942915392,
"grad_norm": 20.933177947998047,
"learning_rate": 1.1312217194570136e-06,
"loss": 9.5956,
"step": 7
},
{
"epoch": 0.00815494393476045,
"grad_norm": 20.44132423400879,
"learning_rate": 1.3574660633484164e-06,
"loss": 8.5526,
"step": 8
},
{
"epoch": 0.009174311926605505,
"grad_norm": 22.630067825317383,
"learning_rate": 1.583710407239819e-06,
"loss": 9.8255,
"step": 9
},
{
"epoch": 0.010193679918450561,
"grad_norm": 22.625064849853516,
"learning_rate": 1.809954751131222e-06,
"loss": 9.2067,
"step": 10
},
{
"epoch": 0.011213047910295617,
"grad_norm": 19.427106857299805,
"learning_rate": 2.0361990950226245e-06,
"loss": 8.667,
"step": 11
},
{
"epoch": 0.012232415902140673,
"grad_norm": 24.354387283325195,
"learning_rate": 2.2624434389140273e-06,
"loss": 10.396,
"step": 12
},
{
"epoch": 0.013251783893985729,
"grad_norm": 21.99860191345215,
"learning_rate": 2.48868778280543e-06,
"loss": 8.9036,
"step": 13
},
{
"epoch": 0.014271151885830785,
"grad_norm": 21.275592803955078,
"learning_rate": 2.7149321266968327e-06,
"loss": 7.9307,
"step": 14
},
{
"epoch": 0.01529051987767584,
"grad_norm": 20.020435333251953,
"learning_rate": 2.9411764705882355e-06,
"loss": 9.0655,
"step": 15
},
{
"epoch": 0.0163098878695209,
"grad_norm": 20.713603973388672,
"learning_rate": 3.167420814479638e-06,
"loss": 8.6598,
"step": 16
},
{
"epoch": 0.017329255861365953,
"grad_norm": 22.857194900512695,
"learning_rate": 3.3936651583710405e-06,
"loss": 9.7562,
"step": 17
},
{
"epoch": 0.01834862385321101,
"grad_norm": 19.031551361083984,
"learning_rate": 3.619909502262444e-06,
"loss": 9.2297,
"step": 18
},
{
"epoch": 0.019367991845056064,
"grad_norm": 19.30624008178711,
"learning_rate": 3.846153846153847e-06,
"loss": 8.6939,
"step": 19
},
{
"epoch": 0.020387359836901122,
"grad_norm": 17.09296417236328,
"learning_rate": 4.072398190045249e-06,
"loss": 8.1317,
"step": 20
},
{
"epoch": 0.021406727828746176,
"grad_norm": 19.199600219726562,
"learning_rate": 4.298642533936651e-06,
"loss": 8.3585,
"step": 21
},
{
"epoch": 0.022426095820591234,
"grad_norm": 18.50484275817871,
"learning_rate": 4.5248868778280546e-06,
"loss": 8.4533,
"step": 22
},
{
"epoch": 0.023445463812436288,
"grad_norm": 19.170618057250977,
"learning_rate": 4.751131221719457e-06,
"loss": 9.3014,
"step": 23
},
{
"epoch": 0.024464831804281346,
"grad_norm": 17.692346572875977,
"learning_rate": 4.97737556561086e-06,
"loss": 8.18,
"step": 24
},
{
"epoch": 0.0254841997961264,
"grad_norm": 18.87356185913086,
"learning_rate": 5.203619909502263e-06,
"loss": 7.8485,
"step": 25
},
{
"epoch": 0.026503567787971458,
"grad_norm": 16.432092666625977,
"learning_rate": 5.4298642533936655e-06,
"loss": 8.9669,
"step": 26
},
{
"epoch": 0.027522935779816515,
"grad_norm": 17.064382553100586,
"learning_rate": 5.656108597285068e-06,
"loss": 10.1397,
"step": 27
},
{
"epoch": 0.02854230377166157,
"grad_norm": 17.96854591369629,
"learning_rate": 5.882352941176471e-06,
"loss": 10.5216,
"step": 28
},
{
"epoch": 0.029561671763506627,
"grad_norm": 16.348352432250977,
"learning_rate": 6.108597285067873e-06,
"loss": 7.4782,
"step": 29
},
{
"epoch": 0.03058103975535168,
"grad_norm": 15.834653854370117,
"learning_rate": 6.334841628959276e-06,
"loss": 8.0439,
"step": 30
},
{
"epoch": 0.03160040774719674,
"grad_norm": 15.115158081054688,
"learning_rate": 6.5610859728506795e-06,
"loss": 7.1911,
"step": 31
},
{
"epoch": 0.0326197757390418,
"grad_norm": 17.570573806762695,
"learning_rate": 6.787330316742081e-06,
"loss": 8.5735,
"step": 32
},
{
"epoch": 0.03363914373088685,
"grad_norm": 15.224530220031738,
"learning_rate": 7.013574660633485e-06,
"loss": 8.3855,
"step": 33
},
{
"epoch": 0.034658511722731905,
"grad_norm": 16.47282600402832,
"learning_rate": 7.239819004524888e-06,
"loss": 8.4305,
"step": 34
},
{
"epoch": 0.03567787971457696,
"grad_norm": 16.739215850830078,
"learning_rate": 7.46606334841629e-06,
"loss": 9.4608,
"step": 35
},
{
"epoch": 0.03669724770642202,
"grad_norm": 13.741637229919434,
"learning_rate": 7.692307692307694e-06,
"loss": 8.1572,
"step": 36
},
{
"epoch": 0.03771661569826707,
"grad_norm": 14.70285701751709,
"learning_rate": 7.918552036199094e-06,
"loss": 8.1456,
"step": 37
},
{
"epoch": 0.03873598369011213,
"grad_norm": 11.470185279846191,
"learning_rate": 8.144796380090498e-06,
"loss": 7.3833,
"step": 38
},
{
"epoch": 0.039755351681957186,
"grad_norm": 13.029812812805176,
"learning_rate": 8.3710407239819e-06,
"loss": 8.8539,
"step": 39
},
{
"epoch": 0.040774719673802244,
"grad_norm": 12.46716594696045,
"learning_rate": 8.597285067873303e-06,
"loss": 8.9349,
"step": 40
},
{
"epoch": 0.0417940876656473,
"grad_norm": 12.875706672668457,
"learning_rate": 8.823529411764707e-06,
"loss": 8.1803,
"step": 41
},
{
"epoch": 0.04281345565749235,
"grad_norm": 12.646770477294922,
"learning_rate": 9.049773755656109e-06,
"loss": 6.7532,
"step": 42
},
{
"epoch": 0.04383282364933741,
"grad_norm": 13.792744636535645,
"learning_rate": 9.276018099547511e-06,
"loss": 7.127,
"step": 43
},
{
"epoch": 0.04485219164118247,
"grad_norm": 11.656695365905762,
"learning_rate": 9.502262443438914e-06,
"loss": 7.5565,
"step": 44
},
{
"epoch": 0.045871559633027525,
"grad_norm": 11.562976837158203,
"learning_rate": 9.728506787330318e-06,
"loss": 7.6078,
"step": 45
},
{
"epoch": 0.046890927624872576,
"grad_norm": 11.516715049743652,
"learning_rate": 9.95475113122172e-06,
"loss": 8.4153,
"step": 46
},
{
"epoch": 0.047910295616717634,
"grad_norm": 11.569866180419922,
"learning_rate": 1.0180995475113122e-05,
"loss": 7.1062,
"step": 47
},
{
"epoch": 0.04892966360856269,
"grad_norm": 11.088666915893555,
"learning_rate": 1.0407239819004526e-05,
"loss": 6.8482,
"step": 48
},
{
"epoch": 0.04994903160040775,
"grad_norm": 11.396224021911621,
"learning_rate": 1.0633484162895929e-05,
"loss": 7.2262,
"step": 49
},
{
"epoch": 0.0509683995922528,
"grad_norm": 11.868388175964355,
"learning_rate": 1.0859728506787331e-05,
"loss": 8.0207,
"step": 50
},
{
"epoch": 0.05198776758409786,
"grad_norm": 10.022957801818848,
"learning_rate": 1.1085972850678733e-05,
"loss": 7.6895,
"step": 51
},
{
"epoch": 0.053007135575942915,
"grad_norm": 11.007475852966309,
"learning_rate": 1.1312217194570136e-05,
"loss": 7.6185,
"step": 52
},
{
"epoch": 0.05402650356778797,
"grad_norm": 10.026458740234375,
"learning_rate": 1.153846153846154e-05,
"loss": 8.8153,
"step": 53
},
{
"epoch": 0.05504587155963303,
"grad_norm": 10.358866691589355,
"learning_rate": 1.1764705882352942e-05,
"loss": 7.7666,
"step": 54
},
{
"epoch": 0.05606523955147808,
"grad_norm": 10.722491264343262,
"learning_rate": 1.1990950226244344e-05,
"loss": 7.1431,
"step": 55
},
{
"epoch": 0.05708460754332314,
"grad_norm": 10.623186111450195,
"learning_rate": 1.2217194570135746e-05,
"loss": 6.3969,
"step": 56
},
{
"epoch": 0.0581039755351682,
"grad_norm": 10.13591480255127,
"learning_rate": 1.244343891402715e-05,
"loss": 8.1643,
"step": 57
},
{
"epoch": 0.059123343527013254,
"grad_norm": 9.476139068603516,
"learning_rate": 1.2669683257918553e-05,
"loss": 7.1228,
"step": 58
},
{
"epoch": 0.060142711518858305,
"grad_norm": 8.608465194702148,
"learning_rate": 1.2895927601809957e-05,
"loss": 6.9228,
"step": 59
},
{
"epoch": 0.06116207951070336,
"grad_norm": 10.69497299194336,
"learning_rate": 1.3122171945701359e-05,
"loss": 10.2251,
"step": 60
},
{
"epoch": 0.06218144750254842,
"grad_norm": 9.309306144714355,
"learning_rate": 1.3348416289592761e-05,
"loss": 7.1105,
"step": 61
},
{
"epoch": 0.06320081549439348,
"grad_norm": 9.268863677978516,
"learning_rate": 1.3574660633484162e-05,
"loss": 7.1156,
"step": 62
},
{
"epoch": 0.06422018348623854,
"grad_norm": 10.207130432128906,
"learning_rate": 1.3800904977375568e-05,
"loss": 6.5522,
"step": 63
},
{
"epoch": 0.0652395514780836,
"grad_norm": 9.29359245300293,
"learning_rate": 1.402714932126697e-05,
"loss": 6.734,
"step": 64
},
{
"epoch": 0.06625891946992865,
"grad_norm": 8.38429069519043,
"learning_rate": 1.425339366515837e-05,
"loss": 8.1303,
"step": 65
},
{
"epoch": 0.0672782874617737,
"grad_norm": 9.689257621765137,
"learning_rate": 1.4479638009049776e-05,
"loss": 7.298,
"step": 66
},
{
"epoch": 0.06829765545361875,
"grad_norm": 8.886714935302734,
"learning_rate": 1.4705882352941177e-05,
"loss": 6.1227,
"step": 67
},
{
"epoch": 0.06931702344546381,
"grad_norm": 9.28791332244873,
"learning_rate": 1.493212669683258e-05,
"loss": 6.7938,
"step": 68
},
{
"epoch": 0.07033639143730887,
"grad_norm": 9.196669578552246,
"learning_rate": 1.5158371040723981e-05,
"loss": 6.4562,
"step": 69
},
{
"epoch": 0.07135575942915393,
"grad_norm": 10.716215133666992,
"learning_rate": 1.5384615384615387e-05,
"loss": 8.0389,
"step": 70
},
{
"epoch": 0.07237512742099898,
"grad_norm": 9.852572441101074,
"learning_rate": 1.5610859728506788e-05,
"loss": 8.7218,
"step": 71
},
{
"epoch": 0.07339449541284404,
"grad_norm": 8.59492301940918,
"learning_rate": 1.583710407239819e-05,
"loss": 6.1906,
"step": 72
},
{
"epoch": 0.0744138634046891,
"grad_norm": 9.830521583557129,
"learning_rate": 1.6063348416289596e-05,
"loss": 6.7222,
"step": 73
},
{
"epoch": 0.07543323139653414,
"grad_norm": 9.12816047668457,
"learning_rate": 1.6289592760180996e-05,
"loss": 7.0611,
"step": 74
},
{
"epoch": 0.0764525993883792,
"grad_norm": 10.391504287719727,
"learning_rate": 1.6515837104072397e-05,
"loss": 7.8241,
"step": 75
},
{
"epoch": 0.07747196738022426,
"grad_norm": 9.0382719039917,
"learning_rate": 1.67420814479638e-05,
"loss": 6.3791,
"step": 76
},
{
"epoch": 0.07849133537206932,
"grad_norm": 11.495955467224121,
"learning_rate": 1.6968325791855205e-05,
"loss": 6.8864,
"step": 77
},
{
"epoch": 0.07951070336391437,
"grad_norm": 9.282613754272461,
"learning_rate": 1.7194570135746606e-05,
"loss": 6.8356,
"step": 78
},
{
"epoch": 0.08053007135575943,
"grad_norm": 9.06067180633545,
"learning_rate": 1.742081447963801e-05,
"loss": 6.168,
"step": 79
},
{
"epoch": 0.08154943934760449,
"grad_norm": 10.343846321105957,
"learning_rate": 1.7647058823529414e-05,
"loss": 8.6845,
"step": 80
},
{
"epoch": 0.08256880733944955,
"grad_norm": 10.185526847839355,
"learning_rate": 1.7873303167420814e-05,
"loss": 5.9739,
"step": 81
},
{
"epoch": 0.0835881753312946,
"grad_norm": 12.164653778076172,
"learning_rate": 1.8099547511312218e-05,
"loss": 6.2423,
"step": 82
},
{
"epoch": 0.08460754332313965,
"grad_norm": 10.543149948120117,
"learning_rate": 1.832579185520362e-05,
"loss": 7.6247,
"step": 83
},
{
"epoch": 0.0856269113149847,
"grad_norm": 10.210731506347656,
"learning_rate": 1.8552036199095023e-05,
"loss": 5.8418,
"step": 84
},
{
"epoch": 0.08664627930682976,
"grad_norm": 11.613642692565918,
"learning_rate": 1.8778280542986427e-05,
"loss": 7.0948,
"step": 85
},
{
"epoch": 0.08766564729867482,
"grad_norm": 12.590648651123047,
"learning_rate": 1.9004524886877827e-05,
"loss": 6.7457,
"step": 86
},
{
"epoch": 0.08868501529051988,
"grad_norm": 12.547815322875977,
"learning_rate": 1.923076923076923e-05,
"loss": 5.6837,
"step": 87
},
{
"epoch": 0.08970438328236494,
"grad_norm": 14.212437629699707,
"learning_rate": 1.9457013574660635e-05,
"loss": 6.0757,
"step": 88
},
{
"epoch": 0.09072375127421,
"grad_norm": 14.821358680725098,
"learning_rate": 1.9683257918552036e-05,
"loss": 6.523,
"step": 89
},
{
"epoch": 0.09174311926605505,
"grad_norm": 14.133096694946289,
"learning_rate": 1.990950226244344e-05,
"loss": 6.2917,
"step": 90
},
{
"epoch": 0.09276248725790011,
"grad_norm": 14.283154487609863,
"learning_rate": 2.0135746606334844e-05,
"loss": 6.0509,
"step": 91
},
{
"epoch": 0.09378185524974515,
"grad_norm": 15.914741516113281,
"learning_rate": 2.0361990950226245e-05,
"loss": 6.8298,
"step": 92
},
{
"epoch": 0.09480122324159021,
"grad_norm": 18.067726135253906,
"learning_rate": 2.058823529411765e-05,
"loss": 7.855,
"step": 93
},
{
"epoch": 0.09582059123343527,
"grad_norm": 17.288843154907227,
"learning_rate": 2.0814479638009053e-05,
"loss": 6.6372,
"step": 94
},
{
"epoch": 0.09683995922528033,
"grad_norm": 22.13617515563965,
"learning_rate": 2.1040723981900453e-05,
"loss": 5.7468,
"step": 95
},
{
"epoch": 0.09785932721712538,
"grad_norm": 22.20960235595703,
"learning_rate": 2.1266968325791857e-05,
"loss": 7.5522,
"step": 96
},
{
"epoch": 0.09887869520897044,
"grad_norm": 23.28131103515625,
"learning_rate": 2.149321266968326e-05,
"loss": 7.7825,
"step": 97
},
{
"epoch": 0.0998980632008155,
"grad_norm": 29.695850372314453,
"learning_rate": 2.1719457013574662e-05,
"loss": 8.7452,
"step": 98
},
{
"epoch": 0.0998980632008155,
"eval_Qnli-dev-1024_cosine_accuracy": 0.6458333333333334,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.995652437210083,
"eval_Qnli-dev-1024_cosine_ap": 0.6274798374964984,
"eval_Qnli-dev-1024_cosine_f1": 0.6518518518518519,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.9515509605407715,
"eval_Qnli-dev-1024_cosine_mcc": 0.1563007361345257,
"eval_Qnli-dev-1024_cosine_precision": 0.4888888888888889,
"eval_Qnli-dev-1024_cosine_recall": 0.9777777777777777,
"eval_Qnli-dev_cosine_accuracy": 0.7395833333333334,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.8860945701599121,
"eval_Qnli-dev_cosine_ap": 0.7645314494110582,
"eval_Qnli-dev_cosine_f1": 0.7500000000000001,
"eval_Qnli-dev_cosine_f1_threshold": 0.8442017436027527,
"eval_Qnli-dev_cosine_mcc": 0.48653004754089046,
"eval_Qnli-dev_cosine_precision": 0.6610169491525424,
"eval_Qnli-dev_cosine_recall": 0.8666666666666667,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.7291666865348816,
"eval_allNLI-triplets_cosine_accuracy": 0.96875,
"eval_global_dataset_loss": 2.297825574874878,
"eval_global_dataset_runtime": 104.2196,
"eval_global_dataset_samples_per_second": 7.705,
"eval_global_dataset_steps_per_second": 0.163,
"eval_sequential_score": 0.7291666865348816,
"eval_sts-test-1024_pearson_cosine": 0.470983874633109,
"eval_sts-test-1024_spearman_cosine": 0.7146928621162676,
"eval_sts-test_pearson_cosine": 0.904138891044396,
"eval_sts-test_spearman_cosine": 0.9172742489825538,
"step": 98
},
{
"epoch": 0.10091743119266056,
"grad_norm": 21.82425880432129,
"learning_rate": 2.1945701357466062e-05,
"loss": 6.2322,
"step": 99
},
{
"epoch": 0.1019367991845056,
"grad_norm": 25.734025955200195,
"learning_rate": 2.2171945701357466e-05,
"loss": 4.8433,
"step": 100
},
{
"epoch": 0.10295616717635066,
"grad_norm": 28.17144775390625,
"learning_rate": 2.239819004524887e-05,
"loss": 4.8335,
"step": 101
},
{
"epoch": 0.10397553516819572,
"grad_norm": 27.875871658325195,
"learning_rate": 2.262443438914027e-05,
"loss": 5.138,
"step": 102
},
{
"epoch": 0.10499490316004077,
"grad_norm": 31.503034591674805,
"learning_rate": 2.2850678733031675e-05,
"loss": 4.8609,
"step": 103
},
{
"epoch": 0.10601427115188583,
"grad_norm": 26.674440383911133,
"learning_rate": 2.307692307692308e-05,
"loss": 4.6204,
"step": 104
},
{
"epoch": 0.10703363914373089,
"grad_norm": 25.039222717285156,
"learning_rate": 2.330316742081448e-05,
"loss": 4.3809,
"step": 105
},
{
"epoch": 0.10805300713557595,
"grad_norm": 26.333913803100586,
"learning_rate": 2.3529411764705884e-05,
"loss": 5.6703,
"step": 106
},
{
"epoch": 0.109072375127421,
"grad_norm": 23.51517105102539,
"learning_rate": 2.3755656108597284e-05,
"loss": 5.0237,
"step": 107
},
{
"epoch": 0.11009174311926606,
"grad_norm": 18.25855255126953,
"learning_rate": 2.3981900452488688e-05,
"loss": 4.002,
"step": 108
},
{
"epoch": 0.1111111111111111,
"grad_norm": 19.852886199951172,
"learning_rate": 2.4208144796380092e-05,
"loss": 5.2532,
"step": 109
},
{
"epoch": 0.11213047910295616,
"grad_norm": 17.45444107055664,
"learning_rate": 2.4434389140271493e-05,
"loss": 4.1033,
"step": 110
},
{
"epoch": 0.11314984709480122,
"grad_norm": 14.521421432495117,
"learning_rate": 2.4660633484162897e-05,
"loss": 4.0818,
"step": 111
},
{
"epoch": 0.11416921508664628,
"grad_norm": 12.525910377502441,
"learning_rate": 2.48868778280543e-05,
"loss": 3.458,
"step": 112
},
{
"epoch": 0.11518858307849134,
"grad_norm": 14.503193855285645,
"learning_rate": 2.51131221719457e-05,
"loss": 4.3372,
"step": 113
},
{
"epoch": 0.1162079510703364,
"grad_norm": 14.2279634475708,
"learning_rate": 2.5339366515837106e-05,
"loss": 4.9513,
"step": 114
},
{
"epoch": 0.11722731906218145,
"grad_norm": 15.238719940185547,
"learning_rate": 2.5565610859728506e-05,
"loss": 5.2602,
"step": 115
},
{
"epoch": 0.11824668705402651,
"grad_norm": 11.11528491973877,
"learning_rate": 2.5791855203619913e-05,
"loss": 3.1741,
"step": 116
},
{
"epoch": 0.11926605504587157,
"grad_norm": 12.077157974243164,
"learning_rate": 2.6018099547511314e-05,
"loss": 4.1914,
"step": 117
},
{
"epoch": 0.12028542303771661,
"grad_norm": 11.872669219970703,
"learning_rate": 2.6244343891402718e-05,
"loss": 2.8383,
"step": 118
},
{
"epoch": 0.12130479102956167,
"grad_norm": 9.008302688598633,
"learning_rate": 2.647058823529412e-05,
"loss": 3.4165,
"step": 119
},
{
"epoch": 0.12232415902140673,
"grad_norm": 10.702130317687988,
"learning_rate": 2.6696832579185523e-05,
"loss": 3.5085,
"step": 120
},
{
"epoch": 0.12334352701325178,
"grad_norm": 10.306276321411133,
"learning_rate": 2.6923076923076923e-05,
"loss": 2.3992,
"step": 121
},
{
"epoch": 0.12436289500509684,
"grad_norm": 9.035378456115723,
"learning_rate": 2.7149321266968324e-05,
"loss": 2.4849,
"step": 122
},
{
"epoch": 0.12538226299694188,
"grad_norm": 8.996299743652344,
"learning_rate": 2.737556561085973e-05,
"loss": 2.2839,
"step": 123
},
{
"epoch": 0.12640163098878696,
"grad_norm": 8.635661125183105,
"learning_rate": 2.7601809954751135e-05,
"loss": 2.567,
"step": 124
},
{
"epoch": 0.127420998980632,
"grad_norm": 10.015826225280762,
"learning_rate": 2.7828054298642536e-05,
"loss": 4.5119,
"step": 125
},
{
"epoch": 0.12844036697247707,
"grad_norm": 8.679932594299316,
"learning_rate": 2.805429864253394e-05,
"loss": 2.767,
"step": 126
},
{
"epoch": 0.12945973496432212,
"grad_norm": 10.05739688873291,
"learning_rate": 2.828054298642534e-05,
"loss": 4.0225,
"step": 127
},
{
"epoch": 0.1304791029561672,
"grad_norm": 9.361485481262207,
"learning_rate": 2.850678733031674e-05,
"loss": 1.8294,
"step": 128
},
{
"epoch": 0.13149847094801223,
"grad_norm": 9.865928649902344,
"learning_rate": 2.8733031674208145e-05,
"loss": 4.4174,
"step": 129
},
{
"epoch": 0.1325178389398573,
"grad_norm": 10.055468559265137,
"learning_rate": 2.8959276018099553e-05,
"loss": 2.0112,
"step": 130
},
{
"epoch": 0.13353720693170235,
"grad_norm": 9.528116226196289,
"learning_rate": 2.9185520361990953e-05,
"loss": 1.7772,
"step": 131
},
{
"epoch": 0.1345565749235474,
"grad_norm": 9.870166778564453,
"learning_rate": 2.9411764705882354e-05,
"loss": 3.1912,
"step": 132
},
{
"epoch": 0.13557594291539246,
"grad_norm": 10.1703462600708,
"learning_rate": 2.9638009049773758e-05,
"loss": 2.4527,
"step": 133
},
{
"epoch": 0.1365953109072375,
"grad_norm": 7.443604469299316,
"learning_rate": 2.986425339366516e-05,
"loss": 1.6424,
"step": 134
},
{
"epoch": 0.13761467889908258,
"grad_norm": 10.003544807434082,
"learning_rate": 3.0090497737556562e-05,
"loss": 2.6143,
"step": 135
},
{
"epoch": 0.13863404689092762,
"grad_norm": 9.352860450744629,
"learning_rate": 3.0316742081447963e-05,
"loss": 2.0498,
"step": 136
},
{
"epoch": 0.1396534148827727,
"grad_norm": 7.393095970153809,
"learning_rate": 3.0542986425339374e-05,
"loss": 1.962,
"step": 137
},
{
"epoch": 0.14067278287461774,
"grad_norm": 8.278059959411621,
"learning_rate": 3.0769230769230774e-05,
"loss": 1.789,
"step": 138
},
{
"epoch": 0.14169215086646278,
"grad_norm": 6.577699184417725,
"learning_rate": 3.0995475113122175e-05,
"loss": 1.459,
"step": 139
},
{
"epoch": 0.14271151885830785,
"grad_norm": 8.23404312133789,
"learning_rate": 3.1221719457013576e-05,
"loss": 1.2479,
"step": 140
},
{
"epoch": 0.1437308868501529,
"grad_norm": 9.47106647491455,
"learning_rate": 3.1447963800904976e-05,
"loss": 2.5413,
"step": 141
},
{
"epoch": 0.14475025484199797,
"grad_norm": 7.330000400543213,
"learning_rate": 3.167420814479638e-05,
"loss": 1.4077,
"step": 142
},
{
"epoch": 0.145769622833843,
"grad_norm": 9.64534854888916,
"learning_rate": 3.1900452488687784e-05,
"loss": 2.6988,
"step": 143
},
{
"epoch": 0.14678899082568808,
"grad_norm": 8.404465675354004,
"learning_rate": 3.212669683257919e-05,
"loss": 2.9772,
"step": 144
},
{
"epoch": 0.14780835881753313,
"grad_norm": 8.019698143005371,
"learning_rate": 3.235294117647059e-05,
"loss": 1.6265,
"step": 145
},
{
"epoch": 0.1488277268093782,
"grad_norm": 7.635079860687256,
"learning_rate": 3.257918552036199e-05,
"loss": 1.9404,
"step": 146
},
{
"epoch": 0.14984709480122324,
"grad_norm": 7.929011821746826,
"learning_rate": 3.2805429864253393e-05,
"loss": 1.4251,
"step": 147
},
{
"epoch": 0.15086646279306828,
"grad_norm": 7.869425296783447,
"learning_rate": 3.3031674208144794e-05,
"loss": 2.6657,
"step": 148
},
{
"epoch": 0.15188583078491336,
"grad_norm": 8.369176864624023,
"learning_rate": 3.32579185520362e-05,
"loss": 2.1576,
"step": 149
},
{
"epoch": 0.1529051987767584,
"grad_norm": 9.128487586975098,
"learning_rate": 3.34841628959276e-05,
"loss": 1.53,
"step": 150
},
{
"epoch": 0.15392456676860347,
"grad_norm": 7.673459529876709,
"learning_rate": 3.371040723981901e-05,
"loss": 1.2642,
"step": 151
},
{
"epoch": 0.15494393476044852,
"grad_norm": 9.104422569274902,
"learning_rate": 3.393665158371041e-05,
"loss": 2.4846,
"step": 152
},
{
"epoch": 0.1559633027522936,
"grad_norm": 8.658594131469727,
"learning_rate": 3.416289592760181e-05,
"loss": 1.4979,
"step": 153
},
{
"epoch": 0.15698267074413863,
"grad_norm": 9.34330940246582,
"learning_rate": 3.438914027149321e-05,
"loss": 1.8149,
"step": 154
},
{
"epoch": 0.1580020387359837,
"grad_norm": 9.401769638061523,
"learning_rate": 3.461538461538462e-05,
"loss": 1.4693,
"step": 155
},
{
"epoch": 0.15902140672782875,
"grad_norm": 10.389461517333984,
"learning_rate": 3.484162895927602e-05,
"loss": 2.0114,
"step": 156
},
{
"epoch": 0.1600407747196738,
"grad_norm": 9.321866989135742,
"learning_rate": 3.506787330316742e-05,
"loss": 1.5511,
"step": 157
},
{
"epoch": 0.16106014271151886,
"grad_norm": 10.052262306213379,
"learning_rate": 3.529411764705883e-05,
"loss": 1.621,
"step": 158
},
{
"epoch": 0.1620795107033639,
"grad_norm": 7.535787105560303,
"learning_rate": 3.552036199095023e-05,
"loss": 2.1122,
"step": 159
},
{
"epoch": 0.16309887869520898,
"grad_norm": 9.70533275604248,
"learning_rate": 3.574660633484163e-05,
"loss": 1.8148,
"step": 160
},
{
"epoch": 0.16411824668705402,
"grad_norm": 7.81204080581665,
"learning_rate": 3.5972850678733036e-05,
"loss": 1.9861,
"step": 161
},
{
"epoch": 0.1651376146788991,
"grad_norm": 7.583981513977051,
"learning_rate": 3.6199095022624436e-05,
"loss": 1.3943,
"step": 162
},
{
"epoch": 0.16615698267074414,
"grad_norm": 8.344895362854004,
"learning_rate": 3.642533936651584e-05,
"loss": 1.7317,
"step": 163
},
{
"epoch": 0.1671763506625892,
"grad_norm": 7.1097331047058105,
"learning_rate": 3.665158371040724e-05,
"loss": 1.6,
"step": 164
},
{
"epoch": 0.16819571865443425,
"grad_norm": 7.911113739013672,
"learning_rate": 3.6877828054298645e-05,
"loss": 1.2222,
"step": 165
},
{
"epoch": 0.1692150866462793,
"grad_norm": 9.282394409179688,
"learning_rate": 3.7104072398190046e-05,
"loss": 1.6152,
"step": 166
},
{
"epoch": 0.17023445463812437,
"grad_norm": 7.449146270751953,
"learning_rate": 3.733031674208145e-05,
"loss": 1.0374,
"step": 167
},
{
"epoch": 0.1712538226299694,
"grad_norm": 9.164731979370117,
"learning_rate": 3.7556561085972854e-05,
"loss": 1.2844,
"step": 168
},
{
"epoch": 0.17227319062181448,
"grad_norm": 6.987304210662842,
"learning_rate": 3.7782805429864254e-05,
"loss": 1.8805,
"step": 169
},
{
"epoch": 0.17329255861365953,
"grad_norm": 7.447988033294678,
"learning_rate": 3.8009049773755655e-05,
"loss": 1.0972,
"step": 170
},
{
"epoch": 0.1743119266055046,
"grad_norm": 7.7849321365356445,
"learning_rate": 3.8235294117647055e-05,
"loss": 1.7012,
"step": 171
},
{
"epoch": 0.17533129459734964,
"grad_norm": 7.341614246368408,
"learning_rate": 3.846153846153846e-05,
"loss": 1.4182,
"step": 172
},
{
"epoch": 0.1763506625891947,
"grad_norm": 8.514887809753418,
"learning_rate": 3.868778280542987e-05,
"loss": 2.6053,
"step": 173
},
{
"epoch": 0.17737003058103976,
"grad_norm": 7.384711265563965,
"learning_rate": 3.891402714932127e-05,
"loss": 1.4193,
"step": 174
},
{
"epoch": 0.1783893985728848,
"grad_norm": 8.553336143493652,
"learning_rate": 3.914027149321267e-05,
"loss": 2.251,
"step": 175
},
{
"epoch": 0.17940876656472987,
"grad_norm": 8.517749786376953,
"learning_rate": 3.936651583710407e-05,
"loss": 1.9057,
"step": 176
},
{
"epoch": 0.18042813455657492,
"grad_norm": 8.444558143615723,
"learning_rate": 3.959276018099547e-05,
"loss": 1.1228,
"step": 177
},
{
"epoch": 0.18144750254842,
"grad_norm": 12.253990173339844,
"learning_rate": 3.981900452488688e-05,
"loss": 4.0905,
"step": 178
},
{
"epoch": 0.18246687054026503,
"grad_norm": 5.70052433013916,
"learning_rate": 4.004524886877829e-05,
"loss": 0.9007,
"step": 179
},
{
"epoch": 0.1834862385321101,
"grad_norm": 9.525473594665527,
"learning_rate": 4.027149321266969e-05,
"loss": 2.0665,
"step": 180
},
{
"epoch": 0.18450560652395515,
"grad_norm": 6.146080493927002,
"learning_rate": 4.049773755656109e-05,
"loss": 1.0946,
"step": 181
},
{
"epoch": 0.18552497451580022,
"grad_norm": 7.736543655395508,
"learning_rate": 4.072398190045249e-05,
"loss": 1.7479,
"step": 182
},
{
"epoch": 0.18654434250764526,
"grad_norm": 8.404258728027344,
"learning_rate": 4.095022624434389e-05,
"loss": 2.0877,
"step": 183
},
{
"epoch": 0.1875637104994903,
"grad_norm": 5.705750942230225,
"learning_rate": 4.11764705882353e-05,
"loss": 0.9239,
"step": 184
},
{
"epoch": 0.18858307849133538,
"grad_norm": 7.753995895385742,
"learning_rate": 4.14027149321267e-05,
"loss": 1.7865,
"step": 185
},
{
"epoch": 0.18960244648318042,
"grad_norm": 9.15240478515625,
"learning_rate": 4.1628959276018105e-05,
"loss": 2.1053,
"step": 186
},
{
"epoch": 0.1906218144750255,
"grad_norm": 7.2251129150390625,
"learning_rate": 4.1855203619909506e-05,
"loss": 1.5273,
"step": 187
},
{
"epoch": 0.19164118246687054,
"grad_norm": 6.803040981292725,
"learning_rate": 4.2081447963800907e-05,
"loss": 1.8726,
"step": 188
},
{
"epoch": 0.1926605504587156,
"grad_norm": 5.646162509918213,
"learning_rate": 4.230769230769231e-05,
"loss": 1.4663,
"step": 189
},
{
"epoch": 0.19367991845056065,
"grad_norm": 7.599930286407471,
"learning_rate": 4.2533936651583714e-05,
"loss": 1.0136,
"step": 190
},
{
"epoch": 0.1946992864424057,
"grad_norm": 7.882979393005371,
"learning_rate": 4.2760180995475115e-05,
"loss": 1.121,
"step": 191
},
{
"epoch": 0.19571865443425077,
"grad_norm": 8.919268608093262,
"learning_rate": 4.298642533936652e-05,
"loss": 1.6074,
"step": 192
},
{
"epoch": 0.1967380224260958,
"grad_norm": 8.914848327636719,
"learning_rate": 4.321266968325792e-05,
"loss": 2.1956,
"step": 193
},
{
"epoch": 0.19775739041794088,
"grad_norm": 8.603778839111328,
"learning_rate": 4.3438914027149324e-05,
"loss": 1.5425,
"step": 194
},
{
"epoch": 0.19877675840978593,
"grad_norm": 8.500616073608398,
"learning_rate": 4.3665158371040724e-05,
"loss": 1.4552,
"step": 195
},
{
"epoch": 0.199796126401631,
"grad_norm": 7.815979957580566,
"learning_rate": 4.3891402714932125e-05,
"loss": 1.2635,
"step": 196
},
{
"epoch": 0.199796126401631,
"eval_Qnli-dev-1024_cosine_accuracy": 0.7395833333333334,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.8570283651351929,
"eval_Qnli-dev-1024_cosine_ap": 0.7434694144471753,
"eval_Qnli-dev-1024_cosine_f1": 0.7207207207207208,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.7906914353370667,
"eval_Qnli-dev-1024_cosine_mcc": 0.4081269865567241,
"eval_Qnli-dev-1024_cosine_precision": 0.6060606060606061,
"eval_Qnli-dev-1024_cosine_recall": 0.8888888888888888,
"eval_Qnli-dev_cosine_accuracy": 0.75,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.8287814855575562,
"eval_Qnli-dev_cosine_ap": 0.7646453733471359,
"eval_Qnli-dev_cosine_f1": 0.7378640776699029,
"eval_Qnli-dev_cosine_f1_threshold": 0.7745069265365601,
"eval_Qnli-dev_cosine_mcc": 0.46153029495329345,
"eval_Qnli-dev_cosine_precision": 0.6551724137931034,
"eval_Qnli-dev_cosine_recall": 0.8444444444444444,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.9166666865348816,
"eval_allNLI-triplets_cosine_accuracy": 0.9583333134651184,
"eval_global_dataset_loss": 0.5179261565208435,
"eval_global_dataset_runtime": 104.2216,
"eval_global_dataset_samples_per_second": 7.705,
"eval_global_dataset_steps_per_second": 0.163,
"eval_sequential_score": 0.9166666865348816,
"eval_sts-test-1024_pearson_cosine": 0.8476975008591285,
"eval_sts-test-1024_spearman_cosine": 0.8973182534732806,
"eval_sts-test_pearson_cosine": 0.9039400681490469,
"eval_sts-test_spearman_cosine": 0.9185431775441114,
"step": 196
},
{
"epoch": 0.20081549439347604,
"grad_norm": 9.34125804901123,
"learning_rate": 4.411764705882353e-05,
"loss": 1.7222,
"step": 197
},
{
"epoch": 0.2018348623853211,
"grad_norm": 10.679852485656738,
"learning_rate": 4.434389140271493e-05,
"loss": 2.377,
"step": 198
},
{
"epoch": 0.20285423037716616,
"grad_norm": 7.775190830230713,
"learning_rate": 4.457013574660634e-05,
"loss": 1.5317,
"step": 199
},
{
"epoch": 0.2038735983690112,
"grad_norm": 6.390950679779053,
"learning_rate": 4.479638009049774e-05,
"loss": 1.0494,
"step": 200
},
{
"epoch": 0.20489296636085627,
"grad_norm": 9.170794486999512,
"learning_rate": 4.502262443438914e-05,
"loss": 1.7392,
"step": 201
},
{
"epoch": 0.20591233435270132,
"grad_norm": 7.37787389755249,
"learning_rate": 4.524886877828054e-05,
"loss": 1.2924,
"step": 202
},
{
"epoch": 0.2069317023445464,
"grad_norm": 6.836249828338623,
"learning_rate": 4.547511312217195e-05,
"loss": 0.9413,
"step": 203
},
{
"epoch": 0.20795107033639143,
"grad_norm": 9.543895721435547,
"learning_rate": 4.570135746606335e-05,
"loss": 2.1448,
"step": 204
},
{
"epoch": 0.2089704383282365,
"grad_norm": 7.8430495262146,
"learning_rate": 4.592760180995475e-05,
"loss": 1.0357,
"step": 205
},
{
"epoch": 0.20998980632008155,
"grad_norm": 9.558221817016602,
"learning_rate": 4.615384615384616e-05,
"loss": 1.3534,
"step": 206
},
{
"epoch": 0.21100917431192662,
"grad_norm": 5.715826034545898,
"learning_rate": 4.638009049773756e-05,
"loss": 1.0564,
"step": 207
},
{
"epoch": 0.21202854230377166,
"grad_norm": 8.720932960510254,
"learning_rate": 4.660633484162896e-05,
"loss": 0.9259,
"step": 208
},
{
"epoch": 0.2130479102956167,
"grad_norm": 9.008890151977539,
"learning_rate": 4.683257918552037e-05,
"loss": 1.3813,
"step": 209
},
{
"epoch": 0.21406727828746178,
"grad_norm": 7.1262006759643555,
"learning_rate": 4.705882352941177e-05,
"loss": 0.8828,
"step": 210
},
{
"epoch": 0.21508664627930682,
"grad_norm": 12.986166000366211,
"learning_rate": 4.728506787330317e-05,
"loss": 2.9147,
"step": 211
},
{
"epoch": 0.2161060142711519,
"grad_norm": 6.804072380065918,
"learning_rate": 4.751131221719457e-05,
"loss": 0.6539,
"step": 212
},
{
"epoch": 0.21712538226299694,
"grad_norm": 9.138653755187988,
"learning_rate": 4.7737556561085976e-05,
"loss": 1.3092,
"step": 213
},
{
"epoch": 0.218144750254842,
"grad_norm": 7.303668975830078,
"learning_rate": 4.7963800904977377e-05,
"loss": 1.1562,
"step": 214
},
{
"epoch": 0.21916411824668705,
"grad_norm": 7.368769645690918,
"learning_rate": 4.8190045248868784e-05,
"loss": 0.9509,
"step": 215
},
{
"epoch": 0.22018348623853212,
"grad_norm": 5.067785263061523,
"learning_rate": 4.8416289592760185e-05,
"loss": 0.6664,
"step": 216
},
{
"epoch": 0.22120285423037717,
"grad_norm": 5.643320083618164,
"learning_rate": 4.8642533936651585e-05,
"loss": 1.2315,
"step": 217
},
{
"epoch": 0.2222222222222222,
"grad_norm": 6.596173286437988,
"learning_rate": 4.8868778280542986e-05,
"loss": 0.9855,
"step": 218
},
{
"epoch": 0.22324159021406728,
"grad_norm": 6.5434770584106445,
"learning_rate": 4.9095022624434386e-05,
"loss": 0.9258,
"step": 219
},
{
"epoch": 0.22426095820591233,
"grad_norm": 11.537922859191895,
"learning_rate": 4.9321266968325794e-05,
"loss": 1.6578,
"step": 220
},
{
"epoch": 0.2252803261977574,
"grad_norm": 7.364137172698975,
"learning_rate": 4.95475113122172e-05,
"loss": 0.9666,
"step": 221
},
{
"epoch": 0.22629969418960244,
"grad_norm": 8.102925300598145,
"learning_rate": 4.97737556561086e-05,
"loss": 0.9808,
"step": 222
},
{
"epoch": 0.2273190621814475,
"grad_norm": 10.013775825500488,
"learning_rate": 5e-05,
"loss": 2.4156,
"step": 223
},
{
"epoch": 0.22833843017329256,
"grad_norm": 7.974793434143066,
"learning_rate": 5.02262443438914e-05,
"loss": 1.1297,
"step": 224
},
{
"epoch": 0.22935779816513763,
"grad_norm": 7.710846424102783,
"learning_rate": 5.0452488687782804e-05,
"loss": 1.3063,
"step": 225
},
{
"epoch": 0.23037716615698267,
"grad_norm": 5.633566856384277,
"learning_rate": 5.067873303167421e-05,
"loss": 0.5567,
"step": 226
},
{
"epoch": 0.23139653414882771,
"grad_norm": 9.50987720489502,
"learning_rate": 5.090497737556561e-05,
"loss": 1.3551,
"step": 227
},
{
"epoch": 0.2324159021406728,
"grad_norm": 10.309268951416016,
"learning_rate": 5.113122171945701e-05,
"loss": 1.4079,
"step": 228
},
{
"epoch": 0.23343527013251783,
"grad_norm": 7.812633037567139,
"learning_rate": 5.135746606334841e-05,
"loss": 0.948,
"step": 229
},
{
"epoch": 0.2344546381243629,
"grad_norm": 8.013436317443848,
"learning_rate": 5.158371040723983e-05,
"loss": 0.9288,
"step": 230
},
{
"epoch": 0.23547400611620795,
"grad_norm": 7.550686359405518,
"learning_rate": 5.180995475113123e-05,
"loss": 1.0077,
"step": 231
},
{
"epoch": 0.23649337410805302,
"grad_norm": 7.249583721160889,
"learning_rate": 5.203619909502263e-05,
"loss": 1.0674,
"step": 232
},
{
"epoch": 0.23751274209989806,
"grad_norm": 7.766678810119629,
"learning_rate": 5.2262443438914036e-05,
"loss": 1.3354,
"step": 233
},
{
"epoch": 0.23853211009174313,
"grad_norm": 7.417704105377197,
"learning_rate": 5.2488687782805436e-05,
"loss": 1.0076,
"step": 234
},
{
"epoch": 0.23955147808358818,
"grad_norm": 8.414839744567871,
"learning_rate": 5.271493212669684e-05,
"loss": 0.8814,
"step": 235
},
{
"epoch": 0.24057084607543322,
"grad_norm": 9.537981986999512,
"learning_rate": 5.294117647058824e-05,
"loss": 1.7839,
"step": 236
},
{
"epoch": 0.2415902140672783,
"grad_norm": 6.3290886878967285,
"learning_rate": 5.316742081447964e-05,
"loss": 1.2614,
"step": 237
},
{
"epoch": 0.24260958205912334,
"grad_norm": 8.181835174560547,
"learning_rate": 5.3393665158371045e-05,
"loss": 0.8655,
"step": 238
},
{
"epoch": 0.2436289500509684,
"grad_norm": 8.01684856414795,
"learning_rate": 5.3619909502262446e-05,
"loss": 1.8418,
"step": 239
},
{
"epoch": 0.24464831804281345,
"grad_norm": 7.891118049621582,
"learning_rate": 5.384615384615385e-05,
"loss": 1.4465,
"step": 240
},
{
"epoch": 0.24566768603465852,
"grad_norm": 8.080881118774414,
"learning_rate": 5.407239819004525e-05,
"loss": 0.8695,
"step": 241
},
{
"epoch": 0.24668705402650357,
"grad_norm": 6.881638050079346,
"learning_rate": 5.429864253393665e-05,
"loss": 0.9695,
"step": 242
},
{
"epoch": 0.24770642201834864,
"grad_norm": 10.03598690032959,
"learning_rate": 5.4524886877828055e-05,
"loss": 1.3374,
"step": 243
},
{
"epoch": 0.24872579001019368,
"grad_norm": 7.844127178192139,
"learning_rate": 5.475113122171946e-05,
"loss": 0.6716,
"step": 244
},
{
"epoch": 0.24974515800203873,
"grad_norm": 8.654071807861328,
"learning_rate": 5.497737556561087e-05,
"loss": 1.032,
"step": 245
},
{
"epoch": 0.25076452599388377,
"grad_norm": 6.731460094451904,
"learning_rate": 5.520361990950227e-05,
"loss": 0.8033,
"step": 246
},
{
"epoch": 0.25178389398572887,
"grad_norm": 9.436687469482422,
"learning_rate": 5.542986425339367e-05,
"loss": 0.9257,
"step": 247
},
{
"epoch": 0.2528032619775739,
"grad_norm": 7.817379474639893,
"learning_rate": 5.565610859728507e-05,
"loss": 0.8311,
"step": 248
},
{
"epoch": 0.25382262996941896,
"grad_norm": 6.328183650970459,
"learning_rate": 5.588235294117647e-05,
"loss": 0.5609,
"step": 249
},
{
"epoch": 0.254841997961264,
"grad_norm": 8.576601028442383,
"learning_rate": 5.610859728506788e-05,
"loss": 1.5985,
"step": 250
},
{
"epoch": 0.2558613659531091,
"grad_norm": 9.092324256896973,
"learning_rate": 5.633484162895928e-05,
"loss": 1.0025,
"step": 251
},
{
"epoch": 0.25688073394495414,
"grad_norm": 11.906094551086426,
"learning_rate": 5.656108597285068e-05,
"loss": 2.0499,
"step": 252
},
{
"epoch": 0.2579001019367992,
"grad_norm": 7.968968868255615,
"learning_rate": 5.678733031674208e-05,
"loss": 1.3116,
"step": 253
},
{
"epoch": 0.25891946992864423,
"grad_norm": 5.355049133300781,
"learning_rate": 5.701357466063348e-05,
"loss": 0.5969,
"step": 254
},
{
"epoch": 0.2599388379204893,
"grad_norm": 8.151896476745605,
"learning_rate": 5.723981900452488e-05,
"loss": 1.1107,
"step": 255
},
{
"epoch": 0.2609582059123344,
"grad_norm": 9.651622772216797,
"learning_rate": 5.746606334841629e-05,
"loss": 1.8581,
"step": 256
},
{
"epoch": 0.2619775739041794,
"grad_norm": 7.1527533531188965,
"learning_rate": 5.769230769230769e-05,
"loss": 0.572,
"step": 257
},
{
"epoch": 0.26299694189602446,
"grad_norm": 6.141374111175537,
"learning_rate": 5.7918552036199105e-05,
"loss": 0.9267,
"step": 258
},
{
"epoch": 0.2640163098878695,
"grad_norm": 7.274891376495361,
"learning_rate": 5.8144796380090506e-05,
"loss": 0.6255,
"step": 259
},
{
"epoch": 0.2650356778797146,
"grad_norm": 5.81080436706543,
"learning_rate": 5.8371040723981906e-05,
"loss": 0.7615,
"step": 260
},
{
"epoch": 0.26605504587155965,
"grad_norm": 6.9981279373168945,
"learning_rate": 5.859728506787331e-05,
"loss": 0.6026,
"step": 261
},
{
"epoch": 0.2670744138634047,
"grad_norm": 5.718660831451416,
"learning_rate": 5.882352941176471e-05,
"loss": 0.7263,
"step": 262
},
{
"epoch": 0.26809378185524974,
"grad_norm": 5.391998767852783,
"learning_rate": 5.9049773755656115e-05,
"loss": 0.4643,
"step": 263
},
{
"epoch": 0.2691131498470948,
"grad_norm": 6.843007564544678,
"learning_rate": 5.9276018099547516e-05,
"loss": 0.5101,
"step": 264
},
{
"epoch": 0.2701325178389399,
"grad_norm": 5.087254047393799,
"learning_rate": 5.9502262443438916e-05,
"loss": 0.5562,
"step": 265
},
{
"epoch": 0.2711518858307849,
"grad_norm": 7.482615947723389,
"learning_rate": 5.972850678733032e-05,
"loss": 1.256,
"step": 266
},
{
"epoch": 0.27217125382262997,
"grad_norm": 6.911371231079102,
"learning_rate": 5.995475113122172e-05,
"loss": 0.6543,
"step": 267
},
{
"epoch": 0.273190621814475,
"grad_norm": 7.643139839172363,
"learning_rate": 6.0180995475113125e-05,
"loss": 0.6698,
"step": 268
},
{
"epoch": 0.2742099898063201,
"grad_norm": 9.08658504486084,
"learning_rate": 6.0407239819004525e-05,
"loss": 1.3843,
"step": 269
},
{
"epoch": 0.27522935779816515,
"grad_norm": 8.890534400939941,
"learning_rate": 6.0633484162895926e-05,
"loss": 1.1421,
"step": 270
},
{
"epoch": 0.2762487257900102,
"grad_norm": 9.855698585510254,
"learning_rate": 6.0859728506787327e-05,
"loss": 1.1558,
"step": 271
},
{
"epoch": 0.27726809378185524,
"grad_norm": 8.32972526550293,
"learning_rate": 6.108597285067875e-05,
"loss": 1.603,
"step": 272
},
{
"epoch": 0.2782874617737003,
"grad_norm": 8.393510818481445,
"learning_rate": 6.131221719457015e-05,
"loss": 0.7985,
"step": 273
},
{
"epoch": 0.2793068297655454,
"grad_norm": 7.992040157318115,
"learning_rate": 6.153846153846155e-05,
"loss": 1.3884,
"step": 274
},
{
"epoch": 0.2803261977573904,
"grad_norm": 8.646651268005371,
"learning_rate": 6.176470588235295e-05,
"loss": 1.0337,
"step": 275
},
{
"epoch": 0.28134556574923547,
"grad_norm": 7.3104329109191895,
"learning_rate": 6.199095022624435e-05,
"loss": 1.0917,
"step": 276
},
{
"epoch": 0.2823649337410805,
"grad_norm": 12.030378341674805,
"learning_rate": 6.221719457013575e-05,
"loss": 2.4149,
"step": 277
},
{
"epoch": 0.28338430173292556,
"grad_norm": 4.781021595001221,
"learning_rate": 6.244343891402715e-05,
"loss": 0.4301,
"step": 278
},
{
"epoch": 0.28440366972477066,
"grad_norm": 4.352090358734131,
"learning_rate": 6.266968325791855e-05,
"loss": 0.5084,
"step": 279
},
{
"epoch": 0.2854230377166157,
"grad_norm": 5.88839864730835,
"learning_rate": 6.289592760180995e-05,
"loss": 0.7202,
"step": 280
},
{
"epoch": 0.28644240570846075,
"grad_norm": 11.228419303894043,
"learning_rate": 6.312217194570135e-05,
"loss": 1.7983,
"step": 281
},
{
"epoch": 0.2874617737003058,
"grad_norm": 6.119421005249023,
"learning_rate": 6.334841628959275e-05,
"loss": 0.673,
"step": 282
},
{
"epoch": 0.2884811416921509,
"grad_norm": 6.405134677886963,
"learning_rate": 6.357466063348417e-05,
"loss": 0.6655,
"step": 283
},
{
"epoch": 0.28950050968399593,
"grad_norm": 6.735506534576416,
"learning_rate": 6.380090497737557e-05,
"loss": 0.9121,
"step": 284
},
{
"epoch": 0.290519877675841,
"grad_norm": 11.012415885925293,
"learning_rate": 6.402714932126697e-05,
"loss": 1.5978,
"step": 285
},
{
"epoch": 0.291539245667686,
"grad_norm": 13.007187843322754,
"learning_rate": 6.425339366515838e-05,
"loss": 1.8536,
"step": 286
},
{
"epoch": 0.29255861365953106,
"grad_norm": 12.273601531982422,
"learning_rate": 6.447963800904978e-05,
"loss": 1.6397,
"step": 287
},
{
"epoch": 0.29357798165137616,
"grad_norm": 9.6339750289917,
"learning_rate": 6.470588235294118e-05,
"loss": 0.8275,
"step": 288
},
{
"epoch": 0.2945973496432212,
"grad_norm": 6.717658996582031,
"learning_rate": 6.493212669683258e-05,
"loss": 0.6003,
"step": 289
},
{
"epoch": 0.29561671763506625,
"grad_norm": 8.443256378173828,
"learning_rate": 6.515837104072399e-05,
"loss": 0.9834,
"step": 290
},
{
"epoch": 0.2966360856269113,
"grad_norm": 8.823105812072754,
"learning_rate": 6.538461538461539e-05,
"loss": 0.603,
"step": 291
},
{
"epoch": 0.2976554536187564,
"grad_norm": 6.8099141120910645,
"learning_rate": 6.561085972850679e-05,
"loss": 0.6597,
"step": 292
},
{
"epoch": 0.29867482161060144,
"grad_norm": 6.705087661743164,
"learning_rate": 6.583710407239819e-05,
"loss": 0.739,
"step": 293
},
{
"epoch": 0.2996941896024465,
"grad_norm": 7.209024906158447,
"learning_rate": 6.606334841628959e-05,
"loss": 1.2564,
"step": 294
},
{
"epoch": 0.2996941896024465,
"eval_Qnli-dev-1024_cosine_accuracy": 0.7395833333333334,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.8565528392791748,
"eval_Qnli-dev-1024_cosine_ap": 0.7531377591671699,
"eval_Qnli-dev-1024_cosine_f1": 0.7254901960784313,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.8200148344039917,
"eval_Qnli-dev-1024_cosine_mcc": 0.43697448216965834,
"eval_Qnli-dev-1024_cosine_precision": 0.6491228070175439,
"eval_Qnli-dev-1024_cosine_recall": 0.8222222222222222,
"eval_Qnli-dev_cosine_accuracy": 0.7395833333333334,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.7719540596008301,
"eval_Qnli-dev_cosine_ap": 0.7588639733888536,
"eval_Qnli-dev_cosine_f1": 0.7454545454545455,
"eval_Qnli-dev_cosine_f1_threshold": 0.7090869545936584,
"eval_Qnli-dev_cosine_mcc": 0.47013467657639685,
"eval_Qnli-dev_cosine_precision": 0.6307692307692307,
"eval_Qnli-dev_cosine_recall": 0.9111111111111111,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.8854166865348816,
"eval_allNLI-triplets_cosine_accuracy": 0.9583333134651184,
"eval_global_dataset_loss": 0.4855804145336151,
"eval_global_dataset_runtime": 104.3189,
"eval_global_dataset_samples_per_second": 7.698,
"eval_global_dataset_steps_per_second": 0.163,
"eval_sequential_score": 0.8854166865348816,
"eval_sts-test-1024_pearson_cosine": 0.8681461030339531,
"eval_sts-test-1024_spearman_cosine": 0.9053809631987397,
"eval_sts-test_pearson_cosine": 0.9051731986667259,
"eval_sts-test_spearman_cosine": 0.920630429781229,
"step": 294
},
{
"epoch": 0.3007135575942915,
"grad_norm": 7.649487495422363,
"learning_rate": 6.6289592760181e-05,
"loss": 1.4442,
"step": 295
},
{
"epoch": 0.30173292558613657,
"grad_norm": 7.740142822265625,
"learning_rate": 6.65158371040724e-05,
"loss": 1.4064,
"step": 296
},
{
"epoch": 0.30275229357798167,
"grad_norm": 8.009271621704102,
"learning_rate": 6.67420814479638e-05,
"loss": 0.8456,
"step": 297
},
{
"epoch": 0.3037716615698267,
"grad_norm": 5.718809604644775,
"learning_rate": 6.69683257918552e-05,
"loss": 0.7772,
"step": 298
},
{
"epoch": 0.30479102956167176,
"grad_norm": 7.34658145904541,
"learning_rate": 6.719457013574662e-05,
"loss": 0.7619,
"step": 299
},
{
"epoch": 0.3058103975535168,
"grad_norm": 8.556058883666992,
"learning_rate": 6.742081447963802e-05,
"loss": 1.002,
"step": 300
},
{
"epoch": 0.3068297655453619,
"grad_norm": 8.995348930358887,
"learning_rate": 6.764705882352942e-05,
"loss": 1.4774,
"step": 301
},
{
"epoch": 0.30784913353720694,
"grad_norm": 8.271109580993652,
"learning_rate": 6.787330316742082e-05,
"loss": 0.6814,
"step": 302
},
{
"epoch": 0.308868501529052,
"grad_norm": 8.896450996398926,
"learning_rate": 6.809954751131222e-05,
"loss": 1.2286,
"step": 303
},
{
"epoch": 0.30988786952089703,
"grad_norm": 9.076520919799805,
"learning_rate": 6.832579185520362e-05,
"loss": 1.8546,
"step": 304
},
{
"epoch": 0.3109072375127421,
"grad_norm": 6.780123233795166,
"learning_rate": 6.855203619909502e-05,
"loss": 0.7547,
"step": 305
},
{
"epoch": 0.3119266055045872,
"grad_norm": 7.728740692138672,
"learning_rate": 6.877828054298642e-05,
"loss": 1.3638,
"step": 306
},
{
"epoch": 0.3129459734964322,
"grad_norm": 8.941544532775879,
"learning_rate": 6.900452488687784e-05,
"loss": 1.2604,
"step": 307
},
{
"epoch": 0.31396534148827726,
"grad_norm": 6.70719575881958,
"learning_rate": 6.923076923076924e-05,
"loss": 0.5111,
"step": 308
},
{
"epoch": 0.3149847094801223,
"grad_norm": 7.599255561828613,
"learning_rate": 6.945701357466064e-05,
"loss": 0.7153,
"step": 309
},
{
"epoch": 0.3160040774719674,
"grad_norm": 7.323727607727051,
"learning_rate": 6.968325791855204e-05,
"loss": 0.8367,
"step": 310
},
{
"epoch": 0.31702344546381245,
"grad_norm": 7.314160346984863,
"learning_rate": 6.990950226244344e-05,
"loss": 0.6643,
"step": 311
},
{
"epoch": 0.3180428134556575,
"grad_norm": 8.265671730041504,
"learning_rate": 7.013574660633484e-05,
"loss": 1.0404,
"step": 312
},
{
"epoch": 0.31906218144750254,
"grad_norm": 10.820046424865723,
"learning_rate": 7.036199095022625e-05,
"loss": 1.122,
"step": 313
},
{
"epoch": 0.3200815494393476,
"grad_norm": 7.194378852844238,
"learning_rate": 7.058823529411765e-05,
"loss": 0.7091,
"step": 314
},
{
"epoch": 0.3211009174311927,
"grad_norm": 7.764474868774414,
"learning_rate": 7.081447963800906e-05,
"loss": 1.07,
"step": 315
},
{
"epoch": 0.3221202854230377,
"grad_norm": 7.757960796356201,
"learning_rate": 7.104072398190046e-05,
"loss": 0.7246,
"step": 316
},
{
"epoch": 0.32313965341488277,
"grad_norm": 9.631681442260742,
"learning_rate": 7.126696832579186e-05,
"loss": 0.821,
"step": 317
},
{
"epoch": 0.3241590214067278,
"grad_norm": 6.478396892547607,
"learning_rate": 7.149321266968326e-05,
"loss": 0.9556,
"step": 318
},
{
"epoch": 0.3251783893985729,
"grad_norm": 8.858171463012695,
"learning_rate": 7.171945701357467e-05,
"loss": 1.2634,
"step": 319
},
{
"epoch": 0.32619775739041795,
"grad_norm": 8.02340030670166,
"learning_rate": 7.194570135746607e-05,
"loss": 0.8621,
"step": 320
},
{
"epoch": 0.327217125382263,
"grad_norm": 8.634239196777344,
"learning_rate": 7.217194570135747e-05,
"loss": 1.4215,
"step": 321
},
{
"epoch": 0.32823649337410804,
"grad_norm": 8.96740436553955,
"learning_rate": 7.239819004524887e-05,
"loss": 0.8894,
"step": 322
},
{
"epoch": 0.3292558613659531,
"grad_norm": 4.730165958404541,
"learning_rate": 7.262443438914027e-05,
"loss": 0.4134,
"step": 323
},
{
"epoch": 0.3302752293577982,
"grad_norm": 6.1243181228637695,
"learning_rate": 7.285067873303167e-05,
"loss": 0.4147,
"step": 324
},
{
"epoch": 0.3312945973496432,
"grad_norm": 7.8853607177734375,
"learning_rate": 7.307692307692307e-05,
"loss": 0.5721,
"step": 325
},
{
"epoch": 0.33231396534148827,
"grad_norm": 9.193514823913574,
"learning_rate": 7.330316742081448e-05,
"loss": 0.8541,
"step": 326
},
{
"epoch": 0.3333333333333333,
"grad_norm": 12.314509391784668,
"learning_rate": 7.352941176470589e-05,
"loss": 2.2959,
"step": 327
},
{
"epoch": 0.3343527013251784,
"grad_norm": 4.384552955627441,
"learning_rate": 7.375565610859729e-05,
"loss": 0.4452,
"step": 328
},
{
"epoch": 0.33537206931702346,
"grad_norm": 5.677075386047363,
"learning_rate": 7.398190045248869e-05,
"loss": 0.5008,
"step": 329
},
{
"epoch": 0.3363914373088685,
"grad_norm": 6.752626419067383,
"learning_rate": 7.420814479638009e-05,
"loss": 0.4106,
"step": 330
},
{
"epoch": 0.33741080530071355,
"grad_norm": 10.971478462219238,
"learning_rate": 7.44343891402715e-05,
"loss": 0.9237,
"step": 331
},
{
"epoch": 0.3384301732925586,
"grad_norm": 7.574080944061279,
"learning_rate": 7.46606334841629e-05,
"loss": 0.6275,
"step": 332
},
{
"epoch": 0.3394495412844037,
"grad_norm": 9.538507461547852,
"learning_rate": 7.48868778280543e-05,
"loss": 1.5184,
"step": 333
},
{
"epoch": 0.34046890927624873,
"grad_norm": 9.139626502990723,
"learning_rate": 7.511312217194571e-05,
"loss": 1.7865,
"step": 334
},
{
"epoch": 0.3414882772680938,
"grad_norm": 9.275596618652344,
"learning_rate": 7.533936651583711e-05,
"loss": 1.5947,
"step": 335
},
{
"epoch": 0.3425076452599388,
"grad_norm": 9.375283241271973,
"learning_rate": 7.556561085972851e-05,
"loss": 1.0249,
"step": 336
},
{
"epoch": 0.3435270132517839,
"grad_norm": 7.951083660125732,
"learning_rate": 7.579185520361991e-05,
"loss": 1.0227,
"step": 337
},
{
"epoch": 0.34454638124362896,
"grad_norm": 9.579297065734863,
"learning_rate": 7.601809954751131e-05,
"loss": 1.28,
"step": 338
},
{
"epoch": 0.345565749235474,
"grad_norm": 5.935997486114502,
"learning_rate": 7.624434389140271e-05,
"loss": 0.798,
"step": 339
},
{
"epoch": 0.34658511722731905,
"grad_norm": 7.16936731338501,
"learning_rate": 7.647058823529411e-05,
"loss": 1.0408,
"step": 340
},
{
"epoch": 0.3476044852191641,
"grad_norm": 9.448662757873535,
"learning_rate": 7.669683257918553e-05,
"loss": 0.9732,
"step": 341
},
{
"epoch": 0.3486238532110092,
"grad_norm": 7.747692584991455,
"learning_rate": 7.692307692307693e-05,
"loss": 0.7588,
"step": 342
},
{
"epoch": 0.34964322120285424,
"grad_norm": 10.198869705200195,
"learning_rate": 7.714932126696833e-05,
"loss": 0.9615,
"step": 343
},
{
"epoch": 0.3506625891946993,
"grad_norm": 8.069470405578613,
"learning_rate": 7.737556561085974e-05,
"loss": 0.9895,
"step": 344
},
{
"epoch": 0.3516819571865443,
"grad_norm": 10.662049293518066,
"learning_rate": 7.760180995475114e-05,
"loss": 1.923,
"step": 345
},
{
"epoch": 0.3527013251783894,
"grad_norm": 6.53238582611084,
"learning_rate": 7.782805429864254e-05,
"loss": 0.615,
"step": 346
},
{
"epoch": 0.35372069317023447,
"grad_norm": 11.10132122039795,
"learning_rate": 7.805429864253394e-05,
"loss": 1.4572,
"step": 347
},
{
"epoch": 0.3547400611620795,
"grad_norm": 7.372711181640625,
"learning_rate": 7.828054298642534e-05,
"loss": 1.0083,
"step": 348
},
{
"epoch": 0.35575942915392456,
"grad_norm": 7.358077526092529,
"learning_rate": 7.850678733031674e-05,
"loss": 0.922,
"step": 349
},
{
"epoch": 0.3567787971457696,
"grad_norm": 8.45017147064209,
"learning_rate": 7.873303167420814e-05,
"loss": 1.3767,
"step": 350
},
{
"epoch": 0.3577981651376147,
"grad_norm": 4.858506679534912,
"learning_rate": 7.895927601809954e-05,
"loss": 0.6378,
"step": 351
},
{
"epoch": 0.35881753312945974,
"grad_norm": 5.764273643493652,
"learning_rate": 7.918552036199095e-05,
"loss": 0.4063,
"step": 352
},
{
"epoch": 0.3598369011213048,
"grad_norm": 8.656686782836914,
"learning_rate": 7.941176470588235e-05,
"loss": 1.0834,
"step": 353
},
{
"epoch": 0.36085626911314983,
"grad_norm": 5.824944496154785,
"learning_rate": 7.963800904977376e-05,
"loss": 0.807,
"step": 354
},
{
"epoch": 0.36187563710499493,
"grad_norm": 6.73368501663208,
"learning_rate": 7.986425339366516e-05,
"loss": 1.0293,
"step": 355
},
{
"epoch": 0.36289500509684,
"grad_norm": 5.860096454620361,
"learning_rate": 8.009049773755657e-05,
"loss": 0.4371,
"step": 356
},
{
"epoch": 0.363914373088685,
"grad_norm": 5.65436315536499,
"learning_rate": 8.031674208144798e-05,
"loss": 0.4334,
"step": 357
},
{
"epoch": 0.36493374108053006,
"grad_norm": 7.566843509674072,
"learning_rate": 8.054298642533938e-05,
"loss": 0.949,
"step": 358
},
{
"epoch": 0.3659531090723751,
"grad_norm": 6.286118984222412,
"learning_rate": 8.076923076923078e-05,
"loss": 0.5788,
"step": 359
},
{
"epoch": 0.3669724770642202,
"grad_norm": 10.212640762329102,
"learning_rate": 8.099547511312218e-05,
"loss": 0.8535,
"step": 360
},
{
"epoch": 0.36799184505606525,
"grad_norm": 9.267760276794434,
"learning_rate": 8.122171945701358e-05,
"loss": 1.2529,
"step": 361
},
{
"epoch": 0.3690112130479103,
"grad_norm": 8.794651985168457,
"learning_rate": 8.144796380090498e-05,
"loss": 0.8974,
"step": 362
},
{
"epoch": 0.37003058103975534,
"grad_norm": 13.3441162109375,
"learning_rate": 8.167420814479638e-05,
"loss": 1.9105,
"step": 363
},
{
"epoch": 0.37104994903160043,
"grad_norm": 9.258030891418457,
"learning_rate": 8.190045248868778e-05,
"loss": 0.7717,
"step": 364
},
{
"epoch": 0.3720693170234455,
"grad_norm": 6.051854610443115,
"learning_rate": 8.212669683257918e-05,
"loss": 1.1052,
"step": 365
},
{
"epoch": 0.3730886850152905,
"grad_norm": 9.53382682800293,
"learning_rate": 8.23529411764706e-05,
"loss": 0.7298,
"step": 366
},
{
"epoch": 0.37410805300713557,
"grad_norm": 6.723752498626709,
"learning_rate": 8.2579185520362e-05,
"loss": 0.7039,
"step": 367
},
{
"epoch": 0.3751274209989806,
"grad_norm": 6.844725608825684,
"learning_rate": 8.28054298642534e-05,
"loss": 0.8536,
"step": 368
},
{
"epoch": 0.3761467889908257,
"grad_norm": 5.233691692352295,
"learning_rate": 8.303167420814481e-05,
"loss": 0.4774,
"step": 369
},
{
"epoch": 0.37716615698267075,
"grad_norm": 4.231795787811279,
"learning_rate": 8.325791855203621e-05,
"loss": 0.3297,
"step": 370
},
{
"epoch": 0.3781855249745158,
"grad_norm": 11.760458946228027,
"learning_rate": 8.348416289592761e-05,
"loss": 1.693,
"step": 371
},
{
"epoch": 0.37920489296636084,
"grad_norm": 10.05996036529541,
"learning_rate": 8.371040723981901e-05,
"loss": 0.853,
"step": 372
},
{
"epoch": 0.38022426095820594,
"grad_norm": 8.649154663085938,
"learning_rate": 8.393665158371041e-05,
"loss": 0.7242,
"step": 373
},
{
"epoch": 0.381243628950051,
"grad_norm": 6.6194748878479,
"learning_rate": 8.416289592760181e-05,
"loss": 0.5019,
"step": 374
},
{
"epoch": 0.382262996941896,
"grad_norm": 8.058365821838379,
"learning_rate": 8.438914027149321e-05,
"loss": 0.6206,
"step": 375
},
{
"epoch": 0.38328236493374107,
"grad_norm": 6.66504430770874,
"learning_rate": 8.461538461538461e-05,
"loss": 0.4872,
"step": 376
},
{
"epoch": 0.3843017329255861,
"grad_norm": 5.8679518699646,
"learning_rate": 8.484162895927601e-05,
"loss": 0.4515,
"step": 377
},
{
"epoch": 0.3853211009174312,
"grad_norm": 9.830297470092773,
"learning_rate": 8.506787330316743e-05,
"loss": 1.4657,
"step": 378
},
{
"epoch": 0.38634046890927626,
"grad_norm": 8.260361671447754,
"learning_rate": 8.529411764705883e-05,
"loss": 0.8411,
"step": 379
},
{
"epoch": 0.3873598369011213,
"grad_norm": 8.48035717010498,
"learning_rate": 8.552036199095023e-05,
"loss": 0.7654,
"step": 380
},
{
"epoch": 0.38837920489296635,
"grad_norm": 7.481667518615723,
"learning_rate": 8.574660633484163e-05,
"loss": 0.5413,
"step": 381
},
{
"epoch": 0.3893985728848114,
"grad_norm": 5.923032760620117,
"learning_rate": 8.597285067873304e-05,
"loss": 0.4594,
"step": 382
},
{
"epoch": 0.3904179408766565,
"grad_norm": 11.383003234863281,
"learning_rate": 8.619909502262445e-05,
"loss": 1.2656,
"step": 383
},
{
"epoch": 0.39143730886850153,
"grad_norm": 9.154252052307129,
"learning_rate": 8.642533936651585e-05,
"loss": 0.6881,
"step": 384
},
{
"epoch": 0.3924566768603466,
"grad_norm": 8.656584739685059,
"learning_rate": 8.665158371040725e-05,
"loss": 0.8169,
"step": 385
},
{
"epoch": 0.3934760448521916,
"grad_norm": 9.6775541305542,
"learning_rate": 8.687782805429865e-05,
"loss": 0.937,
"step": 386
},
{
"epoch": 0.3944954128440367,
"grad_norm": 12.836816787719727,
"learning_rate": 8.710407239819005e-05,
"loss": 2.1343,
"step": 387
},
{
"epoch": 0.39551478083588176,
"grad_norm": 6.1532487869262695,
"learning_rate": 8.733031674208145e-05,
"loss": 0.3644,
"step": 388
},
{
"epoch": 0.3965341488277268,
"grad_norm": 6.3952555656433105,
"learning_rate": 8.755656108597285e-05,
"loss": 0.4406,
"step": 389
},
{
"epoch": 0.39755351681957185,
"grad_norm": 7.005934238433838,
"learning_rate": 8.778280542986425e-05,
"loss": 0.5444,
"step": 390
},
{
"epoch": 0.3985728848114169,
"grad_norm": 8.97732925415039,
"learning_rate": 8.800904977375566e-05,
"loss": 1.3891,
"step": 391
},
{
"epoch": 0.399592252803262,
"grad_norm": 6.8778181076049805,
"learning_rate": 8.823529411764706e-05,
"loss": 0.6287,
"step": 392
},
{
"epoch": 0.399592252803262,
"eval_Qnli-dev-1024_cosine_accuracy": 0.7395833333333334,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.8465664982795715,
"eval_Qnli-dev-1024_cosine_ap": 0.7683064400770494,
"eval_Qnli-dev-1024_cosine_f1": 0.6976744186046511,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.834477424621582,
"eval_Qnli-dev-1024_cosine_mcc": 0.45496263625850347,
"eval_Qnli-dev-1024_cosine_precision": 0.7317073170731707,
"eval_Qnli-dev-1024_cosine_recall": 0.6666666666666666,
"eval_Qnli-dev_cosine_accuracy": 0.75,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.74493807554245,
"eval_Qnli-dev_cosine_ap": 0.7575725381948821,
"eval_Qnli-dev_cosine_f1": 0.7476635514018692,
"eval_Qnli-dev_cosine_f1_threshold": 0.7015562057495117,
"eval_Qnli-dev_cosine_mcc": 0.47737827504723207,
"eval_Qnli-dev_cosine_precision": 0.6451612903225806,
"eval_Qnli-dev_cosine_recall": 0.8888888888888888,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.9270833134651184,
"eval_allNLI-triplets_cosine_accuracy": 0.9479166865348816,
"eval_global_dataset_loss": 0.3703947365283966,
"eval_global_dataset_runtime": 104.3143,
"eval_global_dataset_samples_per_second": 7.698,
"eval_global_dataset_steps_per_second": 0.163,
"eval_sequential_score": 0.9270833134651184,
"eval_sts-test-1024_pearson_cosine": 0.8782079507952609,
"eval_sts-test-1024_spearman_cosine": 0.9080003485202497,
"eval_sts-test_pearson_cosine": 0.9052799671643099,
"eval_sts-test_spearman_cosine": 0.9200953636370672,
"step": 392
},
{
"epoch": 0.40061162079510704,
"grad_norm": 7.236085414886475,
"learning_rate": 8.846153846153847e-05,
"loss": 1.066,
"step": 393
},
{
"epoch": 0.4016309887869521,
"grad_norm": 7.638827323913574,
"learning_rate": 8.868778280542987e-05,
"loss": 1.0406,
"step": 394
},
{
"epoch": 0.4026503567787971,
"grad_norm": 6.2278876304626465,
"learning_rate": 8.891402714932127e-05,
"loss": 0.819,
"step": 395
},
{
"epoch": 0.4036697247706422,
"grad_norm": 7.04884147644043,
"learning_rate": 8.914027149321268e-05,
"loss": 0.5826,
"step": 396
},
{
"epoch": 0.40468909276248727,
"grad_norm": 8.24869441986084,
"learning_rate": 8.936651583710408e-05,
"loss": 0.6355,
"step": 397
},
{
"epoch": 0.4057084607543323,
"grad_norm": 9.9276704788208,
"learning_rate": 8.959276018099548e-05,
"loss": 0.7566,
"step": 398
},
{
"epoch": 0.40672782874617736,
"grad_norm": 8.717905044555664,
"learning_rate": 8.981900452488688e-05,
"loss": 0.8174,
"step": 399
},
{
"epoch": 0.4077471967380224,
"grad_norm": 8.515538215637207,
"learning_rate": 9.004524886877828e-05,
"loss": 0.8905,
"step": 400
},
{
"epoch": 0.4087665647298675,
"grad_norm": 6.506967067718506,
"learning_rate": 9.027149321266968e-05,
"loss": 0.6646,
"step": 401
},
{
"epoch": 0.40978593272171254,
"grad_norm": 9.33711051940918,
"learning_rate": 9.049773755656108e-05,
"loss": 0.9056,
"step": 402
},
{
"epoch": 0.4108053007135576,
"grad_norm": 5.124199867248535,
"learning_rate": 9.07239819004525e-05,
"loss": 0.3689,
"step": 403
},
{
"epoch": 0.41182466870540263,
"grad_norm": 5.597712516784668,
"learning_rate": 9.09502262443439e-05,
"loss": 0.3709,
"step": 404
},
{
"epoch": 0.41284403669724773,
"grad_norm": 7.897356033325195,
"learning_rate": 9.11764705882353e-05,
"loss": 0.6708,
"step": 405
},
{
"epoch": 0.4138634046890928,
"grad_norm": 8.37096881866455,
"learning_rate": 9.14027149321267e-05,
"loss": 1.0531,
"step": 406
},
{
"epoch": 0.4148827726809378,
"grad_norm": 7.530358791351318,
"learning_rate": 9.16289592760181e-05,
"loss": 1.1355,
"step": 407
},
{
"epoch": 0.41590214067278286,
"grad_norm": 10.304217338562012,
"learning_rate": 9.18552036199095e-05,
"loss": 0.8042,
"step": 408
},
{
"epoch": 0.4169215086646279,
"grad_norm": 7.292766094207764,
"learning_rate": 9.20814479638009e-05,
"loss": 0.3915,
"step": 409
},
{
"epoch": 0.417940876656473,
"grad_norm": 10.453197479248047,
"learning_rate": 9.230769230769232e-05,
"loss": 1.9388,
"step": 410
},
{
"epoch": 0.41896024464831805,
"grad_norm": 2.7471694946289062,
"learning_rate": 9.253393665158372e-05,
"loss": 0.3044,
"step": 411
},
{
"epoch": 0.4199796126401631,
"grad_norm": 5.923367023468018,
"learning_rate": 9.276018099547512e-05,
"loss": 0.6153,
"step": 412
},
{
"epoch": 0.42099898063200814,
"grad_norm": 8.176202774047852,
"learning_rate": 9.298642533936652e-05,
"loss": 0.9407,
"step": 413
},
{
"epoch": 0.42201834862385323,
"grad_norm": 8.41361141204834,
"learning_rate": 9.321266968325792e-05,
"loss": 0.6876,
"step": 414
},
{
"epoch": 0.4230377166156983,
"grad_norm": 9.516852378845215,
"learning_rate": 9.343891402714933e-05,
"loss": 0.9694,
"step": 415
},
{
"epoch": 0.4240570846075433,
"grad_norm": 7.201638698577881,
"learning_rate": 9.366515837104073e-05,
"loss": 0.7868,
"step": 416
},
{
"epoch": 0.42507645259938837,
"grad_norm": 9.961840629577637,
"learning_rate": 9.389140271493213e-05,
"loss": 0.7735,
"step": 417
},
{
"epoch": 0.4260958205912334,
"grad_norm": 10.842241287231445,
"learning_rate": 9.411764705882353e-05,
"loss": 1.1682,
"step": 418
},
{
"epoch": 0.4271151885830785,
"grad_norm": 5.817572116851807,
"learning_rate": 9.434389140271494e-05,
"loss": 0.3465,
"step": 419
},
{
"epoch": 0.42813455657492355,
"grad_norm": 6.870133399963379,
"learning_rate": 9.457013574660634e-05,
"loss": 0.5699,
"step": 420
},
{
"epoch": 0.4291539245667686,
"grad_norm": 6.472342014312744,
"learning_rate": 9.479638009049774e-05,
"loss": 0.6128,
"step": 421
},
{
"epoch": 0.43017329255861364,
"grad_norm": 6.5723795890808105,
"learning_rate": 9.502262443438914e-05,
"loss": 0.8886,
"step": 422
},
{
"epoch": 0.43119266055045874,
"grad_norm": 6.1384429931640625,
"learning_rate": 9.524886877828054e-05,
"loss": 0.5124,
"step": 423
},
{
"epoch": 0.4322120285423038,
"grad_norm": 6.241471290588379,
"learning_rate": 9.547511312217195e-05,
"loss": 0.4409,
"step": 424
},
{
"epoch": 0.4332313965341488,
"grad_norm": 9.087861061096191,
"learning_rate": 9.570135746606335e-05,
"loss": 0.6368,
"step": 425
},
{
"epoch": 0.43425076452599387,
"grad_norm": 9.653539657592773,
"learning_rate": 9.592760180995475e-05,
"loss": 0.9874,
"step": 426
},
{
"epoch": 0.4352701325178389,
"grad_norm": 13.366517066955566,
"learning_rate": 9.615384615384617e-05,
"loss": 1.6544,
"step": 427
},
{
"epoch": 0.436289500509684,
"grad_norm": 6.302597522735596,
"learning_rate": 9.638009049773757e-05,
"loss": 0.4561,
"step": 428
},
{
"epoch": 0.43730886850152906,
"grad_norm": 7.133030891418457,
"learning_rate": 9.660633484162897e-05,
"loss": 0.5443,
"step": 429
},
{
"epoch": 0.4383282364933741,
"grad_norm": 6.341556072235107,
"learning_rate": 9.683257918552037e-05,
"loss": 0.5183,
"step": 430
},
{
"epoch": 0.43934760448521915,
"grad_norm": 10.657116889953613,
"learning_rate": 9.705882352941177e-05,
"loss": 1.1585,
"step": 431
},
{
"epoch": 0.44036697247706424,
"grad_norm": 7.707142353057861,
"learning_rate": 9.728506787330317e-05,
"loss": 1.4285,
"step": 432
},
{
"epoch": 0.4413863404689093,
"grad_norm": 8.27905559539795,
"learning_rate": 9.751131221719457e-05,
"loss": 1.0638,
"step": 433
},
{
"epoch": 0.44240570846075433,
"grad_norm": 5.601058483123779,
"learning_rate": 9.773755656108597e-05,
"loss": 0.553,
"step": 434
},
{
"epoch": 0.4434250764525994,
"grad_norm": 9.084299087524414,
"learning_rate": 9.796380090497737e-05,
"loss": 1.0009,
"step": 435
},
{
"epoch": 0.4444444444444444,
"grad_norm": 5.231532573699951,
"learning_rate": 9.819004524886877e-05,
"loss": 0.5211,
"step": 436
},
{
"epoch": 0.4454638124362895,
"grad_norm": 7.0478715896606445,
"learning_rate": 9.841628959276019e-05,
"loss": 0.6483,
"step": 437
},
{
"epoch": 0.44648318042813456,
"grad_norm": 8.44166088104248,
"learning_rate": 9.864253393665159e-05,
"loss": 1.2634,
"step": 438
},
{
"epoch": 0.4475025484199796,
"grad_norm": 7.2984771728515625,
"learning_rate": 9.8868778280543e-05,
"loss": 0.5242,
"step": 439
},
{
"epoch": 0.44852191641182465,
"grad_norm": 9.091867446899414,
"learning_rate": 9.90950226244344e-05,
"loss": 1.3739,
"step": 440
},
{
"epoch": 0.44954128440366975,
"grad_norm": 8.1068115234375,
"learning_rate": 9.93212669683258e-05,
"loss": 1.0153,
"step": 441
},
{
"epoch": 0.4505606523955148,
"grad_norm": 7.902680397033691,
"learning_rate": 9.95475113122172e-05,
"loss": 0.7174,
"step": 442
},
{
"epoch": 0.45158002038735984,
"grad_norm": 8.784537315368652,
"learning_rate": 9.97737556561086e-05,
"loss": 0.8631,
"step": 443
},
{
"epoch": 0.4525993883792049,
"grad_norm": 8.205148696899414,
"learning_rate": 0.0001,
"loss": 1.0721,
"step": 444
},
{
"epoch": 0.4536187563710499,
"grad_norm": 4.789169788360596,
"learning_rate": 9.999964497873585e-05,
"loss": 0.3682,
"step": 445
},
{
"epoch": 0.454638124362895,
"grad_norm": 11.335341453552246,
"learning_rate": 9.999857991998499e-05,
"loss": 1.3278,
"step": 446
},
{
"epoch": 0.45565749235474007,
"grad_norm": 8.901962280273438,
"learning_rate": 9.999680483887217e-05,
"loss": 0.665,
"step": 447
},
{
"epoch": 0.4566768603465851,
"grad_norm": 6.525248050689697,
"learning_rate": 9.999431976060504e-05,
"loss": 0.77,
"step": 448
},
{
"epoch": 0.45769622833843016,
"grad_norm": 7.658937931060791,
"learning_rate": 9.999112472047386e-05,
"loss": 0.9903,
"step": 449
},
{
"epoch": 0.45871559633027525,
"grad_norm": 5.406915664672852,
"learning_rate": 9.998721976385087e-05,
"loss": 0.3372,
"step": 450
},
{
"epoch": 0.4597349643221203,
"grad_norm": 5.920129299163818,
"learning_rate": 9.998260494618979e-05,
"loss": 0.6911,
"step": 451
},
{
"epoch": 0.46075433231396534,
"grad_norm": 7.490262985229492,
"learning_rate": 9.997728033302496e-05,
"loss": 0.505,
"step": 452
},
{
"epoch": 0.4617737003058104,
"grad_norm": 8.21649169921875,
"learning_rate": 9.997124599997043e-05,
"loss": 1.3397,
"step": 453
},
{
"epoch": 0.46279306829765543,
"grad_norm": 5.116532802581787,
"learning_rate": 9.996450203271886e-05,
"loss": 0.2853,
"step": 454
},
{
"epoch": 0.46381243628950053,
"grad_norm": 7.29067325592041,
"learning_rate": 9.995704852704029e-05,
"loss": 1.198,
"step": 455
},
{
"epoch": 0.4648318042813456,
"grad_norm": 10.033268928527832,
"learning_rate": 9.994888558878086e-05,
"loss": 1.7965,
"step": 456
},
{
"epoch": 0.4658511722731906,
"grad_norm": 5.4102606773376465,
"learning_rate": 9.994001333386125e-05,
"loss": 0.2987,
"step": 457
},
{
"epoch": 0.46687054026503566,
"grad_norm": 8.109895706176758,
"learning_rate": 9.993043188827501e-05,
"loss": 0.6864,
"step": 458
},
{
"epoch": 0.46788990825688076,
"grad_norm": 9.893292427062988,
"learning_rate": 9.992014138808682e-05,
"loss": 0.9016,
"step": 459
},
{
"epoch": 0.4689092762487258,
"grad_norm": 7.73169469833374,
"learning_rate": 9.990914197943053e-05,
"loss": 0.7314,
"step": 460
},
{
"epoch": 0.46992864424057085,
"grad_norm": 8.335735321044922,
"learning_rate": 9.989743381850711e-05,
"loss": 0.6633,
"step": 461
},
{
"epoch": 0.4709480122324159,
"grad_norm": 8.655631065368652,
"learning_rate": 9.988501707158243e-05,
"loss": 0.9783,
"step": 462
},
{
"epoch": 0.47196738022426094,
"grad_norm": 9.166102409362793,
"learning_rate": 9.987189191498479e-05,
"loss": 1.1307,
"step": 463
},
{
"epoch": 0.47298674821610603,
"grad_norm": 10.597552299499512,
"learning_rate": 9.985805853510262e-05,
"loss": 1.4662,
"step": 464
},
{
"epoch": 0.4740061162079511,
"grad_norm": 10.318975448608398,
"learning_rate": 9.984351712838167e-05,
"loss": 1.4666,
"step": 465
},
{
"epoch": 0.4750254841997961,
"grad_norm": 7.259106636047363,
"learning_rate": 9.98282679013223e-05,
"loss": 0.5918,
"step": 466
},
{
"epoch": 0.47604485219164117,
"grad_norm": 10.192667961120605,
"learning_rate": 9.981231107047648e-05,
"loss": 1.5836,
"step": 467
},
{
"epoch": 0.47706422018348627,
"grad_norm": 6.506603717803955,
"learning_rate": 9.97956468624448e-05,
"loss": 0.5698,
"step": 468
},
{
"epoch": 0.4780835881753313,
"grad_norm": 5.789127349853516,
"learning_rate": 9.977827551387318e-05,
"loss": 0.4654,
"step": 469
},
{
"epoch": 0.47910295616717635,
"grad_norm": 8.615316390991211,
"learning_rate": 9.976019727144956e-05,
"loss": 0.9522,
"step": 470
},
{
"epoch": 0.4801223241590214,
"grad_norm": 4.792436599731445,
"learning_rate": 9.974141239190034e-05,
"loss": 0.4748,
"step": 471
},
{
"epoch": 0.48114169215086644,
"grad_norm": 9.958406448364258,
"learning_rate": 9.972192114198677e-05,
"loss": 2.0766,
"step": 472
},
{
"epoch": 0.48216106014271154,
"grad_norm": 4.340735912322998,
"learning_rate": 9.970172379850122e-05,
"loss": 0.7071,
"step": 473
},
{
"epoch": 0.4831804281345566,
"grad_norm": 7.170680999755859,
"learning_rate": 9.968082064826314e-05,
"loss": 0.435,
"step": 474
},
{
"epoch": 0.4841997961264016,
"grad_norm": 3.927189350128174,
"learning_rate": 9.965921198811501e-05,
"loss": 0.4551,
"step": 475
},
{
"epoch": 0.48521916411824667,
"grad_norm": 10.183062553405762,
"learning_rate": 9.96368981249182e-05,
"loss": 1.1758,
"step": 476
},
{
"epoch": 0.48623853211009177,
"grad_norm": 9.819293022155762,
"learning_rate": 9.961387937554857e-05,
"loss": 0.9995,
"step": 477
},
{
"epoch": 0.4872579001019368,
"grad_norm": 11.188612937927246,
"learning_rate": 9.95901560668919e-05,
"loss": 1.6207,
"step": 478
},
{
"epoch": 0.48827726809378186,
"grad_norm": 7.268994331359863,
"learning_rate": 9.95657285358394e-05,
"loss": 0.6978,
"step": 479
},
{
"epoch": 0.4892966360856269,
"grad_norm": 5.575627326965332,
"learning_rate": 9.954059712928275e-05,
"loss": 0.4236,
"step": 480
},
{
"epoch": 0.49031600407747195,
"grad_norm": 9.621591567993164,
"learning_rate": 9.951476220410929e-05,
"loss": 1.8218,
"step": 481
},
{
"epoch": 0.49133537206931704,
"grad_norm": 7.322023391723633,
"learning_rate": 9.948822412719697e-05,
"loss": 0.8749,
"step": 482
},
{
"epoch": 0.4923547400611621,
"grad_norm": 8.407424926757812,
"learning_rate": 9.946098327540902e-05,
"loss": 1.1704,
"step": 483
},
{
"epoch": 0.49337410805300713,
"grad_norm": 4.8855438232421875,
"learning_rate": 9.943304003558873e-05,
"loss": 0.5327,
"step": 484
},
{
"epoch": 0.4943934760448522,
"grad_norm": 8.738515853881836,
"learning_rate": 9.940439480455386e-05,
"loss": 1.2009,
"step": 485
},
{
"epoch": 0.4954128440366973,
"grad_norm": 7.554356575012207,
"learning_rate": 9.937504798909106e-05,
"loss": 0.5427,
"step": 486
},
{
"epoch": 0.4964322120285423,
"grad_norm": 8.203272819519043,
"learning_rate": 9.934500000595008e-05,
"loss": 0.5893,
"step": 487
},
{
"epoch": 0.49745158002038736,
"grad_norm": 8.477286338806152,
"learning_rate": 9.931425128183782e-05,
"loss": 1.061,
"step": 488
},
{
"epoch": 0.4984709480122324,
"grad_norm": 7.389923095703125,
"learning_rate": 9.928280225341232e-05,
"loss": 0.5465,
"step": 489
},
{
"epoch": 0.49949031600407745,
"grad_norm": 10.051106452941895,
"learning_rate": 9.925065336727654e-05,
"loss": 0.7035,
"step": 490
},
{
"epoch": 0.49949031600407745,
"eval_Qnli-dev-1024_cosine_accuracy": 0.7291666666666666,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.817620038986206,
"eval_Qnli-dev-1024_cosine_ap": 0.7443202788050278,
"eval_Qnli-dev-1024_cosine_f1": 0.7291666666666667,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.817620038986206,
"eval_Qnli-dev-1024_cosine_mcc": 0.46405228758169936,
"eval_Qnli-dev-1024_cosine_precision": 0.6862745098039216,
"eval_Qnli-dev-1024_cosine_recall": 0.7777777777777778,
"eval_Qnli-dev_cosine_accuracy": 0.7395833333333334,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.7795548439025879,
"eval_Qnli-dev_cosine_ap": 0.7446338608862075,
"eval_Qnli-dev_cosine_f1": 0.7378640776699029,
"eval_Qnli-dev_cosine_f1_threshold": 0.6985307335853577,
"eval_Qnli-dev_cosine_mcc": 0.46153029495329345,
"eval_Qnli-dev_cosine_precision": 0.6551724137931034,
"eval_Qnli-dev_cosine_recall": 0.8444444444444444,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.90625,
"eval_allNLI-triplets_cosine_accuracy": 0.9375,
"eval_global_dataset_loss": 0.34814590215682983,
"eval_global_dataset_runtime": 104.2751,
"eval_global_dataset_samples_per_second": 7.701,
"eval_global_dataset_steps_per_second": 0.163,
"eval_sequential_score": 0.90625,
"eval_sts-test-1024_pearson_cosine": 0.8574057933500303,
"eval_sts-test-1024_spearman_cosine": 0.8986116241995802,
"eval_sts-test_pearson_cosine": 0.9019111579722014,
"eval_sts-test_spearman_cosine": 0.9181479205822737,
"step": 490
},
{
"epoch": 0.5005096839959225,
"grad_norm": 10.88537311553955,
"learning_rate": 9.921780507997202e-05,
"loss": 1.6596,
"step": 491
},
{
"epoch": 0.5015290519877675,
"grad_norm": 5.0818891525268555,
"learning_rate": 9.918425785797235e-05,
"loss": 0.4475,
"step": 492
},
{
"epoch": 0.5025484199796126,
"grad_norm": 12.540839195251465,
"learning_rate": 9.915001217767663e-05,
"loss": 2.0803,
"step": 493
},
{
"epoch": 0.5035677879714577,
"grad_norm": 6.171934604644775,
"learning_rate": 9.911506852540267e-05,
"loss": 0.4296,
"step": 494
},
{
"epoch": 0.5045871559633027,
"grad_norm": 9.624109268188477,
"learning_rate": 9.907942739738001e-05,
"loss": 1.5435,
"step": 495
},
{
"epoch": 0.5056065239551478,
"grad_norm": 6.020090579986572,
"learning_rate": 9.904308929974302e-05,
"loss": 0.6073,
"step": 496
},
{
"epoch": 0.5066258919469928,
"grad_norm": 8.587658882141113,
"learning_rate": 9.900605474852358e-05,
"loss": 1.1774,
"step": 497
},
{
"epoch": 0.5076452599388379,
"grad_norm": 6.535181999206543,
"learning_rate": 9.896832426964382e-05,
"loss": 0.8951,
"step": 498
},
{
"epoch": 0.508664627930683,
"grad_norm": 5.945138454437256,
"learning_rate": 9.892989839890863e-05,
"loss": 0.3775,
"step": 499
},
{
"epoch": 0.509683995922528,
"grad_norm": 7.641120433807373,
"learning_rate": 9.889077768199806e-05,
"loss": 0.8086,
"step": 500
},
{
"epoch": 0.5107033639143731,
"grad_norm": 8.10549545288086,
"learning_rate": 9.885096267445957e-05,
"loss": 0.3864,
"step": 501
},
{
"epoch": 0.5117227319062182,
"grad_norm": 7.266530990600586,
"learning_rate": 9.881045394170012e-05,
"loss": 0.8865,
"step": 502
},
{
"epoch": 0.5127420998980632,
"grad_norm": 9.056779861450195,
"learning_rate": 9.876925205897818e-05,
"loss": 0.567,
"step": 503
},
{
"epoch": 0.5137614678899083,
"grad_norm": 7.140566349029541,
"learning_rate": 9.872735761139554e-05,
"loss": 0.9304,
"step": 504
},
{
"epoch": 0.5147808358817533,
"grad_norm": 11.422016143798828,
"learning_rate": 9.868477119388896e-05,
"loss": 0.6977,
"step": 505
},
{
"epoch": 0.5158002038735984,
"grad_norm": 11.155719757080078,
"learning_rate": 9.864149341122181e-05,
"loss": 1.3174,
"step": 506
},
{
"epoch": 0.5168195718654435,
"grad_norm": 8.781103134155273,
"learning_rate": 9.859752487797542e-05,
"loss": 1.2481,
"step": 507
},
{
"epoch": 0.5178389398572885,
"grad_norm": 5.503263473510742,
"learning_rate": 9.855286621854034e-05,
"loss": 0.4894,
"step": 508
},
{
"epoch": 0.5188583078491336,
"grad_norm": 7.503839015960693,
"learning_rate": 9.850751806710753e-05,
"loss": 0.8095,
"step": 509
},
{
"epoch": 0.5198776758409785,
"grad_norm": 5.623706817626953,
"learning_rate": 9.846148106765933e-05,
"loss": 0.388,
"step": 510
},
{
"epoch": 0.5208970438328236,
"grad_norm": 5.178555965423584,
"learning_rate": 9.841475587396028e-05,
"loss": 0.6725,
"step": 511
},
{
"epoch": 0.5219164118246687,
"grad_norm": 7.296833038330078,
"learning_rate": 9.836734314954785e-05,
"loss": 0.4804,
"step": 512
},
{
"epoch": 0.5229357798165137,
"grad_norm": 8.692532539367676,
"learning_rate": 9.831924356772308e-05,
"loss": 1.2414,
"step": 513
},
{
"epoch": 0.5239551478083588,
"grad_norm": 9.865914344787598,
"learning_rate": 9.827045781154093e-05,
"loss": 1.0319,
"step": 514
},
{
"epoch": 0.5249745158002038,
"grad_norm": 14.857895851135254,
"learning_rate": 9.822098657380065e-05,
"loss": 2.0732,
"step": 515
},
{
"epoch": 0.5259938837920489,
"grad_norm": 6.85409688949585,
"learning_rate": 9.817083055703587e-05,
"loss": 1.1168,
"step": 516
},
{
"epoch": 0.527013251783894,
"grad_norm": 10.668725967407227,
"learning_rate": 9.811999047350471e-05,
"loss": 0.8056,
"step": 517
},
{
"epoch": 0.528032619775739,
"grad_norm": 6.76224946975708,
"learning_rate": 9.806846704517957e-05,
"loss": 0.5322,
"step": 518
},
{
"epoch": 0.5290519877675841,
"grad_norm": 4.4465789794921875,
"learning_rate": 9.801626100373699e-05,
"loss": 0.4348,
"step": 519
},
{
"epoch": 0.5300713557594292,
"grad_norm": 8.388195991516113,
"learning_rate": 9.796337309054717e-05,
"loss": 0.6316,
"step": 520
},
{
"epoch": 0.5310907237512742,
"grad_norm": 5.859539031982422,
"learning_rate": 9.790980405666344e-05,
"loss": 0.3212,
"step": 521
},
{
"epoch": 0.5321100917431193,
"grad_norm": 6.299170017242432,
"learning_rate": 9.785555466281162e-05,
"loss": 0.4739,
"step": 522
},
{
"epoch": 0.5331294597349643,
"grad_norm": 9.609426498413086,
"learning_rate": 9.780062567937928e-05,
"loss": 1.1692,
"step": 523
},
{
"epoch": 0.5341488277268094,
"grad_norm": 9.116230964660645,
"learning_rate": 9.774501788640471e-05,
"loss": 0.941,
"step": 524
},
{
"epoch": 0.5351681957186545,
"grad_norm": 5.024673938751221,
"learning_rate": 9.768873207356586e-05,
"loss": 0.3767,
"step": 525
},
{
"epoch": 0.5361875637104995,
"grad_norm": 7.534763336181641,
"learning_rate": 9.763176904016913e-05,
"loss": 0.5264,
"step": 526
},
{
"epoch": 0.5372069317023446,
"grad_norm": 7.897163391113281,
"learning_rate": 9.757412959513807e-05,
"loss": 0.4345,
"step": 527
},
{
"epoch": 0.5382262996941896,
"grad_norm": 8.391239166259766,
"learning_rate": 9.751581455700181e-05,
"loss": 1.0352,
"step": 528
},
{
"epoch": 0.5392456676860347,
"grad_norm": 6.951046466827393,
"learning_rate": 9.745682475388348e-05,
"loss": 1.1014,
"step": 529
},
{
"epoch": 0.5402650356778798,
"grad_norm": 6.4283671379089355,
"learning_rate": 9.73971610234885e-05,
"loss": 0.7368,
"step": 530
},
{
"epoch": 0.5412844036697247,
"grad_norm": 7.643414497375488,
"learning_rate": 9.733682421309256e-05,
"loss": 0.5324,
"step": 531
},
{
"epoch": 0.5423037716615698,
"grad_norm": 7.95609188079834,
"learning_rate": 9.727581517952969e-05,
"loss": 0.5351,
"step": 532
},
{
"epoch": 0.5433231396534148,
"grad_norm": 11.28146743774414,
"learning_rate": 9.721413478918007e-05,
"loss": 1.6815,
"step": 533
},
{
"epoch": 0.5443425076452599,
"grad_norm": 8.346885681152344,
"learning_rate": 9.715178391795769e-05,
"loss": 0.8125,
"step": 534
},
{
"epoch": 0.545361875637105,
"grad_norm": 8.147517204284668,
"learning_rate": 9.708876345129797e-05,
"loss": 0.8629,
"step": 535
},
{
"epoch": 0.54638124362895,
"grad_norm": 10.061439514160156,
"learning_rate": 9.702507428414513e-05,
"loss": 1.3161,
"step": 536
},
{
"epoch": 0.5474006116207951,
"grad_norm": 8.882964134216309,
"learning_rate": 9.696071732093952e-05,
"loss": 1.0465,
"step": 537
},
{
"epoch": 0.5484199796126402,
"grad_norm": 5.954410076141357,
"learning_rate": 9.689569347560475e-05,
"loss": 0.4531,
"step": 538
},
{
"epoch": 0.5494393476044852,
"grad_norm": 10.33085823059082,
"learning_rate": 9.683000367153474e-05,
"loss": 0.5567,
"step": 539
},
{
"epoch": 0.5504587155963303,
"grad_norm": 5.265343189239502,
"learning_rate": 9.676364884158058e-05,
"loss": 0.7093,
"step": 540
},
{
"epoch": 0.5514780835881753,
"grad_norm": 10.214452743530273,
"learning_rate": 9.66966299280373e-05,
"loss": 1.9339,
"step": 541
},
{
"epoch": 0.5524974515800204,
"grad_norm": 7.001688480377197,
"learning_rate": 9.662894788263044e-05,
"loss": 0.3659,
"step": 542
},
{
"epoch": 0.5535168195718655,
"grad_norm": 6.640339374542236,
"learning_rate": 9.656060366650267e-05,
"loss": 1.0505,
"step": 543
},
{
"epoch": 0.5545361875637105,
"grad_norm": 9.303877830505371,
"learning_rate": 9.649159825019996e-05,
"loss": 0.8766,
"step": 544
},
{
"epoch": 0.5555555555555556,
"grad_norm": 8.21275806427002,
"learning_rate": 9.642193261365791e-05,
"loss": 0.6526,
"step": 545
},
{
"epoch": 0.5565749235474006,
"grad_norm": 6.97646427154541,
"learning_rate": 9.635160774618782e-05,
"loss": 0.5529,
"step": 546
},
{
"epoch": 0.5575942915392457,
"grad_norm": 6.77686071395874,
"learning_rate": 9.628062464646264e-05,
"loss": 0.4817,
"step": 547
},
{
"epoch": 0.5586136595310908,
"grad_norm": 3.5217092037200928,
"learning_rate": 9.620898432250272e-05,
"loss": 0.4804,
"step": 548
},
{
"epoch": 0.5596330275229358,
"grad_norm": 5.6369476318359375,
"learning_rate": 9.613668779166165e-05,
"loss": 0.4508,
"step": 549
},
{
"epoch": 0.5606523955147809,
"grad_norm": 5.534257888793945,
"learning_rate": 9.606373608061162e-05,
"loss": 0.4339,
"step": 550
},
{
"epoch": 0.5616717635066258,
"grad_norm": 10.922380447387695,
"learning_rate": 9.5990130225329e-05,
"loss": 0.712,
"step": 551
},
{
"epoch": 0.5626911314984709,
"grad_norm": 6.2288360595703125,
"learning_rate": 9.59158712710795e-05,
"loss": 0.3974,
"step": 552
},
{
"epoch": 0.563710499490316,
"grad_norm": 11.958196640014648,
"learning_rate": 9.58409602724035e-05,
"loss": 1.0016,
"step": 553
},
{
"epoch": 0.564729867482161,
"grad_norm": 8.267114639282227,
"learning_rate": 9.576539829310085e-05,
"loss": 0.5751,
"step": 554
},
{
"epoch": 0.5657492354740061,
"grad_norm": 11.533574104309082,
"learning_rate": 9.568918640621594e-05,
"loss": 1.111,
"step": 555
},
{
"epoch": 0.5667686034658511,
"grad_norm": 6.519062519073486,
"learning_rate": 9.561232569402239e-05,
"loss": 0.4202,
"step": 556
},
{
"epoch": 0.5677879714576962,
"grad_norm": 9.009593963623047,
"learning_rate": 9.553481724800768e-05,
"loss": 0.7822,
"step": 557
},
{
"epoch": 0.5688073394495413,
"grad_norm": 12.121257781982422,
"learning_rate": 9.545666216885767e-05,
"loss": 1.3844,
"step": 558
},
{
"epoch": 0.5698267074413863,
"grad_norm": 5.953427314758301,
"learning_rate": 9.537786156644097e-05,
"loss": 0.3881,
"step": 559
},
{
"epoch": 0.5708460754332314,
"grad_norm": 7.334780216217041,
"learning_rate": 9.529841655979315e-05,
"loss": 0.6317,
"step": 560
},
{
"epoch": 0.5718654434250765,
"grad_norm": 5.987368583679199,
"learning_rate": 9.521832827710088e-05,
"loss": 0.4976,
"step": 561
},
{
"epoch": 0.5728848114169215,
"grad_norm": 3.9462735652923584,
"learning_rate": 9.51375978556859e-05,
"loss": 0.2741,
"step": 562
},
{
"epoch": 0.5739041794087666,
"grad_norm": 6.374652862548828,
"learning_rate": 9.505622644198885e-05,
"loss": 0.6232,
"step": 563
},
{
"epoch": 0.5749235474006116,
"grad_norm": 3.525486707687378,
"learning_rate": 9.497421519155303e-05,
"loss": 0.2083,
"step": 564
},
{
"epoch": 0.5759429153924567,
"grad_norm": 9.60029125213623,
"learning_rate": 9.489156526900795e-05,
"loss": 1.0605,
"step": 565
},
{
"epoch": 0.5769622833843018,
"grad_norm": 12.22358226776123,
"learning_rate": 9.480827784805278e-05,
"loss": 1.2086,
"step": 566
},
{
"epoch": 0.5779816513761468,
"grad_norm": 4.388841152191162,
"learning_rate": 9.472435411143978e-05,
"loss": 0.2217,
"step": 567
},
{
"epoch": 0.5790010193679919,
"grad_norm": 5.581283092498779,
"learning_rate": 9.463979525095738e-05,
"loss": 0.4215,
"step": 568
},
{
"epoch": 0.5800203873598369,
"grad_norm": 7.996876239776611,
"learning_rate": 9.455460246741331e-05,
"loss": 0.663,
"step": 569
},
{
"epoch": 0.581039755351682,
"grad_norm": 9.21956729888916,
"learning_rate": 9.446877697061757e-05,
"loss": 0.653,
"step": 570
},
{
"epoch": 0.582059123343527,
"grad_norm": 8.46827220916748,
"learning_rate": 9.43823199793652e-05,
"loss": 0.6895,
"step": 571
},
{
"epoch": 0.583078491335372,
"grad_norm": 9.72203540802002,
"learning_rate": 9.429523272141903e-05,
"loss": 1.1101,
"step": 572
},
{
"epoch": 0.5840978593272171,
"grad_norm": 8.79525089263916,
"learning_rate": 9.420751643349219e-05,
"loss": 1.2991,
"step": 573
},
{
"epoch": 0.5851172273190621,
"grad_norm": 6.719937801361084,
"learning_rate": 9.411917236123059e-05,
"loss": 0.4072,
"step": 574
},
{
"epoch": 0.5861365953109072,
"grad_norm": 8.360040664672852,
"learning_rate": 9.403020175919517e-05,
"loss": 1.169,
"step": 575
},
{
"epoch": 0.5871559633027523,
"grad_norm": 5.402820587158203,
"learning_rate": 9.394060589084417e-05,
"loss": 0.3374,
"step": 576
},
{
"epoch": 0.5881753312945973,
"grad_norm": 9.037818908691406,
"learning_rate": 9.385038602851515e-05,
"loss": 0.6785,
"step": 577
},
{
"epoch": 0.5891946992864424,
"grad_norm": 9.151761054992676,
"learning_rate": 9.375954345340685e-05,
"loss": 1.2757,
"step": 578
},
{
"epoch": 0.5902140672782875,
"grad_norm": 5.834461212158203,
"learning_rate": 9.366807945556113e-05,
"loss": 0.5899,
"step": 579
},
{
"epoch": 0.5912334352701325,
"grad_norm": 5.722581386566162,
"learning_rate": 9.357599533384453e-05,
"loss": 0.3389,
"step": 580
},
{
"epoch": 0.5922528032619776,
"grad_norm": 10.132628440856934,
"learning_rate": 9.348329239592995e-05,
"loss": 1.631,
"step": 581
},
{
"epoch": 0.5932721712538226,
"grad_norm": 9.922087669372559,
"learning_rate": 9.338997195827792e-05,
"loss": 1.3975,
"step": 582
},
{
"epoch": 0.5942915392456677,
"grad_norm": 8.382550239562988,
"learning_rate": 9.329603534611806e-05,
"loss": 0.4654,
"step": 583
},
{
"epoch": 0.5953109072375128,
"grad_norm": 8.080007553100586,
"learning_rate": 9.32014838934301e-05,
"loss": 0.56,
"step": 584
},
{
"epoch": 0.5963302752293578,
"grad_norm": 5.616114616394043,
"learning_rate": 9.310631894292518e-05,
"loss": 0.2282,
"step": 585
},
{
"epoch": 0.5973496432212029,
"grad_norm": 10.813580513000488,
"learning_rate": 9.301054184602647e-05,
"loss": 1.0754,
"step": 586
},
{
"epoch": 0.5983690112130479,
"grad_norm": 8.062788963317871,
"learning_rate": 9.291415396285024e-05,
"loss": 0.4411,
"step": 587
},
{
"epoch": 0.599388379204893,
"grad_norm": 8.6395845413208,
"learning_rate": 9.281715666218643e-05,
"loss": 0.9243,
"step": 588
},
{
"epoch": 0.599388379204893,
"eval_Qnli-dev-1024_cosine_accuracy": 0.71875,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.8436912298202515,
"eval_Qnli-dev-1024_cosine_ap": 0.7587494204458187,
"eval_Qnli-dev-1024_cosine_f1": 0.6875,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.8028630018234253,
"eval_Qnli-dev-1024_cosine_mcc": 0.3803921568627451,
"eval_Qnli-dev-1024_cosine_precision": 0.6470588235294118,
"eval_Qnli-dev-1024_cosine_recall": 0.7333333333333333,
"eval_Qnli-dev_cosine_accuracy": 0.71875,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.8078321218490601,
"eval_Qnli-dev_cosine_ap": 0.7321739553695406,
"eval_Qnli-dev_cosine_f1": 0.7339449541284404,
"eval_Qnli-dev_cosine_f1_threshold": 0.6781572699546814,
"eval_Qnli-dev_cosine_mcc": 0.4428074427700477,
"eval_Qnli-dev_cosine_precision": 0.625,
"eval_Qnli-dev_cosine_recall": 0.8888888888888888,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.90625,
"eval_allNLI-triplets_cosine_accuracy": 0.9375,
"eval_global_dataset_loss": 0.36118289828300476,
"eval_global_dataset_runtime": 104.3983,
"eval_global_dataset_samples_per_second": 7.692,
"eval_global_dataset_steps_per_second": 0.163,
"eval_sequential_score": 0.90625,
"eval_sts-test-1024_pearson_cosine": 0.8631921152381832,
"eval_sts-test-1024_spearman_cosine": 0.9009700758334896,
"eval_sts-test_pearson_cosine": 0.9009355736320144,
"eval_sts-test_spearman_cosine": 0.9171725695772274,
"step": 588
},
{
"epoch": 0.6004077471967381,
"grad_norm": 6.184821128845215,
"learning_rate": 9.271955132147916e-05,
"loss": 0.3572,
"step": 589
},
{
"epoch": 0.601427115188583,
"grad_norm": 8.318941116333008,
"learning_rate": 9.262133932680733e-05,
"loss": 0.6761,
"step": 590
},
{
"epoch": 0.6024464831804281,
"grad_norm": 7.54533052444458,
"learning_rate": 9.252252207286479e-05,
"loss": 0.5754,
"step": 591
},
{
"epoch": 0.6034658511722731,
"grad_norm": 4.341547012329102,
"learning_rate": 9.24231009629406e-05,
"loss": 0.3664,
"step": 592
},
{
"epoch": 0.6044852191641182,
"grad_norm": 7.616749286651611,
"learning_rate": 9.232307740889909e-05,
"loss": 0.5391,
"step": 593
},
{
"epoch": 0.6055045871559633,
"grad_norm": 4.843873977661133,
"learning_rate": 9.222245283115979e-05,
"loss": 0.518,
"step": 594
},
{
"epoch": 0.6065239551478083,
"grad_norm": 8.295080184936523,
"learning_rate": 9.21212286586773e-05,
"loss": 0.5263,
"step": 595
},
{
"epoch": 0.6075433231396534,
"grad_norm": 3.873260736465454,
"learning_rate": 9.201940632892096e-05,
"loss": 0.2995,
"step": 596
},
{
"epoch": 0.6085626911314985,
"grad_norm": 4.403683185577393,
"learning_rate": 9.191698728785448e-05,
"loss": 0.4181,
"step": 597
},
{
"epoch": 0.6095820591233435,
"grad_norm": 7.282264709472656,
"learning_rate": 9.181397298991532e-05,
"loss": 0.5087,
"step": 598
},
{
"epoch": 0.6106014271151886,
"grad_norm": 5.132986068725586,
"learning_rate": 9.171036489799416e-05,
"loss": 0.4344,
"step": 599
},
{
"epoch": 0.6116207951070336,
"grad_norm": 11.096871376037598,
"learning_rate": 9.160616448341403e-05,
"loss": 1.6529,
"step": 600
},
{
"epoch": 0.6126401630988787,
"grad_norm": 4.306335926055908,
"learning_rate": 9.150137322590944e-05,
"loss": 0.4079,
"step": 601
},
{
"epoch": 0.6136595310907238,
"grad_norm": 5.622674942016602,
"learning_rate": 9.139599261360537e-05,
"loss": 0.3123,
"step": 602
},
{
"epoch": 0.6146788990825688,
"grad_norm": 10.172139167785645,
"learning_rate": 9.129002414299617e-05,
"loss": 1.4398,
"step": 603
},
{
"epoch": 0.6156982670744139,
"grad_norm": 10.175543785095215,
"learning_rate": 9.118346931892423e-05,
"loss": 1.5553,
"step": 604
},
{
"epoch": 0.6167176350662589,
"grad_norm": 7.616044521331787,
"learning_rate": 9.10763296545587e-05,
"loss": 0.7958,
"step": 605
},
{
"epoch": 0.617737003058104,
"grad_norm": 5.390756607055664,
"learning_rate": 9.096860667137394e-05,
"loss": 0.3815,
"step": 606
},
{
"epoch": 0.6187563710499491,
"grad_norm": 6.750911235809326,
"learning_rate": 9.086030189912794e-05,
"loss": 0.787,
"step": 607
},
{
"epoch": 0.6197757390417941,
"grad_norm": 5.695408344268799,
"learning_rate": 9.075141687584057e-05,
"loss": 0.2352,
"step": 608
},
{
"epoch": 0.6207951070336392,
"grad_norm": 12.017024040222168,
"learning_rate": 9.06419531477718e-05,
"loss": 0.6469,
"step": 609
},
{
"epoch": 0.6218144750254841,
"grad_norm": 9.70870304107666,
"learning_rate": 9.053191226939965e-05,
"loss": 1.0997,
"step": 610
},
{
"epoch": 0.6228338430173292,
"grad_norm": 8.183333396911621,
"learning_rate": 9.042129580339822e-05,
"loss": 0.8762,
"step": 611
},
{
"epoch": 0.6238532110091743,
"grad_norm": 8.237792015075684,
"learning_rate": 9.031010532061538e-05,
"loss": 0.6259,
"step": 612
},
{
"epoch": 0.6248725790010193,
"grad_norm": 7.553733825683594,
"learning_rate": 9.019834240005058e-05,
"loss": 0.8133,
"step": 613
},
{
"epoch": 0.6258919469928644,
"grad_norm": 8.876506805419922,
"learning_rate": 9.008600862883235e-05,
"loss": 1.2119,
"step": 614
},
{
"epoch": 0.6269113149847095,
"grad_norm": 6.738461971282959,
"learning_rate": 8.997310560219578e-05,
"loss": 0.5068,
"step": 615
},
{
"epoch": 0.6279306829765545,
"grad_norm": 9.400090217590332,
"learning_rate": 8.985963492345991e-05,
"loss": 0.7723,
"step": 616
},
{
"epoch": 0.6289500509683996,
"grad_norm": 8.690120697021484,
"learning_rate": 8.974559820400486e-05,
"loss": 1.0791,
"step": 617
},
{
"epoch": 0.6299694189602446,
"grad_norm": 5.441365718841553,
"learning_rate": 8.963099706324904e-05,
"loss": 0.348,
"step": 618
},
{
"epoch": 0.6309887869520897,
"grad_norm": 8.373964309692383,
"learning_rate": 8.951583312862616e-05,
"loss": 0.6523,
"step": 619
},
{
"epoch": 0.6320081549439348,
"grad_norm": 8.361169815063477,
"learning_rate": 8.9400108035562e-05,
"loss": 0.6241,
"step": 620
},
{
"epoch": 0.6330275229357798,
"grad_norm": 8.095520973205566,
"learning_rate": 8.928382342745137e-05,
"loss": 0.5039,
"step": 621
},
{
"epoch": 0.6340468909276249,
"grad_norm": 9.879805564880371,
"learning_rate": 8.916698095563453e-05,
"loss": 1.0113,
"step": 622
},
{
"epoch": 0.6350662589194699,
"grad_norm": 11.630424499511719,
"learning_rate": 8.904958227937406e-05,
"loss": 1.0527,
"step": 623
},
{
"epoch": 0.636085626911315,
"grad_norm": 9.939377784729004,
"learning_rate": 8.893162906583094e-05,
"loss": 1.3893,
"step": 624
},
{
"epoch": 0.6371049949031601,
"grad_norm": 7.852113723754883,
"learning_rate": 8.881312299004117e-05,
"loss": 0.7191,
"step": 625
},
{
"epoch": 0.6381243628950051,
"grad_norm": 7.134123802185059,
"learning_rate": 8.86940657348918e-05,
"loss": 0.3591,
"step": 626
},
{
"epoch": 0.6391437308868502,
"grad_norm": 5.795046806335449,
"learning_rate": 8.857445899109715e-05,
"loss": 0.9856,
"step": 627
},
{
"epoch": 0.6401630988786952,
"grad_norm": 7.4533610343933105,
"learning_rate": 8.845430445717469e-05,
"loss": 0.7603,
"step": 628
},
{
"epoch": 0.6411824668705403,
"grad_norm": 9.926379203796387,
"learning_rate": 8.8333603839421e-05,
"loss": 1.1553,
"step": 629
},
{
"epoch": 0.6422018348623854,
"grad_norm": 7.032261371612549,
"learning_rate": 8.821235885188754e-05,
"loss": 0.5608,
"step": 630
},
{
"epoch": 0.6432212028542303,
"grad_norm": 6.283802509307861,
"learning_rate": 8.809057121635624e-05,
"loss": 0.4338,
"step": 631
},
{
"epoch": 0.6442405708460754,
"grad_norm": 2.8640384674072266,
"learning_rate": 8.796824266231511e-05,
"loss": 0.1376,
"step": 632
},
{
"epoch": 0.6452599388379205,
"grad_norm": 7.722833633422852,
"learning_rate": 8.784537492693368e-05,
"loss": 0.6539,
"step": 633
},
{
"epoch": 0.6462793068297655,
"grad_norm": 7.714670658111572,
"learning_rate": 8.772196975503828e-05,
"loss": 0.5017,
"step": 634
},
{
"epoch": 0.6472986748216106,
"grad_norm": 4.0773091316223145,
"learning_rate": 8.759802889908733e-05,
"loss": 0.1888,
"step": 635
},
{
"epoch": 0.6483180428134556,
"grad_norm": 12.99943733215332,
"learning_rate": 8.747355411914642e-05,
"loss": 1.6077,
"step": 636
},
{
"epoch": 0.6493374108053007,
"grad_norm": 10.86596393585205,
"learning_rate": 8.734854718286327e-05,
"loss": 0.9635,
"step": 637
},
{
"epoch": 0.6503567787971458,
"grad_norm": 9.243484497070312,
"learning_rate": 8.722300986544272e-05,
"loss": 0.9786,
"step": 638
},
{
"epoch": 0.6513761467889908,
"grad_norm": 10.92319393157959,
"learning_rate": 8.709694394962142e-05,
"loss": 0.6728,
"step": 639
},
{
"epoch": 0.6523955147808359,
"grad_norm": 11.628253936767578,
"learning_rate": 8.697035122564266e-05,
"loss": 0.8592,
"step": 640
},
{
"epoch": 0.6534148827726809,
"grad_norm": 5.602497100830078,
"learning_rate": 8.684323349123075e-05,
"loss": 0.3945,
"step": 641
},
{
"epoch": 0.654434250764526,
"grad_norm": 7.681665420532227,
"learning_rate": 8.671559255156567e-05,
"loss": 0.7486,
"step": 642
},
{
"epoch": 0.6554536187563711,
"grad_norm": 9.017338752746582,
"learning_rate": 8.658743021925733e-05,
"loss": 0.7793,
"step": 643
},
{
"epoch": 0.6564729867482161,
"grad_norm": 5.24987268447876,
"learning_rate": 8.645874831431982e-05,
"loss": 0.4401,
"step": 644
},
{
"epoch": 0.6574923547400612,
"grad_norm": 10.270877838134766,
"learning_rate": 8.632954866414567e-05,
"loss": 0.6189,
"step": 645
},
{
"epoch": 0.6585117227319062,
"grad_norm": 8.378297805786133,
"learning_rate": 8.619983310347982e-05,
"loss": 0.7339,
"step": 646
},
{
"epoch": 0.6595310907237513,
"grad_norm": 6.045844554901123,
"learning_rate": 8.606960347439355e-05,
"loss": 0.4089,
"step": 647
},
{
"epoch": 0.6605504587155964,
"grad_norm": 10.432483673095703,
"learning_rate": 8.593886162625835e-05,
"loss": 1.1412,
"step": 648
},
{
"epoch": 0.6615698267074414,
"grad_norm": 5.939512729644775,
"learning_rate": 8.580760941571967e-05,
"loss": 0.798,
"step": 649
},
{
"epoch": 0.6625891946992865,
"grad_norm": 12.093332290649414,
"learning_rate": 8.567584870667056e-05,
"loss": 1.0588,
"step": 650
},
{
"epoch": 0.6636085626911316,
"grad_norm": 8.624043464660645,
"learning_rate": 8.554358137022513e-05,
"loss": 0.9044,
"step": 651
},
{
"epoch": 0.6646279306829765,
"grad_norm": 7.735975742340088,
"learning_rate": 8.54108092846921e-05,
"loss": 0.4464,
"step": 652
},
{
"epoch": 0.6656472986748216,
"grad_norm": 3.8205575942993164,
"learning_rate": 8.527753433554797e-05,
"loss": 0.2756,
"step": 653
},
{
"epoch": 0.6666666666666666,
"grad_norm": 10.537273406982422,
"learning_rate": 8.51437584154104e-05,
"loss": 1.123,
"step": 654
},
{
"epoch": 0.6676860346585117,
"grad_norm": 6.052632808685303,
"learning_rate": 8.500948342401124e-05,
"loss": 0.5377,
"step": 655
},
{
"epoch": 0.6687054026503568,
"grad_norm": 7.787528991699219,
"learning_rate": 8.48747112681696e-05,
"loss": 0.5164,
"step": 656
},
{
"epoch": 0.6697247706422018,
"grad_norm": 10.115964889526367,
"learning_rate": 8.473944386176469e-05,
"loss": 0.7155,
"step": 657
},
{
"epoch": 0.6707441386340469,
"grad_norm": 6.880122184753418,
"learning_rate": 8.460368312570873e-05,
"loss": 0.4512,
"step": 658
},
{
"epoch": 0.6717635066258919,
"grad_norm": 8.106338500976562,
"learning_rate": 8.446743098791969e-05,
"loss": 0.6199,
"step": 659
},
{
"epoch": 0.672782874617737,
"grad_norm": 11.035154342651367,
"learning_rate": 8.433068938329376e-05,
"loss": 0.6673,
"step": 660
},
{
"epoch": 0.6738022426095821,
"grad_norm": 4.484703540802002,
"learning_rate": 8.419346025367809e-05,
"loss": 0.5934,
"step": 661
},
{
"epoch": 0.6748216106014271,
"grad_norm": 6.977105140686035,
"learning_rate": 8.4055745547843e-05,
"loss": 0.5034,
"step": 662
},
{
"epoch": 0.6758409785932722,
"grad_norm": 5.447470664978027,
"learning_rate": 8.391754722145449e-05,
"loss": 0.4161,
"step": 663
},
{
"epoch": 0.6768603465851172,
"grad_norm": 13.200489044189453,
"learning_rate": 8.37788672370463e-05,
"loss": 0.9848,
"step": 664
},
{
"epoch": 0.6778797145769623,
"grad_norm": 6.03376579284668,
"learning_rate": 8.36397075639922e-05,
"loss": 0.356,
"step": 665
},
{
"epoch": 0.6788990825688074,
"grad_norm": 6.075347900390625,
"learning_rate": 8.350007017847788e-05,
"loss": 0.3031,
"step": 666
},
{
"epoch": 0.6799184505606524,
"grad_norm": 5.790109157562256,
"learning_rate": 8.335995706347299e-05,
"loss": 0.254,
"step": 667
},
{
"epoch": 0.6809378185524975,
"grad_norm": 11.979147911071777,
"learning_rate": 8.321937020870296e-05,
"loss": 0.8646,
"step": 668
},
{
"epoch": 0.6819571865443425,
"grad_norm": 9.445723533630371,
"learning_rate": 8.30783116106207e-05,
"loss": 0.7303,
"step": 669
},
{
"epoch": 0.6829765545361876,
"grad_norm": 8.001054763793945,
"learning_rate": 8.293678327237827e-05,
"loss": 0.4105,
"step": 670
},
{
"epoch": 0.6839959225280327,
"grad_norm": 4.437264919281006,
"learning_rate": 8.279478720379845e-05,
"loss": 0.2874,
"step": 671
},
{
"epoch": 0.6850152905198776,
"grad_norm": 4.547714710235596,
"learning_rate": 8.265232542134622e-05,
"loss": 0.2112,
"step": 672
},
{
"epoch": 0.6860346585117227,
"grad_norm": 7.875749588012695,
"learning_rate": 8.250939994810003e-05,
"loss": 1.0919,
"step": 673
},
{
"epoch": 0.6870540265035678,
"grad_norm": 5.349310874938965,
"learning_rate": 8.236601281372319e-05,
"loss": 0.5927,
"step": 674
},
{
"epoch": 0.6880733944954128,
"grad_norm": 11.490046501159668,
"learning_rate": 8.222216605443496e-05,
"loss": 1.011,
"step": 675
},
{
"epoch": 0.6890927624872579,
"grad_norm": 7.11298942565918,
"learning_rate": 8.207786171298166e-05,
"loss": 0.5656,
"step": 676
},
{
"epoch": 0.6901121304791029,
"grad_norm": 10.48589038848877,
"learning_rate": 8.193310183860771e-05,
"loss": 0.7199,
"step": 677
},
{
"epoch": 0.691131498470948,
"grad_norm": 9.364179611206055,
"learning_rate": 8.178788848702643e-05,
"loss": 0.7506,
"step": 678
},
{
"epoch": 0.6921508664627931,
"grad_norm": 6.678390026092529,
"learning_rate": 8.164222372039092e-05,
"loss": 0.5386,
"step": 679
},
{
"epoch": 0.6931702344546381,
"grad_norm": 6.151979446411133,
"learning_rate": 8.149610960726479e-05,
"loss": 0.6678,
"step": 680
},
{
"epoch": 0.6941896024464832,
"grad_norm": 6.415065765380859,
"learning_rate": 8.134954822259271e-05,
"loss": 0.4834,
"step": 681
},
{
"epoch": 0.6952089704383282,
"grad_norm": 4.4640326499938965,
"learning_rate": 8.120254164767101e-05,
"loss": 0.2411,
"step": 682
},
{
"epoch": 0.6962283384301733,
"grad_norm": 6.626987457275391,
"learning_rate": 8.105509197011807e-05,
"loss": 0.5011,
"step": 683
},
{
"epoch": 0.6972477064220184,
"grad_norm": 7.628388404846191,
"learning_rate": 8.090720128384475e-05,
"loss": 0.6573,
"step": 684
},
{
"epoch": 0.6982670744138634,
"grad_norm": 3.4043076038360596,
"learning_rate": 8.075887168902459e-05,
"loss": 0.2798,
"step": 685
},
{
"epoch": 0.6992864424057085,
"grad_norm": 5.682481288909912,
"learning_rate": 8.061010529206398e-05,
"loss": 0.5887,
"step": 686
},
{
"epoch": 0.6992864424057085,
"eval_Qnli-dev-1024_cosine_accuracy": 0.7291666666666666,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.8446075320243835,
"eval_Qnli-dev-1024_cosine_ap": 0.7501532568375827,
"eval_Qnli-dev-1024_cosine_f1": 0.7207207207207208,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.756614089012146,
"eval_Qnli-dev-1024_cosine_mcc": 0.4081269865567241,
"eval_Qnli-dev-1024_cosine_precision": 0.6060606060606061,
"eval_Qnli-dev-1024_cosine_recall": 0.8888888888888888,
"eval_Qnli-dev_cosine_accuracy": 0.71875,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.7564685344696045,
"eval_Qnli-dev_cosine_ap": 0.731843650475666,
"eval_Qnli-dev_cosine_f1": 0.7378640776699029,
"eval_Qnli-dev_cosine_f1_threshold": 0.6987220048904419,
"eval_Qnli-dev_cosine_mcc": 0.46153029495329345,
"eval_Qnli-dev_cosine_precision": 0.6551724137931034,
"eval_Qnli-dev_cosine_recall": 0.8444444444444444,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.9270833134651184,
"eval_allNLI-triplets_cosine_accuracy": 0.9583333134651184,
"eval_global_dataset_loss": 0.29955434799194336,
"eval_global_dataset_runtime": 104.3655,
"eval_global_dataset_samples_per_second": 7.694,
"eval_global_dataset_steps_per_second": 0.163,
"eval_sequential_score": 0.9270833134651184,
"eval_sts-test-1024_pearson_cosine": 0.8628190908797548,
"eval_sts-test-1024_spearman_cosine": 0.9062196010289961,
"eval_sts-test_pearson_cosine": 0.9012940791829644,
"eval_sts-test_spearman_cosine": 0.9179642241352577,
"step": 686
},
{
"epoch": 0.7003058103975535,
"grad_norm": 5.198816299438477,
"learning_rate": 8.046090420557231e-05,
"loss": 0.6328,
"step": 687
},
{
"epoch": 0.7013251783893986,
"grad_norm": 7.044593811035156,
"learning_rate": 8.031127054833189e-05,
"loss": 0.4322,
"step": 688
},
{
"epoch": 0.7023445463812437,
"grad_norm": 13.891091346740723,
"learning_rate": 8.016120644526797e-05,
"loss": 1.1473,
"step": 689
},
{
"epoch": 0.7033639143730887,
"grad_norm": 9.329078674316406,
"learning_rate": 8.001071402741842e-05,
"loss": 1.0135,
"step": 690
},
{
"epoch": 0.7043832823649337,
"grad_norm": 4.1149210929870605,
"learning_rate": 7.985979543190358e-05,
"loss": 0.2655,
"step": 691
},
{
"epoch": 0.7054026503567788,
"grad_norm": 7.722234725952148,
"learning_rate": 7.970845280189586e-05,
"loss": 0.5053,
"step": 692
},
{
"epoch": 0.7064220183486238,
"grad_norm": 6.9180216789245605,
"learning_rate": 7.955668828658937e-05,
"loss": 0.8647,
"step": 693
},
{
"epoch": 0.7074413863404689,
"grad_norm": 5.709589004516602,
"learning_rate": 7.940450404116928e-05,
"loss": 0.4423,
"step": 694
},
{
"epoch": 0.7084607543323139,
"grad_norm": 4.812499523162842,
"learning_rate": 7.925190222678133e-05,
"loss": 0.3673,
"step": 695
},
{
"epoch": 0.709480122324159,
"grad_norm": 11.944628715515137,
"learning_rate": 7.909888501050109e-05,
"loss": 1.1714,
"step": 696
},
{
"epoch": 0.7104994903160041,
"grad_norm": 7.61957311630249,
"learning_rate": 7.894545456530316e-05,
"loss": 0.8142,
"step": 697
},
{
"epoch": 0.7115188583078491,
"grad_norm": 9.580735206604004,
"learning_rate": 7.879161307003038e-05,
"loss": 0.8027,
"step": 698
},
{
"epoch": 0.7125382262996942,
"grad_norm": 7.831961154937744,
"learning_rate": 7.863736270936284e-05,
"loss": 0.4514,
"step": 699
},
{
"epoch": 0.7135575942915392,
"grad_norm": 9.805893898010254,
"learning_rate": 7.848270567378686e-05,
"loss": 0.8798,
"step": 700
},
{
"epoch": 0.7145769622833843,
"grad_norm": 8.573545455932617,
"learning_rate": 7.832764415956389e-05,
"loss": 0.7718,
"step": 701
},
{
"epoch": 0.7155963302752294,
"grad_norm": 6.185779571533203,
"learning_rate": 7.817218036869932e-05,
"loss": 0.4094,
"step": 702
},
{
"epoch": 0.7166156982670744,
"grad_norm": 9.415246963500977,
"learning_rate": 7.80163165089112e-05,
"loss": 0.5358,
"step": 703
},
{
"epoch": 0.7176350662589195,
"grad_norm": 5.7925543785095215,
"learning_rate": 7.78600547935989e-05,
"loss": 0.5728,
"step": 704
},
{
"epoch": 0.7186544342507645,
"grad_norm": 8.365612983703613,
"learning_rate": 7.770339744181175e-05,
"loss": 0.4349,
"step": 705
},
{
"epoch": 0.7196738022426096,
"grad_norm": 11.040353775024414,
"learning_rate": 7.754634667821734e-05,
"loss": 1.0107,
"step": 706
},
{
"epoch": 0.7206931702344547,
"grad_norm": 10.400522232055664,
"learning_rate": 7.73889047330701e-05,
"loss": 1.3393,
"step": 707
},
{
"epoch": 0.7217125382262997,
"grad_norm": 6.314993381500244,
"learning_rate": 7.723107384217958e-05,
"loss": 0.5175,
"step": 708
},
{
"epoch": 0.7227319062181448,
"grad_norm": 7.7337541580200195,
"learning_rate": 7.70728562468787e-05,
"loss": 0.3906,
"step": 709
},
{
"epoch": 0.7237512742099899,
"grad_norm": 8.559732437133789,
"learning_rate": 7.691425419399183e-05,
"loss": 0.726,
"step": 710
},
{
"epoch": 0.7247706422018348,
"grad_norm": 5.824985504150391,
"learning_rate": 7.675526993580306e-05,
"loss": 0.4299,
"step": 711
},
{
"epoch": 0.72579001019368,
"grad_norm": 9.804418563842773,
"learning_rate": 7.659590573002407e-05,
"loss": 0.7486,
"step": 712
},
{
"epoch": 0.7268093781855249,
"grad_norm": 5.5835957527160645,
"learning_rate": 7.643616383976214e-05,
"loss": 0.3316,
"step": 713
},
{
"epoch": 0.72782874617737,
"grad_norm": 8.719099044799805,
"learning_rate": 7.627604653348796e-05,
"loss": 0.5444,
"step": 714
},
{
"epoch": 0.7288481141692151,
"grad_norm": 7.16873025894165,
"learning_rate": 7.611555608500351e-05,
"loss": 0.4717,
"step": 715
},
{
"epoch": 0.7298674821610601,
"grad_norm": 8.529095649719238,
"learning_rate": 7.595469477340965e-05,
"loss": 0.5413,
"step": 716
},
{
"epoch": 0.7308868501529052,
"grad_norm": 4.7856245040893555,
"learning_rate": 7.579346488307379e-05,
"loss": 0.2207,
"step": 717
},
{
"epoch": 0.7319062181447502,
"grad_norm": 8.381448745727539,
"learning_rate": 7.563186870359758e-05,
"loss": 0.8042,
"step": 718
},
{
"epoch": 0.7329255861365953,
"grad_norm": 6.099252700805664,
"learning_rate": 7.546990852978415e-05,
"loss": 0.3666,
"step": 719
},
{
"epoch": 0.7339449541284404,
"grad_norm": 6.979067802429199,
"learning_rate": 7.530758666160577e-05,
"loss": 0.4511,
"step": 720
},
{
"epoch": 0.7349643221202854,
"grad_norm": 8.355476379394531,
"learning_rate": 7.514490540417103e-05,
"loss": 0.782,
"step": 721
},
{
"epoch": 0.7359836901121305,
"grad_norm": 3.4271693229675293,
"learning_rate": 7.498186706769213e-05,
"loss": 0.2947,
"step": 722
},
{
"epoch": 0.7370030581039755,
"grad_norm": 10.753888130187988,
"learning_rate": 7.481847396745215e-05,
"loss": 1.6206,
"step": 723
},
{
"epoch": 0.7380224260958206,
"grad_norm": 10.323583602905273,
"learning_rate": 7.465472842377206e-05,
"loss": 0.6216,
"step": 724
},
{
"epoch": 0.7390417940876657,
"grad_norm": 5.268289566040039,
"learning_rate": 7.449063276197789e-05,
"loss": 0.2819,
"step": 725
},
{
"epoch": 0.7400611620795107,
"grad_norm": 8.948394775390625,
"learning_rate": 7.432618931236759e-05,
"loss": 0.4797,
"step": 726
},
{
"epoch": 0.7410805300713558,
"grad_norm": 4.78109884262085,
"learning_rate": 7.416140041017802e-05,
"loss": 0.3875,
"step": 727
},
{
"epoch": 0.7420998980632009,
"grad_norm": 7.643434047698975,
"learning_rate": 7.399626839555176e-05,
"loss": 0.6888,
"step": 728
},
{
"epoch": 0.7431192660550459,
"grad_norm": 4.128391742706299,
"learning_rate": 7.383079561350386e-05,
"loss": 0.3023,
"step": 729
},
{
"epoch": 0.744138634046891,
"grad_norm": 8.254578590393066,
"learning_rate": 7.36649844138886e-05,
"loss": 0.6654,
"step": 730
},
{
"epoch": 0.745158002038736,
"grad_norm": 10.747797966003418,
"learning_rate": 7.3498837151366e-05,
"loss": 0.6517,
"step": 731
},
{
"epoch": 0.746177370030581,
"grad_norm": 6.274332046508789,
"learning_rate": 7.333235618536856e-05,
"loss": 0.4537,
"step": 732
},
{
"epoch": 0.7471967380224261,
"grad_norm": 8.256685256958008,
"learning_rate": 7.316554388006756e-05,
"loss": 0.7224,
"step": 733
},
{
"epoch": 0.7482161060142711,
"grad_norm": 7.657110214233398,
"learning_rate": 7.299840260433965e-05,
"loss": 0.4447,
"step": 734
},
{
"epoch": 0.7492354740061162,
"grad_norm": 6.170997142791748,
"learning_rate": 7.283093473173307e-05,
"loss": 0.4127,
"step": 735
},
{
"epoch": 0.7502548419979612,
"grad_norm": 5.84876823425293,
"learning_rate": 7.26631426404341e-05,
"loss": 0.3297,
"step": 736
},
{
"epoch": 0.7512742099898063,
"grad_norm": 5.986436367034912,
"learning_rate": 7.249502871323314e-05,
"loss": 0.3664,
"step": 737
},
{
"epoch": 0.7522935779816514,
"grad_norm": 9.613632202148438,
"learning_rate": 7.232659533749092e-05,
"loss": 0.7934,
"step": 738
},
{
"epoch": 0.7533129459734964,
"grad_norm": 5.5741286277771,
"learning_rate": 7.215784490510468e-05,
"loss": 0.4214,
"step": 739
},
{
"epoch": 0.7543323139653415,
"grad_norm": 8.343430519104004,
"learning_rate": 7.198877981247406e-05,
"loss": 0.6174,
"step": 740
},
{
"epoch": 0.7553516819571865,
"grad_norm": 11.505045890808105,
"learning_rate": 7.18194024604672e-05,
"loss": 0.7011,
"step": 741
},
{
"epoch": 0.7563710499490316,
"grad_norm": 9.192388534545898,
"learning_rate": 7.164971525438657e-05,
"loss": 0.6472,
"step": 742
},
{
"epoch": 0.7573904179408767,
"grad_norm": 10.685009002685547,
"learning_rate": 7.147972060393478e-05,
"loss": 0.9555,
"step": 743
},
{
"epoch": 0.7584097859327217,
"grad_norm": 9.81982421875,
"learning_rate": 7.130942092318051e-05,
"loss": 1.1771,
"step": 744
},
{
"epoch": 0.7594291539245668,
"grad_norm": 7.654698848724365,
"learning_rate": 7.113881863052407e-05,
"loss": 0.6876,
"step": 745
},
{
"epoch": 0.7604485219164119,
"grad_norm": 10.608144760131836,
"learning_rate": 7.096791614866309e-05,
"loss": 0.6737,
"step": 746
},
{
"epoch": 0.7614678899082569,
"grad_norm": 8.949767112731934,
"learning_rate": 7.079671590455821e-05,
"loss": 0.9648,
"step": 747
},
{
"epoch": 0.762487257900102,
"grad_norm": 5.873875141143799,
"learning_rate": 7.06252203293985e-05,
"loss": 0.3267,
"step": 748
},
{
"epoch": 0.763506625891947,
"grad_norm": 3.814371347427368,
"learning_rate": 7.045343185856701e-05,
"loss": 0.2244,
"step": 749
},
{
"epoch": 0.764525993883792,
"grad_norm": 5.834865570068359,
"learning_rate": 7.028135293160611e-05,
"loss": 0.305,
"step": 750
},
{
"epoch": 0.7655453618756372,
"grad_norm": 8.765941619873047,
"learning_rate": 7.010898599218296e-05,
"loss": 0.5588,
"step": 751
},
{
"epoch": 0.7665647298674821,
"grad_norm": 8.091228485107422,
"learning_rate": 6.99363334880547e-05,
"loss": 1.0974,
"step": 752
},
{
"epoch": 0.7675840978593272,
"grad_norm": 7.041286468505859,
"learning_rate": 6.976339787103373e-05,
"loss": 0.603,
"step": 753
},
{
"epoch": 0.7686034658511722,
"grad_norm": 6.676450729370117,
"learning_rate": 6.959018159695293e-05,
"loss": 0.6972,
"step": 754
},
{
"epoch": 0.7696228338430173,
"grad_norm": 9.935379981994629,
"learning_rate": 6.94166871256307e-05,
"loss": 0.958,
"step": 755
},
{
"epoch": 0.7706422018348624,
"grad_norm": 6.536661624908447,
"learning_rate": 6.92429169208361e-05,
"loss": 0.2937,
"step": 756
},
{
"epoch": 0.7716615698267074,
"grad_norm": 5.736427307128906,
"learning_rate": 6.906887345025385e-05,
"loss": 0.3384,
"step": 757
},
{
"epoch": 0.7726809378185525,
"grad_norm": 5.628017425537109,
"learning_rate": 6.88945591854493e-05,
"loss": 0.3321,
"step": 758
},
{
"epoch": 0.7737003058103975,
"grad_norm": 9.1480712890625,
"learning_rate": 6.87199766018332e-05,
"loss": 0.8029,
"step": 759
},
{
"epoch": 0.7747196738022426,
"grad_norm": 7.8731770515441895,
"learning_rate": 6.85451281786268e-05,
"loss": 0.7043,
"step": 760
},
{
"epoch": 0.7757390417940877,
"grad_norm": 13.733153343200684,
"learning_rate": 6.837001639882641e-05,
"loss": 1.6068,
"step": 761
},
{
"epoch": 0.7767584097859327,
"grad_norm": 9.02813720703125,
"learning_rate": 6.819464374916823e-05,
"loss": 1.1273,
"step": 762
},
{
"epoch": 0.7777777777777778,
"grad_norm": 8.211151123046875,
"learning_rate": 6.801901272009307e-05,
"loss": 0.5429,
"step": 763
},
{
"epoch": 0.7787971457696228,
"grad_norm": 5.243499755859375,
"learning_rate": 6.784312580571091e-05,
"loss": 0.2976,
"step": 764
},
{
"epoch": 0.7798165137614679,
"grad_norm": 11.219100952148438,
"learning_rate": 6.766698550376556e-05,
"loss": 0.9183,
"step": 765
},
{
"epoch": 0.780835881753313,
"grad_norm": 7.10944938659668,
"learning_rate": 6.749059431559913e-05,
"loss": 0.4734,
"step": 766
},
{
"epoch": 0.781855249745158,
"grad_norm": 7.810965061187744,
"learning_rate": 6.731395474611649e-05,
"loss": 0.5437,
"step": 767
},
{
"epoch": 0.7828746177370031,
"grad_norm": 6.063333034515381,
"learning_rate": 6.71370693037498e-05,
"loss": 0.3382,
"step": 768
},
{
"epoch": 0.7838939857288482,
"grad_norm": 5.784426689147949,
"learning_rate": 6.695994050042277e-05,
"loss": 0.3925,
"step": 769
},
{
"epoch": 0.7849133537206932,
"grad_norm": 7.640711784362793,
"learning_rate": 6.678257085151509e-05,
"loss": 0.4345,
"step": 770
},
{
"epoch": 0.7859327217125383,
"grad_norm": 9.467418670654297,
"learning_rate": 6.660496287582667e-05,
"loss": 0.9237,
"step": 771
},
{
"epoch": 0.7869520897043832,
"grad_norm": 4.449363708496094,
"learning_rate": 6.642711909554174e-05,
"loss": 0.3875,
"step": 772
},
{
"epoch": 0.7879714576962283,
"grad_norm": 7.483307838439941,
"learning_rate": 6.624904203619333e-05,
"loss": 0.533,
"step": 773
},
{
"epoch": 0.7889908256880734,
"grad_norm": 4.827091693878174,
"learning_rate": 6.607073422662711e-05,
"loss": 0.4211,
"step": 774
},
{
"epoch": 0.7900101936799184,
"grad_norm": 6.135465621948242,
"learning_rate": 6.589219819896565e-05,
"loss": 0.5421,
"step": 775
},
{
"epoch": 0.7910295616717635,
"grad_norm": 9.622929573059082,
"learning_rate": 6.571343648857242e-05,
"loss": 0.8904,
"step": 776
},
{
"epoch": 0.7920489296636085,
"grad_norm": 5.664134502410889,
"learning_rate": 6.553445163401571e-05,
"loss": 0.4604,
"step": 777
},
{
"epoch": 0.7930682976554536,
"grad_norm": 9.634468078613281,
"learning_rate": 6.535524617703273e-05,
"loss": 0.7431,
"step": 778
},
{
"epoch": 0.7940876656472987,
"grad_norm": 10.855483055114746,
"learning_rate": 6.517582266249336e-05,
"loss": 1.0159,
"step": 779
},
{
"epoch": 0.7951070336391437,
"grad_norm": 9.945262908935547,
"learning_rate": 6.499618363836417e-05,
"loss": 0.6554,
"step": 780
},
{
"epoch": 0.7961264016309888,
"grad_norm": 7.224388599395752,
"learning_rate": 6.481633165567207e-05,
"loss": 0.8539,
"step": 781
},
{
"epoch": 0.7971457696228338,
"grad_norm": 8.917383193969727,
"learning_rate": 6.463626926846817e-05,
"loss": 0.4543,
"step": 782
},
{
"epoch": 0.7981651376146789,
"grad_norm": 4.411260604858398,
"learning_rate": 6.445599903379154e-05,
"loss": 0.2281,
"step": 783
},
{
"epoch": 0.799184505606524,
"grad_norm": 8.85741138458252,
"learning_rate": 6.427552351163286e-05,
"loss": 1.0334,
"step": 784
},
{
"epoch": 0.799184505606524,
"eval_Qnli-dev-1024_cosine_accuracy": 0.7395833333333334,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.8376107215881348,
"eval_Qnli-dev-1024_cosine_ap": 0.7815698422458957,
"eval_Qnli-dev-1024_cosine_f1": 0.7222222222222222,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.7544304132461548,
"eval_Qnli-dev-1024_cosine_mcc": 0.41614558708189836,
"eval_Qnli-dev-1024_cosine_precision": 0.6190476190476191,
"eval_Qnli-dev-1024_cosine_recall": 0.8666666666666667,
"eval_Qnli-dev_cosine_accuracy": 0.7291666666666666,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.7388297319412231,
"eval_Qnli-dev_cosine_ap": 0.7636341718424307,
"eval_Qnli-dev_cosine_f1": 0.7450980392156862,
"eval_Qnli-dev_cosine_f1_threshold": 0.695953369140625,
"eval_Qnli-dev_cosine_mcc": 0.4794765594627558,
"eval_Qnli-dev_cosine_precision": 0.6666666666666666,
"eval_Qnli-dev_cosine_recall": 0.8444444444444444,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.9583333134651184,
"eval_allNLI-triplets_cosine_accuracy": 0.9583333134651184,
"eval_global_dataset_loss": 0.3045359253883362,
"eval_global_dataset_runtime": 103.772,
"eval_global_dataset_samples_per_second": 7.738,
"eval_global_dataset_steps_per_second": 0.164,
"eval_sequential_score": 0.9583333134651184,
"eval_sts-test-1024_pearson_cosine": 0.8706480103495355,
"eval_sts-test-1024_spearman_cosine": 0.9094148980677476,
"eval_sts-test_pearson_cosine": 0.9036838203711135,
"eval_sts-test_spearman_cosine": 0.9196077696084266,
"step": 784
},
{
"epoch": 0.800203873598369,
"grad_norm": 10.137900352478027,
"learning_rate": 6.409484526489805e-05,
"loss": 0.9697,
"step": 785
},
{
"epoch": 0.8012232415902141,
"grad_norm": 8.976780891418457,
"learning_rate": 6.391396685937186e-05,
"loss": 0.7048,
"step": 786
},
{
"epoch": 0.8022426095820592,
"grad_norm": 8.672534942626953,
"learning_rate": 6.373289086368151e-05,
"loss": 0.5263,
"step": 787
},
{
"epoch": 0.8032619775739042,
"grad_norm": 9.115574836730957,
"learning_rate": 6.355161984926019e-05,
"loss": 0.5056,
"step": 788
},
{
"epoch": 0.8042813455657493,
"grad_norm": 5.47214412689209,
"learning_rate": 6.337015639031044e-05,
"loss": 0.3826,
"step": 789
},
{
"epoch": 0.8053007135575942,
"grad_norm": 4.726554870605469,
"learning_rate": 6.318850306376777e-05,
"loss": 0.3029,
"step": 790
},
{
"epoch": 0.8063200815494393,
"grad_norm": 9.025796890258789,
"learning_rate": 6.300666244926387e-05,
"loss": 0.7712,
"step": 791
},
{
"epoch": 0.8073394495412844,
"grad_norm": 8.51115894317627,
"learning_rate": 6.282463712909018e-05,
"loss": 0.5587,
"step": 792
},
{
"epoch": 0.8083588175331294,
"grad_norm": 4.170646667480469,
"learning_rate": 6.264242968816106e-05,
"loss": 0.2386,
"step": 793
},
{
"epoch": 0.8093781855249745,
"grad_norm": 7.225284576416016,
"learning_rate": 6.246004271397713e-05,
"loss": 0.5662,
"step": 794
},
{
"epoch": 0.8103975535168195,
"grad_norm": 8.109657287597656,
"learning_rate": 6.227747879658859e-05,
"loss": 0.5322,
"step": 795
},
{
"epoch": 0.8114169215086646,
"grad_norm": 8.729584693908691,
"learning_rate": 6.20947405285583e-05,
"loss": 0.5122,
"step": 796
},
{
"epoch": 0.8124362895005097,
"grad_norm": 6.562040328979492,
"learning_rate": 6.191183050492515e-05,
"loss": 0.5094,
"step": 797
},
{
"epoch": 0.8134556574923547,
"grad_norm": 8.552765846252441,
"learning_rate": 6.172875132316703e-05,
"loss": 0.8412,
"step": 798
},
{
"epoch": 0.8144750254841998,
"grad_norm": 8.517980575561523,
"learning_rate": 6.154550558316405e-05,
"loss": 0.3771,
"step": 799
},
{
"epoch": 0.8154943934760448,
"grad_norm": 9.862586975097656,
"learning_rate": 6.136209588716155e-05,
"loss": 0.626,
"step": 800
},
{
"epoch": 0.8165137614678899,
"grad_norm": 11.597122192382812,
"learning_rate": 6.117852483973325e-05,
"loss": 0.8902,
"step": 801
},
{
"epoch": 0.817533129459735,
"grad_norm": 4.268974781036377,
"learning_rate": 6.0994795047744144e-05,
"loss": 0.2301,
"step": 802
},
{
"epoch": 0.81855249745158,
"grad_norm": 2.586038112640381,
"learning_rate": 6.081090912031358e-05,
"loss": 0.16,
"step": 803
},
{
"epoch": 0.8195718654434251,
"grad_norm": 6.814731121063232,
"learning_rate": 6.0626869668778085e-05,
"loss": 0.4375,
"step": 804
},
{
"epoch": 0.8205912334352702,
"grad_norm": 9.699979782104492,
"learning_rate": 6.044267930665446e-05,
"loss": 0.9554,
"step": 805
},
{
"epoch": 0.8216106014271152,
"grad_norm": 7.751320838928223,
"learning_rate": 6.025834064960247e-05,
"loss": 0.4906,
"step": 806
},
{
"epoch": 0.8226299694189603,
"grad_norm": 8.852093696594238,
"learning_rate": 6.007385631538787e-05,
"loss": 0.478,
"step": 807
},
{
"epoch": 0.8236493374108053,
"grad_norm": 5.510447025299072,
"learning_rate": 5.988922892384513e-05,
"loss": 0.6057,
"step": 808
},
{
"epoch": 0.8246687054026504,
"grad_norm": 6.745148658752441,
"learning_rate": 5.9704461096840204e-05,
"loss": 0.5003,
"step": 809
},
{
"epoch": 0.8256880733944955,
"grad_norm": 11.509452819824219,
"learning_rate": 5.9519555458233436e-05,
"loss": 1.0844,
"step": 810
},
{
"epoch": 0.8267074413863404,
"grad_norm": 9.71648120880127,
"learning_rate": 5.933451463384213e-05,
"loss": 1.0267,
"step": 811
},
{
"epoch": 0.8277268093781855,
"grad_norm": 9.810832023620605,
"learning_rate": 5.91493412514034e-05,
"loss": 0.5415,
"step": 812
},
{
"epoch": 0.8287461773700305,
"grad_norm": 5.600392818450928,
"learning_rate": 5.896403794053679e-05,
"loss": 0.3295,
"step": 813
},
{
"epoch": 0.8297655453618756,
"grad_norm": 7.511580944061279,
"learning_rate": 5.877860733270692e-05,
"loss": 0.5511,
"step": 814
},
{
"epoch": 0.8307849133537207,
"grad_norm": 5.374726295471191,
"learning_rate": 5.8593052061186125e-05,
"loss": 0.3234,
"step": 815
},
{
"epoch": 0.8318042813455657,
"grad_norm": 4.7778639793396,
"learning_rate": 5.8407374761017105e-05,
"loss": 0.2917,
"step": 816
},
{
"epoch": 0.8328236493374108,
"grad_norm": 4.155742645263672,
"learning_rate": 5.822157806897548e-05,
"loss": 0.3865,
"step": 817
},
{
"epoch": 0.8338430173292558,
"grad_norm": 5.087594032287598,
"learning_rate": 5.803566462353225e-05,
"loss": 0.2401,
"step": 818
},
{
"epoch": 0.8348623853211009,
"grad_norm": 3.707869529724121,
"learning_rate": 5.7849637064816496e-05,
"loss": 0.1582,
"step": 819
},
{
"epoch": 0.835881753312946,
"grad_norm": 7.63162899017334,
"learning_rate": 5.76634980345778e-05,
"loss": 0.5475,
"step": 820
},
{
"epoch": 0.836901121304791,
"grad_norm": 5.092942237854004,
"learning_rate": 5.747725017614869e-05,
"loss": 0.3291,
"step": 821
},
{
"epoch": 0.8379204892966361,
"grad_norm": 6.86021089553833,
"learning_rate": 5.72908961344072e-05,
"loss": 0.6867,
"step": 822
},
{
"epoch": 0.8389398572884812,
"grad_norm": 9.336700439453125,
"learning_rate": 5.710443855573919e-05,
"loss": 0.9519,
"step": 823
},
{
"epoch": 0.8399592252803262,
"grad_norm": 6.382976055145264,
"learning_rate": 5.6917880088000894e-05,
"loss": 0.4898,
"step": 824
},
{
"epoch": 0.8409785932721713,
"grad_norm": 8.171992301940918,
"learning_rate": 5.6731223380481257e-05,
"loss": 0.3361,
"step": 825
},
{
"epoch": 0.8419979612640163,
"grad_norm": 11.304964065551758,
"learning_rate": 5.6544471083864245e-05,
"loss": 1.0131,
"step": 826
},
{
"epoch": 0.8430173292558614,
"grad_norm": 7.883802890777588,
"learning_rate": 5.635762585019136e-05,
"loss": 0.4988,
"step": 827
},
{
"epoch": 0.8440366972477065,
"grad_norm": 5.304625988006592,
"learning_rate": 5.61706903328238e-05,
"loss": 0.2737,
"step": 828
},
{
"epoch": 0.8450560652395515,
"grad_norm": 8.170361518859863,
"learning_rate": 5.598366718640494e-05,
"loss": 0.5214,
"step": 829
},
{
"epoch": 0.8460754332313966,
"grad_norm": 7.193360805511475,
"learning_rate": 5.579655906682255e-05,
"loss": 0.5261,
"step": 830
},
{
"epoch": 0.8470948012232415,
"grad_norm": 5.908787250518799,
"learning_rate": 5.5609368631171035e-05,
"loss": 0.4337,
"step": 831
},
{
"epoch": 0.8481141692150866,
"grad_norm": 11.470138549804688,
"learning_rate": 5.5422098537713815e-05,
"loss": 1.0523,
"step": 832
},
{
"epoch": 0.8491335372069317,
"grad_norm": 5.7633514404296875,
"learning_rate": 5.52347514458455e-05,
"loss": 0.59,
"step": 833
},
{
"epoch": 0.8501529051987767,
"grad_norm": 9.171930313110352,
"learning_rate": 5.5047330016054154e-05,
"loss": 0.9984,
"step": 834
},
{
"epoch": 0.8511722731906218,
"grad_norm": 7.584822177886963,
"learning_rate": 5.48598369098835e-05,
"loss": 1.0533,
"step": 835
},
{
"epoch": 0.8521916411824668,
"grad_norm": 5.429177761077881,
"learning_rate": 5.4672274789895104e-05,
"loss": 0.266,
"step": 836
},
{
"epoch": 0.8532110091743119,
"grad_norm": 7.292309284210205,
"learning_rate": 5.4484646319630636e-05,
"loss": 0.3497,
"step": 837
},
{
"epoch": 0.854230377166157,
"grad_norm": 7.126836776733398,
"learning_rate": 5.429695416357392e-05,
"loss": 0.5161,
"step": 838
},
{
"epoch": 0.855249745158002,
"grad_norm": 6.357126235961914,
"learning_rate": 5.410920098711323e-05,
"loss": 0.4256,
"step": 839
},
{
"epoch": 0.8562691131498471,
"grad_norm": 6.682480335235596,
"learning_rate": 5.392138945650339e-05,
"loss": 0.3334,
"step": 840
},
{
"epoch": 0.8572884811416922,
"grad_norm": 6.9180521965026855,
"learning_rate": 5.373352223882787e-05,
"loss": 0.5704,
"step": 841
},
{
"epoch": 0.8583078491335372,
"grad_norm": 6.871384620666504,
"learning_rate": 5.354560200196094e-05,
"loss": 0.3803,
"step": 842
},
{
"epoch": 0.8593272171253823,
"grad_norm": 9.186737060546875,
"learning_rate": 5.335763141452982e-05,
"loss": 0.7648,
"step": 843
},
{
"epoch": 0.8603465851172273,
"grad_norm": 8.700101852416992,
"learning_rate": 5.3169613145876714e-05,
"loss": 0.7548,
"step": 844
},
{
"epoch": 0.8613659531090724,
"grad_norm": 7.032200336456299,
"learning_rate": 5.2981549866020975e-05,
"loss": 0.7275,
"step": 845
},
{
"epoch": 0.8623853211009175,
"grad_norm": 13.48193359375,
"learning_rate": 5.2793444245621146e-05,
"loss": 1.1788,
"step": 846
},
{
"epoch": 0.8634046890927625,
"grad_norm": 9.682479858398438,
"learning_rate": 5.260529895593702e-05,
"loss": 0.7809,
"step": 847
},
{
"epoch": 0.8644240570846076,
"grad_norm": 8.730304718017578,
"learning_rate": 5.241711666879172e-05,
"loss": 0.6487,
"step": 848
},
{
"epoch": 0.8654434250764526,
"grad_norm": 6.570590972900391,
"learning_rate": 5.2228900056533836e-05,
"loss": 0.561,
"step": 849
},
{
"epoch": 0.8664627930682977,
"grad_norm": 8.695535659790039,
"learning_rate": 5.204065179199931e-05,
"loss": 0.5906,
"step": 850
},
{
"epoch": 0.8674821610601428,
"grad_norm": 5.353935241699219,
"learning_rate": 5.1852374548473614e-05,
"loss": 0.5192,
"step": 851
},
{
"epoch": 0.8685015290519877,
"grad_norm": 10.60522174835205,
"learning_rate": 5.1664070999653766e-05,
"loss": 0.8094,
"step": 852
},
{
"epoch": 0.8695208970438328,
"grad_norm": 3.7188539505004883,
"learning_rate": 5.147574381961032e-05,
"loss": 0.2399,
"step": 853
},
{
"epoch": 0.8705402650356778,
"grad_norm": 5.648993492126465,
"learning_rate": 5.128739568274944e-05,
"loss": 0.4103,
"step": 854
},
{
"epoch": 0.8715596330275229,
"grad_norm": 6.711026668548584,
"learning_rate": 5.109902926377482e-05,
"loss": 0.4969,
"step": 855
},
{
"epoch": 0.872579001019368,
"grad_norm": 5.686347961425781,
"learning_rate": 5.091064723764987e-05,
"loss": 0.37,
"step": 856
},
{
"epoch": 0.873598369011213,
"grad_norm": 4.857931613922119,
"learning_rate": 5.072225227955959e-05,
"loss": 0.4109,
"step": 857
},
{
"epoch": 0.8746177370030581,
"grad_norm": 8.75938606262207,
"learning_rate": 5.053384706487261e-05,
"loss": 0.525,
"step": 858
},
{
"epoch": 0.8756371049949032,
"grad_norm": 5.874378204345703,
"learning_rate": 5.034543426910324e-05,
"loss": 0.5958,
"step": 859
},
{
"epoch": 0.8766564729867482,
"grad_norm": 5.085257530212402,
"learning_rate": 5.0157016567873424e-05,
"loss": 0.4708,
"step": 860
},
{
"epoch": 0.8776758409785933,
"grad_norm": 7.9917707443237305,
"learning_rate": 4.996859663687479e-05,
"loss": 0.6881,
"step": 861
},
{
"epoch": 0.8786952089704383,
"grad_norm": 8.1506929397583,
"learning_rate": 4.9780177151830634e-05,
"loss": 0.5545,
"step": 862
},
{
"epoch": 0.8797145769622834,
"grad_norm": 9.375650405883789,
"learning_rate": 4.959176078845789e-05,
"loss": 0.645,
"step": 863
},
{
"epoch": 0.8807339449541285,
"grad_norm": 4.8143310546875,
"learning_rate": 4.9403350222429184e-05,
"loss": 0.4112,
"step": 864
},
{
"epoch": 0.8817533129459735,
"grad_norm": 7.862481594085693,
"learning_rate": 4.92149481293348e-05,
"loss": 0.4178,
"step": 865
},
{
"epoch": 0.8827726809378186,
"grad_norm": 5.252464771270752,
"learning_rate": 4.902655718464473e-05,
"loss": 0.2857,
"step": 866
},
{
"epoch": 0.8837920489296636,
"grad_norm": 6.06905460357666,
"learning_rate": 4.883818006367062e-05,
"loss": 0.3374,
"step": 867
},
{
"epoch": 0.8848114169215087,
"grad_norm": 6.810131072998047,
"learning_rate": 4.86498194415278e-05,
"loss": 0.5303,
"step": 868
},
{
"epoch": 0.8858307849133538,
"grad_norm": 7.676322937011719,
"learning_rate": 4.846147799309734e-05,
"loss": 0.7438,
"step": 869
},
{
"epoch": 0.8868501529051988,
"grad_norm": 11.570023536682129,
"learning_rate": 4.8273158392987986e-05,
"loss": 1.0872,
"step": 870
},
{
"epoch": 0.8878695208970439,
"grad_norm": 6.312341213226318,
"learning_rate": 4.8084863315498234e-05,
"loss": 0.4497,
"step": 871
},
{
"epoch": 0.8888888888888888,
"grad_norm": 7.389033794403076,
"learning_rate": 4.7896595434578356e-05,
"loss": 0.4171,
"step": 872
},
{
"epoch": 0.8899082568807339,
"grad_norm": 8.600625038146973,
"learning_rate": 4.770835742379239e-05,
"loss": 0.4417,
"step": 873
},
{
"epoch": 0.890927624872579,
"grad_norm": 7.350024223327637,
"learning_rate": 4.7520151956280227e-05,
"loss": 0.7023,
"step": 874
},
{
"epoch": 0.891946992864424,
"grad_norm": 12.617684364318848,
"learning_rate": 4.733198170471953e-05,
"loss": 1.0547,
"step": 875
},
{
"epoch": 0.8929663608562691,
"grad_norm": 5.219171524047852,
"learning_rate": 4.714384934128796e-05,
"loss": 0.3526,
"step": 876
},
{
"epoch": 0.8939857288481141,
"grad_norm": 10.923335075378418,
"learning_rate": 4.6955757537625104e-05,
"loss": 0.7315,
"step": 877
},
{
"epoch": 0.8950050968399592,
"grad_norm": 4.7785325050354,
"learning_rate": 4.6767708964794526e-05,
"loss": 0.4082,
"step": 878
},
{
"epoch": 0.8960244648318043,
"grad_norm": 7.037627696990967,
"learning_rate": 4.6579706293245944e-05,
"loss": 0.8155,
"step": 879
},
{
"epoch": 0.8970438328236493,
"grad_norm": 7.149205207824707,
"learning_rate": 4.6391752192777164e-05,
"loss": 0.5083,
"step": 880
},
{
"epoch": 0.8980632008154944,
"grad_norm": 5.331564426422119,
"learning_rate": 4.620384933249631e-05,
"loss": 0.655,
"step": 881
},
{
"epoch": 0.8990825688073395,
"grad_norm": 10.019486427307129,
"learning_rate": 4.6016000380783805e-05,
"loss": 0.7207,
"step": 882
},
{
"epoch": 0.8990825688073395,
"eval_Qnli-dev-1024_cosine_accuracy": 0.7708333333333334,
"eval_Qnli-dev-1024_cosine_accuracy_threshold": 0.8360349535942078,
"eval_Qnli-dev-1024_cosine_ap": 0.8011558872452826,
"eval_Qnli-dev-1024_cosine_f1": 0.7250000000000001,
"eval_Qnli-dev-1024_cosine_f1_threshold": 0.8360349535942078,
"eval_Qnli-dev-1024_cosine_mcc": 0.5461802806126049,
"eval_Qnli-dev-1024_cosine_precision": 0.8285714285714286,
"eval_Qnli-dev-1024_cosine_recall": 0.6444444444444445,
"eval_Qnli-dev_cosine_accuracy": 0.7291666666666666,
"eval_Qnli-dev_cosine_accuracy_threshold": 0.7521146535873413,
"eval_Qnli-dev_cosine_ap": 0.7712094779135136,
"eval_Qnli-dev_cosine_f1": 0.7500000000000001,
"eval_Qnli-dev_cosine_f1_threshold": 0.6768573522567749,
"eval_Qnli-dev_cosine_mcc": 0.48653004754089046,
"eval_Qnli-dev_cosine_precision": 0.6610169491525424,
"eval_Qnli-dev_cosine_recall": 0.8666666666666667,
"eval_allNLI--triplets-1024_cosine_accuracy": 0.9479166865348816,
"eval_allNLI-triplets_cosine_accuracy": 0.9583333134651184,
"eval_global_dataset_loss": 0.26387155055999756,
"eval_global_dataset_runtime": 103.9177,
"eval_global_dataset_samples_per_second": 7.727,
"eval_global_dataset_steps_per_second": 0.164,
"eval_sequential_score": 0.9479166865348816,
"eval_sts-test-1024_pearson_cosine": 0.8810824372715894,
"eval_sts-test-1024_spearman_cosine": 0.9117642789427417,
"eval_sts-test_pearson_cosine": 0.9044525796924666,
"eval_sts-test_spearman_cosine": 0.9182572042166309,
"step": 882
},
{
"epoch": 0.9001019367991845,
"grad_norm": 7.724752426147461,
"learning_rate": 4.582820800525455e-05,
"loss": 0.4898,
"step": 883
},
{
"epoch": 0.9011213047910296,
"grad_norm": 9.442131042480469,
"learning_rate": 4.564047487272001e-05,
"loss": 0.5506,
"step": 884
},
{
"epoch": 0.9021406727828746,
"grad_norm": 8.832263946533203,
"learning_rate": 4.5452803649150324e-05,
"loss": 0.6206,
"step": 885
},
{
"epoch": 0.9031600407747197,
"grad_norm": 12.987079620361328,
"learning_rate": 4.5265196999636535e-05,
"loss": 1.9669,
"step": 886
},
{
"epoch": 0.9041794087665648,
"grad_norm": 9.050309181213379,
"learning_rate": 4.5077657588352595e-05,
"loss": 0.8493,
"step": 887
},
{
"epoch": 0.9051987767584098,
"grad_norm": 9.642857551574707,
"learning_rate": 4.489018807851769e-05,
"loss": 0.9698,
"step": 888
},
{
"epoch": 0.9062181447502549,
"grad_norm": 7.444589614868164,
"learning_rate": 4.4702791132358314e-05,
"loss": 0.7322,
"step": 889
},
{
"epoch": 0.9072375127420998,
"grad_norm": 9.99152946472168,
"learning_rate": 4.451546941107046e-05,
"loss": 0.484,
"step": 890
},
{
"epoch": 0.908256880733945,
"grad_norm": 6.232360363006592,
"learning_rate": 4.432822557478194e-05,
"loss": 0.5604,
"step": 891
},
{
"epoch": 0.90927624872579,
"grad_norm": 3.1541106700897217,
"learning_rate": 4.414106228251446e-05,
"loss": 0.2633,
"step": 892
},
{
"epoch": 0.910295616717635,
"grad_norm": 5.661106109619141,
"learning_rate": 4.3953982192146006e-05,
"loss": 0.2417,
"step": 893
},
{
"epoch": 0.9113149847094801,
"grad_norm": 4.497067451477051,
"learning_rate": 4.3766987960372956e-05,
"loss": 0.4481,
"step": 894
},
{
"epoch": 0.9123343527013251,
"grad_norm": 8.505694389343262,
"learning_rate": 4.358008224267245e-05,
"loss": 0.7402,
"step": 895
},
{
"epoch": 0.9133537206931702,
"grad_norm": 5.820054054260254,
"learning_rate": 4.3393267693264686e-05,
"loss": 0.4897,
"step": 896
},
{
"epoch": 0.9143730886850153,
"grad_norm": 7.943095684051514,
"learning_rate": 4.320654696507511e-05,
"loss": 0.5863,
"step": 897
},
{
"epoch": 0.9153924566768603,
"grad_norm": 10.6437349319458,
"learning_rate": 4.301992270969692e-05,
"loss": 0.7101,
"step": 898
},
{
"epoch": 0.9164118246687054,
"grad_norm": 3.8055593967437744,
"learning_rate": 4.2833397577353284e-05,
"loss": 0.2404,
"step": 899
},
{
"epoch": 0.9174311926605505,
"grad_norm": 8.539854049682617,
"learning_rate": 4.26469742168597e-05,
"loss": 0.5594,
"step": 900
},
{
"epoch": 0.9184505606523955,
"grad_norm": 5.611748218536377,
"learning_rate": 4.2460655275586494e-05,
"loss": 0.4047,
"step": 901
},
{
"epoch": 0.9194699286442406,
"grad_norm": 4.898343086242676,
"learning_rate": 4.227444339942107e-05,
"loss": 0.4865,
"step": 902
},
{
"epoch": 0.9204892966360856,
"grad_norm": 8.28711986541748,
"learning_rate": 4.208834123273047e-05,
"loss": 0.3909,
"step": 903
},
{
"epoch": 0.9215086646279307,
"grad_norm": 6.98935604095459,
"learning_rate": 4.190235141832375e-05,
"loss": 0.2808,
"step": 904
},
{
"epoch": 0.9225280326197758,
"grad_norm": 9.016980171203613,
"learning_rate": 4.171647659741448e-05,
"loss": 0.7509,
"step": 905
},
{
"epoch": 0.9235474006116208,
"grad_norm": 5.859550476074219,
"learning_rate": 4.153071940958321e-05,
"loss": 0.325,
"step": 906
},
{
"epoch": 0.9245667686034659,
"grad_norm": 7.970040321350098,
"learning_rate": 4.134508249274002e-05,
"loss": 0.5335,
"step": 907
},
{
"epoch": 0.9255861365953109,
"grad_norm": 6.2324981689453125,
"learning_rate": 4.1159568483087e-05,
"loss": 0.6193,
"step": 908
},
{
"epoch": 0.926605504587156,
"grad_norm": 5.227268218994141,
"learning_rate": 4.0974180015080897e-05,
"loss": 0.2974,
"step": 909
},
{
"epoch": 0.9276248725790011,
"grad_norm": 9.293944358825684,
"learning_rate": 4.078891972139564e-05,
"loss": 0.6725,
"step": 910
},
{
"epoch": 0.928644240570846,
"grad_norm": 10.003561019897461,
"learning_rate": 4.060379023288495e-05,
"loss": 0.8828,
"step": 911
},
{
"epoch": 0.9296636085626911,
"grad_norm": 9.07729721069336,
"learning_rate": 4.0418794178545076e-05,
"loss": 0.8751,
"step": 912
},
{
"epoch": 0.9306829765545361,
"grad_norm": 7.200821876525879,
"learning_rate": 4.023393418547732e-05,
"loss": 0.7019,
"step": 913
},
{
"epoch": 0.9317023445463812,
"grad_norm": 10.154699325561523,
"learning_rate": 4.0049212878850793e-05,
"loss": 0.7131,
"step": 914
},
{
"epoch": 0.9327217125382263,
"grad_norm": 7.271543025970459,
"learning_rate": 3.98646328818652e-05,
"loss": 0.2849,
"step": 915
},
{
"epoch": 0.9337410805300713,
"grad_norm": 9.933566093444824,
"learning_rate": 3.96801968157135e-05,
"loss": 0.8097,
"step": 916
},
{
"epoch": 0.9347604485219164,
"grad_norm": 5.370792865753174,
"learning_rate": 3.949590729954467e-05,
"loss": 0.3447,
"step": 917
},
{
"epoch": 0.9357798165137615,
"grad_norm": 8.846680641174316,
"learning_rate": 3.931176695042664e-05,
"loss": 0.8601,
"step": 918
},
{
"epoch": 0.9367991845056065,
"grad_norm": 5.936051368713379,
"learning_rate": 3.912777838330893e-05,
"loss": 0.4467,
"step": 919
},
{
"epoch": 0.9378185524974516,
"grad_norm": 10.40077018737793,
"learning_rate": 3.8943944210985735e-05,
"loss": 0.8137,
"step": 920
},
{
"epoch": 0.9388379204892966,
"grad_norm": 7.319591999053955,
"learning_rate": 3.876026704405866e-05,
"loss": 0.4527,
"step": 921
},
{
"epoch": 0.9398572884811417,
"grad_norm": 8.947883605957031,
"learning_rate": 3.8576749490899686e-05,
"loss": 0.7656,
"step": 922
},
{
"epoch": 0.9408766564729868,
"grad_norm": 10.776662826538086,
"learning_rate": 3.839339415761416e-05,
"loss": 1.1218,
"step": 923
},
{
"epoch": 0.9418960244648318,
"grad_norm": 2.9248359203338623,
"learning_rate": 3.821020364800379e-05,
"loss": 0.188,
"step": 924
},
{
"epoch": 0.9429153924566769,
"grad_norm": 9.73752212524414,
"learning_rate": 3.8027180563529616e-05,
"loss": 0.8454,
"step": 925
},
{
"epoch": 0.9439347604485219,
"grad_norm": 6.643280506134033,
"learning_rate": 3.7844327503275136e-05,
"loss": 0.5368,
"step": 926
},
{
"epoch": 0.944954128440367,
"grad_norm": 9.299040794372559,
"learning_rate": 3.7661647063909294e-05,
"loss": 0.7602,
"step": 927
},
{
"epoch": 0.9459734964322121,
"grad_norm": 6.660792827606201,
"learning_rate": 3.747914183964974e-05,
"loss": 0.4733,
"step": 928
},
{
"epoch": 0.9469928644240571,
"grad_norm": 5.206737995147705,
"learning_rate": 3.729681442222587e-05,
"loss": 0.2305,
"step": 929
},
{
"epoch": 0.9480122324159022,
"grad_norm": 9.746971130371094,
"learning_rate": 3.711466740084211e-05,
"loss": 0.7775,
"step": 930
},
{
"epoch": 0.9490316004077471,
"grad_norm": 9.825338363647461,
"learning_rate": 3.6932703362141084e-05,
"loss": 0.8859,
"step": 931
},
{
"epoch": 0.9500509683995922,
"grad_norm": 7.335731506347656,
"learning_rate": 3.6750924890166914e-05,
"loss": 0.3918,
"step": 932
},
{
"epoch": 0.9510703363914373,
"grad_norm": 6.4724931716918945,
"learning_rate": 3.656933456632853e-05,
"loss": 0.3842,
"step": 933
},
{
"epoch": 0.9520897043832823,
"grad_norm": 4.886312484741211,
"learning_rate": 3.638793496936296e-05,
"loss": 0.3719,
"step": 934
},
{
"epoch": 0.9531090723751274,
"grad_norm": 8.522834777832031,
"learning_rate": 3.620672867529878e-05,
"loss": 0.8043,
"step": 935
},
{
"epoch": 0.9541284403669725,
"grad_norm": 9.507696151733398,
"learning_rate": 3.602571825741953e-05,
"loss": 0.8282,
"step": 936
},
{
"epoch": 0.9551478083588175,
"grad_norm": 4.895750045776367,
"learning_rate": 3.584490628622705e-05,
"loss": 0.4599,
"step": 937
},
{
"epoch": 0.9561671763506626,
"grad_norm": 7.197470664978027,
"learning_rate": 3.566429532940518e-05,
"loss": 0.649,
"step": 938
},
{
"epoch": 0.9571865443425076,
"grad_norm": 6.60915470123291,
"learning_rate": 3.548388795178307e-05,
"loss": 0.4325,
"step": 939
},
{
"epoch": 0.9582059123343527,
"grad_norm": 10.626359939575195,
"learning_rate": 3.5303686715298955e-05,
"loss": 1.3108,
"step": 940
},
{
"epoch": 0.9592252803261978,
"grad_norm": 6.316555023193359,
"learning_rate": 3.51236941789637e-05,
"loss": 0.3018,
"step": 941
},
{
"epoch": 0.9602446483180428,
"grad_norm": 7.12025785446167,
"learning_rate": 3.494391289882435e-05,
"loss": 0.6258,
"step": 942
},
{
"epoch": 0.9612640163098879,
"grad_norm": 10.008544921875,
"learning_rate": 3.476434542792805e-05,
"loss": 1.2266,
"step": 943
},
{
"epoch": 0.9622833843017329,
"grad_norm": 8.917716979980469,
"learning_rate": 3.4584994316285604e-05,
"loss": 0.6593,
"step": 944
},
{
"epoch": 0.963302752293578,
"grad_norm": 5.837446689605713,
"learning_rate": 3.4405862110835364e-05,
"loss": 0.3096,
"step": 945
},
{
"epoch": 0.9643221202854231,
"grad_norm": 4.312796115875244,
"learning_rate": 3.422695135540697e-05,
"loss": 0.3436,
"step": 946
},
{
"epoch": 0.9653414882772681,
"grad_norm": 4.772927284240723,
"learning_rate": 3.404826459068536e-05,
"loss": 0.2497,
"step": 947
},
{
"epoch": 0.9663608562691132,
"grad_norm": 3.3676137924194336,
"learning_rate": 3.386980435417457e-05,
"loss": 0.1653,
"step": 948
},
{
"epoch": 0.9673802242609582,
"grad_norm": 6.203863143920898,
"learning_rate": 3.369157318016176e-05,
"loss": 0.469,
"step": 949
},
{
"epoch": 0.9683995922528033,
"grad_norm": 7.628493309020996,
"learning_rate": 3.351357359968117e-05,
"loss": 0.4919,
"step": 950
},
{
"epoch": 0.9694189602446484,
"grad_norm": 7.940287113189697,
"learning_rate": 3.333580814047826e-05,
"loss": 0.4788,
"step": 951
},
{
"epoch": 0.9704383282364933,
"grad_norm": 6.046499729156494,
"learning_rate": 3.3158279326973766e-05,
"loss": 0.3041,
"step": 952
},
{
"epoch": 0.9714576962283384,
"grad_norm": 4.314492225646973,
"learning_rate": 3.298098968022782e-05,
"loss": 0.3138,
"step": 953
},
{
"epoch": 0.9724770642201835,
"grad_norm": 8.91407585144043,
"learning_rate": 3.2803941717904216e-05,
"loss": 0.7758,
"step": 954
},
{
"epoch": 0.9734964322120285,
"grad_norm": 11.913896560668945,
"learning_rate": 3.26271379542346e-05,
"loss": 0.6974,
"step": 955
},
{
"epoch": 0.9745158002038736,
"grad_norm": 4.831221580505371,
"learning_rate": 3.2450580899982795e-05,
"loss": 0.2964,
"step": 956
},
{
"epoch": 0.9755351681957186,
"grad_norm": 6.116502285003662,
"learning_rate": 3.2274273062409154e-05,
"loss": 0.3473,
"step": 957
},
{
"epoch": 0.9765545361875637,
"grad_norm": 11.75236988067627,
"learning_rate": 3.2098216945234946e-05,
"loss": 0.8905,
"step": 958
},
{
"epoch": 0.9775739041794088,
"grad_norm": 3.468975067138672,
"learning_rate": 3.192241504860675e-05,
"loss": 0.2521,
"step": 959
},
{
"epoch": 0.9785932721712538,
"grad_norm": 7.624709606170654,
"learning_rate": 3.1746869869061063e-05,
"loss": 0.4462,
"step": 960
},
{
"epoch": 0.9796126401630989,
"grad_norm": 9.019265174865723,
"learning_rate": 3.157158389948871e-05,
"loss": 0.7842,
"step": 961
},
{
"epoch": 0.9806320081549439,
"grad_norm": 4.77131986618042,
"learning_rate": 3.1396559629099574e-05,
"loss": 0.2973,
"step": 962
},
{
"epoch": 0.981651376146789,
"grad_norm": 8.40596866607666,
"learning_rate": 3.122179954338716e-05,
"loss": 0.6026,
"step": 963
},
{
"epoch": 0.9826707441386341,
"grad_norm": 6.705322265625,
"learning_rate": 3.1047306124093335e-05,
"loss": 0.4026,
"step": 964
},
{
"epoch": 0.9836901121304791,
"grad_norm": 10.35732364654541,
"learning_rate": 3.087308184917308e-05,
"loss": 0.9181,
"step": 965
},
{
"epoch": 0.9847094801223242,
"grad_norm": 6.806704998016357,
"learning_rate": 3.069912919275926e-05,
"loss": 0.473,
"step": 966
},
{
"epoch": 0.9857288481141692,
"grad_norm": 10.28345012664795,
"learning_rate": 3.0525450625127575e-05,
"loss": 0.7152,
"step": 967
},
{
"epoch": 0.9867482161060143,
"grad_norm": 11.785171508789062,
"learning_rate": 3.0352048612661416e-05,
"loss": 0.9519,
"step": 968
},
{
"epoch": 0.9877675840978594,
"grad_norm": 8.55274772644043,
"learning_rate": 3.017892561781682e-05,
"loss": 0.5322,
"step": 969
},
{
"epoch": 0.9887869520897044,
"grad_norm": 8.597644805908203,
"learning_rate": 3.0006084099087595e-05,
"loss": 0.8257,
"step": 970
},
{
"epoch": 0.9898063200815495,
"grad_norm": 6.743808746337891,
"learning_rate": 2.983352651097031e-05,
"loss": 0.5648,
"step": 971
},
{
"epoch": 0.9908256880733946,
"grad_norm": 10.981080055236816,
"learning_rate": 2.9661255303929486e-05,
"loss": 0.909,
"step": 972
}
],
"logging_steps": 1,
"max_steps": 2943,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 972,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}