{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.6047904191616766, |
|
"eval_steps": 500, |
|
"global_step": 303, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.001996007984031936, |
|
"grad_norm": 3.4413226850733536, |
|
"learning_rate": 5.88235294117647e-09, |
|
"logits/chosen": -0.2126132994890213, |
|
"logits/rejected": -0.06049485132098198, |
|
"logps/chosen": -312.6929931640625, |
|
"logps/rejected": -303.399658203125, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.00998003992015968, |
|
"grad_norm": 3.5626484310109503, |
|
"learning_rate": 2.941176470588235e-08, |
|
"logits/chosen": -0.15043149888515472, |
|
"logits/rejected": -0.05884423851966858, |
|
"logps/chosen": -271.6809387207031, |
|
"logps/rejected": -308.14019775390625, |
|
"loss": 0.6932, |
|
"rewards/accuracies": 0.34375, |
|
"rewards/chosen": 0.00032658452983014286, |
|
"rewards/margins": -0.00044720349251292646, |
|
"rewards/rejected": 0.0007737880805507302, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.01996007984031936, |
|
"grad_norm": 3.444766226961807, |
|
"learning_rate": 5.88235294117647e-08, |
|
"logits/chosen": -0.04885115846991539, |
|
"logits/rejected": 0.02876092866063118, |
|
"logps/chosen": -314.44207763671875, |
|
"logps/rejected": -324.5089111328125, |
|
"loss": 0.6935, |
|
"rewards/accuracies": 0.4124999940395355, |
|
"rewards/chosen": -0.0015638660406693816, |
|
"rewards/margins": -0.0018882350996136665, |
|
"rewards/rejected": 0.0003243688843213022, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.029940119760479042, |
|
"grad_norm": 3.351776759283806, |
|
"learning_rate": 8.823529411764706e-08, |
|
"logits/chosen": -0.14645501971244812, |
|
"logits/rejected": -0.10253484547138214, |
|
"logps/chosen": -325.3197021484375, |
|
"logps/rejected": -335.1691589355469, |
|
"loss": 0.6937, |
|
"rewards/accuracies": 0.3375000059604645, |
|
"rewards/chosen": -0.0009325200808234513, |
|
"rewards/margins": -0.0027469159103929996, |
|
"rewards/rejected": 0.0018143958877772093, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.03992015968063872, |
|
"grad_norm": 3.2486760323756765, |
|
"learning_rate": 1.176470588235294e-07, |
|
"logits/chosen": -0.17513605952262878, |
|
"logits/rejected": -0.14125120639801025, |
|
"logps/chosen": -236.529052734375, |
|
"logps/rejected": -224.99609375, |
|
"loss": 0.6932, |
|
"rewards/accuracies": 0.44999998807907104, |
|
"rewards/chosen": -0.00048039742978289723, |
|
"rewards/margins": -0.0017443184042349458, |
|
"rewards/rejected": 0.0012639210326597095, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.0499001996007984, |
|
"grad_norm": 3.682895506825886, |
|
"learning_rate": 1.4705882352941175e-07, |
|
"logits/chosen": 0.02582230605185032, |
|
"logits/rejected": -0.026322145015001297, |
|
"logps/chosen": -276.4084777832031, |
|
"logps/rejected": -271.5510559082031, |
|
"loss": 0.6928, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": -0.00010870690312003717, |
|
"rewards/margins": -2.2810349037172273e-05, |
|
"rewards/rejected": -8.589646313339472e-05, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.059880239520958084, |
|
"grad_norm": 3.521568498969969, |
|
"learning_rate": 1.764705882352941e-07, |
|
"logits/chosen": -0.029633093625307083, |
|
"logits/rejected": 0.017462745308876038, |
|
"logps/chosen": -241.04345703125, |
|
"logps/rejected": -254.0708465576172, |
|
"loss": 0.6929, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": 0.001156697398982942, |
|
"rewards/margins": 0.0012088593794032931, |
|
"rewards/rejected": -5.2162260544719175e-05, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.06986027944111776, |
|
"grad_norm": 3.4756100770970852, |
|
"learning_rate": 2.0588235294117647e-07, |
|
"logits/chosen": -0.09809896349906921, |
|
"logits/rejected": -0.12783308327198029, |
|
"logps/chosen": -316.04541015625, |
|
"logps/rejected": -303.2275085449219, |
|
"loss": 0.6926, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.0019062187056988478, |
|
"rewards/margins": 0.0034712664783000946, |
|
"rewards/rejected": -0.0015650471905246377, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.07984031936127745, |
|
"grad_norm": 3.5251485554920383, |
|
"learning_rate": 2.352941176470588e-07, |
|
"logits/chosen": -0.10866469144821167, |
|
"logits/rejected": -0.08842261880636215, |
|
"logps/chosen": -300.94866943359375, |
|
"logps/rejected": -301.57635498046875, |
|
"loss": 0.6927, |
|
"rewards/accuracies": 0.48750001192092896, |
|
"rewards/chosen": 0.0005061564734205604, |
|
"rewards/margins": 0.0017319575417786837, |
|
"rewards/rejected": -0.0012258009519428015, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.08982035928143713, |
|
"grad_norm": 3.562421126179782, |
|
"learning_rate": 2.6470588235294114e-07, |
|
"logits/chosen": -0.04412439838051796, |
|
"logits/rejected": 0.046757772564888, |
|
"logps/chosen": -276.1053771972656, |
|
"logps/rejected": -299.3190612792969, |
|
"loss": 0.692, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": -0.002857887651771307, |
|
"rewards/margins": 0.0004688422195613384, |
|
"rewards/rejected": -0.003326730104163289, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.0998003992015968, |
|
"grad_norm": 3.174201376940309, |
|
"learning_rate": 2.941176470588235e-07, |
|
"logits/chosen": -0.08824320137500763, |
|
"logits/rejected": -0.043245576322078705, |
|
"logps/chosen": -271.13604736328125, |
|
"logps/rejected": -272.74102783203125, |
|
"loss": 0.6914, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.0016361953457817435, |
|
"rewards/margins": 0.005965009797364473, |
|
"rewards/rejected": -0.007601206190884113, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.10978043912175649, |
|
"grad_norm": 3.3426951285606368, |
|
"learning_rate": 2.999415172560531e-07, |
|
"logits/chosen": 0.032454706728458405, |
|
"logits/rejected": 0.15314094722270966, |
|
"logps/chosen": -301.4625244140625, |
|
"logps/rejected": -304.8877258300781, |
|
"loss": 0.691, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.007659287191927433, |
|
"rewards/margins": 0.0023522169794887304, |
|
"rewards/rejected": -0.010011503472924232, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.11976047904191617, |
|
"grad_norm": 3.2266501701625283, |
|
"learning_rate": 2.997040092642407e-07, |
|
"logits/chosen": -0.13825412094593048, |
|
"logits/rejected": -0.06802596896886826, |
|
"logps/chosen": -273.85198974609375, |
|
"logps/rejected": -256.1399841308594, |
|
"loss": 0.6905, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.0074735768139362335, |
|
"rewards/margins": 0.005476811900734901, |
|
"rewards/rejected": -0.012950388714671135, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.12974051896207583, |
|
"grad_norm": 3.129826931598691, |
|
"learning_rate": 2.9928410999727467e-07, |
|
"logits/chosen": -0.03806266561150551, |
|
"logits/rejected": -0.06114925071597099, |
|
"logps/chosen": -273.815673828125, |
|
"logps/rejected": -257.2196350097656, |
|
"loss": 0.6897, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.010186719708144665, |
|
"rewards/margins": 0.00584446731954813, |
|
"rewards/rejected": -0.016031187027692795, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.13972055888223553, |
|
"grad_norm": 3.2696148017727764, |
|
"learning_rate": 2.9868233103773125e-07, |
|
"logits/chosen": -0.19126296043395996, |
|
"logits/rejected": -0.1384088099002838, |
|
"logps/chosen": -289.3074645996094, |
|
"logps/rejected": -304.941650390625, |
|
"loss": 0.6901, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.012994937598705292, |
|
"rewards/margins": 0.006138815078884363, |
|
"rewards/rejected": -0.019133752211928368, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.1497005988023952, |
|
"grad_norm": 3.158441752144902, |
|
"learning_rate": 2.978994055605757e-07, |
|
"logits/chosen": -0.3353852927684784, |
|
"logits/rejected": -0.20957405865192413, |
|
"logps/chosen": -282.4247131347656, |
|
"logps/rejected": -297.1376647949219, |
|
"loss": 0.6887, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.013628537766635418, |
|
"rewards/margins": 0.012240052223205566, |
|
"rewards/rejected": -0.02586858905851841, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.1596806387225549, |
|
"grad_norm": 3.2922551208958932, |
|
"learning_rate": 2.9693628743990157e-07, |
|
"logits/chosen": -0.2210439145565033, |
|
"logits/rejected": -0.13906380534172058, |
|
"logps/chosen": -286.2383728027344, |
|
"logps/rejected": -284.0565490722656, |
|
"loss": 0.6885, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": -0.012788718566298485, |
|
"rewards/margins": 0.01518750749528408, |
|
"rewards/rejected": -0.027976226061582565, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.16966067864271456, |
|
"grad_norm": 3.143746580193799, |
|
"learning_rate": 2.9579415008678196e-07, |
|
"logits/chosen": 0.061027251183986664, |
|
"logits/rejected": 0.020937766879796982, |
|
"logps/chosen": -261.85797119140625, |
|
"logps/rejected": -245.4117889404297, |
|
"loss": 0.6879, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.020817553624510765, |
|
"rewards/margins": 0.01146668754518032, |
|
"rewards/rejected": -0.032284241169691086, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.17964071856287425, |
|
"grad_norm": 3.1363791165769634, |
|
"learning_rate": 2.944743850196487e-07, |
|
"logits/chosen": -0.18295350670814514, |
|
"logits/rejected": -0.138901025056839, |
|
"logps/chosen": -302.30377197265625, |
|
"logps/rejected": -297.4572448730469, |
|
"loss": 0.6867, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.022789286449551582, |
|
"rewards/margins": 0.014456982724368572, |
|
"rewards/rejected": -0.03724626824259758, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.18962075848303392, |
|
"grad_norm": 3.375624228841244, |
|
"learning_rate": 2.92978600168942e-07, |
|
"logits/chosen": -0.21953599154949188, |
|
"logits/rejected": -0.19249823689460754, |
|
"logps/chosen": -282.0645446777344, |
|
"logps/rejected": -293.2350158691406, |
|
"loss": 0.6865, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": -0.028345201164484024, |
|
"rewards/margins": 0.015181821770966053, |
|
"rewards/rejected": -0.0435270220041275, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.1996007984031936, |
|
"grad_norm": 3.4004680540887673, |
|
"learning_rate": 2.913086179180945e-07, |
|
"logits/chosen": -0.26934295892715454, |
|
"logits/rejected": -0.24918439984321594, |
|
"logps/chosen": -267.2822570800781, |
|
"logps/rejected": -273.8163757324219, |
|
"loss": 0.6847, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.02366602048277855, |
|
"rewards/margins": 0.018390221521258354, |
|
"rewards/rejected": -0.042056240141391754, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.20958083832335328, |
|
"grad_norm": 3.348944145808141, |
|
"learning_rate": 2.894664728832377e-07, |
|
"logits/chosen": -0.12691783905029297, |
|
"logits/rejected": -0.10880570113658905, |
|
"logps/chosen": -306.7061462402344, |
|
"logps/rejected": -296.7961120605469, |
|
"loss": 0.684, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.02831300161778927, |
|
"rewards/margins": 0.024948222562670708, |
|
"rewards/rejected": -0.053261227905750275, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.21956087824351297, |
|
"grad_norm": 3.3068624575557677, |
|
"learning_rate": 2.8745440943433595e-07, |
|
"logits/chosen": -0.39724451303482056, |
|
"logits/rejected": -0.3233721852302551, |
|
"logps/chosen": -299.71356201171875, |
|
"logps/rejected": -307.81011962890625, |
|
"loss": 0.6826, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.037553686648607254, |
|
"rewards/margins": 0.01620485447347164, |
|
"rewards/rejected": -0.053758542984724045, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.22954091816367264, |
|
"grad_norm": 3.5399975333931057, |
|
"learning_rate": 2.8527487896076705e-07, |
|
"logits/chosen": -0.2971861958503723, |
|
"logits/rejected": -0.26923665404319763, |
|
"logps/chosen": -323.73760986328125, |
|
"logps/rejected": -317.88238525390625, |
|
"loss": 0.6812, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.03361167758703232, |
|
"rewards/margins": 0.025083836168050766, |
|
"rewards/rejected": -0.058695513755083084, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.23952095808383234, |
|
"grad_norm": 3.387251098081717, |
|
"learning_rate": 2.829305368846822e-07, |
|
"logits/chosen": -0.08930722624063492, |
|
"logits/rejected": -0.093043752014637, |
|
"logps/chosen": -298.3551330566406, |
|
"logps/rejected": -301.565185546875, |
|
"loss": 0.6807, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.04334830120205879, |
|
"rewards/margins": 0.02416048012673855, |
|
"rewards/rejected": -0.06750877946615219, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.249500998003992, |
|
"grad_norm": 3.458206539528906, |
|
"learning_rate": 2.8042423942578286e-07, |
|
"logits/chosen": -0.06799415498971939, |
|
"logits/rejected": 0.05015769600868225, |
|
"logps/chosen": -252.72793579101562, |
|
"logps/rejected": -279.40631103515625, |
|
"loss": 0.6788, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": -0.0386417880654335, |
|
"rewards/margins": 0.03730592131614685, |
|
"rewards/rejected": -0.07594770938158035, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.25948103792415167, |
|
"grad_norm": 3.4555138461844757, |
|
"learning_rate": 2.7775904012145714e-07, |
|
"logits/chosen": -0.24486279487609863, |
|
"logits/rejected": -0.1943987011909485, |
|
"logps/chosen": -326.3341369628906, |
|
"logps/rejected": -326.3017272949219, |
|
"loss": 0.6792, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.04846924915909767, |
|
"rewards/margins": 0.03331124037504196, |
|
"rewards/rejected": -0.08178049325942993, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.2694610778443114, |
|
"grad_norm": 3.4103149118409104, |
|
"learning_rate": 2.749381861065149e-07, |
|
"logits/chosen": -0.11615544557571411, |
|
"logits/rejected": -0.02477203868329525, |
|
"logps/chosen": -281.5689697265625, |
|
"logps/rejected": -289.6168518066406, |
|
"loss": 0.6756, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.04677049443125725, |
|
"rewards/margins": 0.03843165561556816, |
|
"rewards/rejected": -0.085202157497406, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.27944111776447106, |
|
"grad_norm": 3.3294732309430946, |
|
"learning_rate": 2.719651141570541e-07, |
|
"logits/chosen": -0.07356293499469757, |
|
"logits/rejected": 0.01894739270210266, |
|
"logps/chosen": -295.6517333984375, |
|
"logps/rejected": -294.22064208984375, |
|
"loss": 0.6733, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.04726024717092514, |
|
"rewards/margins": 0.05050128698348999, |
|
"rewards/rejected": -0.09776153415441513, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.2894211576846307, |
|
"grad_norm": 3.335787112423438, |
|
"learning_rate": 2.6884344650327857e-07, |
|
"logits/chosen": -0.3144836127758026, |
|
"logits/rejected": -0.2471332997083664, |
|
"logps/chosen": -275.6163635253906, |
|
"logps/rejected": -283.7267150878906, |
|
"loss": 0.6741, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.06026310846209526, |
|
"rewards/margins": 0.03206663206219673, |
|
"rewards/rejected": -0.0923297330737114, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.2994011976047904, |
|
"grad_norm": 3.351780879830391, |
|
"learning_rate": 2.6557698641636836e-07, |
|
"logits/chosen": -0.09479612112045288, |
|
"logits/rejected": -0.05155174061655998, |
|
"logps/chosen": -273.46527099609375, |
|
"logps/rejected": -285.8160095214844, |
|
"loss": 0.6719, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.057442475110292435, |
|
"rewards/margins": 0.04023780673742294, |
|
"rewards/rejected": -0.09768027812242508, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3093812375249501, |
|
"grad_norm": 3.3953812787691744, |
|
"learning_rate": 2.6216971357477977e-07, |
|
"logits/chosen": -0.20542898774147034, |
|
"logits/rejected": -0.16760793328285217, |
|
"logps/chosen": -319.43438720703125, |
|
"logps/rejected": -337.89678955078125, |
|
"loss": 0.6677, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.0877736285328865, |
|
"rewards/margins": 0.05791449546813965, |
|
"rewards/rejected": -0.14568811655044556, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.3193612774451098, |
|
"grad_norm": 3.7295101426966957, |
|
"learning_rate": 2.5862577921562015e-07, |
|
"logits/chosen": -0.26064497232437134, |
|
"logits/rejected": -0.1645033359527588, |
|
"logps/chosen": -292.02484130859375, |
|
"logps/rejected": -300.4161376953125, |
|
"loss": 0.665, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.08468987792730331, |
|
"rewards/margins": 0.062365107238292694, |
|
"rewards/rejected": -0.1470550000667572, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.32934131736526945, |
|
"grad_norm": 3.5278102635310775, |
|
"learning_rate": 2.549495010770048e-07, |
|
"logits/chosen": -0.2758394777774811, |
|
"logits/rejected": -0.20927894115447998, |
|
"logps/chosen": -288.8243713378906, |
|
"logps/rejected": -291.8514404296875, |
|
"loss": 0.6655, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.10788210481405258, |
|
"rewards/margins": 0.06151789426803589, |
|
"rewards/rejected": -0.16940000653266907, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.3393213572854291, |
|
"grad_norm": 3.512512373193113, |
|
"learning_rate": 2.511453581375585e-07, |
|
"logits/chosen": -0.39803841710090637, |
|
"logits/rejected": -0.3213597238063812, |
|
"logps/chosen": -258.41326904296875, |
|
"logps/rejected": -263.5000915527344, |
|
"loss": 0.6625, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.10974812507629395, |
|
"rewards/margins": 0.04202640801668167, |
|
"rewards/rejected": -0.15177452564239502, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.34930139720558884, |
|
"grad_norm": 3.7824536020137383, |
|
"learning_rate": 2.4721798515946963e-07, |
|
"logits/chosen": -0.2360338717699051, |
|
"logits/rejected": -0.17899933457374573, |
|
"logps/chosen": -274.8690185546875, |
|
"logps/rejected": -291.98419189453125, |
|
"loss": 0.6587, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.1216902956366539, |
|
"rewards/margins": 0.0732160434126854, |
|
"rewards/rejected": -0.1949063539505005, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.3592814371257485, |
|
"grad_norm": 3.6570477679381033, |
|
"learning_rate": 2.4317216704174655e-07, |
|
"logits/chosen": -0.2678098976612091, |
|
"logits/rejected": -0.22654588520526886, |
|
"logps/chosen": -285.76885986328125, |
|
"logps/rejected": -300.3975524902344, |
|
"loss": 0.6575, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.13574646413326263, |
|
"rewards/margins": 0.06750482320785522, |
|
"rewards/rejected": -0.20325128734111786, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.36926147704590817, |
|
"grad_norm": 3.705774667801494, |
|
"learning_rate": 2.3901283299055524e-07, |
|
"logits/chosen": -0.4732998013496399, |
|
"logits/rejected": -0.39881661534309387, |
|
"logps/chosen": -321.14984130859375, |
|
"logps/rejected": -326.15032958984375, |
|
"loss": 0.6514, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.17002463340759277, |
|
"rewards/margins": 0.08681143075227737, |
|
"rewards/rejected": -0.25683605670928955, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.37924151696606784, |
|
"grad_norm": 3.4125215861302176, |
|
"learning_rate": 2.3474505051374067e-07, |
|
"logits/chosen": -0.34785977005958557, |
|
"logits/rejected": -0.24581901729106903, |
|
"logps/chosen": -323.81732177734375, |
|
"logps/rejected": -328.8360900878906, |
|
"loss": 0.6496, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.20683076977729797, |
|
"rewards/margins": 0.07259462773799896, |
|
"rewards/rejected": -0.2794254422187805, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.38922155688622756, |
|
"grad_norm": 3.6035947439545715, |
|
"learning_rate": 2.3037401924684947e-07, |
|
"logits/chosen": -0.2815292775630951, |
|
"logits/rejected": -0.2620851397514343, |
|
"logps/chosen": -319.5675048828125, |
|
"logps/rejected": -332.9228210449219, |
|
"loss": 0.6501, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.2412305772304535, |
|
"rewards/margins": 0.09057402610778809, |
|
"rewards/rejected": -0.3318046033382416, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.3992015968063872, |
|
"grad_norm": 3.6822468215438624, |
|
"learning_rate": 2.2590506461817452e-07, |
|
"logits/chosen": -0.259944349527359, |
|
"logits/rejected": -0.20725861191749573, |
|
"logps/chosen": -298.30303955078125, |
|
"logps/rejected": -305.5474548339844, |
|
"loss": 0.6419, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.24381069839000702, |
|
"rewards/margins": 0.11996382474899292, |
|
"rewards/rejected": -0.36377447843551636, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.4091816367265469, |
|
"grad_norm": 3.7132773714988927, |
|
"learning_rate": 2.213436313605413e-07, |
|
"logits/chosen": -0.5560285449028015, |
|
"logits/rejected": -0.49163347482681274, |
|
"logps/chosen": -387.44171142578125, |
|
"logps/rejected": -402.449462890625, |
|
"loss": 0.6434, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.3169836699962616, |
|
"rewards/margins": 0.07618891447782516, |
|
"rewards/rejected": -0.39317259192466736, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.41916167664670656, |
|
"grad_norm": 3.775451248470663, |
|
"learning_rate": 2.166952768777391e-07, |
|
"logits/chosen": -0.2941240966320038, |
|
"logits/rejected": -0.3131045997142792, |
|
"logps/chosen": -303.090087890625, |
|
"logps/rejected": -298.85699462890625, |
|
"loss": 0.6404, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.2724608778953552, |
|
"rewards/margins": 0.1073354110121727, |
|
"rewards/rejected": -0.3797963261604309, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.4291417165668663, |
|
"grad_norm": 3.7925188623151422, |
|
"learning_rate": 2.1196566447368126e-07, |
|
"logits/chosen": -0.26538053154945374, |
|
"logits/rejected": -0.25145062804222107, |
|
"logps/chosen": -334.455078125, |
|
"logps/rejected": -356.2786865234375, |
|
"loss": 0.6355, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.34816819429397583, |
|
"rewards/margins": 0.12635044753551483, |
|
"rewards/rejected": -0.4745185971260071, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.43912175648702595, |
|
"grad_norm": 3.8981243362393565, |
|
"learning_rate": 2.0716055645254113e-07, |
|
"logits/chosen": -0.46777939796447754, |
|
"logits/rejected": -0.4300519824028015, |
|
"logps/chosen": -325.1937255859375, |
|
"logps/rejected": -333.54022216796875, |
|
"loss": 0.641, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.4105430245399475, |
|
"rewards/margins": 0.10119415819644928, |
|
"rewards/rejected": -0.5117372274398804, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.4491017964071856, |
|
"grad_norm": 3.7392987472200354, |
|
"learning_rate": 2.0228580709827226e-07, |
|
"logits/chosen": -0.5368104577064514, |
|
"logits/rejected": -0.45491117238998413, |
|
"logps/chosen": -349.7928771972656, |
|
"logps/rejected": -371.95050048828125, |
|
"loss": 0.6326, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.3743346631526947, |
|
"rewards/margins": 0.1426324099302292, |
|
"rewards/rejected": -0.5169671177864075, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.4590818363273453, |
|
"grad_norm": 3.6974655152807174, |
|
"learning_rate": 1.9734735554206537e-07, |
|
"logits/chosen": -0.31408676505088806, |
|
"logits/rejected": -0.2642524242401123, |
|
"logps/chosen": -349.4893798828125, |
|
"logps/rejected": -369.08990478515625, |
|
"loss": 0.6238, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.47439342737197876, |
|
"rewards/margins": 0.15798419713974, |
|
"rewards/rejected": -0.632377564907074, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.469061876247505, |
|
"grad_norm": 3.9074441662298662, |
|
"learning_rate": 1.9235121852643148e-07, |
|
"logits/chosen": -0.4889460504055023, |
|
"logits/rejected": -0.48752936720848083, |
|
"logps/chosen": -331.08624267578125, |
|
"logps/rejected": -348.8202819824219, |
|
"loss": 0.6259, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -0.41649022698402405, |
|
"rewards/margins": 0.1930549144744873, |
|
"rewards/rejected": -0.609545111656189, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.47904191616766467, |
|
"grad_norm": 3.7710978575315757, |
|
"learning_rate": 1.8730348307472826e-07, |
|
"logits/chosen": -0.4119279384613037, |
|
"logits/rejected": -0.444758802652359, |
|
"logps/chosen": -348.2624206542969, |
|
"logps/rejected": -369.9598083496094, |
|
"loss": 0.6249, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.5225319862365723, |
|
"rewards/margins": 0.13163016736507416, |
|
"rewards/rejected": -0.6541622281074524, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.48902195608782434, |
|
"grad_norm": 4.08348789093204, |
|
"learning_rate": 1.822102990750595e-07, |
|
"logits/chosen": -0.5410420298576355, |
|
"logits/rejected": -0.4581461548805237, |
|
"logps/chosen": -300.3963317871094, |
|
"logps/rejected": -326.97357177734375, |
|
"loss": 0.6205, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": -0.4268406927585602, |
|
"rewards/margins": 0.17870576679706573, |
|
"rewards/rejected": -0.6055464744567871, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.499001996007984, |
|
"grad_norm": 3.8249633059365977, |
|
"learning_rate": 1.7707787178758402e-07, |
|
"logits/chosen": -0.5376532673835754, |
|
"logits/rejected": -0.5166374444961548, |
|
"logps/chosen": -334.3974609375, |
|
"logps/rejected": -363.59832763671875, |
|
"loss": 0.6097, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": -0.5467029809951782, |
|
"rewards/margins": 0.219395712018013, |
|
"rewards/rejected": -0.7660986185073853, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5089820359281437, |
|
"grad_norm": 4.084608415013939, |
|
"learning_rate": 1.7191245428436176e-07, |
|
"logits/chosen": -0.6120297908782959, |
|
"logits/rejected": -0.5665668249130249, |
|
"logps/chosen": -388.49237060546875, |
|
"logps/rejected": -404.17913818359375, |
|
"loss": 0.6066, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.5541552305221558, |
|
"rewards/margins": 0.18654166162014008, |
|
"rewards/rejected": -0.7406969666481018, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.5189620758483033, |
|
"grad_norm": 4.127013078261958, |
|
"learning_rate": 1.667203398309488e-07, |
|
"logits/chosen": -0.602649450302124, |
|
"logits/rejected": -0.47091540694236755, |
|
"logps/chosen": -342.5652770996094, |
|
"logps/rejected": -363.1390380859375, |
|
"loss": 0.6099, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.5999665260314941, |
|
"rewards/margins": 0.22458717226982117, |
|
"rewards/rejected": -0.8245537877082825, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.5289421157684631, |
|
"grad_norm": 4.181662944251744, |
|
"learning_rate": 1.6150785421902278e-07, |
|
"logits/chosen": -0.49136123061180115, |
|
"logits/rejected": -0.4355231821537018, |
|
"logps/chosen": -355.0814514160156, |
|
"logps/rejected": -400.48486328125, |
|
"loss": 0.6027, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.6992485523223877, |
|
"rewards/margins": 0.2647705674171448, |
|
"rewards/rejected": -0.9640190005302429, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.5389221556886228, |
|
"grad_norm": 4.187328944312369, |
|
"learning_rate": 1.5628134805937993e-07, |
|
"logits/chosen": -0.6901322603225708, |
|
"logits/rejected": -0.7155795693397522, |
|
"logps/chosen": -338.4058532714844, |
|
"logps/rejected": -357.05145263671875, |
|
"loss": 0.6044, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.6499531865119934, |
|
"rewards/margins": 0.2078942060470581, |
|
"rewards/rejected": -0.8578473329544067, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.5489021956087824, |
|
"grad_norm": 4.480820141822546, |
|
"learning_rate": 1.5104718904469424e-07, |
|
"logits/chosen": -0.49210062623023987, |
|
"logits/rejected": -0.4995419979095459, |
|
"logps/chosen": -362.78936767578125, |
|
"logps/rejected": -376.6024475097656, |
|
"loss": 0.6022, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.684400200843811, |
|
"rewards/margins": 0.22897310554981232, |
|
"rewards/rejected": -0.9133733510971069, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.5588822355289421, |
|
"grad_norm": 4.335560690928003, |
|
"learning_rate": 1.458117541914647e-07, |
|
"logits/chosen": -0.6205674409866333, |
|
"logits/rejected": -0.5415416955947876, |
|
"logps/chosen": -363.15484619140625, |
|
"logps/rejected": -380.423828125, |
|
"loss": 0.601, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.7319628000259399, |
|
"rewards/margins": 0.22972945868968964, |
|
"rewards/rejected": -0.9616923332214355, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.5688622754491018, |
|
"grad_norm": 4.236266469806635, |
|
"learning_rate": 1.4058142207060298e-07, |
|
"logits/chosen": -0.5227392315864563, |
|
"logits/rejected": -0.44109734892845154, |
|
"logps/chosen": -350.81268310546875, |
|
"logps/rejected": -387.339111328125, |
|
"loss": 0.599, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.7962437272071838, |
|
"rewards/margins": 0.2677839994430542, |
|
"rewards/rejected": -1.0640277862548828, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.5788423153692615, |
|
"grad_norm": 4.1859368563558546, |
|
"learning_rate": 1.353625650361276e-07, |
|
"logits/chosen": -0.4567599296569824, |
|
"logits/rejected": -0.37081751227378845, |
|
"logps/chosen": -292.90582275390625, |
|
"logps/rejected": -351.08428955078125, |
|
"loss": 0.5976, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.7117597460746765, |
|
"rewards/margins": 0.23347418010234833, |
|
"rewards/rejected": -0.9452340006828308, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.5888223552894212, |
|
"grad_norm": 4.963202872238042, |
|
"learning_rate": 1.301615414614316e-07, |
|
"logits/chosen": -0.5814245939254761, |
|
"logits/rejected": -0.477074533700943, |
|
"logps/chosen": -359.9764404296875, |
|
"logps/rejected": -408.649169921875, |
|
"loss": 0.5854, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.8252065777778625, |
|
"rewards/margins": 0.3438906669616699, |
|
"rewards/rejected": -1.1690973043441772, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.5988023952095808, |
|
"grad_norm": 4.522965998928778, |
|
"learning_rate": 1.2498468799258466e-07, |
|
"logits/chosen": -0.6931585669517517, |
|
"logits/rejected": -0.6726911067962646, |
|
"logps/chosen": -447.56134033203125, |
|
"logps/rejected": -467.0315856933594, |
|
"loss": 0.5951, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.0262882709503174, |
|
"rewards/margins": 0.3147868514060974, |
|
"rewards/rejected": -1.3410751819610596, |
|
"step": 300 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 501, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 101, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |