|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.24, |
|
"eval_steps": 500, |
|
"global_step": 450, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"completion_length": 479.06511306762695, |
|
"epoch": 0.0010666666666666667, |
|
"grad_norm": 1.1869569851291564, |
|
"kl": 0.0, |
|
"learning_rate": 7.142857142857142e-08, |
|
"loss": -0.0, |
|
"reward": 0.3776041781529784, |
|
"reward_std": 0.4827471449971199, |
|
"rewards/equation_reward_func": 0.05989583441987634, |
|
"rewards/format_reward_func": 0.31770834140479565, |
|
"step": 2 |
|
}, |
|
{ |
|
"completion_length": 504.68751525878906, |
|
"epoch": 0.0021333333333333334, |
|
"grad_norm": 1.0572288353765749, |
|
"kl": 0.00037407875061035156, |
|
"learning_rate": 1.4285714285714285e-07, |
|
"loss": 0.0, |
|
"reward": 0.26562500977888703, |
|
"reward_std": 0.4200526801869273, |
|
"rewards/equation_reward_func": 0.041666667675599456, |
|
"rewards/format_reward_func": 0.22395834024064243, |
|
"step": 4 |
|
}, |
|
{ |
|
"completion_length": 484.2343940734863, |
|
"epoch": 0.0032, |
|
"grad_norm": 0.8713849573088774, |
|
"kl": 0.00042569637298583984, |
|
"learning_rate": 2.1428571428571426e-07, |
|
"loss": 0.0, |
|
"reward": 0.335937506519258, |
|
"reward_std": 0.4768502954393625, |
|
"rewards/equation_reward_func": 0.06250000186264515, |
|
"rewards/format_reward_func": 0.27343750884756446, |
|
"step": 6 |
|
}, |
|
{ |
|
"completion_length": 504.5208511352539, |
|
"epoch": 0.004266666666666667, |
|
"grad_norm": 0.9415822021899375, |
|
"kl": 0.0004322528839111328, |
|
"learning_rate": 2.857142857142857e-07, |
|
"loss": 0.0, |
|
"reward": 0.2838541753590107, |
|
"reward_std": 0.4456023368984461, |
|
"rewards/equation_reward_func": 0.02343750069849193, |
|
"rewards/format_reward_func": 0.2604166753590107, |
|
"step": 8 |
|
}, |
|
{ |
|
"completion_length": 501.39324378967285, |
|
"epoch": 0.005333333333333333, |
|
"grad_norm": 0.9228145520213509, |
|
"kl": 0.0004162788391113281, |
|
"learning_rate": 3.5714285714285716e-07, |
|
"loss": 0.0, |
|
"reward": 0.29687500884756446, |
|
"reward_std": 0.44185679126530886, |
|
"rewards/equation_reward_func": 0.03385416744276881, |
|
"rewards/format_reward_func": 0.26302084093913436, |
|
"step": 10 |
|
}, |
|
{ |
|
"completion_length": 494.34636878967285, |
|
"epoch": 0.0064, |
|
"grad_norm": 1.058429498755132, |
|
"kl": 0.0004417896270751953, |
|
"learning_rate": 4.285714285714285e-07, |
|
"loss": 0.0, |
|
"reward": 0.3203125111758709, |
|
"reward_std": 0.4622091678902507, |
|
"rewards/equation_reward_func": 0.04947916814126074, |
|
"rewards/format_reward_func": 0.27083334047347307, |
|
"step": 12 |
|
}, |
|
{ |
|
"completion_length": 448.00001525878906, |
|
"epoch": 0.007466666666666667, |
|
"grad_norm": 1.1373377904866069, |
|
"kl": 0.0008156299591064453, |
|
"learning_rate": 5e-07, |
|
"loss": 0.0, |
|
"reward": 0.48958335258066654, |
|
"reward_std": 0.5410491544753313, |
|
"rewards/equation_reward_func": 0.057291667675599456, |
|
"rewards/format_reward_func": 0.43229167722165585, |
|
"step": 14 |
|
}, |
|
{ |
|
"completion_length": 469.2656364440918, |
|
"epoch": 0.008533333333333334, |
|
"grad_norm": 1.0961526951904466, |
|
"kl": 0.0011763572692871094, |
|
"learning_rate": 4.999740409224932e-07, |
|
"loss": 0.0, |
|
"reward": 0.5260416828095913, |
|
"reward_std": 0.5119463540613651, |
|
"rewards/equation_reward_func": 0.06510416720993817, |
|
"rewards/format_reward_func": 0.46093751303851604, |
|
"step": 16 |
|
}, |
|
{ |
|
"completion_length": 444.05730628967285, |
|
"epoch": 0.0096, |
|
"grad_norm": 0.8490475472929186, |
|
"kl": 0.00301361083984375, |
|
"learning_rate": 4.998961690809627e-07, |
|
"loss": 0.0, |
|
"reward": 0.598958358168602, |
|
"reward_std": 0.5392562765628099, |
|
"rewards/equation_reward_func": 0.05208333465270698, |
|
"rewards/format_reward_func": 0.5468750149011612, |
|
"step": 18 |
|
}, |
|
{ |
|
"completion_length": 433.02865409851074, |
|
"epoch": 0.010666666666666666, |
|
"grad_norm": 0.8530996393331266, |
|
"kl": 0.0049877166748046875, |
|
"learning_rate": 4.997664006472578e-07, |
|
"loss": 0.0, |
|
"reward": 0.8125000186264515, |
|
"reward_std": 0.4681414752267301, |
|
"rewards/equation_reward_func": 0.0703125016298145, |
|
"rewards/format_reward_func": 0.7421875223517418, |
|
"step": 20 |
|
}, |
|
{ |
|
"completion_length": 456.56772232055664, |
|
"epoch": 0.011733333333333333, |
|
"grad_norm": 0.7697292886020256, |
|
"kl": 0.00555419921875, |
|
"learning_rate": 4.995847625707292e-07, |
|
"loss": 0.0, |
|
"reward": 0.8281250186264515, |
|
"reward_std": 0.4001491153612733, |
|
"rewards/equation_reward_func": 0.0416666679084301, |
|
"rewards/format_reward_func": 0.7864583507180214, |
|
"step": 22 |
|
}, |
|
{ |
|
"completion_length": 460.73438453674316, |
|
"epoch": 0.0128, |
|
"grad_norm": 0.7848337477423635, |
|
"kl": 0.0071258544921875, |
|
"learning_rate": 4.993512925726318e-07, |
|
"loss": 0.0, |
|
"reward": 0.8854166865348816, |
|
"reward_std": 0.3724366007372737, |
|
"rewards/equation_reward_func": 0.049479167675599456, |
|
"rewards/format_reward_func": 0.8359375186264515, |
|
"step": 24 |
|
}, |
|
{ |
|
"completion_length": 434.8932399749756, |
|
"epoch": 0.013866666666666666, |
|
"grad_norm": 0.7039638744422072, |
|
"kl": 0.007732391357421875, |
|
"learning_rate": 4.990660391382923e-07, |
|
"loss": 0.0, |
|
"reward": 0.9375000298023224, |
|
"reward_std": 0.4019690901041031, |
|
"rewards/equation_reward_func": 0.07812500209547579, |
|
"rewards/format_reward_func": 0.8593750223517418, |
|
"step": 26 |
|
}, |
|
{ |
|
"completion_length": 480.76824378967285, |
|
"epoch": 0.014933333333333333, |
|
"grad_norm": 0.705507612255758, |
|
"kl": 0.010210037231445312, |
|
"learning_rate": 4.987290615070384e-07, |
|
"loss": 0.0, |
|
"reward": 0.958333358168602, |
|
"reward_std": 0.3932732567191124, |
|
"rewards/equation_reward_func": 0.09114583558402956, |
|
"rewards/format_reward_func": 0.8671875223517418, |
|
"step": 28 |
|
}, |
|
{ |
|
"completion_length": 432.20053482055664, |
|
"epoch": 0.016, |
|
"grad_norm": 0.6874108412133968, |
|
"kl": 0.012973785400390625, |
|
"learning_rate": 4.983404296598978e-07, |
|
"loss": 0.0, |
|
"reward": 1.0364583730697632, |
|
"reward_std": 0.31663169525563717, |
|
"rewards/equation_reward_func": 0.10156250349245965, |
|
"rewards/format_reward_func": 0.9348958507180214, |
|
"step": 30 |
|
}, |
|
{ |
|
"completion_length": 407.95313835144043, |
|
"epoch": 0.017066666666666667, |
|
"grad_norm": 0.6793132961839309, |
|
"kl": 0.0137176513671875, |
|
"learning_rate": 4.979002243050646e-07, |
|
"loss": 0.0, |
|
"reward": 1.0989583767950535, |
|
"reward_std": 0.3416873565874994, |
|
"rewards/equation_reward_func": 0.14843750512227416, |
|
"rewards/format_reward_func": 0.9505208618938923, |
|
"step": 32 |
|
}, |
|
{ |
|
"completion_length": 434.5573043823242, |
|
"epoch": 0.018133333333333335, |
|
"grad_norm": 0.520698934589799, |
|
"kl": 0.01392364501953125, |
|
"learning_rate": 4.974085368611381e-07, |
|
"loss": 0.0, |
|
"reward": 1.0052083618938923, |
|
"reward_std": 0.2466061031445861, |
|
"rewards/equation_reward_func": 0.06250000093132257, |
|
"rewards/format_reward_func": 0.9427083507180214, |
|
"step": 34 |
|
}, |
|
{ |
|
"completion_length": 402.1171989440918, |
|
"epoch": 0.0192, |
|
"grad_norm": 0.5349273835140175, |
|
"kl": 0.013996124267578125, |
|
"learning_rate": 4.968654694381379e-07, |
|
"loss": 0.0, |
|
"reward": 1.0625000149011612, |
|
"reward_std": 0.20629451051354408, |
|
"rewards/equation_reward_func": 0.0859375016298145, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 36 |
|
}, |
|
{ |
|
"completion_length": 412.4765739440918, |
|
"epoch": 0.020266666666666665, |
|
"grad_norm": 0.6378526041216871, |
|
"kl": 0.01590728759765625, |
|
"learning_rate": 4.962711348162987e-07, |
|
"loss": 0.0, |
|
"reward": 1.1067708656191826, |
|
"reward_std": 0.28954573161900043, |
|
"rewards/equation_reward_func": 0.13541666930541396, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 38 |
|
}, |
|
{ |
|
"completion_length": 398.5989685058594, |
|
"epoch": 0.021333333333333333, |
|
"grad_norm": 0.656016728201151, |
|
"kl": 0.01578521728515625, |
|
"learning_rate": 4.956256564226487e-07, |
|
"loss": 0.0, |
|
"reward": 1.070312537252903, |
|
"reward_std": 0.2771536544896662, |
|
"rewards/equation_reward_func": 0.10677083628252149, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 40 |
|
}, |
|
{ |
|
"completion_length": 400.770845413208, |
|
"epoch": 0.0224, |
|
"grad_norm": 0.8289361768943612, |
|
"kl": 0.01721954345703125, |
|
"learning_rate": 4.949291683053768e-07, |
|
"loss": 0.0, |
|
"reward": 1.1223958656191826, |
|
"reward_std": 0.3327407846227288, |
|
"rewards/equation_reward_func": 0.1484375037252903, |
|
"rewards/format_reward_func": 0.9739583544433117, |
|
"step": 42 |
|
}, |
|
{ |
|
"completion_length": 398.7526111602783, |
|
"epoch": 0.023466666666666667, |
|
"grad_norm": 0.6768780771715479, |
|
"kl": 0.017475128173828125, |
|
"learning_rate": 4.941818151059955e-07, |
|
"loss": 0.0, |
|
"reward": 1.0859375335276127, |
|
"reward_std": 0.24852651357650757, |
|
"rewards/equation_reward_func": 0.11718750325962901, |
|
"rewards/format_reward_func": 0.9687500111758709, |
|
"step": 44 |
|
}, |
|
{ |
|
"completion_length": 390.9244899749756, |
|
"epoch": 0.024533333333333334, |
|
"grad_norm": 0.6089623829481902, |
|
"kl": 0.04027557373046875, |
|
"learning_rate": 4.933837520293017e-07, |
|
"loss": 0.0, |
|
"reward": 1.0729167014360428, |
|
"reward_std": 0.25113596161827445, |
|
"rewards/equation_reward_func": 0.10416666814126074, |
|
"rewards/format_reward_func": 0.9687500223517418, |
|
"step": 46 |
|
}, |
|
{ |
|
"completion_length": 369.0937614440918, |
|
"epoch": 0.0256, |
|
"grad_norm": 0.5754646954335966, |
|
"kl": 0.0209197998046875, |
|
"learning_rate": 4.925351448111454e-07, |
|
"loss": 0.0, |
|
"reward": 1.1067708730697632, |
|
"reward_std": 0.24725896958261728, |
|
"rewards/equation_reward_func": 0.13020833930931985, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 48 |
|
}, |
|
{ |
|
"completion_length": 388.3880310058594, |
|
"epoch": 0.02666666666666667, |
|
"grad_norm": 0.599342426740638, |
|
"kl": 0.0214385986328125, |
|
"learning_rate": 4.91636169684011e-07, |
|
"loss": 0.0, |
|
"reward": 1.0807292088866234, |
|
"reward_std": 0.22517748409882188, |
|
"rewards/equation_reward_func": 0.10416666883975267, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 50 |
|
}, |
|
{ |
|
"completion_length": 370.52345085144043, |
|
"epoch": 0.027733333333333332, |
|
"grad_norm": 0.6909841937162223, |
|
"kl": 0.0216064453125, |
|
"learning_rate": 4.906870133404186e-07, |
|
"loss": 0.0, |
|
"reward": 1.0833333693444729, |
|
"reward_std": 0.262856621760875, |
|
"rewards/equation_reward_func": 0.11458333674818277, |
|
"rewards/format_reward_func": 0.9687500186264515, |
|
"step": 52 |
|
}, |
|
{ |
|
"completion_length": 374.856782913208, |
|
"epoch": 0.0288, |
|
"grad_norm": 0.6283094034758113, |
|
"kl": 0.0230560302734375, |
|
"learning_rate": 4.896878728941531e-07, |
|
"loss": 0.0, |
|
"reward": 1.1015625223517418, |
|
"reward_std": 0.2443550513125956, |
|
"rewards/equation_reward_func": 0.1276041695382446, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 54 |
|
}, |
|
{ |
|
"completion_length": 364.94011306762695, |
|
"epoch": 0.029866666666666666, |
|
"grad_norm": 0.7799936650160872, |
|
"kl": 0.0264739990234375, |
|
"learning_rate": 4.886389558393284e-07, |
|
"loss": 0.0, |
|
"reward": 1.1666667088866234, |
|
"reward_std": 0.33124217577278614, |
|
"rewards/equation_reward_func": 0.19010417396202683, |
|
"rewards/format_reward_func": 0.9765625074505806, |
|
"step": 56 |
|
}, |
|
{ |
|
"completion_length": 352.15365409851074, |
|
"epoch": 0.030933333333333334, |
|
"grad_norm": 0.6990776726434789, |
|
"kl": 0.0289306640625, |
|
"learning_rate": 4.875404800072976e-07, |
|
"loss": 0.0, |
|
"reward": 1.1093750447034836, |
|
"reward_std": 0.22618821123614907, |
|
"rewards/equation_reward_func": 0.12760416977107525, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 58 |
|
}, |
|
{ |
|
"completion_length": 347.57553482055664, |
|
"epoch": 0.032, |
|
"grad_norm": 0.7882548556903084, |
|
"kl": 0.02877044677734375, |
|
"learning_rate": 4.86392673521415e-07, |
|
"loss": 0.0, |
|
"reward": 1.2187500447034836, |
|
"reward_std": 0.32150182826444507, |
|
"rewards/equation_reward_func": 0.2291666737291962, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 60 |
|
}, |
|
{ |
|
"completion_length": 364.71875953674316, |
|
"epoch": 0.03306666666666667, |
|
"grad_norm": 0.6290974292701452, |
|
"kl": 0.02837371826171875, |
|
"learning_rate": 4.851957747496606e-07, |
|
"loss": 0.0, |
|
"reward": 1.143229190260172, |
|
"reward_std": 0.2681962251663208, |
|
"rewards/equation_reward_func": 0.1718750037252903, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 62 |
|
}, |
|
{ |
|
"completion_length": 351.825532913208, |
|
"epoch": 0.034133333333333335, |
|
"grad_norm": 0.7524850668668323, |
|
"kl": 0.03217315673828125, |
|
"learning_rate": 4.839500322551386e-07, |
|
"loss": 0.0, |
|
"reward": 1.1588542088866234, |
|
"reward_std": 0.2972170915454626, |
|
"rewards/equation_reward_func": 0.17968750488944352, |
|
"rewards/format_reward_func": 0.9791666753590107, |
|
"step": 64 |
|
}, |
|
{ |
|
"completion_length": 360.0833435058594, |
|
"epoch": 0.0352, |
|
"grad_norm": 0.6380613373024979, |
|
"kl": 0.03417205810546875, |
|
"learning_rate": 4.826557047444563e-07, |
|
"loss": 0.0, |
|
"reward": 1.1302083805203438, |
|
"reward_std": 0.2470923252403736, |
|
"rewards/equation_reward_func": 0.15625000465661287, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 66 |
|
}, |
|
{ |
|
"completion_length": 344.16928482055664, |
|
"epoch": 0.03626666666666667, |
|
"grad_norm": 0.8763117130489186, |
|
"kl": 0.03891754150390625, |
|
"learning_rate": 4.813130610139993e-07, |
|
"loss": 0.0, |
|
"reward": 1.1614583879709244, |
|
"reward_std": 0.31256247218698263, |
|
"rewards/equation_reward_func": 0.17968750605359674, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 68 |
|
}, |
|
{ |
|
"completion_length": 374.7916774749756, |
|
"epoch": 0.037333333333333336, |
|
"grad_norm": 0.6775447687819961, |
|
"kl": 0.0399627685546875, |
|
"learning_rate": 4.799223798941089e-07, |
|
"loss": 0.0, |
|
"reward": 1.1223958507180214, |
|
"reward_std": 0.2880659820511937, |
|
"rewards/equation_reward_func": 0.14843750279396772, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 70 |
|
}, |
|
{ |
|
"completion_length": 341.1588668823242, |
|
"epoch": 0.0384, |
|
"grad_norm": 0.6315374119974878, |
|
"kl": 0.045684814453125, |
|
"learning_rate": 4.78483950191177e-07, |
|
"loss": 0.0, |
|
"reward": 1.171875037252903, |
|
"reward_std": 0.2657899674959481, |
|
"rewards/equation_reward_func": 0.1953125069849193, |
|
"rewards/format_reward_func": 0.9765625074505806, |
|
"step": 72 |
|
}, |
|
{ |
|
"completion_length": 347.85938453674316, |
|
"epoch": 0.039466666666666664, |
|
"grad_norm": 0.6797093361910038, |
|
"kl": 0.0632171630859375, |
|
"learning_rate": 4.769980706276687e-07, |
|
"loss": 0.0001, |
|
"reward": 1.166666716337204, |
|
"reward_std": 0.28392915101721883, |
|
"rewards/equation_reward_func": 0.18489583884365857, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 74 |
|
}, |
|
{ |
|
"completion_length": 358.37500953674316, |
|
"epoch": 0.04053333333333333, |
|
"grad_norm": 0.7347841080142689, |
|
"kl": 0.05291748046875, |
|
"learning_rate": 4.7546504978008595e-07, |
|
"loss": 0.0001, |
|
"reward": 1.226562537252903, |
|
"reward_std": 0.34029595321044326, |
|
"rewards/equation_reward_func": 0.24739584024064243, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 76 |
|
}, |
|
{ |
|
"completion_length": 334.1198024749756, |
|
"epoch": 0.0416, |
|
"grad_norm": 0.6278097170931212, |
|
"kl": 0.0596466064453125, |
|
"learning_rate": 4.738852060148848e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1432291939854622, |
|
"reward_std": 0.23812766559422016, |
|
"rewards/equation_reward_func": 0.15885417023673654, |
|
"rewards/format_reward_func": 0.9843750111758709, |
|
"step": 78 |
|
}, |
|
{ |
|
"completion_length": 344.8177146911621, |
|
"epoch": 0.042666666666666665, |
|
"grad_norm": 0.6997187790917091, |
|
"kl": 0.061279296875, |
|
"learning_rate": 4.722588674223593e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1666666939854622, |
|
"reward_std": 0.26324747782200575, |
|
"rewards/equation_reward_func": 0.18489583767950535, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 80 |
|
}, |
|
{ |
|
"completion_length": 309.60938453674316, |
|
"epoch": 0.04373333333333333, |
|
"grad_norm": 0.6997044550717475, |
|
"kl": 0.070892333984375, |
|
"learning_rate": 4.70586371748506e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2213542014360428, |
|
"reward_std": 0.2756146062165499, |
|
"rewards/equation_reward_func": 0.23177083861082792, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 82 |
|
}, |
|
{ |
|
"completion_length": 335.4453239440918, |
|
"epoch": 0.0448, |
|
"grad_norm": 0.683238903553104, |
|
"kl": 0.0697021484375, |
|
"learning_rate": 4.6886806632488363e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1614583656191826, |
|
"reward_std": 0.27705384651198983, |
|
"rewards/equation_reward_func": 0.18229167093522847, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 84 |
|
}, |
|
{ |
|
"completion_length": 305.8229236602783, |
|
"epoch": 0.04586666666666667, |
|
"grad_norm": 0.5785488696587523, |
|
"kl": 0.0760498046875, |
|
"learning_rate": 4.6710430799648143e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1822917014360428, |
|
"reward_std": 0.21207982301712036, |
|
"rewards/equation_reward_func": 0.1875000058207661, |
|
"rewards/format_reward_func": 0.9947916716337204, |
|
"step": 86 |
|
}, |
|
{ |
|
"completion_length": 313.07552909851074, |
|
"epoch": 0.046933333333333334, |
|
"grad_norm": 0.8039995179667074, |
|
"kl": 0.0819091796875, |
|
"learning_rate": 4.652954630476127e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2161458805203438, |
|
"reward_std": 0.3164127543568611, |
|
"rewards/equation_reward_func": 0.22916667512618005, |
|
"rewards/format_reward_func": 0.986979179084301, |
|
"step": 88 |
|
}, |
|
{ |
|
"completion_length": 321.54948902130127, |
|
"epoch": 0.048, |
|
"grad_norm": 0.7157243173905158, |
|
"kl": 0.093475341796875, |
|
"learning_rate": 4.6344190712584713e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1927083805203438, |
|
"reward_std": 0.21211858373135328, |
|
"rewards/equation_reward_func": 0.20312500628642738, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 90 |
|
}, |
|
{ |
|
"completion_length": 289.03386402130127, |
|
"epoch": 0.04906666666666667, |
|
"grad_norm": 0.9028411325076038, |
|
"kl": 0.0883941650390625, |
|
"learning_rate": 4.615440251639995e-07, |
|
"loss": 0.0001, |
|
"reward": 1.330729216337204, |
|
"reward_std": 0.28467333829030395, |
|
"rewards/equation_reward_func": 0.33333334140479565, |
|
"rewards/format_reward_func": 0.9973958358168602, |
|
"step": 92 |
|
}, |
|
{ |
|
"completion_length": 304.7942819595337, |
|
"epoch": 0.050133333333333335, |
|
"grad_norm": 0.7882629426423002, |
|
"kl": 0.09014892578125, |
|
"learning_rate": 4.596022113001894e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2890625521540642, |
|
"reward_std": 0.29563618497923017, |
|
"rewards/equation_reward_func": 0.2968750095460564, |
|
"rewards/format_reward_func": 0.9921875074505806, |
|
"step": 94 |
|
}, |
|
{ |
|
"completion_length": 336.986985206604, |
|
"epoch": 0.0512, |
|
"grad_norm": 0.7426512420493854, |
|
"kl": 0.086090087890625, |
|
"learning_rate": 4.576168687959895e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2552083656191826, |
|
"reward_std": 0.348067122977227, |
|
"rewards/equation_reward_func": 0.276041675824672, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 96 |
|
}, |
|
{ |
|
"completion_length": 318.2812614440918, |
|
"epoch": 0.05226666666666667, |
|
"grad_norm": 0.8480603451919824, |
|
"kl": 0.09051513671875, |
|
"learning_rate": 4.555884099526793e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2890625335276127, |
|
"reward_std": 0.3150825258344412, |
|
"rewards/equation_reward_func": 0.3072916748933494, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 98 |
|
}, |
|
{ |
|
"completion_length": 352.70313453674316, |
|
"epoch": 0.05333333333333334, |
|
"grad_norm": 0.6067660652220382, |
|
"kl": 0.089141845703125, |
|
"learning_rate": 4.5351725602562174e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2916667014360428, |
|
"reward_std": 0.3160873227752745, |
|
"rewards/equation_reward_func": 0.32031250768341124, |
|
"rewards/format_reward_func": 0.9713541828095913, |
|
"step": 100 |
|
}, |
|
{ |
|
"completion_length": 316.4739646911621, |
|
"epoch": 0.0544, |
|
"grad_norm": 0.7599757297215466, |
|
"kl": 0.08282470703125, |
|
"learning_rate": 4.514038371367791e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3177083879709244, |
|
"reward_std": 0.316761318128556, |
|
"rewards/equation_reward_func": 0.32552084513008595, |
|
"rewards/format_reward_func": 0.9921875074505806, |
|
"step": 102 |
|
}, |
|
{ |
|
"completion_length": 346.94793033599854, |
|
"epoch": 0.055466666666666664, |
|
"grad_norm": 0.7139942682254907, |
|
"kl": 0.082855224609375, |
|
"learning_rate": 4.4924859218538936e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2343750298023224, |
|
"reward_std": 0.28453153278678656, |
|
"rewards/equation_reward_func": 0.247395837912336, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 104 |
|
}, |
|
{ |
|
"completion_length": 333.86459159851074, |
|
"epoch": 0.05653333333333333, |
|
"grad_norm": 0.7262973137337115, |
|
"kl": 0.081298828125, |
|
"learning_rate": 4.470519687568185e-07, |
|
"loss": 0.0001, |
|
"reward": 1.216145858168602, |
|
"reward_std": 0.2771544805727899, |
|
"rewards/equation_reward_func": 0.23958334210328758, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 106 |
|
}, |
|
{ |
|
"completion_length": 347.1354236602783, |
|
"epoch": 0.0576, |
|
"grad_norm": 0.8289662610034518, |
|
"kl": 0.0821533203125, |
|
"learning_rate": 4.4481442302960923e-07, |
|
"loss": 0.0001, |
|
"reward": 1.322916716337204, |
|
"reward_std": 0.3112520845606923, |
|
"rewards/equation_reward_func": 0.34375001257285476, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 108 |
|
}, |
|
{ |
|
"completion_length": 358.14323806762695, |
|
"epoch": 0.058666666666666666, |
|
"grad_norm": 0.7194510877107703, |
|
"kl": 0.09747314453125, |
|
"learning_rate": 4.4253641968074505e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2656250447034836, |
|
"reward_std": 0.27732502575963736, |
|
"rewards/equation_reward_func": 0.29166667233221233, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 110 |
|
}, |
|
{ |
|
"completion_length": 353.95573806762695, |
|
"epoch": 0.05973333333333333, |
|
"grad_norm": 0.8309996583293423, |
|
"kl": 0.08343505859375, |
|
"learning_rate": 4.402184317891501e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3046875596046448, |
|
"reward_std": 0.3259678762406111, |
|
"rewards/equation_reward_func": 0.32552084466442466, |
|
"rewards/format_reward_func": 0.9791666753590107, |
|
"step": 112 |
|
}, |
|
{ |
|
"completion_length": 340.52345085144043, |
|
"epoch": 0.0608, |
|
"grad_norm": 0.860260137408963, |
|
"kl": 0.085205078125, |
|
"learning_rate": 4.37860940737443e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3645833656191826, |
|
"reward_std": 0.28312036022543907, |
|
"rewards/equation_reward_func": 0.3854166744276881, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 114 |
|
}, |
|
{ |
|
"completion_length": 369.455735206604, |
|
"epoch": 0.06186666666666667, |
|
"grad_norm": 0.8461609359358174, |
|
"kl": 0.092681884765625, |
|
"learning_rate": 4.354644361119671e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3489583656191826, |
|
"reward_std": 0.3290282329544425, |
|
"rewards/equation_reward_func": 0.38541667675599456, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 116 |
|
}, |
|
{ |
|
"completion_length": 392.994797706604, |
|
"epoch": 0.06293333333333333, |
|
"grad_norm": 0.7075500368751798, |
|
"kl": 0.0854949951171875, |
|
"learning_rate": 4.3302941560111716e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3020833767950535, |
|
"reward_std": 0.38001349521800876, |
|
"rewards/equation_reward_func": 0.3515625090803951, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 118 |
|
}, |
|
{ |
|
"completion_length": 386.47136306762695, |
|
"epoch": 0.064, |
|
"grad_norm": 0.7448272303035507, |
|
"kl": 0.095428466796875, |
|
"learning_rate": 4.3055638489198236e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3281250447034836, |
|
"reward_std": 0.2941426238976419, |
|
"rewards/equation_reward_func": 0.3645833453629166, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 120 |
|
}, |
|
{ |
|
"completion_length": 390.67970085144043, |
|
"epoch": 0.06506666666666666, |
|
"grad_norm": 0.6028452890831756, |
|
"kl": 0.102203369140625, |
|
"learning_rate": 4.280458575653296e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3151042088866234, |
|
"reward_std": 0.3121520522981882, |
|
"rewards/equation_reward_func": 0.3593750149011612, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 122 |
|
}, |
|
{ |
|
"completion_length": 411.3906307220459, |
|
"epoch": 0.06613333333333334, |
|
"grad_norm": 0.6972432278932138, |
|
"kl": 0.1053466796875, |
|
"learning_rate": 4.2549835498894665e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3046875298023224, |
|
"reward_std": 0.3263821485452354, |
|
"rewards/equation_reward_func": 0.3541666744276881, |
|
"rewards/format_reward_func": 0.9505208469927311, |
|
"step": 124 |
|
}, |
|
{ |
|
"completion_length": 398.1380367279053, |
|
"epoch": 0.0672, |
|
"grad_norm": 0.5908309999266357, |
|
"kl": 0.11505126953125, |
|
"learning_rate": 4.229144062093679e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3645833656191826, |
|
"reward_std": 0.30290936632081866, |
|
"rewards/equation_reward_func": 0.414062513737008, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 126 |
|
}, |
|
{ |
|
"completion_length": 407.2057456970215, |
|
"epoch": 0.06826666666666667, |
|
"grad_norm": 0.696587638578745, |
|
"kl": 0.113983154296875, |
|
"learning_rate": 4.2029454784200675e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3723958730697632, |
|
"reward_std": 0.32597592612728477, |
|
"rewards/equation_reward_func": 0.41927084675990045, |
|
"rewards/format_reward_func": 0.9531250111758709, |
|
"step": 128 |
|
}, |
|
{ |
|
"completion_length": 401.0286531448364, |
|
"epoch": 0.06933333333333333, |
|
"grad_norm": 0.6464987173940265, |
|
"kl": 0.109283447265625, |
|
"learning_rate": 4.1763932395971433e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3593750223517418, |
|
"reward_std": 0.283989277202636, |
|
"rewards/equation_reward_func": 0.3984375111758709, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 130 |
|
}, |
|
{ |
|
"completion_length": 443.7916851043701, |
|
"epoch": 0.0704, |
|
"grad_norm": 0.7004776947090245, |
|
"kl": 0.105438232421875, |
|
"learning_rate": 4.1494928597979117e-07, |
|
"loss": 0.0001, |
|
"reward": 1.289062537252903, |
|
"reward_std": 0.3336244923993945, |
|
"rewards/equation_reward_func": 0.34114584419876337, |
|
"rewards/format_reward_func": 0.9479166902601719, |
|
"step": 132 |
|
}, |
|
{ |
|
"completion_length": 415.70313262939453, |
|
"epoch": 0.07146666666666666, |
|
"grad_norm": 0.6650285479645688, |
|
"kl": 0.1195068359375, |
|
"learning_rate": 4.122249925494726e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3333333656191826, |
|
"reward_std": 0.3276231326162815, |
|
"rewards/equation_reward_func": 0.38020834140479565, |
|
"rewards/format_reward_func": 0.9531250260770321, |
|
"step": 134 |
|
}, |
|
{ |
|
"completion_length": 397.13542652130127, |
|
"epoch": 0.07253333333333334, |
|
"grad_norm": 0.6650549480411382, |
|
"kl": 0.12451171875, |
|
"learning_rate": 4.094670094299131e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4140625335276127, |
|
"reward_std": 0.33390075946226716, |
|
"rewards/equation_reward_func": 0.4557291781529784, |
|
"rewards/format_reward_func": 0.958333358168602, |
|
"step": 136 |
|
}, |
|
{ |
|
"completion_length": 427.82293033599854, |
|
"epoch": 0.0736, |
|
"grad_norm": 0.58867517657451, |
|
"kl": 0.1121826171875, |
|
"learning_rate": 4.066759093786931e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3046875521540642, |
|
"reward_std": 0.2838647039607167, |
|
"rewards/equation_reward_func": 0.3567708474583924, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 138 |
|
}, |
|
{ |
|
"completion_length": 435.25261878967285, |
|
"epoch": 0.07466666666666667, |
|
"grad_norm": 0.7308572976998943, |
|
"kl": 0.1190185546875, |
|
"learning_rate": 4.038522720308732e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2994792088866234, |
|
"reward_std": 0.32467597210779786, |
|
"rewards/equation_reward_func": 0.3723958465270698, |
|
"rewards/format_reward_func": 0.9270833469927311, |
|
"step": 140 |
|
}, |
|
{ |
|
"completion_length": 399.5807399749756, |
|
"epoch": 0.07573333333333333, |
|
"grad_norm": 0.7010882655857836, |
|
"kl": 0.128326416015625, |
|
"learning_rate": 4.009966837786194e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4166667014360428, |
|
"reward_std": 0.34676419477909803, |
|
"rewards/equation_reward_func": 0.48437501303851604, |
|
"rewards/format_reward_func": 0.932291679084301, |
|
"step": 142 |
|
}, |
|
{ |
|
"completion_length": 385.9557418823242, |
|
"epoch": 0.0768, |
|
"grad_norm": 0.7470074693923137, |
|
"kl": 0.162322998046875, |
|
"learning_rate": 3.981097376494259e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4010417014360428, |
|
"reward_std": 0.26487945253029466, |
|
"rewards/equation_reward_func": 0.43750001466833055, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 144 |
|
}, |
|
{ |
|
"completion_length": 408.0442810058594, |
|
"epoch": 0.07786666666666667, |
|
"grad_norm": 0.6999375294187344, |
|
"kl": 0.137908935546875, |
|
"learning_rate": 3.951920331829592e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3828125484287739, |
|
"reward_std": 0.29149100510403514, |
|
"rewards/equation_reward_func": 0.4375000128056854, |
|
"rewards/format_reward_func": 0.9453125186264515, |
|
"step": 146 |
|
}, |
|
{ |
|
"completion_length": 403.9765729904175, |
|
"epoch": 0.07893333333333333, |
|
"grad_norm": 0.5287881916983996, |
|
"kl": 0.151641845703125, |
|
"learning_rate": 3.922441763065506e-07, |
|
"loss": 0.0002, |
|
"reward": 1.416666705161333, |
|
"reward_std": 0.28373547829687595, |
|
"rewards/equation_reward_func": 0.4817708465270698, |
|
"rewards/format_reward_func": 0.9348958544433117, |
|
"step": 148 |
|
}, |
|
{ |
|
"completion_length": 410.20573902130127, |
|
"epoch": 0.08, |
|
"grad_norm": 0.5024042458612308, |
|
"kl": 0.14007568359375, |
|
"learning_rate": 3.8926677920936093e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3619792088866234, |
|
"reward_std": 0.28874032804742455, |
|
"rewards/equation_reward_func": 0.4218750086147338, |
|
"rewards/format_reward_func": 0.9401041865348816, |
|
"step": 150 |
|
}, |
|
{ |
|
"completion_length": 404.48959732055664, |
|
"epoch": 0.08106666666666666, |
|
"grad_norm": 0.4963601555912834, |
|
"kl": 0.1383056640625, |
|
"learning_rate": 3.862604602152464e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4088541939854622, |
|
"reward_std": 0.2753666895441711, |
|
"rewards/equation_reward_func": 0.4687500107102096, |
|
"rewards/format_reward_func": 0.9401041865348816, |
|
"step": 152 |
|
}, |
|
{ |
|
"completion_length": 413.2682399749756, |
|
"epoch": 0.08213333333333334, |
|
"grad_norm": 0.8055471860932037, |
|
"kl": 0.279388427734375, |
|
"learning_rate": 3.8322584365434934e-07, |
|
"loss": 0.0003, |
|
"reward": 1.3802083656191826, |
|
"reward_std": 0.26039192313328385, |
|
"rewards/equation_reward_func": 0.42968751210719347, |
|
"rewards/format_reward_func": 0.950520858168602, |
|
"step": 154 |
|
}, |
|
{ |
|
"completion_length": 402.37240505218506, |
|
"epoch": 0.0832, |
|
"grad_norm": 0.42808798597812875, |
|
"kl": 0.150421142578125, |
|
"learning_rate": 3.8016355973344173e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3593750447034836, |
|
"reward_std": 0.1753570344299078, |
|
"rewards/equation_reward_func": 0.4062500090803951, |
|
"rewards/format_reward_func": 0.9531250149011612, |
|
"step": 156 |
|
}, |
|
{ |
|
"completion_length": 362.34376335144043, |
|
"epoch": 0.08426666666666667, |
|
"grad_norm": 0.7017085808063098, |
|
"kl": 0.1455078125, |
|
"learning_rate": 3.7707424440504863e-07, |
|
"loss": 0.0001, |
|
"reward": 1.416666705161333, |
|
"reward_std": 0.2758899559266865, |
|
"rewards/equation_reward_func": 0.4635416786186397, |
|
"rewards/format_reward_func": 0.9531250111758709, |
|
"step": 158 |
|
}, |
|
{ |
|
"completion_length": 402.34636402130127, |
|
"epoch": 0.08533333333333333, |
|
"grad_norm": 0.7033098011768847, |
|
"kl": 0.13641357421875, |
|
"learning_rate": 3.739585392353787e-07, |
|
"loss": 0.0001, |
|
"reward": 1.398437537252903, |
|
"reward_std": 0.2615419952198863, |
|
"rewards/equation_reward_func": 0.4375000111758709, |
|
"rewards/format_reward_func": 0.9609375111758709, |
|
"step": 160 |
|
}, |
|
{ |
|
"completion_length": 349.5468854904175, |
|
"epoch": 0.0864, |
|
"grad_norm": 0.8470177529007129, |
|
"kl": 0.17486572265625, |
|
"learning_rate": 3.7081709127108767e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4609375447034836, |
|
"reward_std": 0.21957123186439276, |
|
"rewards/equation_reward_func": 0.4791666835080832, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 162 |
|
}, |
|
{ |
|
"completion_length": 408.7864656448364, |
|
"epoch": 0.08746666666666666, |
|
"grad_norm": 0.6697773440506227, |
|
"kl": 0.13555908203125, |
|
"learning_rate": 3.6765055290490513e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3645833767950535, |
|
"reward_std": 0.24416533997282386, |
|
"rewards/equation_reward_func": 0.40885417559184134, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 164 |
|
}, |
|
{ |
|
"completion_length": 377.4479274749756, |
|
"epoch": 0.08853333333333334, |
|
"grad_norm": 0.5585350437896919, |
|
"kl": 0.15234375, |
|
"learning_rate": 3.644595817401501e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3958333767950535, |
|
"reward_std": 0.19870867347344756, |
|
"rewards/equation_reward_func": 0.4348958439659327, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 166 |
|
}, |
|
{ |
|
"completion_length": 375.86198902130127, |
|
"epoch": 0.0896, |
|
"grad_norm": 0.5191461899813266, |
|
"kl": 0.14447021484375, |
|
"learning_rate": 3.6124484045416483e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4244792126119137, |
|
"reward_std": 0.21074969647452235, |
|
"rewards/equation_reward_func": 0.4479166853707284, |
|
"rewards/format_reward_func": 0.9765625074505806, |
|
"step": 168 |
|
}, |
|
{ |
|
"completion_length": 367.8724069595337, |
|
"epoch": 0.09066666666666667, |
|
"grad_norm": 0.634261109914142, |
|
"kl": 0.1561279296875, |
|
"learning_rate": 3.580069966606949e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4140625447034836, |
|
"reward_std": 0.23517010640352964, |
|
"rewards/equation_reward_func": 0.4375000095460564, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 170 |
|
}, |
|
{ |
|
"completion_length": 420.29949378967285, |
|
"epoch": 0.09173333333333333, |
|
"grad_norm": 0.6666515479679946, |
|
"kl": 0.17108154296875, |
|
"learning_rate": 3.547467227712444e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3671875335276127, |
|
"reward_std": 0.23187633976340294, |
|
"rewards/equation_reward_func": 0.411458347691223, |
|
"rewards/format_reward_func": 0.9557291902601719, |
|
"step": 172 |
|
}, |
|
{ |
|
"completion_length": 380.9713659286499, |
|
"epoch": 0.0928, |
|
"grad_norm": 0.5233105345475487, |
|
"kl": 0.151123046875, |
|
"learning_rate": 3.5146469585543386e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4635417088866234, |
|
"reward_std": 0.21822098083794117, |
|
"rewards/equation_reward_func": 0.4973958465270698, |
|
"rewards/format_reward_func": 0.9661458544433117, |
|
"step": 174 |
|
}, |
|
{ |
|
"completion_length": 367.92969512939453, |
|
"epoch": 0.09386666666666667, |
|
"grad_norm": 0.5157976215776814, |
|
"kl": 0.148345947265625, |
|
"learning_rate": 3.481615975003922e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4244792014360428, |
|
"reward_std": 0.21948427613824606, |
|
"rewards/equation_reward_func": 0.45572917768731713, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 176 |
|
}, |
|
{ |
|
"completion_length": 376.94011878967285, |
|
"epoch": 0.09493333333333333, |
|
"grad_norm": 0.6881879649420064, |
|
"kl": 0.17608642578125, |
|
"learning_rate": 3.448381136692089e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3984375484287739, |
|
"reward_std": 0.2576333871111274, |
|
"rewards/equation_reward_func": 0.4348958448972553, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 178 |
|
}, |
|
{ |
|
"completion_length": 390.6302185058594, |
|
"epoch": 0.096, |
|
"grad_norm": 0.8531012725646003, |
|
"kl": 0.1448974609375, |
|
"learning_rate": 3.4149493455847897e-07, |
|
"loss": 0.0001, |
|
"reward": 1.369791716337204, |
|
"reward_std": 0.229800954926759, |
|
"rewards/equation_reward_func": 0.3906250102445483, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 180 |
|
}, |
|
{ |
|
"completion_length": 391.755220413208, |
|
"epoch": 0.09706666666666666, |
|
"grad_norm": 0.8499423182750618, |
|
"kl": 0.14874267578125, |
|
"learning_rate": 3.3813275445496766e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4114583730697632, |
|
"reward_std": 0.24884725222364068, |
|
"rewards/equation_reward_func": 0.4348958469927311, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 182 |
|
}, |
|
{ |
|
"completion_length": 432.4192810058594, |
|
"epoch": 0.09813333333333334, |
|
"grad_norm": 0.6105983090425946, |
|
"kl": 0.15069580078125, |
|
"learning_rate": 3.347522715914262e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3125000409781933, |
|
"reward_std": 0.18763548182323575, |
|
"rewards/equation_reward_func": 0.34635417559184134, |
|
"rewards/format_reward_func": 0.9661458469927311, |
|
"step": 184 |
|
}, |
|
{ |
|
"completion_length": 347.3046941757202, |
|
"epoch": 0.0992, |
|
"grad_norm": 0.5844753474786499, |
|
"kl": 0.1790771484375, |
|
"learning_rate": 3.313541880015877e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4843750558793545, |
|
"reward_std": 0.15258984873071313, |
|
"rewards/equation_reward_func": 0.5104166809469461, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 186 |
|
}, |
|
{ |
|
"completion_length": 347.9427185058594, |
|
"epoch": 0.10026666666666667, |
|
"grad_norm": 0.6723125365768322, |
|
"kl": 0.14996337890625, |
|
"learning_rate": 3.279392093743747e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5052083730697632, |
|
"reward_std": 0.1946416893042624, |
|
"rewards/equation_reward_func": 0.5156250139698386, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 188 |
|
}, |
|
{ |
|
"completion_length": 375.9453239440918, |
|
"epoch": 0.10133333333333333, |
|
"grad_norm": 0.9149096307542565, |
|
"kl": 0.17333984375, |
|
"learning_rate": 3.245080449073459e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4062500447034836, |
|
"reward_std": 0.17988635739311576, |
|
"rewards/equation_reward_func": 0.42968751094304025, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 190 |
|
}, |
|
{ |
|
"completion_length": 379.95834827423096, |
|
"epoch": 0.1024, |
|
"grad_norm": 0.8006457880594571, |
|
"kl": 0.171051025390625, |
|
"learning_rate": 3.210614071594162e-07, |
|
"loss": 0.0002, |
|
"reward": 1.346354205161333, |
|
"reward_std": 0.19684508023783565, |
|
"rewards/equation_reward_func": 0.3671875058207661, |
|
"rewards/format_reward_func": 0.9791666753590107, |
|
"step": 192 |
|
}, |
|
{ |
|
"completion_length": 362.75261402130127, |
|
"epoch": 0.10346666666666667, |
|
"grad_norm": 1.2837479387779884, |
|
"kl": 0.1580810546875, |
|
"learning_rate": 3.1760001190287695e-07, |
|
"loss": 0.0002, |
|
"reward": 1.406250037252903, |
|
"reward_std": 0.19557411642745137, |
|
"rewards/equation_reward_func": 0.42447917722165585, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 194 |
|
}, |
|
{ |
|
"completion_length": 362.6302146911621, |
|
"epoch": 0.10453333333333334, |
|
"grad_norm": 0.6465378835667693, |
|
"kl": 0.152679443359375, |
|
"learning_rate": 3.141245779747502e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4192708767950535, |
|
"reward_std": 0.17333257384598255, |
|
"rewards/equation_reward_func": 0.4401041816454381, |
|
"rewards/format_reward_func": 0.9791666753590107, |
|
"step": 196 |
|
}, |
|
{ |
|
"completion_length": 354.5338625907898, |
|
"epoch": 0.1056, |
|
"grad_norm": 0.5360554028298243, |
|
"kl": 0.178070068359375, |
|
"learning_rate": 3.106358271275056e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4140625447034836, |
|
"reward_std": 0.16571144526824355, |
|
"rewards/equation_reward_func": 0.4322916786186397, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 198 |
|
}, |
|
{ |
|
"completion_length": 385.5208435058594, |
|
"epoch": 0.10666666666666667, |
|
"grad_norm": 0.5592760034852065, |
|
"kl": 0.15142822265625, |
|
"learning_rate": 3.0713448387917227e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3463541939854622, |
|
"reward_std": 0.1820155093446374, |
|
"rewards/equation_reward_func": 0.35677084093913436, |
|
"rewards/format_reward_func": 0.9895833358168602, |
|
"step": 200 |
|
}, |
|
{ |
|
"completion_length": 377.02084827423096, |
|
"epoch": 0.10773333333333333, |
|
"grad_norm": 0.5785507707354157, |
|
"kl": 0.174224853515625, |
|
"learning_rate": 3.0362127536287636e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3541667088866234, |
|
"reward_std": 0.13336461875587702, |
|
"rewards/equation_reward_func": 0.36197917349636555, |
|
"rewards/format_reward_func": 0.9921875074505806, |
|
"step": 202 |
|
}, |
|
{ |
|
"completion_length": 339.1198043823242, |
|
"epoch": 0.1088, |
|
"grad_norm": 0.49158548802495156, |
|
"kl": 0.180938720703125, |
|
"learning_rate": 3.0009693117583523e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4505208693444729, |
|
"reward_std": 0.18924278765916824, |
|
"rewards/equation_reward_func": 0.47135418141260743, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 204 |
|
}, |
|
{ |
|
"completion_length": 361.62500762939453, |
|
"epoch": 0.10986666666666667, |
|
"grad_norm": 0.6202583629431399, |
|
"kl": 0.19464111328125, |
|
"learning_rate": 2.965621832278401e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4192708656191826, |
|
"reward_std": 0.186479932628572, |
|
"rewards/equation_reward_func": 0.4427083437331021, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 206 |
|
}, |
|
{ |
|
"completion_length": 349.294282913208, |
|
"epoch": 0.11093333333333333, |
|
"grad_norm": 0.6453352495768684, |
|
"kl": 0.18829345703125, |
|
"learning_rate": 2.9301776558925875e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4817708805203438, |
|
"reward_std": 0.15738636022433639, |
|
"rewards/equation_reward_func": 0.4973958469927311, |
|
"rewards/format_reward_func": 0.9843750149011612, |
|
"step": 208 |
|
}, |
|
{ |
|
"completion_length": 373.44271659851074, |
|
"epoch": 0.112, |
|
"grad_norm": 0.5146343243630107, |
|
"kl": 0.180908203125, |
|
"learning_rate": 2.894644143385885e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3984375521540642, |
|
"reward_std": 0.1677132830955088, |
|
"rewards/equation_reward_func": 0.42968750931322575, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 210 |
|
}, |
|
{ |
|
"completion_length": 339.29948711395264, |
|
"epoch": 0.11306666666666666, |
|
"grad_norm": 0.6039103056491067, |
|
"kl": 0.2369384765625, |
|
"learning_rate": 2.859028674095937e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4635417088866234, |
|
"reward_std": 0.11582242278382182, |
|
"rewards/equation_reward_func": 0.4661458439659327, |
|
"rewards/format_reward_func": 0.9973958358168602, |
|
"step": 212 |
|
}, |
|
{ |
|
"completion_length": 348.96354961395264, |
|
"epoch": 0.11413333333333334, |
|
"grad_norm": 0.6551166906763881, |
|
"kl": 0.19390869140625, |
|
"learning_rate": 2.823338644380566e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4270833656191826, |
|
"reward_std": 0.14563801139593124, |
|
"rewards/equation_reward_func": 0.450520847691223, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 214 |
|
}, |
|
{ |
|
"completion_length": 346.88542652130127, |
|
"epoch": 0.1152, |
|
"grad_norm": 0.5877900931360449, |
|
"kl": 0.17950439453125, |
|
"learning_rate": 2.7875814660817504e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4531250596046448, |
|
"reward_std": 0.2405162900686264, |
|
"rewards/equation_reward_func": 0.46875001583248377, |
|
"rewards/format_reward_func": 0.9843750149011612, |
|
"step": 216 |
|
}, |
|
{ |
|
"completion_length": 359.08073711395264, |
|
"epoch": 0.11626666666666667, |
|
"grad_norm": 0.4948533563264289, |
|
"kl": 0.18475341796875, |
|
"learning_rate": 2.751764564986396e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4088542088866234, |
|
"reward_std": 0.14888234762474895, |
|
"rewards/equation_reward_func": 0.42187500884756446, |
|
"rewards/format_reward_func": 0.986979179084301, |
|
"step": 218 |
|
}, |
|
{ |
|
"completion_length": 319.13542795181274, |
|
"epoch": 0.11733333333333333, |
|
"grad_norm": 0.7214924505421153, |
|
"kl": 0.18463134765625, |
|
"learning_rate": 2.715895379284194e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4869792014360428, |
|
"reward_std": 0.17930027330294251, |
|
"rewards/equation_reward_func": 0.5078125083819032, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 220 |
|
}, |
|
{ |
|
"completion_length": 347.49480152130127, |
|
"epoch": 0.1184, |
|
"grad_norm": 0.5439846803561813, |
|
"kl": 0.2037353515625, |
|
"learning_rate": 2.6799813580229174e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4192708879709244, |
|
"reward_std": 0.1784167350269854, |
|
"rewards/equation_reward_func": 0.4401041753590107, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 222 |
|
}, |
|
{ |
|
"completion_length": 317.0234432220459, |
|
"epoch": 0.11946666666666667, |
|
"grad_norm": 1.0377272313575363, |
|
"kl": 0.216064453125, |
|
"learning_rate": 2.6440299595614606e-07, |
|
"loss": 0.0002, |
|
"reward": 1.468750037252903, |
|
"reward_std": 0.2108682761900127, |
|
"rewards/equation_reward_func": 0.4791666774544865, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 224 |
|
}, |
|
{ |
|
"completion_length": 304.04688358306885, |
|
"epoch": 0.12053333333333334, |
|
"grad_norm": 0.5374280295870404, |
|
"kl": 0.20159912109375, |
|
"learning_rate": 2.6080486500209347e-07, |
|
"loss": 0.0002, |
|
"reward": 1.546875037252903, |
|
"reward_std": 0.12545502791181207, |
|
"rewards/equation_reward_func": 0.5572916779201478, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 226 |
|
}, |
|
{ |
|
"completion_length": 346.9557342529297, |
|
"epoch": 0.1216, |
|
"grad_norm": 0.6836413099426727, |
|
"kl": 0.1895751953125, |
|
"learning_rate": 2.572044901734166e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3333333805203438, |
|
"reward_std": 0.1696605570614338, |
|
"rewards/equation_reward_func": 0.35156251420266926, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 228 |
|
}, |
|
{ |
|
"completion_length": 322.09636402130127, |
|
"epoch": 0.12266666666666666, |
|
"grad_norm": 0.6445867346856486, |
|
"kl": 0.18853759765625, |
|
"learning_rate": 2.536026191693893e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4661458693444729, |
|
"reward_std": 0.11827961774542928, |
|
"rewards/equation_reward_func": 0.47916668094694614, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 230 |
|
}, |
|
{ |
|
"completion_length": 310.0989685058594, |
|
"epoch": 0.12373333333333333, |
|
"grad_norm": 0.543792761903507, |
|
"kl": 0.29705810546875, |
|
"learning_rate": 2.5e-07, |
|
"loss": 0.0003, |
|
"reward": 1.4505208656191826, |
|
"reward_std": 0.17755454638972878, |
|
"rewards/equation_reward_func": 0.46093750838190317, |
|
"rewards/format_reward_func": 0.9895833395421505, |
|
"step": 232 |
|
}, |
|
{ |
|
"completion_length": 324.65625762939453, |
|
"epoch": 0.1248, |
|
"grad_norm": 0.6314276758218007, |
|
"kl": 0.2232666015625, |
|
"learning_rate": 2.4639738083061073e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4557292237877846, |
|
"reward_std": 0.18806396471336484, |
|
"rewards/equation_reward_func": 0.4739583507180214, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 234 |
|
}, |
|
{ |
|
"completion_length": 320.23178005218506, |
|
"epoch": 0.12586666666666665, |
|
"grad_norm": 0.7391960189787573, |
|
"kl": 0.20880126953125, |
|
"learning_rate": 2.4279550982658345e-07, |
|
"loss": 0.0002, |
|
"reward": 1.484375026077032, |
|
"reward_std": 0.2127929269336164, |
|
"rewards/equation_reward_func": 0.49739584140479565, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 236 |
|
}, |
|
{ |
|
"completion_length": 305.5807361602783, |
|
"epoch": 0.12693333333333334, |
|
"grad_norm": 0.5628054512286292, |
|
"kl": 0.20721435546875, |
|
"learning_rate": 2.3919513499790646e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4791667014360428, |
|
"reward_std": 0.1500206645578146, |
|
"rewards/equation_reward_func": 0.4947916797827929, |
|
"rewards/format_reward_func": 0.9843750149011612, |
|
"step": 238 |
|
}, |
|
{ |
|
"completion_length": 361.0677194595337, |
|
"epoch": 0.128, |
|
"grad_norm": 0.6408903693134582, |
|
"kl": 0.22772216796875, |
|
"learning_rate": 2.3559700404385394e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3489583693444729, |
|
"reward_std": 0.23990731267258525, |
|
"rewards/equation_reward_func": 0.38802084675990045, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 240 |
|
}, |
|
{ |
|
"completion_length": 293.1484432220459, |
|
"epoch": 0.12906666666666666, |
|
"grad_norm": 0.5407067428590937, |
|
"kl": 0.22113037109375, |
|
"learning_rate": 2.3200186419770823e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4843750521540642, |
|
"reward_std": 0.1898468122817576, |
|
"rewards/equation_reward_func": 0.5026041865348816, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 242 |
|
}, |
|
{ |
|
"completion_length": 313.075532913208, |
|
"epoch": 0.13013333333333332, |
|
"grad_norm": 0.7079833699404031, |
|
"kl": 0.21197509765625, |
|
"learning_rate": 2.284104620715807e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4192708730697632, |
|
"reward_std": 0.2272069714963436, |
|
"rewards/equation_reward_func": 0.43229167675599456, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 244 |
|
}, |
|
{ |
|
"completion_length": 347.36980056762695, |
|
"epoch": 0.1312, |
|
"grad_norm": 0.8054821131616522, |
|
"kl": 0.2183837890625, |
|
"learning_rate": 2.2482354350136043e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3359375409781933, |
|
"reward_std": 0.1773815001361072, |
|
"rewards/equation_reward_func": 0.36718751094304025, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 246 |
|
}, |
|
{ |
|
"completion_length": 332.950532913208, |
|
"epoch": 0.13226666666666667, |
|
"grad_norm": 0.5333317411402013, |
|
"kl": 0.22491455078125, |
|
"learning_rate": 2.2124185339182496e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3906250447034836, |
|
"reward_std": 0.23194079333916306, |
|
"rewards/equation_reward_func": 0.42447917722165585, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 248 |
|
}, |
|
{ |
|
"completion_length": 342.0364685058594, |
|
"epoch": 0.13333333333333333, |
|
"grad_norm": 0.5916476351623432, |
|
"kl": 0.21429443359375, |
|
"learning_rate": 2.1766613556194344e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3463542088866234, |
|
"reward_std": 0.20954444538801908, |
|
"rewards/equation_reward_func": 0.3906250074505806, |
|
"rewards/format_reward_func": 0.9557291902601719, |
|
"step": 250 |
|
}, |
|
{ |
|
"completion_length": 333.47396659851074, |
|
"epoch": 0.1344, |
|
"grad_norm": 0.8337535734326338, |
|
"kl": 0.23358154296875, |
|
"learning_rate": 2.1409713259040628e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3281250447034836, |
|
"reward_std": 0.23853017063811421, |
|
"rewards/equation_reward_func": 0.36979167931713164, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 252 |
|
}, |
|
{ |
|
"completion_length": 302.9713649749756, |
|
"epoch": 0.13546666666666668, |
|
"grad_norm": 0.5975375572638757, |
|
"kl": 0.2205810546875, |
|
"learning_rate": 2.105355856614115e-07, |
|
"loss": 0.0002, |
|
"reward": 1.455729216337204, |
|
"reward_std": 0.19006694853305817, |
|
"rewards/equation_reward_func": 0.4817708460614085, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 254 |
|
}, |
|
{ |
|
"completion_length": 295.7500066757202, |
|
"epoch": 0.13653333333333334, |
|
"grad_norm": 0.7360600957552589, |
|
"kl": 0.23883056640625, |
|
"learning_rate": 2.069822344107413e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4895833730697632, |
|
"reward_std": 0.2519920407794416, |
|
"rewards/equation_reward_func": 0.5156250165309757, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 256 |
|
}, |
|
{ |
|
"completion_length": 298.57552909851074, |
|
"epoch": 0.1376, |
|
"grad_norm": 0.6139982646016184, |
|
"kl": 0.2281494140625, |
|
"learning_rate": 2.034378167721599e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4687500298023224, |
|
"reward_std": 0.20331065729260445, |
|
"rewards/equation_reward_func": 0.5000000093132257, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 258 |
|
}, |
|
{ |
|
"completion_length": 267.87500762939453, |
|
"epoch": 0.13866666666666666, |
|
"grad_norm": 0.6087434332809726, |
|
"kl": 0.23504638671875, |
|
"learning_rate": 1.9990306882416485e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5546875335276127, |
|
"reward_std": 0.19739143131300807, |
|
"rewards/equation_reward_func": 0.585937513737008, |
|
"rewards/format_reward_func": 0.9687500111758709, |
|
"step": 260 |
|
}, |
|
{ |
|
"completion_length": 276.205735206604, |
|
"epoch": 0.13973333333333332, |
|
"grad_norm": 0.4844743573865835, |
|
"kl": 0.23095703125, |
|
"learning_rate": 1.9637872463712362e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5026042088866234, |
|
"reward_std": 0.20278947753831744, |
|
"rewards/equation_reward_func": 0.5156250186264515, |
|
"rewards/format_reward_func": 0.986979179084301, |
|
"step": 262 |
|
}, |
|
{ |
|
"completion_length": 317.5833406448364, |
|
"epoch": 0.1408, |
|
"grad_norm": 4.225936192922191, |
|
"kl": 0.6976318359375, |
|
"learning_rate": 1.9286551612082773e-07, |
|
"loss": 0.0007, |
|
"reward": 1.4218750409781933, |
|
"reward_std": 0.25739257177338004, |
|
"rewards/equation_reward_func": 0.4635416828095913, |
|
"rewards/format_reward_func": 0.9583333544433117, |
|
"step": 264 |
|
}, |
|
{ |
|
"completion_length": 289.7031316757202, |
|
"epoch": 0.14186666666666667, |
|
"grad_norm": 0.7733885366987531, |
|
"kl": 0.23797607421875, |
|
"learning_rate": 1.8936417287249446e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4427083730697632, |
|
"reward_std": 0.1840976132079959, |
|
"rewards/equation_reward_func": 0.4609375123400241, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 266 |
|
}, |
|
{ |
|
"completion_length": 279.06511211395264, |
|
"epoch": 0.14293333333333333, |
|
"grad_norm": 0.7812632663890211, |
|
"kl": 0.24505615234375, |
|
"learning_rate": 1.8587542202524985e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4140625521540642, |
|
"reward_std": 0.20719708874821663, |
|
"rewards/equation_reward_func": 0.4375000107102096, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 268 |
|
}, |
|
{ |
|
"completion_length": 279.8958435058594, |
|
"epoch": 0.144, |
|
"grad_norm": 0.9200641519700825, |
|
"kl": 0.25006103515625, |
|
"learning_rate": 1.82399988097123e-07, |
|
"loss": 0.0003, |
|
"reward": 1.4479167126119137, |
|
"reward_std": 0.194319240283221, |
|
"rewards/equation_reward_func": 0.497395847691223, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 270 |
|
}, |
|
{ |
|
"completion_length": 308.63542556762695, |
|
"epoch": 0.14506666666666668, |
|
"grad_norm": 0.6777891079331845, |
|
"kl": 0.246337890625, |
|
"learning_rate": 1.7893859284058378e-07, |
|
"loss": 0.0002, |
|
"reward": 1.398437537252903, |
|
"reward_std": 0.2058353153988719, |
|
"rewards/equation_reward_func": 0.432291675824672, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 272 |
|
}, |
|
{ |
|
"completion_length": 363.75000953674316, |
|
"epoch": 0.14613333333333334, |
|
"grad_norm": 0.7035357358785052, |
|
"kl": 0.238525390625, |
|
"learning_rate": 1.7549195509265407e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2578125484287739, |
|
"reward_std": 0.27548868488520384, |
|
"rewards/equation_reward_func": 0.32552084513008595, |
|
"rewards/format_reward_func": 0.9322916902601719, |
|
"step": 274 |
|
}, |
|
{ |
|
"completion_length": 320.4010524749756, |
|
"epoch": 0.1472, |
|
"grad_norm": 0.5994918354091399, |
|
"kl": 0.24481201171875, |
|
"learning_rate": 1.7206079062562536e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4062500447034836, |
|
"reward_std": 0.18554408056661487, |
|
"rewards/equation_reward_func": 0.4661458421032876, |
|
"rewards/format_reward_func": 0.9401041865348816, |
|
"step": 276 |
|
}, |
|
{ |
|
"completion_length": 317.41667556762695, |
|
"epoch": 0.14826666666666666, |
|
"grad_norm": 1.0621443788066243, |
|
"kl": 0.2325439453125, |
|
"learning_rate": 1.6864581199841226e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3697916977107525, |
|
"reward_std": 0.30007801530882716, |
|
"rewards/equation_reward_func": 0.42447918374091387, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 278 |
|
}, |
|
{ |
|
"completion_length": 293.86198806762695, |
|
"epoch": 0.14933333333333335, |
|
"grad_norm": 0.6658308659860376, |
|
"kl": 0.23321533203125, |
|
"learning_rate": 1.6524772840857388e-07, |
|
"loss": 0.0002, |
|
"reward": 1.471354216337204, |
|
"reward_std": 0.14693401101976633, |
|
"rewards/equation_reward_func": 0.5052083488553762, |
|
"rewards/format_reward_func": 0.9661458432674408, |
|
"step": 280 |
|
}, |
|
{ |
|
"completion_length": 324.47657203674316, |
|
"epoch": 0.1504, |
|
"grad_norm": 0.749916869176833, |
|
"kl": 0.2568359375, |
|
"learning_rate": 1.6186724554503237e-07, |
|
"loss": 0.0003, |
|
"reward": 1.3463542088866234, |
|
"reward_std": 0.22985868388786912, |
|
"rewards/equation_reward_func": 0.4192708469927311, |
|
"rewards/format_reward_func": 0.9270833507180214, |
|
"step": 282 |
|
}, |
|
{ |
|
"completion_length": 329.3437557220459, |
|
"epoch": 0.15146666666666667, |
|
"grad_norm": 0.9887835539251056, |
|
"kl": 0.2294921875, |
|
"learning_rate": 1.5850506544152103e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3177083656191826, |
|
"reward_std": 0.26103097246959805, |
|
"rewards/equation_reward_func": 0.367187506519258, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 284 |
|
}, |
|
{ |
|
"completion_length": 315.1927185058594, |
|
"epoch": 0.15253333333333333, |
|
"grad_norm": 0.8107267722492516, |
|
"kl": 0.2274169921875, |
|
"learning_rate": 1.5516188633079107e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4088542088866234, |
|
"reward_std": 0.25638267770409584, |
|
"rewards/equation_reward_func": 0.4609375139698386, |
|
"rewards/format_reward_func": 0.9479166902601719, |
|
"step": 286 |
|
}, |
|
{ |
|
"completion_length": 281.1354236602783, |
|
"epoch": 0.1536, |
|
"grad_norm": 0.5088155440303934, |
|
"kl": 0.25, |
|
"learning_rate": 1.5183840249960784e-07, |
|
"loss": 0.0003, |
|
"reward": 1.500000037252903, |
|
"reward_std": 0.2080402374267578, |
|
"rewards/equation_reward_func": 0.533854179084301, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 288 |
|
}, |
|
{ |
|
"completion_length": 301.25521659851074, |
|
"epoch": 0.15466666666666667, |
|
"grad_norm": 0.625913529917873, |
|
"kl": 0.2359619140625, |
|
"learning_rate": 1.4853530414456612e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4244792014360428, |
|
"reward_std": 0.26187680941075087, |
|
"rewards/equation_reward_func": 0.479166685603559, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 290 |
|
}, |
|
{ |
|
"completion_length": 311.2500123977661, |
|
"epoch": 0.15573333333333333, |
|
"grad_norm": 0.6304718791189173, |
|
"kl": 0.21844482421875, |
|
"learning_rate": 1.4525327722875568e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4401041977107525, |
|
"reward_std": 0.25729640712961555, |
|
"rewards/equation_reward_func": 0.49218750977888703, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 292 |
|
}, |
|
{ |
|
"completion_length": 305.377610206604, |
|
"epoch": 0.1568, |
|
"grad_norm": 0.7466996789785751, |
|
"kl": 0.2376708984375, |
|
"learning_rate": 1.4199300333930515e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4270833507180214, |
|
"reward_std": 0.24685111083090305, |
|
"rewards/equation_reward_func": 0.4791666797827929, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 294 |
|
}, |
|
{ |
|
"completion_length": 277.5703225135803, |
|
"epoch": 0.15786666666666666, |
|
"grad_norm": 1.3431386452828957, |
|
"kl": 0.23419189453125, |
|
"learning_rate": 1.3875515954583523e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4583333730697632, |
|
"reward_std": 0.2398097929544747, |
|
"rewards/equation_reward_func": 0.49218751303851604, |
|
"rewards/format_reward_func": 0.9661458544433117, |
|
"step": 296 |
|
}, |
|
{ |
|
"completion_length": 302.50001096725464, |
|
"epoch": 0.15893333333333334, |
|
"grad_norm": 0.7084022277977373, |
|
"kl": 0.24224853515625, |
|
"learning_rate": 1.3554041825985e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3958333730697632, |
|
"reward_std": 0.21419357042759657, |
|
"rewards/equation_reward_func": 0.43229168094694614, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 298 |
|
}, |
|
{ |
|
"completion_length": 298.9192838668823, |
|
"epoch": 0.16, |
|
"grad_norm": 0.6330236043077112, |
|
"kl": 0.236572265625, |
|
"learning_rate": 1.323494470950949e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4375000335276127, |
|
"reward_std": 0.19589123968034983, |
|
"rewards/equation_reward_func": 0.4765625116415322, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 300 |
|
}, |
|
{ |
|
"completion_length": 304.861985206604, |
|
"epoch": 0.16106666666666666, |
|
"grad_norm": 0.8282349724126732, |
|
"kl": 0.2291259765625, |
|
"learning_rate": 1.2918290872891236e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3671875447034836, |
|
"reward_std": 0.19775044033303857, |
|
"rewards/equation_reward_func": 0.416666679084301, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 302 |
|
}, |
|
{ |
|
"completion_length": 310.7447986602783, |
|
"epoch": 0.16213333333333332, |
|
"grad_norm": 0.6732535657390017, |
|
"kl": 0.258056640625, |
|
"learning_rate": 1.260414607646213e-07, |
|
"loss": 0.0003, |
|
"reward": 1.359375026077032, |
|
"reward_std": 0.1770839923992753, |
|
"rewards/equation_reward_func": 0.4062500074505806, |
|
"rewards/format_reward_func": 0.9531250111758709, |
|
"step": 304 |
|
}, |
|
{ |
|
"completion_length": 273.6614646911621, |
|
"epoch": 0.1632, |
|
"grad_norm": 0.9183577183732234, |
|
"kl": 0.23602294921875, |
|
"learning_rate": 1.2292575559495143e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4166667014360428, |
|
"reward_std": 0.18182098167017102, |
|
"rewards/equation_reward_func": 0.4453125111758709, |
|
"rewards/format_reward_func": 0.9713541753590107, |
|
"step": 306 |
|
}, |
|
{ |
|
"completion_length": 247.5677146911621, |
|
"epoch": 0.16426666666666667, |
|
"grad_norm": 0.6700271556876387, |
|
"kl": 0.2451171875, |
|
"learning_rate": 1.1983644026655835e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5364583805203438, |
|
"reward_std": 0.16835713339969516, |
|
"rewards/equation_reward_func": 0.5468750132713467, |
|
"rewards/format_reward_func": 0.9895833395421505, |
|
"step": 308 |
|
}, |
|
{ |
|
"completion_length": 296.3489637374878, |
|
"epoch": 0.16533333333333333, |
|
"grad_norm": 0.7335847558655371, |
|
"kl": 0.22369384765625, |
|
"learning_rate": 1.1677415634565066e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4244792014360428, |
|
"reward_std": 0.21954377181828022, |
|
"rewards/equation_reward_func": 0.4739583469927311, |
|
"rewards/format_reward_func": 0.9505208469927311, |
|
"step": 310 |
|
}, |
|
{ |
|
"completion_length": 294.5338611602783, |
|
"epoch": 0.1664, |
|
"grad_norm": 0.566093567716464, |
|
"kl": 0.26129150390625, |
|
"learning_rate": 1.1373953978475353e-07, |
|
"loss": 0.0003, |
|
"reward": 1.3828125447034836, |
|
"reward_std": 0.15335895912721753, |
|
"rewards/equation_reward_func": 0.42968750884756446, |
|
"rewards/format_reward_func": 0.9531250111758709, |
|
"step": 312 |
|
}, |
|
{ |
|
"completion_length": 319.2760543823242, |
|
"epoch": 0.16746666666666668, |
|
"grad_norm": 0.7230399862261075, |
|
"kl": 0.2347412109375, |
|
"learning_rate": 1.1073322079063913e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2890625335276127, |
|
"reward_std": 0.1907347133383155, |
|
"rewards/equation_reward_func": 0.3281250062864274, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 314 |
|
}, |
|
{ |
|
"completion_length": 277.4401111602783, |
|
"epoch": 0.16853333333333334, |
|
"grad_norm": 0.7925402861184135, |
|
"kl": 0.228271484375, |
|
"learning_rate": 1.0775582369344946e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4869792237877846, |
|
"reward_std": 0.1919974870979786, |
|
"rewards/equation_reward_func": 0.5182291828095913, |
|
"rewards/format_reward_func": 0.9687500111758709, |
|
"step": 316 |
|
}, |
|
{ |
|
"completion_length": 314.05209255218506, |
|
"epoch": 0.1696, |
|
"grad_norm": 0.7822619678777167, |
|
"kl": 0.21319580078125, |
|
"learning_rate": 1.0480796681704077e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4088542126119137, |
|
"reward_std": 0.21224576281383634, |
|
"rewards/equation_reward_func": 0.4375000149011612, |
|
"rewards/format_reward_func": 0.9713541828095913, |
|
"step": 318 |
|
}, |
|
{ |
|
"completion_length": 263.19011211395264, |
|
"epoch": 0.17066666666666666, |
|
"grad_norm": 0.6210402052679087, |
|
"kl": 0.23016357421875, |
|
"learning_rate": 1.018902623505741e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4791667088866234, |
|
"reward_std": 0.2531423852778971, |
|
"rewards/equation_reward_func": 0.5208333446644247, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 320 |
|
}, |
|
{ |
|
"completion_length": 287.1484489440918, |
|
"epoch": 0.17173333333333332, |
|
"grad_norm": 0.634776746244374, |
|
"kl": 0.2230224609375, |
|
"learning_rate": 9.900331622138063e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4348958730697632, |
|
"reward_std": 0.15947270533069968, |
|
"rewards/equation_reward_func": 0.4557291779201478, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 322 |
|
}, |
|
{ |
|
"completion_length": 284.5286521911621, |
|
"epoch": 0.1728, |
|
"grad_norm": 0.8088754769939193, |
|
"kl": 0.21588134765625, |
|
"learning_rate": 9.614772796912681e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4348958656191826, |
|
"reward_std": 0.23451161291450262, |
|
"rewards/equation_reward_func": 0.4791666802484542, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 324 |
|
}, |
|
{ |
|
"completion_length": 297.3776135444641, |
|
"epoch": 0.17386666666666667, |
|
"grad_norm": 0.9105446106335183, |
|
"kl": 0.22491455078125, |
|
"learning_rate": 9.332409062130686e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3177083805203438, |
|
"reward_std": 0.1509189954958856, |
|
"rewards/equation_reward_func": 0.3515625116415322, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 326 |
|
}, |
|
{ |
|
"completion_length": 298.77084398269653, |
|
"epoch": 0.17493333333333333, |
|
"grad_norm": 0.7126661333306704, |
|
"kl": 0.22003173828125, |
|
"learning_rate": 9.053299057008699e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3958333730697632, |
|
"reward_std": 0.15720906713977456, |
|
"rewards/equation_reward_func": 0.4401041779201478, |
|
"rewards/format_reward_func": 0.9557291865348816, |
|
"step": 328 |
|
}, |
|
{ |
|
"completion_length": 308.93490505218506, |
|
"epoch": 0.176, |
|
"grad_norm": 0.7401905494769911, |
|
"kl": 0.2979736328125, |
|
"learning_rate": 8.777500745052743e-08, |
|
"loss": 0.0003, |
|
"reward": 1.3385416977107525, |
|
"reward_std": 0.17720176372677088, |
|
"rewards/equation_reward_func": 0.3750000111758709, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 330 |
|
}, |
|
{ |
|
"completion_length": 315.75782012939453, |
|
"epoch": 0.17706666666666668, |
|
"grad_norm": 1.0292481385344092, |
|
"kl": 0.2142333984375, |
|
"learning_rate": 8.505071402020892e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3567708730697632, |
|
"reward_std": 0.16194288805127144, |
|
"rewards/equation_reward_func": 0.3854166786186397, |
|
"rewards/format_reward_func": 0.9713541828095913, |
|
"step": 332 |
|
}, |
|
{ |
|
"completion_length": 261.085946559906, |
|
"epoch": 0.17813333333333334, |
|
"grad_norm": 0.6313338932442103, |
|
"kl": 0.27130126953125, |
|
"learning_rate": 8.236067604028562e-08, |
|
"loss": 0.0003, |
|
"reward": 1.5260417088866234, |
|
"reward_std": 0.19619298679754138, |
|
"rewards/equation_reward_func": 0.5703125167638063, |
|
"rewards/format_reward_func": 0.955729179084301, |
|
"step": 334 |
|
}, |
|
{ |
|
"completion_length": 299.28125858306885, |
|
"epoch": 0.1792, |
|
"grad_norm": 0.767319955612925, |
|
"kl": 0.2216796875, |
|
"learning_rate": 7.970545215799327e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3984375521540642, |
|
"reward_std": 0.1856266581453383, |
|
"rewards/equation_reward_func": 0.43489584513008595, |
|
"rewards/format_reward_func": 0.9635416865348816, |
|
"step": 336 |
|
}, |
|
{ |
|
"completion_length": 289.1171979904175, |
|
"epoch": 0.18026666666666666, |
|
"grad_norm": 0.5279846258585172, |
|
"kl": 0.22845458984375, |
|
"learning_rate": 7.708559379063204e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4010417088866234, |
|
"reward_std": 0.19252674980089068, |
|
"rewards/equation_reward_func": 0.432291679084301, |
|
"rewards/format_reward_func": 0.9687500186264515, |
|
"step": 338 |
|
}, |
|
{ |
|
"completion_length": 261.9843816757202, |
|
"epoch": 0.18133333333333335, |
|
"grad_norm": 1.0110560575317675, |
|
"kl": 0.392333984375, |
|
"learning_rate": 7.45016450110534e-08, |
|
"loss": 0.0004, |
|
"reward": 1.4687500447034836, |
|
"reward_std": 0.16133547388017178, |
|
"rewards/equation_reward_func": 0.5000000167638063, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 340 |
|
}, |
|
{ |
|
"completion_length": 264.8333406448364, |
|
"epoch": 0.1824, |
|
"grad_norm": 0.8570941539149842, |
|
"kl": 0.22308349609375, |
|
"learning_rate": 7.195414243467029e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4583333805203438, |
|
"reward_std": 0.13745049014687538, |
|
"rewards/equation_reward_func": 0.4687500139698386, |
|
"rewards/format_reward_func": 0.9895833395421505, |
|
"step": 342 |
|
}, |
|
{ |
|
"completion_length": 230.47396659851074, |
|
"epoch": 0.18346666666666667, |
|
"grad_norm": 0.7319616485107195, |
|
"kl": 0.24261474609375, |
|
"learning_rate": 6.944361510801763e-08, |
|
"loss": 0.0002, |
|
"reward": 1.6041667237877846, |
|
"reward_std": 0.09940882679075003, |
|
"rewards/equation_reward_func": 0.6197916846722364, |
|
"rewards/format_reward_func": 0.9843750074505806, |
|
"step": 344 |
|
}, |
|
{ |
|
"completion_length": 325.17448902130127, |
|
"epoch": 0.18453333333333333, |
|
"grad_norm": 0.8899948638554169, |
|
"kl": 0.22564697265625, |
|
"learning_rate": 6.697058439888283e-08, |
|
"loss": 0.0002, |
|
"reward": 1.2890625335276127, |
|
"reward_std": 0.17243655677884817, |
|
"rewards/equation_reward_func": 0.32552084047347307, |
|
"rewards/format_reward_func": 0.9635416902601719, |
|
"step": 346 |
|
}, |
|
{ |
|
"completion_length": 277.57032012939453, |
|
"epoch": 0.1856, |
|
"grad_norm": 0.9596879937723287, |
|
"kl": 0.23382568359375, |
|
"learning_rate": 6.453556388803288e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4322917237877846, |
|
"reward_std": 0.2332175257615745, |
|
"rewards/equation_reward_func": 0.46875001303851604, |
|
"rewards/format_reward_func": 0.9635416865348816, |
|
"step": 348 |
|
}, |
|
{ |
|
"completion_length": 247.51823616027832, |
|
"epoch": 0.18666666666666668, |
|
"grad_norm": 0.7147233096894462, |
|
"kl": 0.23199462890625, |
|
"learning_rate": 6.213905926255697e-08, |
|
"loss": 0.0002, |
|
"reward": 1.572916716337204, |
|
"reward_std": 0.13177413679659367, |
|
"rewards/equation_reward_func": 0.5885416772216558, |
|
"rewards/format_reward_func": 0.9843750037252903, |
|
"step": 350 |
|
}, |
|
{ |
|
"completion_length": 254.29688453674316, |
|
"epoch": 0.18773333333333334, |
|
"grad_norm": 0.5156264024880255, |
|
"kl": 0.2138671875, |
|
"learning_rate": 5.978156821084987e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5338542014360428, |
|
"reward_std": 0.09953805012628436, |
|
"rewards/equation_reward_func": 0.570312513038516, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 352 |
|
}, |
|
{ |
|
"completion_length": 306.208345413208, |
|
"epoch": 0.1888, |
|
"grad_norm": 0.7349538500183934, |
|
"kl": 0.22650146484375, |
|
"learning_rate": 5.7463580319254853e-08, |
|
"loss": 0.0002, |
|
"reward": 1.2916666977107525, |
|
"reward_std": 0.23371722968295217, |
|
"rewards/equation_reward_func": 0.34375000814907253, |
|
"rewards/format_reward_func": 0.9479166939854622, |
|
"step": 354 |
|
}, |
|
{ |
|
"completion_length": 250.97396755218506, |
|
"epoch": 0.18986666666666666, |
|
"grad_norm": 0.9539899909075816, |
|
"kl": 0.2208251953125, |
|
"learning_rate": 5.518557697039081e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5442708805203438, |
|
"reward_std": 0.12091149017214775, |
|
"rewards/equation_reward_func": 0.5598958432674408, |
|
"rewards/format_reward_func": 0.9843750111758709, |
|
"step": 356 |
|
}, |
|
{ |
|
"completion_length": 247.60937976837158, |
|
"epoch": 0.19093333333333334, |
|
"grad_norm": 0.5833784789045885, |
|
"kl": 0.24102783203125, |
|
"learning_rate": 5.294803124318145e-08, |
|
"loss": 0.0002, |
|
"reward": 1.520833395421505, |
|
"reward_std": 0.08653856860473752, |
|
"rewards/equation_reward_func": 0.5364583507180214, |
|
"rewards/format_reward_func": 0.9843750111758709, |
|
"step": 358 |
|
}, |
|
{ |
|
"completion_length": 277.3724021911621, |
|
"epoch": 0.192, |
|
"grad_norm": 0.7440919143169152, |
|
"kl": 0.2235107421875, |
|
"learning_rate": 5.07514078146106e-08, |
|
"loss": 0.0002, |
|
"reward": 1.476562537252903, |
|
"reward_std": 0.14440069487318397, |
|
"rewards/equation_reward_func": 0.5000000121071935, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 360 |
|
}, |
|
{ |
|
"completion_length": 226.549485206604, |
|
"epoch": 0.19306666666666666, |
|
"grad_norm": 0.6991350685338126, |
|
"kl": 0.2354736328125, |
|
"learning_rate": 4.859616286322094e-08, |
|
"loss": 0.0002, |
|
"reward": 1.6692708656191826, |
|
"reward_std": 0.1841339054517448, |
|
"rewards/equation_reward_func": 0.6875000204890966, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 362 |
|
}, |
|
{ |
|
"completion_length": 257.7474002838135, |
|
"epoch": 0.19413333333333332, |
|
"grad_norm": 0.6684660994634332, |
|
"kl": 0.22576904296875, |
|
"learning_rate": 4.648274397437829e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4817708656191826, |
|
"reward_std": 0.14901333069428802, |
|
"rewards/equation_reward_func": 0.49739584466442466, |
|
"rewards/format_reward_func": 0.9843750074505806, |
|
"step": 364 |
|
}, |
|
{ |
|
"completion_length": 247.213547706604, |
|
"epoch": 0.1952, |
|
"grad_norm": 1.3498805283474344, |
|
"kl": 0.2373046875, |
|
"learning_rate": 4.4411590047320617e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5416667088866234, |
|
"reward_std": 0.20480258110910654, |
|
"rewards/equation_reward_func": 0.5677083497866988, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 366 |
|
}, |
|
{ |
|
"completion_length": 271.8333387374878, |
|
"epoch": 0.19626666666666667, |
|
"grad_norm": 0.4853801837605491, |
|
"kl": 0.2344970703125, |
|
"learning_rate": 4.2383131204010494e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4765625223517418, |
|
"reward_std": 0.13479887740686536, |
|
"rewards/equation_reward_func": 0.5052083446644247, |
|
"rewards/format_reward_func": 0.9713541753590107, |
|
"step": 368 |
|
}, |
|
{ |
|
"completion_length": 304.4739694595337, |
|
"epoch": 0.19733333333333333, |
|
"grad_norm": 0.6611011259992957, |
|
"kl": 0.22882080078125, |
|
"learning_rate": 4.039778869981064e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3151042014360428, |
|
"reward_std": 0.2048181821592152, |
|
"rewards/equation_reward_func": 0.3567708428017795, |
|
"rewards/format_reward_func": 0.9583333432674408, |
|
"step": 370 |
|
}, |
|
{ |
|
"completion_length": 256.5833406448364, |
|
"epoch": 0.1984, |
|
"grad_norm": 0.7998496404585249, |
|
"kl": 0.22052001953125, |
|
"learning_rate": 3.845597483600049e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5052083767950535, |
|
"reward_std": 0.17007605405524373, |
|
"rewards/equation_reward_func": 0.5260416804812849, |
|
"rewards/format_reward_func": 0.9791666716337204, |
|
"step": 372 |
|
}, |
|
{ |
|
"completion_length": 288.625009059906, |
|
"epoch": 0.19946666666666665, |
|
"grad_norm": 0.6923241167833261, |
|
"kl": 0.228271484375, |
|
"learning_rate": 3.655809287415284e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4192708730697632, |
|
"reward_std": 0.18929289653897285, |
|
"rewards/equation_reward_func": 0.4531250118743628, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 374 |
|
}, |
|
{ |
|
"completion_length": 299.89323329925537, |
|
"epoch": 0.20053333333333334, |
|
"grad_norm": 0.8024434586179425, |
|
"kl": 0.21966552734375, |
|
"learning_rate": 3.4704536952387285e-08, |
|
"loss": 0.0002, |
|
"reward": 1.341145858168602, |
|
"reward_std": 0.2507268921472132, |
|
"rewards/equation_reward_func": 0.3828125037252903, |
|
"rewards/format_reward_func": 0.9583333544433117, |
|
"step": 376 |
|
}, |
|
{ |
|
"completion_length": 293.13542556762695, |
|
"epoch": 0.2016, |
|
"grad_norm": 0.7670744626957942, |
|
"kl": 0.23614501953125, |
|
"learning_rate": 3.2895692003518575e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3541666977107525, |
|
"reward_std": 0.26297286711633205, |
|
"rewards/equation_reward_func": 0.40364584466442466, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 378 |
|
}, |
|
{ |
|
"completion_length": 267.18490409851074, |
|
"epoch": 0.20266666666666666, |
|
"grad_norm": 18.75450521285186, |
|
"kl": 0.29046630859375, |
|
"learning_rate": 3.113193367511635e-08, |
|
"loss": 0.0003, |
|
"reward": 1.468750037252903, |
|
"reward_std": 0.20902110496535897, |
|
"rewards/equation_reward_func": 0.5026041809469461, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 380 |
|
}, |
|
{ |
|
"completion_length": 287.71354961395264, |
|
"epoch": 0.20373333333333332, |
|
"grad_norm": 0.8438907529414764, |
|
"kl": 0.236083984375, |
|
"learning_rate": 2.9413628251493934e-08, |
|
"loss": 0.0002, |
|
"reward": 1.335937537252903, |
|
"reward_std": 0.18686132272705436, |
|
"rewards/equation_reward_func": 0.37760417233221233, |
|
"rewards/format_reward_func": 0.9583333469927311, |
|
"step": 382 |
|
}, |
|
{ |
|
"completion_length": 272.40365409851074, |
|
"epoch": 0.2048, |
|
"grad_norm": 1.0640930657692547, |
|
"kl": 0.232177734375, |
|
"learning_rate": 2.774113257764066e-08, |
|
"loss": 0.0002, |
|
"reward": 1.468750037252903, |
|
"reward_std": 0.259894413407892, |
|
"rewards/equation_reward_func": 0.49739584885537624, |
|
"rewards/format_reward_func": 0.9713541902601719, |
|
"step": 384 |
|
}, |
|
{ |
|
"completion_length": 294.9479236602783, |
|
"epoch": 0.20586666666666667, |
|
"grad_norm": 0.5559880495074878, |
|
"kl": 0.2174072265625, |
|
"learning_rate": 2.611479398511518e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3619792088866234, |
|
"reward_std": 0.11354415956884623, |
|
"rewards/equation_reward_func": 0.3854166741948575, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 386 |
|
}, |
|
{ |
|
"completion_length": 283.9375123977661, |
|
"epoch": 0.20693333333333333, |
|
"grad_norm": 0.6076443550251356, |
|
"kl": 0.22991943359375, |
|
"learning_rate": 2.4534950219914057e-08, |
|
"loss": 0.0002, |
|
"reward": 1.382812537252903, |
|
"reward_std": 0.14025551825761795, |
|
"rewards/equation_reward_func": 0.408854179084301, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 388 |
|
}, |
|
{ |
|
"completion_length": 279.5729293823242, |
|
"epoch": 0.208, |
|
"grad_norm": 0.8924913049812959, |
|
"kl": 0.22430419921875, |
|
"learning_rate": 2.300192937233128e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4427083656191826, |
|
"reward_std": 0.24587285611778498, |
|
"rewards/equation_reward_func": 0.47135418094694614, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 390 |
|
}, |
|
{ |
|
"completion_length": 260.111985206604, |
|
"epoch": 0.20906666666666668, |
|
"grad_norm": 0.6858880533326718, |
|
"kl": 0.298583984375, |
|
"learning_rate": 2.1516049808822935e-08, |
|
"loss": 0.0003, |
|
"reward": 1.5052083730697632, |
|
"reward_std": 0.1241533849388361, |
|
"rewards/equation_reward_func": 0.5182291865348816, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 392 |
|
}, |
|
{ |
|
"completion_length": 266.47657012939453, |
|
"epoch": 0.21013333333333334, |
|
"grad_norm": 0.6402109636411236, |
|
"kl": 0.2220458984375, |
|
"learning_rate": 2.007762010589098e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4375000447034836, |
|
"reward_std": 0.20047930860891938, |
|
"rewards/equation_reward_func": 0.4765625181607902, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 394 |
|
}, |
|
{ |
|
"completion_length": 270.7604217529297, |
|
"epoch": 0.2112, |
|
"grad_norm": 0.906287924542971, |
|
"kl": 0.2283935546875, |
|
"learning_rate": 1.8686938986000627e-08, |
|
"loss": 0.0002, |
|
"reward": 1.502604205161333, |
|
"reward_std": 0.13055545324459672, |
|
"rewards/equation_reward_func": 0.5208333441987634, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 396 |
|
}, |
|
{ |
|
"completion_length": 296.5468854904175, |
|
"epoch": 0.21226666666666666, |
|
"grad_norm": 0.7760203626767087, |
|
"kl": 0.24676513671875, |
|
"learning_rate": 1.734429525554365e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3385417088866234, |
|
"reward_std": 0.18930128030478954, |
|
"rewards/equation_reward_func": 0.36197917931713164, |
|
"rewards/format_reward_func": 0.9765625223517418, |
|
"step": 398 |
|
}, |
|
{ |
|
"completion_length": 272.8854250907898, |
|
"epoch": 0.21333333333333335, |
|
"grad_norm": 1.5742500055580204, |
|
"kl": 0.24517822265625, |
|
"learning_rate": 1.604996774486145e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4661458656191826, |
|
"reward_std": 0.20536453556269407, |
|
"rewards/equation_reward_func": 0.5052083469927311, |
|
"rewards/format_reward_func": 0.9609375111758709, |
|
"step": 400 |
|
}, |
|
{ |
|
"completion_length": 267.20052909851074, |
|
"epoch": 0.2144, |
|
"grad_norm": 0.6444833598398121, |
|
"kl": 0.29022216796875, |
|
"learning_rate": 1.4804225250339281e-08, |
|
"loss": 0.0003, |
|
"reward": 1.4557292126119137, |
|
"reward_std": 0.20770712802186608, |
|
"rewards/equation_reward_func": 0.48958334466442466, |
|
"rewards/format_reward_func": 0.9661458395421505, |
|
"step": 402 |
|
}, |
|
{ |
|
"completion_length": 300.9765691757202, |
|
"epoch": 0.21546666666666667, |
|
"grad_norm": 1.4114979925483315, |
|
"kl": 0.25653076171875, |
|
"learning_rate": 1.360732647858498e-08, |
|
"loss": 0.0003, |
|
"reward": 1.3906250596046448, |
|
"reward_std": 0.22731021884828806, |
|
"rewards/equation_reward_func": 0.4401041760575026, |
|
"rewards/format_reward_func": 0.950520858168602, |
|
"step": 404 |
|
}, |
|
{ |
|
"completion_length": 282.7500057220459, |
|
"epoch": 0.21653333333333333, |
|
"grad_norm": 0.6696240626293803, |
|
"kl": 0.22821044921875, |
|
"learning_rate": 1.2459519992702311e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4453125298023224, |
|
"reward_std": 0.20481601962819695, |
|
"rewards/equation_reward_func": 0.4661458432674408, |
|
"rewards/format_reward_func": 0.9791666753590107, |
|
"step": 406 |
|
}, |
|
{ |
|
"completion_length": 280.0234456062317, |
|
"epoch": 0.2176, |
|
"grad_norm": 0.6398611308559539, |
|
"kl": 0.2344970703125, |
|
"learning_rate": 1.1361044160671629e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4114583656191826, |
|
"reward_std": 0.16066451743245125, |
|
"rewards/equation_reward_func": 0.4322916760575026, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 408 |
|
}, |
|
{ |
|
"completion_length": 283.92448902130127, |
|
"epoch": 0.21866666666666668, |
|
"grad_norm": 0.7894317667037624, |
|
"kl": 0.2291259765625, |
|
"learning_rate": 1.0312127105846947e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4583333730697632, |
|
"reward_std": 0.1464251554571092, |
|
"rewards/equation_reward_func": 0.48958334466442466, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 410 |
|
}, |
|
{ |
|
"completion_length": 269.15105152130127, |
|
"epoch": 0.21973333333333334, |
|
"grad_norm": 0.6521973364777306, |
|
"kl": 0.2493896484375, |
|
"learning_rate": 9.312986659581301e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4218750409781933, |
|
"reward_std": 0.20944805443286896, |
|
"rewards/equation_reward_func": 0.4583333421032876, |
|
"rewards/format_reward_func": 0.9635416902601719, |
|
"step": 412 |
|
}, |
|
{ |
|
"completion_length": 303.3698024749756, |
|
"epoch": 0.2208, |
|
"grad_norm": 0.928412007143324, |
|
"kl": 0.22808837890625, |
|
"learning_rate": 8.363830315988945e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3854167088866234, |
|
"reward_std": 0.22633069939911366, |
|
"rewards/equation_reward_func": 0.4140625100117177, |
|
"rewards/format_reward_func": 0.9713541828095913, |
|
"step": 414 |
|
}, |
|
{ |
|
"completion_length": 279.8463611602783, |
|
"epoch": 0.22186666666666666, |
|
"grad_norm": 0.6471319326937831, |
|
"kl": 0.2322998046875, |
|
"learning_rate": 7.46485518885462e-09, |
|
"loss": 0.0002, |
|
"reward": 1.393229216337204, |
|
"reward_std": 0.19153147842735052, |
|
"rewards/equation_reward_func": 0.4140625111758709, |
|
"rewards/format_reward_func": 0.9791666753590107, |
|
"step": 416 |
|
}, |
|
{ |
|
"completion_length": 286.213547706604, |
|
"epoch": 0.22293333333333334, |
|
"grad_norm": 1.030967274909426, |
|
"kl": 0.22314453125, |
|
"learning_rate": 6.616247970698319e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4739583805203438, |
|
"reward_std": 0.18684497009962797, |
|
"rewards/equation_reward_func": 0.5104166779201478, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 418 |
|
}, |
|
{ |
|
"completion_length": 268.1250114440918, |
|
"epoch": 0.224, |
|
"grad_norm": 0.687917802429927, |
|
"kl": 0.2392578125, |
|
"learning_rate": 5.8181848940044855e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4583333730697632, |
|
"reward_std": 0.19611964747309685, |
|
"rewards/equation_reward_func": 0.4791666818782687, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 420 |
|
}, |
|
{ |
|
"completion_length": 286.42188358306885, |
|
"epoch": 0.22506666666666666, |
|
"grad_norm": 1.003000954678048, |
|
"kl": 0.24884033203125, |
|
"learning_rate": 5.070831694623135e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4244792014360428, |
|
"reward_std": 0.24217658629640937, |
|
"rewards/equation_reward_func": 0.45833334093913436, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 422 |
|
}, |
|
{ |
|
"completion_length": 279.4114656448364, |
|
"epoch": 0.22613333333333333, |
|
"grad_norm": 0.7365913580715772, |
|
"kl": 0.22650146484375, |
|
"learning_rate": 4.374343577351336e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3828125521540642, |
|
"reward_std": 0.1807353887706995, |
|
"rewards/equation_reward_func": 0.40625000768341124, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 424 |
|
}, |
|
{ |
|
"completion_length": 285.20312881469727, |
|
"epoch": 0.2272, |
|
"grad_norm": 1.1325349820394892, |
|
"kl": 0.2252197265625, |
|
"learning_rate": 3.7288651837012745e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4322917088866234, |
|
"reward_std": 0.24207678250968456, |
|
"rewards/equation_reward_func": 0.45572918676771224, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 426 |
|
}, |
|
{ |
|
"completion_length": 295.6354269981384, |
|
"epoch": 0.22826666666666667, |
|
"grad_norm": 0.6262258188992635, |
|
"kl": 0.224609375, |
|
"learning_rate": 3.134530561862081e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4192708656191826, |
|
"reward_std": 0.21014545997604728, |
|
"rewards/equation_reward_func": 0.4583333386108279, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 428 |
|
}, |
|
{ |
|
"completion_length": 295.90625858306885, |
|
"epoch": 0.22933333333333333, |
|
"grad_norm": 0.6142531023657672, |
|
"kl": 0.22576904296875, |
|
"learning_rate": 2.5914631388619103e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3385417088866234, |
|
"reward_std": 0.16080457624047995, |
|
"rewards/equation_reward_func": 0.3802083416376263, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 430 |
|
}, |
|
{ |
|
"completion_length": 263.97396755218506, |
|
"epoch": 0.2304, |
|
"grad_norm": 0.6976796846300636, |
|
"kl": 0.2528076171875, |
|
"learning_rate": 2.0997756949353297e-09, |
|
"loss": 0.0003, |
|
"reward": 1.4322917014360428, |
|
"reward_std": 0.18798484792932868, |
|
"rewards/equation_reward_func": 0.46093751094304025, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 432 |
|
}, |
|
{ |
|
"completion_length": 238.1224021911621, |
|
"epoch": 0.23146666666666665, |
|
"grad_norm": 0.899293286345609, |
|
"kl": 0.23345947265625, |
|
"learning_rate": 1.6595703401020844e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4895833730697632, |
|
"reward_std": 0.16196552151814103, |
|
"rewards/equation_reward_func": 0.5130208421032876, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 434 |
|
}, |
|
{ |
|
"completion_length": 300.72136402130127, |
|
"epoch": 0.23253333333333334, |
|
"grad_norm": 0.8491958193033893, |
|
"kl": 0.214111328125, |
|
"learning_rate": 1.2709384929615596e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3567708730697632, |
|
"reward_std": 0.1968450741842389, |
|
"rewards/equation_reward_func": 0.3906250107102096, |
|
"rewards/format_reward_func": 0.9661458544433117, |
|
"step": 436 |
|
}, |
|
{ |
|
"completion_length": 281.16667556762695, |
|
"epoch": 0.2336, |
|
"grad_norm": 0.5991014285626669, |
|
"kl": 0.23944091796875, |
|
"learning_rate": 9.339608617077165e-10, |
|
"loss": 0.0002, |
|
"reward": 1.3958333656191826, |
|
"reward_std": 0.19733294053003192, |
|
"rewards/equation_reward_func": 0.4401041781529784, |
|
"rewards/format_reward_func": 0.9557291865348816, |
|
"step": 438 |
|
}, |
|
{ |
|
"completion_length": 271.0625057220459, |
|
"epoch": 0.23466666666666666, |
|
"grad_norm": 0.5794558183115414, |
|
"kl": 0.22308349609375, |
|
"learning_rate": 6.487074273681114e-10, |
|
"loss": 0.0002, |
|
"reward": 1.4661458656191826, |
|
"reward_std": 0.17582183191552758, |
|
"rewards/equation_reward_func": 0.4869791818782687, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 440 |
|
}, |
|
{ |
|
"completion_length": 261.28386306762695, |
|
"epoch": 0.23573333333333332, |
|
"grad_norm": 0.7209816553467453, |
|
"kl": 0.248779296875, |
|
"learning_rate": 4.152374292708538e-10, |
|
"loss": 0.0002, |
|
"reward": 1.471354205161333, |
|
"reward_std": 0.16987955244258046, |
|
"rewards/equation_reward_func": 0.4973958469927311, |
|
"rewards/format_reward_func": 0.9739583432674408, |
|
"step": 442 |
|
}, |
|
{ |
|
"completion_length": 231.79427671432495, |
|
"epoch": 0.2368, |
|
"grad_norm": 0.6551899921842496, |
|
"kl": 0.2474365234375, |
|
"learning_rate": 2.3359935274214204e-10, |
|
"loss": 0.0002, |
|
"reward": 1.5885417088866234, |
|
"reward_std": 0.18360807234421372, |
|
"rewards/equation_reward_func": 0.6223958495538682, |
|
"rewards/format_reward_func": 0.9661458544433117, |
|
"step": 444 |
|
}, |
|
{ |
|
"completion_length": 284.0859432220459, |
|
"epoch": 0.23786666666666667, |
|
"grad_norm": 0.6724399152364189, |
|
"kl": 0.231201171875, |
|
"learning_rate": 1.0383091903720665e-10, |
|
"loss": 0.0002, |
|
"reward": 1.411458358168602, |
|
"reward_std": 0.22678064601495862, |
|
"rewards/equation_reward_func": 0.4401041748933494, |
|
"rewards/format_reward_func": 0.9713541828095913, |
|
"step": 446 |
|
}, |
|
{ |
|
"completion_length": 276.44792556762695, |
|
"epoch": 0.23893333333333333, |
|
"grad_norm": 0.98764526989421, |
|
"kl": 0.33917236328125, |
|
"learning_rate": 2.595907750671533e-11, |
|
"loss": 0.0003, |
|
"reward": 1.4427083767950535, |
|
"reward_std": 0.17404404049739242, |
|
"rewards/equation_reward_func": 0.4661458453629166, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 448 |
|
}, |
|
{ |
|
"completion_length": 276.0937604904175, |
|
"epoch": 0.24, |
|
"grad_norm": 0.844438465358864, |
|
"kl": 0.215087890625, |
|
"learning_rate": 0.0, |
|
"loss": 0.0002, |
|
"reward": 1.5312500298023224, |
|
"reward_std": 0.17535703862085938, |
|
"rewards/equation_reward_func": 0.5364583441987634, |
|
"rewards/format_reward_func": 0.9947916716337204, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"step": 450, |
|
"total_flos": 0.0, |
|
"train_loss": 0.0001681143713844217, |
|
"train_runtime": 19587.5481, |
|
"train_samples_per_second": 0.551, |
|
"train_steps_per_second": 0.023 |
|
} |
|
], |
|
"logging_steps": 2, |
|
"max_steps": 450, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 25, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |