{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.02666666666666667,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"completion_length": 485.49220275878906,
"epoch": 0.0010666666666666667,
"grad_norm": 0.13112049806907955,
"kl": 0.0,
"learning_rate": 7.142857142857142e-08,
"loss": -0.0,
"reward": 0.3281250111758709,
"reward_std": 0.4913413915783167,
"rewards/equation_reward_func": 0.05729166814126074,
"rewards/format_reward_func": 0.27083334047347307,
"step": 2
},
{
"completion_length": 530.2265796661377,
"epoch": 0.0021333333333333334,
"grad_norm": 0.12198138838063947,
"kl": 0.0003826618194580078,
"learning_rate": 1.4285714285714285e-07,
"loss": 0.0,
"reward": 0.299479172565043,
"reward_std": 0.44007157534360886,
"rewards/equation_reward_func": 0.03385416744276881,
"rewards/format_reward_func": 0.26562500977888703,
"step": 4
},
{
"completion_length": 496.8776264190674,
"epoch": 0.0032,
"grad_norm": 0.12162717174644112,
"kl": 0.0003865957260131836,
"learning_rate": 2.1428571428571426e-07,
"loss": 0.0,
"reward": 0.2916666781529784,
"reward_std": 0.47719811648130417,
"rewards/equation_reward_func": 0.05468750116415322,
"rewards/format_reward_func": 0.23697917303070426,
"step": 6
},
{
"completion_length": 504.77865982055664,
"epoch": 0.004266666666666667,
"grad_norm": 0.13232346241260942,
"kl": 0.0003762245178222656,
"learning_rate": 2.857142857142857e-07,
"loss": 0.0,
"reward": 0.33593750558793545,
"reward_std": 0.4751614350825548,
"rewards/equation_reward_func": 0.04947916720993817,
"rewards/format_reward_func": 0.28645834140479565,
"step": 8
},
{
"completion_length": 475.7057456970215,
"epoch": 0.005333333333333333,
"grad_norm": 0.13843718324607834,
"kl": 0.0003968477249145508,
"learning_rate": 3.5714285714285716e-07,
"loss": 0.0,
"reward": 0.3828125102445483,
"reward_std": 0.5227206833660603,
"rewards/equation_reward_func": 0.0885416679084301,
"rewards/format_reward_func": 0.2942708395421505,
"step": 10
},
{
"completion_length": 475.98699378967285,
"epoch": 0.0064,
"grad_norm": 0.14337833822484186,
"kl": 0.0004818439483642578,
"learning_rate": 4.285714285714285e-07,
"loss": 0.0,
"reward": 0.33333334140479565,
"reward_std": 0.4693184234201908,
"rewards/equation_reward_func": 0.05468750139698386,
"rewards/format_reward_func": 0.2786458395421505,
"step": 12
},
{
"completion_length": 472.8099060058594,
"epoch": 0.007466666666666667,
"grad_norm": 0.129867140491159,
"kl": 0.0007684230804443359,
"learning_rate": 5e-07,
"loss": 0.0,
"reward": 0.45052084885537624,
"reward_std": 0.5166866518557072,
"rewards/equation_reward_func": 0.041666667675599456,
"rewards/format_reward_func": 0.40885418094694614,
"step": 14
},
{
"completion_length": 464.46875762939453,
"epoch": 0.008533333333333334,
"grad_norm": 0.1279190002551803,
"kl": 0.0013058185577392578,
"learning_rate": 4.999740409224932e-07,
"loss": 0.0,
"reward": 0.5052083488553762,
"reward_std": 0.5728582534939051,
"rewards/equation_reward_func": 0.06510416814126074,
"rewards/format_reward_func": 0.4401041753590107,
"step": 16
},
{
"completion_length": 480.2291717529297,
"epoch": 0.0096,
"grad_norm": 0.10647649710010443,
"kl": 0.00380706787109375,
"learning_rate": 4.998961690809627e-07,
"loss": 0.0,
"reward": 0.6588541902601719,
"reward_std": 0.5287479311227798,
"rewards/equation_reward_func": 0.05468750139698386,
"rewards/format_reward_func": 0.6041666828095913,
"step": 18
},
{
"completion_length": 493.8073043823242,
"epoch": 0.010666666666666666,
"grad_norm": 0.10522760864642984,
"kl": 0.004913330078125,
"learning_rate": 4.997664006472578e-07,
"loss": 0.0,
"reward": 0.7734375223517418,
"reward_std": 0.4910791157744825,
"rewards/equation_reward_func": 0.07031250116415322,
"rewards/format_reward_func": 0.7031250186264515,
"step": 20
},
{
"completion_length": 455.6510524749756,
"epoch": 0.011733333333333333,
"grad_norm": 0.09917661844432689,
"kl": 0.008411407470703125,
"learning_rate": 4.995847625707292e-07,
"loss": 0.0,
"reward": 0.7812500186264515,
"reward_std": 0.4674575887620449,
"rewards/equation_reward_func": 0.0651041679084301,
"rewards/format_reward_func": 0.7161458507180214,
"step": 22
},
{
"completion_length": 464.50782012939453,
"epoch": 0.0128,
"grad_norm": 0.10189992043189328,
"kl": 0.0059833526611328125,
"learning_rate": 4.993512925726318e-07,
"loss": 0.0,
"reward": 0.8619791865348816,
"reward_std": 0.49650320410728455,
"rewards/equation_reward_func": 0.08854166860692203,
"rewards/format_reward_func": 0.7734375223517418,
"step": 24
},
{
"completion_length": 447.40626335144043,
"epoch": 0.013866666666666666,
"grad_norm": 0.09219816177682034,
"kl": 0.006900787353515625,
"learning_rate": 4.990660391382923e-07,
"loss": 0.0,
"reward": 0.960937537252903,
"reward_std": 0.4377214591950178,
"rewards/equation_reward_func": 0.11718750256113708,
"rewards/format_reward_func": 0.8437500260770321,
"step": 26
},
{
"completion_length": 436.3099117279053,
"epoch": 0.014933333333333333,
"grad_norm": 0.07907793746945187,
"kl": 0.009281158447265625,
"learning_rate": 4.987290615070384e-07,
"loss": 0.0,
"reward": 0.9713542014360428,
"reward_std": 0.3975960807874799,
"rewards/equation_reward_func": 0.09895833535119891,
"rewards/format_reward_func": 0.872395858168602,
"step": 28
},
{
"completion_length": 428.1354293823242,
"epoch": 0.016,
"grad_norm": 0.0845150241699145,
"kl": 0.011430740356445312,
"learning_rate": 4.983404296598978e-07,
"loss": 0.0,
"reward": 0.9531250298023224,
"reward_std": 0.359499204903841,
"rewards/equation_reward_func": 0.0703125016298145,
"rewards/format_reward_func": 0.8828125223517418,
"step": 30
},
{
"completion_length": 434.62500953674316,
"epoch": 0.017066666666666667,
"grad_norm": 0.08196142586101564,
"kl": 0.010782241821289062,
"learning_rate": 4.979002243050646e-07,
"loss": 0.0,
"reward": 1.0260416977107525,
"reward_std": 0.30062979739159346,
"rewards/equation_reward_func": 0.09635416860692203,
"rewards/format_reward_func": 0.9296875260770321,
"step": 32
},
{
"completion_length": 438.08595085144043,
"epoch": 0.018133333333333335,
"grad_norm": 0.08432790923176402,
"kl": 0.012115478515625,
"learning_rate": 4.974085368611381e-07,
"loss": 0.0,
"reward": 1.049479205161333,
"reward_std": 0.3121222285553813,
"rewards/equation_reward_func": 0.11197917000390589,
"rewards/format_reward_func": 0.9375000223517418,
"step": 34
},
{
"completion_length": 421.16407203674316,
"epoch": 0.0192,
"grad_norm": 0.080639508238748,
"kl": 0.01397705078125,
"learning_rate": 4.968654694381379e-07,
"loss": 0.0,
"reward": 1.0598958618938923,
"reward_std": 0.27779901027679443,
"rewards/equation_reward_func": 0.10416666907258332,
"rewards/format_reward_func": 0.9557291865348816,
"step": 36
},
{
"completion_length": 405.12240982055664,
"epoch": 0.020266666666666665,
"grad_norm": 0.07681322754981268,
"kl": 0.013866424560546875,
"learning_rate": 4.962711348162987e-07,
"loss": 0.0,
"reward": 1.0390625409781933,
"reward_std": 0.2664716215804219,
"rewards/equation_reward_func": 0.08593750279396772,
"rewards/format_reward_func": 0.9531250186264515,
"step": 38
},
{
"completion_length": 400.0104274749756,
"epoch": 0.021333333333333333,
"grad_norm": 0.08198714493646655,
"kl": 0.015293121337890625,
"learning_rate": 4.956256564226487e-07,
"loss": 0.0,
"reward": 1.1067708730697632,
"reward_std": 0.28682188084349036,
"rewards/equation_reward_func": 0.14322917023673654,
"rewards/format_reward_func": 0.9635416828095913,
"step": 40
},
{
"completion_length": 400.78386306762695,
"epoch": 0.0224,
"grad_norm": 0.0855015587257399,
"kl": 0.018611907958984375,
"learning_rate": 4.949291683053768e-07,
"loss": 0.0,
"reward": 1.0937500223517418,
"reward_std": 0.24716421775519848,
"rewards/equation_reward_func": 0.11197916930541396,
"rewards/format_reward_func": 0.9817708469927311,
"step": 42
},
{
"completion_length": 408.34115409851074,
"epoch": 0.023466666666666667,
"grad_norm": 0.07249139521720419,
"kl": 0.017139434814453125,
"learning_rate": 4.941818151059955e-07,
"loss": 0.0,
"reward": 1.0546875298023224,
"reward_std": 0.24979113461449742,
"rewards/equation_reward_func": 0.0963541695382446,
"rewards/format_reward_func": 0.9583333544433117,
"step": 44
},
{
"completion_length": 388.62761306762695,
"epoch": 0.024533333333333334,
"grad_norm": 0.07507762465990464,
"kl": 0.017795562744140625,
"learning_rate": 4.933837520293017e-07,
"loss": 0.0,
"reward": 1.0781250484287739,
"reward_std": 0.2523620016872883,
"rewards/equation_reward_func": 0.11197917046956718,
"rewards/format_reward_func": 0.9661458507180214,
"step": 46
},
{
"completion_length": 399.57032585144043,
"epoch": 0.0256,
"grad_norm": 0.06604121980517051,
"kl": 0.017971038818359375,
"learning_rate": 4.925351448111454e-07,
"loss": 0.0,
"reward": 1.0468750335276127,
"reward_std": 0.21127380011603236,
"rewards/equation_reward_func": 0.07552083511836827,
"rewards/format_reward_func": 0.9713541865348816,
"step": 48
},
{
"completion_length": 392.4687557220459,
"epoch": 0.02666666666666667,
"grad_norm": 0.0903310628098169,
"kl": 0.0193939208984375,
"learning_rate": 4.91636169684011e-07,
"loss": 0.0,
"reward": 1.1093750409781933,
"reward_std": 0.29062134958803654,
"rewards/equation_reward_func": 0.13541667209938169,
"rewards/format_reward_func": 0.9739583432674408,
"step": 50
}
],
"logging_steps": 2,
"max_steps": 450,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}