{
  "best_metric": 2.4689817428588867,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.11363636363636363,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0022727272727272726,
      "grad_norm": 1.606259822845459,
      "learning_rate": 2e-05,
      "loss": 2.6575,
      "step": 1
    },
    {
      "epoch": 0.0022727272727272726,
      "eval_loss": 2.972724676132202,
      "eval_runtime": 6.8425,
      "eval_samples_per_second": 108.294,
      "eval_steps_per_second": 13.592,
      "step": 1
    },
    {
      "epoch": 0.004545454545454545,
      "grad_norm": 1.7978665828704834,
      "learning_rate": 4e-05,
      "loss": 2.566,
      "step": 2
    },
    {
      "epoch": 0.006818181818181818,
      "grad_norm": 1.5056153535842896,
      "learning_rate": 6e-05,
      "loss": 2.6681,
      "step": 3
    },
    {
      "epoch": 0.00909090909090909,
      "grad_norm": 1.284254550933838,
      "learning_rate": 8e-05,
      "loss": 2.5945,
      "step": 4
    },
    {
      "epoch": 0.011363636363636364,
      "grad_norm": 1.6918977499008179,
      "learning_rate": 0.0001,
      "loss": 2.6936,
      "step": 5
    },
    {
      "epoch": 0.013636363636363636,
      "grad_norm": 2.2507495880126953,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.7084,
      "step": 6
    },
    {
      "epoch": 0.015909090909090907,
      "grad_norm": 1.648572325706482,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.5595,
      "step": 7
    },
    {
      "epoch": 0.01818181818181818,
      "grad_norm": 1.4604798555374146,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.3795,
      "step": 8
    },
    {
      "epoch": 0.020454545454545454,
      "grad_norm": 1.4552377462387085,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.532,
      "step": 9
    },
    {
      "epoch": 0.022727272727272728,
      "grad_norm": 1.552384376525879,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.4922,
      "step": 10
    },
    {
      "epoch": 0.025,
      "grad_norm": 1.5556209087371826,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.4043,
      "step": 11
    },
    {
      "epoch": 0.02727272727272727,
      "grad_norm": 1.5756804943084717,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.4997,
      "step": 12
    },
    {
      "epoch": 0.029545454545454545,
      "grad_norm": 1.541499376296997,
      "learning_rate": 9.24024048078213e-05,
      "loss": 2.4007,
      "step": 13
    },
    {
      "epoch": 0.031818181818181815,
      "grad_norm": 1.5218021869659424,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.5088,
      "step": 14
    },
    {
      "epoch": 0.03409090909090909,
      "grad_norm": 1.52951180934906,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.402,
      "step": 15
    },
    {
      "epoch": 0.03636363636363636,
      "grad_norm": 1.5703572034835815,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.4933,
      "step": 16
    },
    {
      "epoch": 0.038636363636363635,
      "grad_norm": 1.676938772201538,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.3683,
      "step": 17
    },
    {
      "epoch": 0.04090909090909091,
      "grad_norm": 1.6145377159118652,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.5634,
      "step": 18
    },
    {
      "epoch": 0.04318181818181818,
      "grad_norm": 1.7284026145935059,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.5014,
      "step": 19
    },
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 1.7103477716445923,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.4047,
      "step": 20
    },
    {
      "epoch": 0.04772727272727273,
      "grad_norm": 1.8176960945129395,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.4621,
      "step": 21
    },
    {
      "epoch": 0.05,
      "grad_norm": 1.9118075370788574,
      "learning_rate": 6.873032967079561e-05,
      "loss": 2.3042,
      "step": 22
    },
    {
      "epoch": 0.05227272727272727,
      "grad_norm": 1.9427374601364136,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.3648,
      "step": 23
    },
    {
      "epoch": 0.05454545454545454,
      "grad_norm": 1.935987949371338,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.3904,
      "step": 24
    },
    {
      "epoch": 0.056818181818181816,
      "grad_norm": 2.205674409866333,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.4429,
      "step": 25
    },
    {
      "epoch": 0.056818181818181816,
      "eval_loss": 2.5051064491271973,
      "eval_runtime": 6.7932,
      "eval_samples_per_second": 109.08,
      "eval_steps_per_second": 13.69,
      "step": 25
    },
    {
      "epoch": 0.05909090909090909,
      "grad_norm": 2.0927062034606934,
      "learning_rate": 5.522642316338268e-05,
      "loss": 2.2257,
      "step": 26
    },
    {
      "epoch": 0.06136363636363636,
      "grad_norm": 2.312185049057007,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.1917,
      "step": 27
    },
    {
      "epoch": 0.06363636363636363,
      "grad_norm": 2.3717732429504395,
      "learning_rate": 4.825502516487497e-05,
      "loss": 2.2485,
      "step": 28
    },
    {
      "epoch": 0.0659090909090909,
      "grad_norm": 2.3789658546447754,
      "learning_rate": 4.477357683661734e-05,
      "loss": 2.3248,
      "step": 29
    },
    {
      "epoch": 0.06818181818181818,
      "grad_norm": 2.333639621734619,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.2749,
      "step": 30
    },
    {
      "epoch": 0.07045454545454545,
      "grad_norm": 2.7359251976013184,
      "learning_rate": 3.790390522001662e-05,
      "loss": 2.4323,
      "step": 31
    },
    {
      "epoch": 0.07272727272727272,
      "grad_norm": 2.6724472045898438,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 2.6001,
      "step": 32
    },
    {
      "epoch": 0.075,
      "grad_norm": 2.704455614089966,
      "learning_rate": 3.12696703292044e-05,
      "loss": 2.3212,
      "step": 33
    },
    {
      "epoch": 0.07727272727272727,
      "grad_norm": 2.7967569828033447,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 2.245,
      "step": 34
    },
    {
      "epoch": 0.07954545454545454,
      "grad_norm": 2.7874977588653564,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.0644,
      "step": 35
    },
    {
      "epoch": 0.08181818181818182,
      "grad_norm": 3.0134224891662598,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.2045,
      "step": 36
    },
    {
      "epoch": 0.08409090909090909,
      "grad_norm": 3.377096652984619,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 2.0878,
      "step": 37
    },
    {
      "epoch": 0.08636363636363636,
      "grad_norm": 3.990339517593384,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 2.3226,
      "step": 38
    },
    {
      "epoch": 0.08863636363636364,
      "grad_norm": 3.2142210006713867,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 2.0823,
      "step": 39
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 3.8085083961486816,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 2.4723,
      "step": 40
    },
    {
      "epoch": 0.09318181818181819,
      "grad_norm": 3.704538106918335,
      "learning_rate": 9.549150281252633e-06,
      "loss": 2.1217,
      "step": 41
    },
    {
      "epoch": 0.09545454545454546,
      "grad_norm": 3.812995195388794,
      "learning_rate": 7.597595192178702e-06,
      "loss": 1.9568,
      "step": 42
    },
    {
      "epoch": 0.09772727272727273,
      "grad_norm": 4.169566631317139,
      "learning_rate": 5.852620357053651e-06,
      "loss": 2.2289,
      "step": 43
    },
    {
      "epoch": 0.1,
      "grad_norm": 4.201821327209473,
      "learning_rate": 4.322727117869951e-06,
      "loss": 2.184,
      "step": 44
    },
    {
      "epoch": 0.10227272727272728,
      "grad_norm": 4.2996907234191895,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 2.4758,
      "step": 45
    },
    {
      "epoch": 0.10454545454545454,
      "grad_norm": 4.644790172576904,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 2.1504,
      "step": 46
    },
    {
      "epoch": 0.10681818181818181,
      "grad_norm": 5.309967041015625,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 2.0347,
      "step": 47
    },
    {
      "epoch": 0.10909090909090909,
      "grad_norm": 7.990983009338379,
      "learning_rate": 4.865965629214819e-07,
      "loss": 2.2895,
      "step": 48
    },
    {
      "epoch": 0.11136363636363636,
      "grad_norm": 10.469155311584473,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 2.84,
      "step": 49
    },
    {
      "epoch": 0.11363636363636363,
      "grad_norm": 20.095691680908203,
      "learning_rate": 0.0,
      "loss": 3.1589,
      "step": 50
    },
    {
      "epoch": 0.11363636363636363,
      "eval_loss": 2.4689817428588867,
      "eval_runtime": 6.7551,
      "eval_samples_per_second": 109.694,
      "eval_steps_per_second": 13.767,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1990084226187264.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}