{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.0,
"eval_steps": 100,
"global_step": 192,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.20833333333333334,
"grad_norm": 14.965083122253418,
"learning_rate": 4.991638098272951e-05,
"loss": 4.5817,
"num_input_tokens_seen": 70400,
"step": 5
},
{
"epoch": 0.4166666666666667,
"grad_norm": 1.2255456447601318,
"learning_rate": 4.966608330212198e-05,
"loss": 0.4906,
"num_input_tokens_seen": 142296,
"step": 10
},
{
"epoch": 0.625,
"grad_norm": 0.9692479372024536,
"learning_rate": 4.9250781329863606e-05,
"loss": 0.5919,
"num_input_tokens_seen": 215224,
"step": 15
},
{
"epoch": 0.8333333333333334,
"grad_norm": 1.2537702322006226,
"learning_rate": 4.867325323737765e-05,
"loss": 0.3977,
"num_input_tokens_seen": 285568,
"step": 20
},
{
"epoch": 1.0416666666666667,
"grad_norm": 5.391773700714111,
"learning_rate": 4.793736241118728e-05,
"loss": 0.4036,
"num_input_tokens_seen": 357968,
"step": 25
},
{
"epoch": 1.25,
"grad_norm": 0.7211081981658936,
"learning_rate": 4.7048031608708876e-05,
"loss": 0.3966,
"num_input_tokens_seen": 428848,
"step": 30
},
{
"epoch": 1.4583333333333333,
"grad_norm": 1.8750863075256348,
"learning_rate": 4.601121002736095e-05,
"loss": 0.3386,
"num_input_tokens_seen": 500536,
"step": 35
},
{
"epoch": 1.6666666666666665,
"grad_norm": 3.4102139472961426,
"learning_rate": 4.4833833507280884e-05,
"loss": 0.3656,
"num_input_tokens_seen": 572648,
"step": 40
},
{
"epoch": 1.875,
"grad_norm": 2.648550510406494,
"learning_rate": 4.352377813387398e-05,
"loss": 0.3556,
"num_input_tokens_seen": 643624,
"step": 45
},
{
"epoch": 2.0833333333333335,
"grad_norm": 0.8133513331413269,
"learning_rate": 4.208980755057178e-05,
"loss": 0.2927,
"num_input_tokens_seen": 715504,
"step": 50
},
{
"epoch": 2.2916666666666665,
"grad_norm": 2.6871180534362793,
"learning_rate": 4.054151433425194e-05,
"loss": 0.2644,
"num_input_tokens_seen": 786328,
"step": 55
},
{
"epoch": 2.5,
"grad_norm": 1.71990966796875,
"learning_rate": 3.888925582549006e-05,
"loss": 0.2624,
"num_input_tokens_seen": 856784,
"step": 60
},
{
"epoch": 2.7083333333333335,
"grad_norm": 1.2586452960968018,
"learning_rate": 3.7144084842908505e-05,
"loss": 0.2458,
"num_input_tokens_seen": 929768,
"step": 65
},
{
"epoch": 2.9166666666666665,
"grad_norm": 1.3015620708465576,
"learning_rate": 3.5317675745109866e-05,
"loss": 0.2809,
"num_input_tokens_seen": 1001312,
"step": 70
},
{
"epoch": 3.125,
"grad_norm": 1.136038899421692,
"learning_rate": 3.34222463348055e-05,
"loss": 0.2873,
"num_input_tokens_seen": 1074016,
"step": 75
},
{
"epoch": 3.3333333333333335,
"grad_norm": 1.0206663608551025,
"learning_rate": 3.147047612756302e-05,
"loss": 0.2201,
"num_input_tokens_seen": 1144184,
"step": 80
},
{
"epoch": 3.5416666666666665,
"grad_norm": 1.0465854406356812,
"learning_rate": 2.9475421531915827e-05,
"loss": 0.1891,
"num_input_tokens_seen": 1215680,
"step": 85
},
{
"epoch": 3.75,
"grad_norm": 2.669116497039795,
"learning_rate": 2.7450428508239024e-05,
"loss": 0.1966,
"num_input_tokens_seen": 1287848,
"step": 90
},
{
"epoch": 3.9583333333333335,
"grad_norm": 2.8238487243652344,
"learning_rate": 2.5409043290662173e-05,
"loss": 0.1773,
"num_input_tokens_seen": 1359184,
"step": 95
},
{
"epoch": 4.166666666666667,
"grad_norm": 4.206888198852539,
"learning_rate": 2.3364921769246423e-05,
"loss": 0.1521,
"num_input_tokens_seen": 1431816,
"step": 100
},
{
"epoch": 4.166666666666667,
"eval_loss": 0.3628197908401489,
"eval_runtime": 109.019,
"eval_samples_per_second": 0.881,
"eval_steps_per_second": 0.44,
"num_input_tokens_seen": 1431816,
"step": 100
},
{
"epoch": 4.375,
"grad_norm": 1.345531702041626,
"learning_rate": 2.1331738138615958e-05,
"loss": 0.0969,
"num_input_tokens_seen": 1503912,
"step": 105
},
{
"epoch": 4.583333333333333,
"grad_norm": 2.6333017349243164,
"learning_rate": 1.932309342414067e-05,
"loss": 0.1199,
"num_input_tokens_seen": 1575912,
"step": 110
},
{
"epoch": 4.791666666666667,
"grad_norm": 1.8659350872039795,
"learning_rate": 1.7352424497586163e-05,
"loss": 0.1323,
"num_input_tokens_seen": 1646056,
"step": 115
},
{
"epoch": 5.0,
"grad_norm": 2.1656887531280518,
"learning_rate": 1.5432914190872757e-05,
"loss": 0.1403,
"num_input_tokens_seen": 1717560,
"step": 120
},
{
"epoch": 5.208333333333333,
"grad_norm": 4.230530738830566,
"learning_rate": 1.3577403109239484e-05,
"loss": 0.1139,
"num_input_tokens_seen": 1789304,
"step": 125
},
{
"epoch": 5.416666666666667,
"grad_norm": 0.5218907594680786,
"learning_rate": 1.1798303733740802e-05,
"loss": 0.0935,
"num_input_tokens_seen": 1859552,
"step": 130
},
{
"epoch": 5.625,
"grad_norm": 1.5045415163040161,
"learning_rate": 1.0107517387689166e-05,
"loss": 0.0502,
"num_input_tokens_seen": 1931216,
"step": 135
},
{
"epoch": 5.833333333333333,
"grad_norm": 2.7641360759735107,
"learning_rate": 8.51635462249828e-06,
"loss": 0.043,
"num_input_tokens_seen": 2003784,
"step": 140
},
{
"epoch": 6.041666666666667,
"grad_norm": 1.3363369703292847,
"learning_rate": 7.035459555507548e-06,
"loss": 0.0432,
"num_input_tokens_seen": 2075648,
"step": 145
},
{
"epoch": 6.25,
"grad_norm": 2.7394213676452637,
"learning_rate": 5.674738665931575e-06,
"loss": 0.0336,
"num_input_tokens_seen": 2147344,
"step": 150
},
{
"epoch": 6.458333333333333,
"grad_norm": 1.555774450302124,
"learning_rate": 4.4432945252556284e-06,
"loss": 0.0189,
"num_input_tokens_seen": 2218968,
"step": 155
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.5655226111412048,
"learning_rate": 3.3493649053890326e-06,
"loss": 0.0344,
"num_input_tokens_seen": 2290400,
"step": 160
},
{
"epoch": 6.875,
"grad_norm": 0.8995972275733948,
"learning_rate": 2.4002676719139166e-06,
"loss": 0.0112,
"num_input_tokens_seen": 2362184,
"step": 165
},
{
"epoch": 7.083333333333333,
"grad_norm": 0.36821281909942627,
"learning_rate": 1.6023518310668618e-06,
"loss": 0.0071,
"num_input_tokens_seen": 2432600,
"step": 170
},
{
"epoch": 7.291666666666667,
"grad_norm": 2.5213656425476074,
"learning_rate": 9.609550579259496e-07,
"loss": 0.0239,
"num_input_tokens_seen": 2503072,
"step": 175
},
{
"epoch": 7.5,
"grad_norm": 0.3469555377960205,
"learning_rate": 4.803679899192392e-07,
"loss": 0.0051,
"num_input_tokens_seen": 2575144,
"step": 180
},
{
"epoch": 7.708333333333333,
"grad_norm": 1.465752124786377,
"learning_rate": 1.6380552451389086e-07,
"loss": 0.0197,
"num_input_tokens_seen": 2646888,
"step": 185
},
{
"epoch": 7.916666666666667,
"grad_norm": 0.40796151757240295,
"learning_rate": 1.3385313090857887e-08,
"loss": 0.0048,
"num_input_tokens_seen": 2719168,
"step": 190
},
{
"epoch": 8.0,
"num_input_tokens_seen": 2748096,
"step": 192,
"total_flos": 194502843170816.0,
"train_loss": 0.2990388348637983,
"train_runtime": 8392.057,
"train_samples_per_second": 0.366,
"train_steps_per_second": 0.023
}
],
"logging_steps": 5,
"max_steps": 192,
"num_input_tokens_seen": 2748096,
"num_train_epochs": 8,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 194502843170816.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}