simplewiki_upsampled_tom / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.942760942760943,
"eval_steps": 400,
"global_step": 111,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05387205387205387,
"grad_norm": 0.01257950346916914,
"learning_rate": 0.000990909090909091,
"loss": 2.8653,
"step": 2
},
{
"epoch": 0.10774410774410774,
"grad_norm": 0.02894681692123413,
"learning_rate": 0.0009727272727272728,
"loss": 2.8469,
"step": 4
},
{
"epoch": 0.16161616161616163,
"grad_norm": 0.0666627511382103,
"learning_rate": 0.0009545454545454546,
"loss": 2.7697,
"step": 6
},
{
"epoch": 0.21548821548821548,
"grad_norm": 0.1011333167552948,
"learning_rate": 0.0009363636363636364,
"loss": 2.8416,
"step": 8
},
{
"epoch": 0.26936026936026936,
"grad_norm": 0.1431330442428589,
"learning_rate": 0.0009181818181818182,
"loss": 2.8207,
"step": 10
},
{
"epoch": 0.32323232323232326,
"grad_norm": 0.17145054042339325,
"learning_rate": 0.0009000000000000001,
"loss": 2.791,
"step": 12
},
{
"epoch": 0.3771043771043771,
"grad_norm": 0.18456311523914337,
"learning_rate": 0.0008818181818181819,
"loss": 2.7702,
"step": 14
},
{
"epoch": 0.43097643097643096,
"grad_norm": 0.17877964675426483,
"learning_rate": 0.0008636363636363636,
"loss": 2.7494,
"step": 16
},
{
"epoch": 0.48484848484848486,
"grad_norm": 0.16067726910114288,
"learning_rate": 0.0008454545454545455,
"loss": 2.7386,
"step": 18
},
{
"epoch": 0.5387205387205387,
"grad_norm": 0.18989939987659454,
"learning_rate": 0.0008272727272727273,
"loss": 2.7428,
"step": 20
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.1703718900680542,
"learning_rate": 0.0008090909090909092,
"loss": 2.7249,
"step": 22
},
{
"epoch": 0.6464646464646465,
"grad_norm": 0.15262621641159058,
"learning_rate": 0.0007909090909090909,
"loss": 2.7456,
"step": 24
},
{
"epoch": 0.7003367003367004,
"grad_norm": 0.15567754209041595,
"learning_rate": 0.0007727272727272727,
"loss": 2.7306,
"step": 26
},
{
"epoch": 0.7542087542087542,
"grad_norm": 0.12221667915582657,
"learning_rate": 0.0007545454545454546,
"loss": 2.7012,
"step": 28
},
{
"epoch": 0.8080808080808081,
"grad_norm": 0.11120902001857758,
"learning_rate": 0.0007363636363636363,
"loss": 2.7122,
"step": 30
},
{
"epoch": 0.8619528619528619,
"grad_norm": 0.10489355772733688,
"learning_rate": 0.0007181818181818181,
"loss": 2.7142,
"step": 32
},
{
"epoch": 0.9158249158249159,
"grad_norm": 0.10036630928516388,
"learning_rate": 0.0007,
"loss": 2.7211,
"step": 34
},
{
"epoch": 0.9696969696969697,
"grad_norm": 0.0986902043223381,
"learning_rate": 0.0006818181818181818,
"loss": 2.6404,
"step": 36
},
{
"epoch": 1.0,
"grad_norm": 0.2551414966583252,
"learning_rate": 0.0006636363636363638,
"loss": 2.6932,
"step": 38
},
{
"epoch": 1.0538720538720538,
"grad_norm": 0.12025883048772812,
"learning_rate": 0.0006454545454545455,
"loss": 2.698,
"step": 40
},
{
"epoch": 1.1077441077441077,
"grad_norm": 0.1064164862036705,
"learning_rate": 0.0006272727272727273,
"loss": 2.66,
"step": 42
},
{
"epoch": 1.1616161616161615,
"grad_norm": 0.12751424312591553,
"learning_rate": 0.0006090909090909092,
"loss": 2.6307,
"step": 44
},
{
"epoch": 1.2154882154882154,
"grad_norm": 0.12690748274326324,
"learning_rate": 0.0005909090909090909,
"loss": 2.6843,
"step": 46
},
{
"epoch": 1.2693602693602695,
"grad_norm": 0.12534023821353912,
"learning_rate": 0.0005727272727272727,
"loss": 2.6592,
"step": 48
},
{
"epoch": 1.3232323232323233,
"grad_norm": 0.10958981513977051,
"learning_rate": 0.0005545454545454546,
"loss": 2.6535,
"step": 50
},
{
"epoch": 1.3771043771043772,
"grad_norm": 0.12295985221862793,
"learning_rate": 0.0005363636363636364,
"loss": 2.6185,
"step": 52
},
{
"epoch": 1.430976430976431,
"grad_norm": 0.12431646883487701,
"learning_rate": 0.0005181818181818182,
"loss": 2.6336,
"step": 54
},
{
"epoch": 1.4848484848484849,
"grad_norm": 0.10543714463710785,
"learning_rate": 0.0005,
"loss": 2.6503,
"step": 56
},
{
"epoch": 1.5387205387205387,
"grad_norm": 0.11133274435997009,
"learning_rate": 0.00048181818181818184,
"loss": 2.6365,
"step": 58
},
{
"epoch": 1.5925925925925926,
"grad_norm": 0.11586187779903412,
"learning_rate": 0.00046363636363636366,
"loss": 2.6366,
"step": 60
},
{
"epoch": 1.6464646464646466,
"grad_norm": 0.11602918803691864,
"learning_rate": 0.00044545454545454543,
"loss": 2.663,
"step": 62
},
{
"epoch": 1.7003367003367003,
"grad_norm": 0.11441349983215332,
"learning_rate": 0.00042727272727272726,
"loss": 2.6727,
"step": 64
},
{
"epoch": 1.7542087542087543,
"grad_norm": 0.11416248232126236,
"learning_rate": 0.00040909090909090913,
"loss": 2.6752,
"step": 66
},
{
"epoch": 1.808080808080808,
"grad_norm": 0.11019805073738098,
"learning_rate": 0.00039090909090909096,
"loss": 2.6541,
"step": 68
},
{
"epoch": 1.861952861952862,
"grad_norm": 0.10957842320203781,
"learning_rate": 0.00037272727272727273,
"loss": 2.6745,
"step": 70
},
{
"epoch": 1.9158249158249159,
"grad_norm": 0.10212814062833786,
"learning_rate": 0.00035454545454545455,
"loss": 2.6264,
"step": 72
},
{
"epoch": 1.9696969696969697,
"grad_norm": 0.10807520151138306,
"learning_rate": 0.0003363636363636364,
"loss": 2.6491,
"step": 74
},
{
"epoch": 2.0,
"grad_norm": 0.285987913608551,
"learning_rate": 0.0003181818181818182,
"loss": 2.6873,
"step": 76
},
{
"epoch": 2.053872053872054,
"grad_norm": 0.11446108669042587,
"learning_rate": 0.0003,
"loss": 2.639,
"step": 78
},
{
"epoch": 2.1077441077441077,
"grad_norm": 0.1077447310090065,
"learning_rate": 0.0002818181818181818,
"loss": 2.6196,
"step": 80
},
{
"epoch": 2.1616161616161618,
"grad_norm": 0.1157441958785057,
"learning_rate": 0.0002636363636363636,
"loss": 2.5955,
"step": 82
},
{
"epoch": 2.2154882154882154,
"grad_norm": 0.12017026543617249,
"learning_rate": 0.00024545454545454545,
"loss": 2.6339,
"step": 84
},
{
"epoch": 2.2693602693602695,
"grad_norm": 0.11678113788366318,
"learning_rate": 0.00022727272727272727,
"loss": 2.6475,
"step": 86
},
{
"epoch": 2.323232323232323,
"grad_norm": 0.12528547644615173,
"learning_rate": 0.00020909090909090907,
"loss": 2.6177,
"step": 88
},
{
"epoch": 2.377104377104377,
"grad_norm": 0.1168636754155159,
"learning_rate": 0.00019090909090909092,
"loss": 2.6442,
"step": 90
},
{
"epoch": 2.430976430976431,
"grad_norm": 0.11126925051212311,
"learning_rate": 0.00017272727272727272,
"loss": 2.645,
"step": 92
},
{
"epoch": 2.484848484848485,
"grad_norm": 0.1180771067738533,
"learning_rate": 0.00015454545454545454,
"loss": 2.5993,
"step": 94
},
{
"epoch": 2.538720538720539,
"grad_norm": 0.10938919335603714,
"learning_rate": 0.00013636363636363637,
"loss": 2.6169,
"step": 96
},
{
"epoch": 2.5925925925925926,
"grad_norm": 0.11159233003854752,
"learning_rate": 0.00011818181818181818,
"loss": 2.6337,
"step": 98
},
{
"epoch": 2.6464646464646466,
"grad_norm": 0.11683017015457153,
"learning_rate": 0.0001,
"loss": 2.6153,
"step": 100
},
{
"epoch": 2.7003367003367003,
"grad_norm": 0.12459760904312134,
"learning_rate": 8.181818181818182e-05,
"loss": 2.6644,
"step": 102
},
{
"epoch": 2.7542087542087543,
"grad_norm": 0.1154564693570137,
"learning_rate": 6.363636363636363e-05,
"loss": 2.5877,
"step": 104
},
{
"epoch": 2.808080808080808,
"grad_norm": 0.11724215745925903,
"learning_rate": 4.545454545454546e-05,
"loss": 2.6417,
"step": 106
},
{
"epoch": 2.861952861952862,
"grad_norm": 0.12444790452718735,
"learning_rate": 2.7272727272727273e-05,
"loss": 2.6061,
"step": 108
},
{
"epoch": 2.915824915824916,
"grad_norm": 0.12090171128511429,
"learning_rate": 9.090909090909091e-06,
"loss": 2.5846,
"step": 110
},
{
"epoch": 2.942760942760943,
"step": 111,
"total_flos": 3.0985659629568e+17,
"train_loss": 2.6788850926064156,
"train_runtime": 977.7958,
"train_samples_per_second": 29.175,
"train_steps_per_second": 0.114
},
{
"epoch": 2.942760942760943,
"eval_loss": 2.6915676593780518,
"eval_runtime": 45.4669,
"eval_samples_per_second": 26.151,
"eval_steps_per_second": 3.277,
"step": 111
},
{
"epoch": 2.942760942760943,
"eval_loss": 2.6915125846862793,
"eval_runtime": 44.7446,
"eval_samples_per_second": 26.573,
"eval_steps_per_second": 3.33,
"step": 111
}
],
"logging_steps": 2,
"max_steps": 111,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.0985659629568e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}