{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 63,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015873015873015872,
"grad_norm": 2.355871594757968,
"learning_rate": 0.0,
"loss": 1.0469,
"step": 1
},
{
"epoch": 0.031746031746031744,
"grad_norm": 2.29666205825573,
"learning_rate": 1.5625e-06,
"loss": 0.9929,
"step": 2
},
{
"epoch": 0.047619047619047616,
"grad_norm": 2.535252155158116,
"learning_rate": 3.125e-06,
"loss": 1.0185,
"step": 3
},
{
"epoch": 0.06349206349206349,
"grad_norm": 2.106429965859871,
"learning_rate": 4.6875000000000004e-06,
"loss": 0.9046,
"step": 4
},
{
"epoch": 0.07936507936507936,
"grad_norm": 1.7754252196610092,
"learning_rate": 6.25e-06,
"loss": 1.0312,
"step": 5
},
{
"epoch": 0.09523809523809523,
"grad_norm": 1.5212254656019404,
"learning_rate": 7.8125e-06,
"loss": 0.9741,
"step": 6
},
{
"epoch": 0.1111111111111111,
"grad_norm": 1.2286700272250457,
"learning_rate": 9.375000000000001e-06,
"loss": 0.924,
"step": 7
},
{
"epoch": 0.12698412698412698,
"grad_norm": 1.239763672997659,
"learning_rate": 1.09375e-05,
"loss": 0.9112,
"step": 8
},
{
"epoch": 0.14285714285714285,
"grad_norm": 1.7804807603399884,
"learning_rate": 1.25e-05,
"loss": 0.9631,
"step": 9
},
{
"epoch": 0.15873015873015872,
"grad_norm": 1.6096276860914913,
"learning_rate": 1.4062500000000001e-05,
"loss": 0.8914,
"step": 10
},
{
"epoch": 0.1746031746031746,
"grad_norm": 1.4685478602306294,
"learning_rate": 1.5625e-05,
"loss": 0.8313,
"step": 11
},
{
"epoch": 0.19047619047619047,
"grad_norm": 1.797675338951107,
"learning_rate": 1.71875e-05,
"loss": 0.9572,
"step": 12
},
{
"epoch": 0.20634920634920634,
"grad_norm": 1.5020710264445867,
"learning_rate": 1.8750000000000002e-05,
"loss": 0.8929,
"step": 13
},
{
"epoch": 0.2222222222222222,
"grad_norm": 1.389913325647765,
"learning_rate": 2.0312500000000002e-05,
"loss": 1.0575,
"step": 14
},
{
"epoch": 0.23809523809523808,
"grad_norm": 1.0364172775494112,
"learning_rate": 2.1875e-05,
"loss": 0.9367,
"step": 15
},
{
"epoch": 0.25396825396825395,
"grad_norm": 1.1754008678466592,
"learning_rate": 2.34375e-05,
"loss": 1.0082,
"step": 16
},
{
"epoch": 0.2698412698412698,
"grad_norm": 1.0227204004003465,
"learning_rate": 2.5e-05,
"loss": 0.9148,
"step": 17
},
{
"epoch": 0.2857142857142857,
"grad_norm": 1.0820292890536618,
"learning_rate": 2.6562500000000002e-05,
"loss": 0.9748,
"step": 18
},
{
"epoch": 0.30158730158730157,
"grad_norm": 0.945837076799249,
"learning_rate": 2.8125000000000003e-05,
"loss": 0.7132,
"step": 19
},
{
"epoch": 0.31746031746031744,
"grad_norm": 1.0248571882025115,
"learning_rate": 2.96875e-05,
"loss": 0.8881,
"step": 20
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.7825673389448284,
"learning_rate": 3.125e-05,
"loss": 0.9596,
"step": 21
},
{
"epoch": 0.3492063492063492,
"grad_norm": 0.9610289643147956,
"learning_rate": 3.2812500000000005e-05,
"loss": 1.0261,
"step": 22
},
{
"epoch": 0.36507936507936506,
"grad_norm": 0.9669319262880848,
"learning_rate": 3.4375e-05,
"loss": 0.8504,
"step": 23
},
{
"epoch": 0.38095238095238093,
"grad_norm": 0.7449356400588687,
"learning_rate": 3.59375e-05,
"loss": 0.8124,
"step": 24
},
{
"epoch": 0.3968253968253968,
"grad_norm": 0.83693613965816,
"learning_rate": 3.7500000000000003e-05,
"loss": 1.0061,
"step": 25
},
{
"epoch": 0.4126984126984127,
"grad_norm": 0.732383235323442,
"learning_rate": 3.90625e-05,
"loss": 0.8433,
"step": 26
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.7794935876422098,
"learning_rate": 4.0625000000000005e-05,
"loss": 0.7042,
"step": 27
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.7604958575268047,
"learning_rate": 4.21875e-05,
"loss": 0.9629,
"step": 28
},
{
"epoch": 0.4603174603174603,
"grad_norm": 0.6984385536231642,
"learning_rate": 4.375e-05,
"loss": 0.8909,
"step": 29
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.7941140442495155,
"learning_rate": 4.5312500000000004e-05,
"loss": 0.9318,
"step": 30
},
{
"epoch": 0.49206349206349204,
"grad_norm": 0.7485507149365567,
"learning_rate": 4.6875e-05,
"loss": 0.9032,
"step": 31
},
{
"epoch": 0.5079365079365079,
"grad_norm": 0.7312955579208906,
"learning_rate": 4.8437500000000005e-05,
"loss": 0.8793,
"step": 32
},
{
"epoch": 0.5238095238095238,
"grad_norm": 0.728955139017027,
"learning_rate": 5e-05,
"loss": 1.0292,
"step": 33
},
{
"epoch": 0.5396825396825397,
"grad_norm": 0.6223071714803796,
"learning_rate": 4.999845960383972e-05,
"loss": 0.829,
"step": 34
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.7081482857388056,
"learning_rate": 4.999383860518451e-05,
"loss": 0.821,
"step": 35
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.7021438050343367,
"learning_rate": 4.998613757348784e-05,
"loss": 0.9658,
"step": 36
},
{
"epoch": 0.5873015873015873,
"grad_norm": 0.6005545292155794,
"learning_rate": 4.9975357457760896e-05,
"loss": 0.9805,
"step": 37
},
{
"epoch": 0.6031746031746031,
"grad_norm": 0.66159765943155,
"learning_rate": 4.9961499586455584e-05,
"loss": 0.8479,
"step": 38
},
{
"epoch": 0.6190476190476191,
"grad_norm": 0.6859851915992995,
"learning_rate": 4.994456566730085e-05,
"loss": 0.9104,
"step": 39
},
{
"epoch": 0.6349206349206349,
"grad_norm": 0.5770937588145084,
"learning_rate": 4.9924557787092216e-05,
"loss": 0.7705,
"step": 40
},
{
"epoch": 0.6507936507936508,
"grad_norm": 0.5587380636863567,
"learning_rate": 4.990147841143462e-05,
"loss": 0.8406,
"step": 41
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.5939117737515469,
"learning_rate": 4.9875330384438604e-05,
"loss": 0.9143,
"step": 42
},
{
"epoch": 0.6825396825396826,
"grad_norm": 0.5421902034671652,
"learning_rate": 4.984611692836979e-05,
"loss": 0.8323,
"step": 43
},
{
"epoch": 0.6984126984126984,
"grad_norm": 0.5523631282552526,
"learning_rate": 4.9813841643251836e-05,
"loss": 0.7557,
"step": 44
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.5991896816540728,
"learning_rate": 4.977850850642275e-05,
"loss": 0.8471,
"step": 45
},
{
"epoch": 0.7301587301587301,
"grad_norm": 0.6364621652121066,
"learning_rate": 4.97401218720448e-05,
"loss": 0.8013,
"step": 46
},
{
"epoch": 0.746031746031746,
"grad_norm": 0.5534873919742207,
"learning_rate": 4.969868647056792e-05,
"loss": 0.8996,
"step": 47
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.5035034360883922,
"learning_rate": 4.965420740814679e-05,
"loss": 0.8944,
"step": 48
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.5729489774574688,
"learning_rate": 4.9606690166011546e-05,
"loss": 0.7887,
"step": 49
},
{
"epoch": 0.7936507936507936,
"grad_norm": 0.6004405465949967,
"learning_rate": 4.95561405997924e-05,
"loss": 0.831,
"step": 50
},
{
"epoch": 0.8095238095238095,
"grad_norm": 0.725673276310606,
"learning_rate": 4.9502564938797946e-05,
"loss": 0.8849,
"step": 51
},
{
"epoch": 0.8253968253968254,
"grad_norm": 0.4990176056299948,
"learning_rate": 4.9445969785247595e-05,
"loss": 0.8298,
"step": 52
},
{
"epoch": 0.8412698412698413,
"grad_norm": 0.6071056927514447,
"learning_rate": 4.938636211345792e-05,
"loss": 0.944,
"step": 53
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.6345636016627114,
"learning_rate": 4.932374926898321e-05,
"loss": 0.8069,
"step": 54
},
{
"epoch": 0.873015873015873,
"grad_norm": 0.6333202856879505,
"learning_rate": 4.9258138967710306e-05,
"loss": 0.7373,
"step": 55
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.5152048396222051,
"learning_rate": 4.918953929490768e-05,
"loss": 0.8855,
"step": 56
},
{
"epoch": 0.9047619047619048,
"grad_norm": 0.5806639114473304,
"learning_rate": 4.9117958704229154e-05,
"loss": 0.8731,
"step": 57
},
{
"epoch": 0.9206349206349206,
"grad_norm": 0.6001547400645247,
"learning_rate": 4.904340601667208e-05,
"loss": 0.816,
"step": 58
},
{
"epoch": 0.9365079365079365,
"grad_norm": 0.470270082255384,
"learning_rate": 4.8965890419490354e-05,
"loss": 0.8847,
"step": 59
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.5015222585808989,
"learning_rate": 4.8885421465062236e-05,
"loss": 0.8507,
"step": 60
},
{
"epoch": 0.9682539682539683,
"grad_norm": 0.48808541077856465,
"learning_rate": 4.88020090697132e-05,
"loss": 0.774,
"step": 61
},
{
"epoch": 0.9841269841269841,
"grad_norm": 0.5837250225569859,
"learning_rate": 4.871566351249393e-05,
"loss": 0.8844,
"step": 62
},
{
"epoch": 1.0,
"grad_norm": 0.5121395435398158,
"learning_rate": 4.86263954339136e-05,
"loss": 0.7565,
"step": 63
}
],
"logging_steps": 1,
"max_steps": 315,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 15539903594496.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}