{
  "best_metric": 0.602910749664121,
  "best_model_checkpoint": "phobert-legal-qa-finetuned\\checkpoint-1800",
  "epoch": 0.36487270917986564,
  "eval_steps": 200,
  "global_step": 2200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008292561572269675,
      "grad_norm": 8.954318046569824,
      "learning_rate": 5.417357656163627e-07,
      "loss": 5.4244,
      "step": 50
    },
    {
      "epoch": 0.01658512314453935,
      "grad_norm": 6.557638168334961,
      "learning_rate": 1.0834715312327253e-06,
      "loss": 5.1885,
      "step": 100
    },
    {
      "epoch": 0.024877684716809022,
      "grad_norm": 6.624057769775391,
      "learning_rate": 1.6141514648977336e-06,
      "loss": 4.6824,
      "step": 150
    },
    {
      "epoch": 0.0331702462890787,
      "grad_norm": 6.447659015655518,
      "learning_rate": 2.1669430624654506e-06,
      "loss": 3.9743,
      "step": 200
    },
    {
      "epoch": 0.0331702462890787,
      "eval_accuracy": 0.5348311306901615,
      "eval_end_accuracy": 0.29080763582966224,
      "eval_end_f1": 0.20537819782547337,
      "eval_f1": 0.10898462727065836,
      "eval_loss": 3.2860655784606934,
      "eval_runtime": 127.3249,
      "eval_samples_per_second": 133.713,
      "eval_start_accuracy": 0.7788546255506608,
      "eval_start_f1": 0.012591056715843358,
      "eval_steps_per_second": 16.721,
      "step": 200
    },
    {
      "epoch": 0.04146280786134837,
      "grad_norm": 5.946743965148926,
      "learning_rate": 2.7197346600331676e-06,
      "loss": 3.0621,
      "step": 250
    },
    {
      "epoch": 0.049755369433618045,
      "grad_norm": 7.603392601013184,
      "learning_rate": 3.272526257600885e-06,
      "loss": 1.9122,
      "step": 300
    },
    {
      "epoch": 0.05804793100588772,
      "grad_norm": 4.718493461608887,
      "learning_rate": 3.825317855168602e-06,
      "loss": 0.8663,
      "step": 350
    },
    {
      "epoch": 0.0663404925781574,
      "grad_norm": 1.8805421590805054,
      "learning_rate": 4.367053620784965e-06,
      "loss": 0.4477,
      "step": 400
    },
    {
      "epoch": 0.0663404925781574,
      "eval_accuracy": 0.9048458149779737,
      "eval_end_accuracy": 0.9175917767988253,
      "eval_end_f1": 0.8303333971515077,
      "eval_f1": 0.424233749861785,
      "eval_loss": 0.23289425671100616,
      "eval_runtime": 129.7527,
      "eval_samples_per_second": 131.211,
      "eval_start_accuracy": 0.8920998531571219,
      "eval_start_f1": 0.018134102572062404,
      "eval_steps_per_second": 16.408,
      "step": 400
    },
    {
      "epoch": 0.07463305415042706,
      "grad_norm": 1.6680586338043213,
      "learning_rate": 4.919845218352681e-06,
      "loss": 0.2926,
      "step": 450
    },
    {
      "epoch": 0.08292561572269674,
      "grad_norm": 1.3913531303405762,
      "learning_rate": 5.472636815920398e-06,
      "loss": 0.2317,
      "step": 500
    },
    {
      "epoch": 0.09121817729496641,
      "grad_norm": 0.8732656836509705,
      "learning_rate": 6.025428413488116e-06,
      "loss": 0.2013,
      "step": 550
    },
    {
      "epoch": 0.09951073886723609,
      "grad_norm": 0.4801824986934662,
      "learning_rate": 6.578220011055833e-06,
      "loss": 0.1501,
      "step": 600
    },
    {
      "epoch": 0.09951073886723609,
      "eval_accuracy": 0.9687224669603525,
      "eval_end_accuracy": 0.9620558002936858,
      "eval_end_f1": 0.9029114827270522,
      "eval_f1": 0.47707631709814124,
      "eval_loss": 0.10375536233186722,
      "eval_runtime": 115.3633,
      "eval_samples_per_second": 147.577,
      "eval_start_accuracy": 0.9753891336270191,
      "eval_start_f1": 0.051241151469230285,
      "eval_steps_per_second": 18.455,
      "step": 600
    },
    {
      "epoch": 0.10780330043950577,
      "grad_norm": 3.085040807723999,
      "learning_rate": 7.131011608623549e-06,
      "loss": 0.147,
      "step": 650
    },
    {
      "epoch": 0.11609586201177544,
      "grad_norm": 2.0271573066711426,
      "learning_rate": 7.672747374239912e-06,
      "loss": 0.1356,
      "step": 700
    },
    {
      "epoch": 0.12438842358404512,
      "grad_norm": 0.833656370639801,
      "learning_rate": 8.22553897180763e-06,
      "loss": 0.1604,
      "step": 750
    },
    {
      "epoch": 0.1326809851563148,
      "grad_norm": 4.890761375427246,
      "learning_rate": 8.778330569375346e-06,
      "loss": 0.2731,
      "step": 800
    },
    {
      "epoch": 0.1326809851563148,
      "eval_accuracy": 0.9534801762114538,
      "eval_end_accuracy": 0.9742143906020558,
      "eval_end_f1": 0.9321589248479495,
      "eval_f1": 0.49608082825261623,
      "eval_loss": 0.1122935563325882,
      "eval_runtime": 133.8336,
      "eval_samples_per_second": 127.21,
      "eval_start_accuracy": 0.9327459618208517,
      "eval_start_f1": 0.06000273165728297,
      "eval_steps_per_second": 15.908,
      "step": 800
    },
    {
      "epoch": 0.14097354672858445,
      "grad_norm": 1.1853266954421997,
      "learning_rate": 9.331122166943063e-06,
      "loss": 0.1276,
      "step": 850
    },
    {
      "epoch": 0.14926610830085413,
      "grad_norm": 7.255343437194824,
      "learning_rate": 9.88391376451078e-06,
      "loss": 0.0954,
      "step": 900
    },
    {
      "epoch": 0.1575586698731238,
      "grad_norm": 0.5000291466712952,
      "learning_rate": 1.0436705362078497e-05,
      "loss": 0.1081,
      "step": 950
    },
    {
      "epoch": 0.1658512314453935,
      "grad_norm": 0.3002016544342041,
      "learning_rate": 1.0989496959646216e-05,
      "loss": 0.1257,
      "step": 1000
    },
    {
      "epoch": 0.1658512314453935,
      "eval_accuracy": 0.9772980910425844,
      "eval_end_accuracy": 0.9769750367107195,
      "eval_end_f1": 0.9421694663520186,
      "eval_f1": 0.5088194880993329,
      "eval_loss": 0.08298086374998093,
      "eval_runtime": 127.1174,
      "eval_samples_per_second": 133.931,
      "eval_start_accuracy": 0.9776211453744493,
      "eval_start_f1": 0.07546950984664719,
      "eval_steps_per_second": 16.748,
      "step": 1000
    },
    {
      "epoch": 0.17414379301766317,
      "grad_norm": 1.4803558588027954,
      "learning_rate": 1.1542288557213931e-05,
      "loss": 0.0988,
      "step": 1050
    },
    {
      "epoch": 0.18243635458993282,
      "grad_norm": 5.256414413452148,
      "learning_rate": 1.2095080154781648e-05,
      "loss": 0.1159,
      "step": 1100
    },
    {
      "epoch": 0.1907289161622025,
      "grad_norm": 2.2532193660736084,
      "learning_rate": 1.2647871752349365e-05,
      "loss": 0.1119,
      "step": 1150
    },
    {
      "epoch": 0.19902147773447218,
      "grad_norm": 1.1581368446350098,
      "learning_rate": 1.3200663349917082e-05,
      "loss": 0.0801,
      "step": 1200
    },
    {
      "epoch": 0.19902147773447218,
      "eval_accuracy": 0.9774743024963289,
      "eval_end_accuracy": 0.9773274596182085,
      "eval_end_f1": 0.9522660725045008,
      "eval_f1": 0.5138705881207118,
      "eval_loss": 0.06253915280103683,
      "eval_runtime": 129.6143,
      "eval_samples_per_second": 131.351,
      "eval_start_accuracy": 0.9776211453744493,
      "eval_start_f1": 0.0754751037369228,
      "eval_steps_per_second": 16.426,
      "step": 1200
    },
    {
      "epoch": 0.20731403930674186,
      "grad_norm": 3.3523240089416504,
      "learning_rate": 1.3742399115533445e-05,
      "loss": 0.1058,
      "step": 1250
    },
    {
      "epoch": 0.21560660087901154,
      "grad_norm": 0.09368986636400223,
      "learning_rate": 1.429519071310116e-05,
      "loss": 0.1002,
      "step": 1300
    },
    {
      "epoch": 0.2238991624512812,
      "grad_norm": 2.6880834102630615,
      "learning_rate": 1.4847982310668878e-05,
      "loss": 0.0563,
      "step": 1350
    },
    {
      "epoch": 0.23219172402355087,
      "grad_norm": 10.309085845947266,
      "learning_rate": 1.5400773908236596e-05,
      "loss": 0.0973,
      "step": 1400
    },
    {
      "epoch": 0.23219172402355087,
      "eval_accuracy": 0.9787371512481644,
      "eval_end_accuracy": 0.978208516886931,
      "eval_end_f1": 0.9484799222981063,
      "eval_f1": 0.5438896216202269,
      "eval_loss": 0.0639370009303093,
      "eval_runtime": 128.1258,
      "eval_samples_per_second": 132.877,
      "eval_start_accuracy": 0.9792657856093979,
      "eval_start_f1": 0.13929932094234762,
      "eval_steps_per_second": 16.616,
      "step": 1400
    },
    {
      "epoch": 0.24048428559582055,
      "grad_norm": 0.07747649401426315,
      "learning_rate": 1.5953565505804315e-05,
      "loss": 0.0724,
      "step": 1450
    },
    {
      "epoch": 0.24877684716809023,
      "grad_norm": 0.076473668217659,
      "learning_rate": 1.650635710337203e-05,
      "loss": 0.0928,
      "step": 1500
    },
    {
      "epoch": 0.2570694087403599,
      "grad_norm": 0.18258516490459442,
      "learning_rate": 1.7059148700939746e-05,
      "loss": 0.085,
      "step": 1550
    },
    {
      "epoch": 0.2653619703126296,
      "grad_norm": 2.0234451293945312,
      "learning_rate": 1.7611940298507464e-05,
      "loss": 0.1107,
      "step": 1600
    },
    {
      "epoch": 0.2653619703126296,
      "eval_accuracy": 0.9774449339207049,
      "eval_end_accuracy": 0.9773274596182085,
      "eval_end_f1": 0.9548822846052203,
      "eval_f1": 0.5144663543275407,
      "eval_loss": 0.06893135607242584,
      "eval_runtime": 137.3173,
      "eval_samples_per_second": 123.983,
      "eval_start_accuracy": 0.9775624082232012,
      "eval_start_f1": 0.07405042404986113,
      "eval_steps_per_second": 15.504,
      "step": 1600
    },
    {
      "epoch": 0.27365453188489924,
      "grad_norm": 2.9243485927581787,
      "learning_rate": 1.816473189607518e-05,
      "loss": 0.069,
      "step": 1650
    },
    {
      "epoch": 0.2819470934571689,
      "grad_norm": 0.05162263661623001,
      "learning_rate": 1.87175234936429e-05,
      "loss": 0.0815,
      "step": 1700
    },
    {
      "epoch": 0.2902396550294386,
      "grad_norm": 0.12183202058076859,
      "learning_rate": 1.9270315091210617e-05,
      "loss": 0.0741,
      "step": 1750
    },
    {
      "epoch": 0.29853221660170826,
      "grad_norm": 3.247403621673584,
      "learning_rate": 1.9823106688778332e-05,
      "loss": 0.0819,
      "step": 1800
    },
    {
      "epoch": 0.29853221660170826,
      "eval_accuracy": 0.9795007342143907,
      "eval_end_accuracy": 0.9793832599118942,
      "eval_end_f1": 0.9547976694500852,
      "eval_f1": 0.602910749664121,
      "eval_loss": 0.07215487957000732,
      "eval_runtime": 135.7664,
      "eval_samples_per_second": 125.399,
      "eval_start_accuracy": 0.979618208516887,
      "eval_start_f1": 0.25102382987815675,
      "eval_steps_per_second": 15.681,
      "step": 1800
    },
    {
      "epoch": 0.30682477817397796,
      "grad_norm": 0.042116910219192505,
      "learning_rate": 1.9958225826268585e-05,
      "loss": 0.0641,
      "step": 1850
    },
    {
      "epoch": 0.3151173397462476,
      "grad_norm": 0.36534908413887024,
      "learning_rate": 1.989679321784003e-05,
      "loss": 0.1036,
      "step": 1900
    },
    {
      "epoch": 0.32340990131851727,
      "grad_norm": 0.2260214239358902,
      "learning_rate": 1.9835360609411478e-05,
      "loss": 0.0751,
      "step": 1950
    },
    {
      "epoch": 0.331702462890787,
      "grad_norm": 0.03621504455804825,
      "learning_rate": 1.977392800098292e-05,
      "loss": 0.0632,
      "step": 2000
    },
    {
      "epoch": 0.331702462890787,
      "eval_accuracy": 0.9779148311306902,
      "eval_end_accuracy": 0.9779735682819384,
      "eval_end_f1": 0.9323578270443484,
      "eval_f1": 0.5095598212540562,
      "eval_loss": 0.058555059134960175,
      "eval_runtime": 136.5016,
      "eval_samples_per_second": 124.724,
      "eval_start_accuracy": 0.977856093979442,
      "eval_start_f1": 0.08676181546376394,
      "eval_steps_per_second": 15.597,
      "step": 2000
    },
    {
      "epoch": 0.3399950244630566,
      "grad_norm": 0.4064314365386963,
      "learning_rate": 1.9712495392554368e-05,
      "loss": 0.0833,
      "step": 2050
    },
    {
      "epoch": 0.34828758603532634,
      "grad_norm": 1.703637957572937,
      "learning_rate": 1.9651062784125818e-05,
      "loss": 0.1415,
      "step": 2100
    },
    {
      "epoch": 0.356580147607596,
      "grad_norm": 0.7372889518737793,
      "learning_rate": 1.958963017569726e-05,
      "loss": 0.2726,
      "step": 2150
    },
    {
      "epoch": 0.36487270917986564,
      "grad_norm": 0.28392699360847473,
      "learning_rate": 1.9528197567268707e-05,
      "loss": 0.137,
      "step": 2200
    },
    {
      "epoch": 0.36487270917986564,
      "eval_accuracy": 0.9787958883994126,
      "eval_end_accuracy": 0.9785609397944199,
      "eval_end_f1": 0.9504899506887509,
      "eval_f1": 0.5319231158110048,
      "eval_loss": 0.05977020785212517,
      "eval_runtime": 136.9823,
      "eval_samples_per_second": 124.286,
      "eval_start_accuracy": 0.9790308370044053,
      "eval_start_f1": 0.11335628093325875,
      "eval_steps_per_second": 15.542,
      "step": 2200
    }
  ],
  "logging_steps": 50,
  "max_steps": 18087,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.001
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4598822918553600.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}