{
  "best_global_step": 500,
  "best_metric": 0.9950738019660021,
  "best_model_checkpoint": "hiera-finetuned-stroke-binary-ultrasound/checkpoint-500",
  "epoch": 12.0,
  "eval_steps": 100,
  "global_step": 1380,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.437636761487965,
      "grad_norm": 4.08018684387207,
      "learning_rate": 6.811594202898551e-06,
      "loss": 0.0505,
      "step": 50
    },
    {
      "epoch": 0.87527352297593,
      "grad_norm": 0.09228578954935074,
      "learning_rate": 1.4057971014492755e-05,
      "loss": 0.038,
      "step": 100
    },
    {
      "epoch": 0.87527352297593,
      "eval_accuracy": 0.9938423645320197,
      "eval_f1": 0.9938419442238937,
      "eval_loss": 0.018425090238451958,
      "eval_precision": 0.9939169121769106,
      "eval_recall": 0.9938423645320197,
      "eval_runtime": 4.7821,
      "eval_samples_per_second": 169.799,
      "eval_steps_per_second": 21.329,
      "step": 100
    },
    {
      "epoch": 1.3063457330415755,
      "grad_norm": 13.409342765808105,
      "learning_rate": 1.9997952651793994e-05,
      "loss": 0.0537,
      "step": 150
    },
    {
      "epoch": 1.7439824945295404,
      "grad_norm": 4.2889485359191895,
      "learning_rate": 1.9892575474365977e-05,
      "loss": 0.0468,
      "step": 200
    },
    {
      "epoch": 1.7439824945295404,
      "eval_accuracy": 0.9926108374384236,
      "eval_f1": 0.9926101649260403,
      "eval_loss": 0.02063354291021824,
      "eval_precision": 0.9927179267509103,
      "eval_recall": 0.9926108374384236,
      "eval_runtime": 4.2841,
      "eval_samples_per_second": 189.54,
      "eval_steps_per_second": 23.809,
      "step": 200
    },
    {
      "epoch": 2.175054704595186,
      "grad_norm": 27.501617431640625,
      "learning_rate": 1.9629172873477995e-05,
      "loss": 0.055,
      "step": 250
    },
    {
      "epoch": 2.612691466083151,
      "grad_norm": 34.34234619140625,
      "learning_rate": 1.9211952480161382e-05,
      "loss": 0.0445,
      "step": 300
    },
    {
      "epoch": 2.612691466083151,
      "eval_accuracy": 0.9901477832512315,
      "eval_f1": 0.9901478430214063,
      "eval_loss": 0.02250731736421585,
      "eval_precision": 0.9901599165230894,
      "eval_recall": 0.9901477832512315,
      "eval_runtime": 4.3458,
      "eval_samples_per_second": 186.849,
      "eval_steps_per_second": 23.471,
      "step": 300
    },
    {
      "epoch": 3.0437636761487963,
      "grad_norm": 1.1840153932571411,
      "learning_rate": 1.866025403784439e-05,
      "loss": 0.0309,
      "step": 350
    },
    {
      "epoch": 3.4814004376367613,
      "grad_norm": 0.7110581994056702,
      "learning_rate": 1.7960402776569358e-05,
      "loss": 0.0415,
      "step": 400
    },
    {
      "epoch": 3.4814004376367613,
      "eval_accuracy": 0.9889162561576355,
      "eval_f1": 0.9889161721031502,
      "eval_loss": 0.018740125000476837,
      "eval_precision": 0.9889190879070968,
      "eval_recall": 0.9889162561576355,
      "eval_runtime": 4.4065,
      "eval_samples_per_second": 184.274,
      "eval_steps_per_second": 23.148,
      "step": 400
    },
    {
      "epoch": 3.9190371991247264,
      "grad_norm": 44.404239654541016,
      "learning_rate": 1.7133390896410106e-05,
      "loss": 0.034,
      "step": 450
    },
    {
      "epoch": 4.350109409190372,
      "grad_norm": 0.5970667004585266,
      "learning_rate": 1.6192429204088022e-05,
      "loss": 0.0465,
      "step": 500
    },
    {
      "epoch": 4.350109409190372,
      "eval_accuracy": 0.9950738916256158,
      "eval_f1": 0.9950738019660021,
      "eval_loss": 0.009755603037774563,
      "eval_precision": 0.9950857869728612,
      "eval_recall": 0.9950738916256158,
      "eval_runtime": 5.2229,
      "eval_samples_per_second": 155.47,
      "eval_steps_per_second": 19.53,
      "step": 500
    },
    {
      "epoch": 4.787746170678337,
      "grad_norm": 0.04525822028517723,
      "learning_rate": 1.5152548757013183e-05,
      "loss": 0.0284,
      "step": 550
    },
    {
      "epoch": 5.218818380743983,
      "grad_norm": 6.67152214050293,
      "learning_rate": 1.4030360755011423e-05,
      "loss": 0.0397,
      "step": 600
    },
    {
      "epoch": 5.218818380743983,
      "eval_accuracy": 0.9901477832512315,
      "eval_f1": 0.9901463484623846,
      "eval_loss": 0.028631530702114105,
      "eval_precision": 0.9903372489579386,
      "eval_recall": 0.9901477832512315,
      "eval_runtime": 5.1408,
      "eval_samples_per_second": 157.953,
      "eval_steps_per_second": 19.841,
      "step": 600
    },
    {
      "epoch": 5.656455142231947,
      "grad_norm": 2.804370641708374,
      "learning_rate": 1.284379119062912e-05,
      "loss": 0.0334,
      "step": 650
    },
    {
      "epoch": 6.087527352297593,
      "grad_norm": 0.5940968990325928,
      "learning_rate": 1.1611794496750019e-05,
      "loss": 0.0257,
      "step": 700
    },
    {
      "epoch": 6.087527352297593,
      "eval_accuracy": 0.9926108374384236,
      "eval_f1": 0.9926104787847378,
      "eval_loss": 0.01877174898982048,
      "eval_precision": 0.992658305036109,
      "eval_recall": 0.9926108374384236,
      "eval_runtime": 4.3633,
      "eval_samples_per_second": 186.097,
      "eval_steps_per_second": 23.377,
      "step": 700
    },
    {
      "epoch": 6.525164113785558,
      "grad_norm": 6.283336162567139,
      "learning_rate": 1.0354050765758148e-05,
      "loss": 0.0226,
      "step": 750
    },
    {
      "epoch": 6.962800875273523,
      "grad_norm": 0.4039391279220581,
      "learning_rate": 9.090651376911532e-06,
      "loss": 0.0434,
      "step": 800
    },
    {
      "epoch": 6.962800875273523,
      "eval_accuracy": 0.9938423645320197,
      "eval_f1": 0.9938419442238937,
      "eval_loss": 0.02092530019581318,
      "eval_precision": 0.9939169121769106,
      "eval_recall": 0.9938423645320197,
      "eval_runtime": 5.4149,
      "eval_samples_per_second": 149.956,
      "eval_steps_per_second": 18.837,
      "step": 800
    },
    {
      "epoch": 7.393873085339169,
      "grad_norm": 26.558956146240234,
      "learning_rate": 7.841778053760212e-06,
      "loss": 0.0256,
      "step": 850
    },
    {
      "epoch": 7.831509846827133,
      "grad_norm": 1.8528072834014893,
      "learning_rate": 6.627380478391543e-06,
      "loss": 0.0261,
      "step": 900
    },
    {
      "epoch": 7.831509846827133,
      "eval_accuracy": 0.9926108374384236,
      "eval_f1": 0.9926107029490034,
      "eval_loss": 0.015434592962265015,
      "eval_precision": 0.9926226132344405,
      "eval_recall": 0.9926108374384236,
      "eval_runtime": 4.4967,
      "eval_samples_per_second": 180.577,
      "eval_steps_per_second": 22.683,
      "step": 900
    },
    {
      "epoch": 8.262582056892779,
      "grad_norm": 0.028279444202780724,
      "learning_rate": 5.466857612339229e-06,
      "loss": 0.0132,
      "step": 950
    },
    {
      "epoch": 8.700218818380744,
      "grad_norm": 10.328960418701172,
      "learning_rate": 4.378747814781629e-06,
      "loss": 0.0198,
      "step": 1000
    },
    {
      "epoch": 8.700218818380744,
      "eval_accuracy": 0.9950738916256158,
      "eval_f1": 0.9950736525231585,
      "eval_loss": 0.009430773556232452,
      "eval_precision": 0.9951217179205126,
      "eval_recall": 0.9950738916256158,
      "eval_runtime": 5.241,
      "eval_samples_per_second": 154.932,
      "eval_steps_per_second": 19.462,
      "step": 1000
    },
    {
      "epoch": 9.13129102844639,
      "grad_norm": 5.253521919250488,
      "learning_rate": 3.3804327081259304e-06,
      "loss": 0.0244,
      "step": 1050
    },
    {
      "epoch": 9.568927789934355,
      "grad_norm": 0.006149813067167997,
      "learning_rate": 2.4878595214718236e-06,
      "loss": 0.0207,
      "step": 1100
    },
    {
      "epoch": 9.568927789934355,
      "eval_accuracy": 0.9938423645320197,
      "eval_f1": 0.9938421683977477,
      "eval_loss": 0.012234929017722607,
      "eval_precision": 0.9938691081078003,
      "eval_recall": 0.9938423645320197,
      "eval_runtime": 4.3976,
      "eval_samples_per_second": 184.648,
      "eval_steps_per_second": 23.195,
      "step": 1100
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.003324387362226844,
      "learning_rate": 1.7152863472802195e-06,
      "loss": 0.0212,
      "step": 1150
    },
    {
      "epoch": 10.437636761487965,
      "grad_norm": 5.904305934906006,
      "learning_rate": 1.075054380553552e-06,
      "loss": 0.0157,
      "step": 1200
    },
    {
      "epoch": 10.437636761487965,
      "eval_accuracy": 0.9950738916256158,
      "eval_f1": 0.9950736525231585,
      "eval_loss": 0.01014081109315157,
      "eval_precision": 0.9951217179205126,
      "eval_recall": 0.9950738916256158,
      "eval_runtime": 5.4913,
      "eval_samples_per_second": 147.869,
      "eval_steps_per_second": 18.575,
      "step": 1200
    },
    {
      "epoch": 10.87527352297593,
      "grad_norm": 0.6864893436431885,
      "learning_rate": 5.77390778811796e-07,
      "loss": 0.0143,
      "step": 1250
    },
    {
      "epoch": 11.306345733041576,
      "grad_norm": 39.91190719604492,
      "learning_rate": 2.3024529200728952e-07,
      "loss": 0.0188,
      "step": 1300
    },
    {
      "epoch": 11.306345733041576,
      "eval_accuracy": 0.9950738916256158,
      "eval_f1": 0.9950736525231585,
      "eval_loss": 0.010448944754898548,
      "eval_precision": 0.9951217179205126,
      "eval_recall": 0.9950738916256158,
      "eval_runtime": 5.3991,
      "eval_samples_per_second": 150.395,
      "eval_steps_per_second": 18.892,
      "step": 1300
    },
    {
      "epoch": 11.74398249452954,
      "grad_norm": 40.59695816040039,
      "learning_rate": 3.916327207559967e-08,
      "loss": 0.0198,
      "step": 1350
    },
    {
      "epoch": 12.0,
      "step": 1380,
      "total_flos": 4.018008276329693e+18,
      "train_loss": 0.031628868545311085,
      "train_runtime": 1401.2705,
      "train_samples_per_second": 62.549,
      "train_steps_per_second": 0.985
    }
  ],
  "logging_steps": 50,
  "max_steps": 1380,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 12,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.018008276329693e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}