{ "best_global_step": 1756, "best_metric": 0.9242246437552389, "best_model_checkpoint": "C:\\Users\\Shara\\projects\\models\\run_16-lr_2e-05-acc_1-wd_0.01-bs_8-ep_7\\checkpoint-1756", "epoch": 1.0, "eval_steps": 500, "global_step": 1756, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02847380410022779, "grad_norm": 4.8302321434021, "learning_rate": 1.992840872111943e-05, "loss": 0.9994, "step": 50 }, { "epoch": 0.05694760820045558, "grad_norm": 3.4978649616241455, "learning_rate": 1.9847054995118778e-05, "loss": 0.3939, "step": 100 }, { "epoch": 0.08542141230068337, "grad_norm": 8.353672981262207, "learning_rate": 1.9765701269118128e-05, "loss": 0.2882, "step": 150 }, { "epoch": 0.11389521640091116, "grad_norm": 9.822871208190918, "learning_rate": 1.9684347543117477e-05, "loss": 0.1728, "step": 200 }, { "epoch": 0.14236902050113895, "grad_norm": 3.1411221027374268, "learning_rate": 1.9602993817116827e-05, "loss": 0.1251, "step": 250 }, { "epoch": 0.17084282460136674, "grad_norm": 8.493424415588379, "learning_rate": 1.9521640091116173e-05, "loss": 0.1405, "step": 300 }, { "epoch": 0.19931662870159453, "grad_norm": 4.757007122039795, "learning_rate": 1.9440286365115523e-05, "loss": 0.1431, "step": 350 }, { "epoch": 0.22779043280182232, "grad_norm": 4.339049816131592, "learning_rate": 1.9358932639114873e-05, "loss": 0.0912, "step": 400 }, { "epoch": 0.25626423690205014, "grad_norm": 14.540772438049316, "learning_rate": 1.9277578913114222e-05, "loss": 0.1048, "step": 450 }, { "epoch": 0.2847380410022779, "grad_norm": 10.503073692321777, "learning_rate": 1.9196225187113572e-05, "loss": 0.1224, "step": 500 }, { "epoch": 0.3132118451025057, "grad_norm": 2.738131284713745, "learning_rate": 1.911487146111292e-05, "loss": 0.1025, "step": 550 }, { "epoch": 0.3416856492027335, "grad_norm": 6.681102752685547, "learning_rate": 1.903351773511227e-05, "loss": 0.0869, "step": 600 }, { "epoch": 0.3701594533029613, "grad_norm": 1.4330339431762695, "learning_rate": 1.895216400911162e-05, "loss": 0.052, "step": 650 }, { "epoch": 0.39863325740318906, "grad_norm": 2.581470012664795, "learning_rate": 1.8870810283110967e-05, "loss": 0.0808, "step": 700 }, { "epoch": 0.4271070615034169, "grad_norm": 5.825100898742676, "learning_rate": 1.8789456557110317e-05, "loss": 0.0716, "step": 750 }, { "epoch": 0.45558086560364464, "grad_norm": 1.74166738986969, "learning_rate": 1.8708102831109666e-05, "loss": 0.0806, "step": 800 }, { "epoch": 0.48405466970387245, "grad_norm": 7.3097405433654785, "learning_rate": 1.8626749105109016e-05, "loss": 0.0666, "step": 850 }, { "epoch": 0.5125284738041003, "grad_norm": 2.220766067504883, "learning_rate": 1.8545395379108362e-05, "loss": 0.0768, "step": 900 }, { "epoch": 0.541002277904328, "grad_norm": 4.0566511154174805, "learning_rate": 1.8464041653107715e-05, "loss": 0.0707, "step": 950 }, { "epoch": 0.5694760820045558, "grad_norm": 0.2833240330219269, "learning_rate": 1.8382687927107065e-05, "loss": 0.0858, "step": 1000 }, { "epoch": 0.5979498861047836, "grad_norm": 21.131481170654297, "learning_rate": 1.8301334201106415e-05, "loss": 0.0595, "step": 1050 }, { "epoch": 0.6264236902050114, "grad_norm": 27.655433654785156, "learning_rate": 1.821998047510576e-05, "loss": 0.0466, "step": 1100 }, { "epoch": 0.6548974943052391, "grad_norm": 3.4721717834472656, "learning_rate": 1.813862674910511e-05, "loss": 0.0776, "step": 1150 }, { "epoch": 0.683371298405467, "grad_norm": 2.4811441898345947, "learning_rate": 
1.805727302310446e-05, "loss": 0.0647, "step": 1200 }, { "epoch": 0.7118451025056948, "grad_norm": 6.169066429138184, "learning_rate": 1.797591929710381e-05, "loss": 0.0724, "step": 1250 }, { "epoch": 0.7403189066059226, "grad_norm": 8.984748840332031, "learning_rate": 1.7894565571103156e-05, "loss": 0.0838, "step": 1300 }, { "epoch": 0.7687927107061503, "grad_norm": 5.944505214691162, "learning_rate": 1.7813211845102506e-05, "loss": 0.0683, "step": 1350 }, { "epoch": 0.7972665148063781, "grad_norm": 0.28812381625175476, "learning_rate": 1.7731858119101856e-05, "loss": 0.0614, "step": 1400 }, { "epoch": 0.8257403189066059, "grad_norm": 6.176011085510254, "learning_rate": 1.7650504393101205e-05, "loss": 0.0615, "step": 1450 }, { "epoch": 0.8542141230068337, "grad_norm": 2.5244405269622803, "learning_rate": 1.7569150667100555e-05, "loss": 0.0714, "step": 1500 }, { "epoch": 0.8826879271070615, "grad_norm": 3.3508074283599854, "learning_rate": 1.7487796941099904e-05, "loss": 0.0563, "step": 1550 }, { "epoch": 0.9111617312072893, "grad_norm": 9.980842590332031, "learning_rate": 1.7406443215099254e-05, "loss": 0.0647, "step": 1600 }, { "epoch": 0.9396355353075171, "grad_norm": 3.6959774494171143, "learning_rate": 1.7325089489098604e-05, "loss": 0.0682, "step": 1650 }, { "epoch": 0.9681093394077449, "grad_norm": 0.626620888710022, "learning_rate": 1.724373576309795e-05, "loss": 0.0499, "step": 1700 }, { "epoch": 0.9965831435079726, "grad_norm": 7.145672798156738, "learning_rate": 1.71623820370973e-05, "loss": 0.0569, "step": 1750 }, { "epoch": 1.0, "eval_f1": 0.9242246437552389, "eval_loss": 0.05600914731621742, "eval_precision": 0.9206746826987308, "eval_recall": 0.927802086839448, "eval_runtime": 3.7538, "eval_samples_per_second": 865.798, "eval_steps_per_second": 108.425, "step": 1756 } ], "logging_steps": 50, "max_steps": 12292, "num_input_tokens_seen": 0, "num_train_epochs": 7, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 344842189594686.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }