{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04479684630202034,
  "eval_steps": 1000,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002239842315101017,
      "grad_norm": 71.10852813720703,
      "learning_rate": 0.0001,
      "loss": 3.5057,
      "step": 100
    },
    {
      "epoch": 0.004479684630202034,
      "grad_norm": 57.756534576416016,
      "learning_rate": 9.9998756572327e-05,
      "loss": 0.8254,
      "step": 200
    },
    {
      "epoch": 0.006719526945303051,
      "grad_norm": 44.61080551147461,
      "learning_rate": 9.999502635115246e-05,
      "loss": 0.6935,
      "step": 300
    },
    {
      "epoch": 0.008959369260404068,
      "grad_norm": 50.82619094848633,
      "learning_rate": 9.998880952200681e-05,
      "loss": 0.5972,
      "step": 400
    },
    {
      "epoch": 0.011199211575505085,
      "grad_norm": 44.26677703857422,
      "learning_rate": 9.998010639409713e-05,
      "loss": 0.5408,
      "step": 500
    },
    {
      "epoch": 0.013439053890606102,
      "grad_norm": 42.8045654296875,
      "learning_rate": 9.996891740029186e-05,
      "loss": 0.5509,
      "step": 600
    },
    {
      "epoch": 0.01567889620570712,
      "grad_norm": 30.989139556884766,
      "learning_rate": 9.995524309709913e-05,
      "loss": 0.4823,
      "step": 700
    },
    {
      "epoch": 0.017918738520808136,
      "grad_norm": 34.01952362060547,
      "learning_rate": 9.993908416463927e-05,
      "loss": 0.5111,
      "step": 800
    },
    {
      "epoch": 0.020158580835909153,
      "grad_norm": 34.079307556152344,
      "learning_rate": 9.992044140661079e-05,
      "loss": 0.4635,
      "step": 900
    },
    {
      "epoch": 0.02239842315101017,
      "grad_norm": 26.16071128845215,
      "learning_rate": 9.989931575025056e-05,
      "loss": 0.4883,
      "step": 1000
    },
    {
      "epoch": 0.02239842315101017,
      "eval_avg_non_pair_similarity": 0.0020343252948339737,
      "eval_avg_pair_similarity": 0.008852629057131708,
      "eval_loss": 0.5431402921676636,
      "eval_runtime": 19.6178,
      "eval_samples_per_second": 25.487,
      "eval_similarity_ratio": 4.35162905343228,
      "eval_steps_per_second": 0.816,
      "step": 1000
    },
    {
      "epoch": 0.024638265466111187,
      "grad_norm": 35.38695526123047,
      "learning_rate": 9.987570824628759e-05,
      "loss": 0.4655,
      "step": 1100
    },
    {
      "epoch": 0.026878107781212204,
      "grad_norm": 32.071346282958984,
      "learning_rate": 9.984962006889084e-05,
      "loss": 0.4342,
      "step": 1200
    },
    {
      "epoch": 0.029117950096313218,
      "grad_norm": 39.6610221862793,
      "learning_rate": 9.982105251561082e-05,
      "loss": 0.458,
      "step": 1300
    },
    {
      "epoch": 0.03135779241141424,
      "grad_norm": 31.493322372436523,
      "learning_rate": 9.979000700731491e-05,
      "loss": 0.4525,
      "step": 1400
    },
    {
      "epoch": 0.03359763472651525,
      "grad_norm": 34.453399658203125,
      "learning_rate": 9.975648508811693e-05,
      "loss": 0.41,
      "step": 1500
    },
    {
      "epoch": 0.03583747704161627,
      "grad_norm": 33.990074157714844,
      "learning_rate": 9.972048842530012e-05,
      "loss": 0.4097,
      "step": 1600
    },
    {
      "epoch": 0.038077319356717286,
      "grad_norm": 30.44228172302246,
      "learning_rate": 9.968201880923439e-05,
      "loss": 0.4257,
      "step": 1700
    },
    {
      "epoch": 0.040317161671818307,
      "grad_norm": 31.427162170410156,
      "learning_rate": 9.964107815328711e-05,
      "loss": 0.3821,
      "step": 1800
    },
    {
      "epoch": 0.04255700398691932,
      "grad_norm": 25.877887725830078,
      "learning_rate": 9.959766849372808e-05,
      "loss": 0.3788,
      "step": 1900
    },
    {
      "epoch": 0.04479684630202034,
      "grad_norm": 25.36798095703125,
      "learning_rate": 9.955179198962817e-05,
      "loss": 0.3854,
      "step": 2000
    },
    {
      "epoch": 0.04479684630202034,
      "eval_avg_non_pair_similarity": 0.0016289287904792565,
      "eval_avg_pair_similarity": 0.0032495629731565715,
      "eval_loss": 0.4394480586051941,
      "eval_runtime": 19.5064,
      "eval_samples_per_second": 25.633,
      "eval_similarity_ratio": 1.9949079371360972,
      "eval_steps_per_second": 0.82,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 44646,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}