{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.980039920159681,
  "eval_steps": 500,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.249500998003992,
      "grad_norm": 2.452378511428833,
      "learning_rate": 4.876746506986028e-05,
      "loss": 2.1274,
      "step": 500
    },
    {
      "epoch": 0.499001996007984,
      "grad_norm": 10.962518692016602,
      "learning_rate": 4.751996007984032e-05,
      "loss": 1.3207,
      "step": 1000
    },
    {
      "epoch": 0.7485029940119761,
      "grad_norm": 2.4429094791412354,
      "learning_rate": 4.627245508982036e-05,
      "loss": 1.138,
      "step": 1500
    },
    {
      "epoch": 0.998003992015968,
      "grad_norm": 2.2755186557769775,
      "learning_rate": 4.50249500998004e-05,
      "loss": 1.0141,
      "step": 2000
    },
    {
      "epoch": 1.24750499001996,
      "grad_norm": 1.8802367448806763,
      "learning_rate": 4.377744510978044e-05,
      "loss": 0.8877,
      "step": 2500
    },
    {
      "epoch": 1.4970059880239521,
      "grad_norm": 3.98767352104187,
      "learning_rate": 4.252994011976048e-05,
      "loss": 0.8705,
      "step": 3000
    },
    {
      "epoch": 1.746506986027944,
      "grad_norm": 1.9973433017730713,
      "learning_rate": 4.128243512974052e-05,
      "loss": 0.826,
      "step": 3500
    },
    {
      "epoch": 1.996007984031936,
      "grad_norm": 2.2026467323303223,
      "learning_rate": 4.003493013972056e-05,
      "loss": 0.8115,
      "step": 4000
    },
    {
      "epoch": 2.245508982035928,
      "grad_norm": 2.212198257446289,
      "learning_rate": 3.87874251497006e-05,
      "loss": 0.6751,
      "step": 4500
    },
    {
      "epoch": 2.49500998003992,
      "grad_norm": 1.5753542184829712,
      "learning_rate": 3.754241516966068e-05,
      "loss": 0.6747,
      "step": 5000
    },
    {
      "epoch": 2.744510978043912,
      "grad_norm": 2.395479202270508,
      "learning_rate": 3.629491017964072e-05,
      "loss": 0.6646,
      "step": 5500
    },
    {
      "epoch": 2.9940119760479043,
      "grad_norm": 2.577756643295288,
      "learning_rate": 3.504740518962076e-05,
      "loss": 0.66,
      "step": 6000
    },
    {
      "epoch": 3.243512974051896,
      "grad_norm": 2.7224512100219727,
      "learning_rate": 3.3799900199600795e-05,
      "loss": 0.5135,
      "step": 6500
    },
    {
      "epoch": 3.493013972055888,
      "grad_norm": 2.3300223350524902,
      "learning_rate": 3.255239520958084e-05,
      "loss": 0.5112,
      "step": 7000
    },
    {
      "epoch": 3.7425149700598803,
      "grad_norm": 2.3015055656433105,
      "learning_rate": 3.130489021956088e-05,
      "loss": 0.5119,
      "step": 7500
    },
    {
      "epoch": 3.992015968063872,
      "grad_norm": 2.4661030769348145,
      "learning_rate": 3.0057385229540918e-05,
      "loss": 0.5107,
      "step": 8000
    },
    {
      "epoch": 4.241516966067865,
      "grad_norm": 2.5810928344726562,
      "learning_rate": 2.880988023952096e-05,
      "loss": 0.3741,
      "step": 8500
    },
    {
      "epoch": 4.491017964071856,
      "grad_norm": 1.8933159112930298,
      "learning_rate": 2.7562375249501e-05,
      "loss": 0.3756,
      "step": 9000
    },
    {
      "epoch": 4.740518962075848,
      "grad_norm": 2.711456775665283,
      "learning_rate": 2.6314870259481038e-05,
      "loss": 0.3745,
      "step": 9500
    },
    {
      "epoch": 4.99001996007984,
      "grad_norm": 2.8451802730560303,
      "learning_rate": 2.506736526946108e-05,
      "loss": 0.3743,
      "step": 10000
    },
    {
      "epoch": 5.2395209580838324,
      "grad_norm": 2.1212046146392822,
      "learning_rate": 2.381986027944112e-05,
      "loss": 0.2603,
      "step": 10500
    },
    {
      "epoch": 5.489021956087824,
      "grad_norm": 2.1375863552093506,
      "learning_rate": 2.257235528942116e-05,
      "loss": 0.2567,
      "step": 11000
    },
    {
      "epoch": 5.738522954091817,
      "grad_norm": 3.2050704956054688,
      "learning_rate": 2.1324850299401196e-05,
      "loss": 0.2596,
      "step": 11500
    },
    {
      "epoch": 5.9880239520958085,
      "grad_norm": 1.9162492752075195,
      "learning_rate": 2.007734530938124e-05,
      "loss": 0.2692,
      "step": 12000
    },
    {
      "epoch": 6.2375249500998,
      "grad_norm": 1.6697739362716675,
      "learning_rate": 1.8829840319361277e-05,
      "loss": 0.1711,
      "step": 12500
    },
    {
      "epoch": 6.487025948103792,
      "grad_norm": 2.253767728805542,
      "learning_rate": 1.758233532934132e-05,
      "loss": 0.1705,
      "step": 13000
    },
    {
      "epoch": 6.736526946107785,
      "grad_norm": 1.5864336490631104,
      "learning_rate": 1.6337325349301397e-05,
      "loss": 0.1735,
      "step": 13500
    },
    {
      "epoch": 6.986027944111776,
      "grad_norm": 2.6011617183685303,
      "learning_rate": 1.5089820359281437e-05,
      "loss": 0.1785,
      "step": 14000
    },
    {
      "epoch": 7.235528942115769,
      "grad_norm": 1.8357607126235962,
      "learning_rate": 1.3842315369261476e-05,
      "loss": 0.1101,
      "step": 14500
    },
    {
      "epoch": 7.485029940119761,
      "grad_norm": 2.048788547515869,
      "learning_rate": 1.2594810379241517e-05,
      "loss": 0.1152,
      "step": 15000
    },
    {
      "epoch": 7.734530938123752,
      "grad_norm": 2.7625575065612793,
      "learning_rate": 1.1347305389221557e-05,
      "loss": 0.1126,
      "step": 15500
    },
    {
      "epoch": 7.984031936127744,
      "grad_norm": 2.9754233360290527,
      "learning_rate": 1.0099800399201598e-05,
      "loss": 0.1116,
      "step": 16000
    },
    {
      "epoch": 8.233532934131736,
      "grad_norm": 2.279146909713745,
      "learning_rate": 8.852295409181637e-06,
      "loss": 0.0743,
      "step": 16500
    },
    {
      "epoch": 8.48303393213573,
      "grad_norm": 2.4354634284973145,
      "learning_rate": 7.604790419161677e-06,
      "loss": 0.074,
      "step": 17000
    },
    {
      "epoch": 8.732534930139721,
      "grad_norm": 2.9256038665771484,
      "learning_rate": 6.357285429141717e-06,
      "loss": 0.0722,
      "step": 17500
    },
    {
      "epoch": 8.982035928143713,
      "grad_norm": 1.8734424114227295,
      "learning_rate": 5.109780439121756e-06,
      "loss": 0.0703,
      "step": 18000
    },
    {
      "epoch": 9.231536926147704,
      "grad_norm": 2.1415419578552246,
      "learning_rate": 3.862275449101797e-06,
      "loss": 0.0503,
      "step": 18500
    },
    {
      "epoch": 9.481037924151696,
      "grad_norm": 2.486562490463257,
      "learning_rate": 2.6172654690618765e-06,
      "loss": 0.0497,
      "step": 19000
    },
    {
      "epoch": 9.730538922155688,
      "grad_norm": 1.6438294649124146,
      "learning_rate": 1.3697604790419162e-06,
      "loss": 0.0467,
      "step": 19500
    },
    {
      "epoch": 9.980039920159681,
      "grad_norm": 2.3399159908294678,
      "learning_rate": 1.222554890219561e-07,
      "loss": 0.0457,
      "step": 20000
    }
  ],
  "logging_steps": 500,
  "max_steps": 20040,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.167129767936e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}