{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968186638388123,
  "eval_steps": 500,
  "global_step": 235,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.042417815482502653,
      "grad_norm": 0.8155243915035402,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.6415,
      "step": 10
    },
    {
      "epoch": 0.08483563096500531,
      "grad_norm": 0.3724018495341306,
      "learning_rate": 9.975707393083328e-06,
      "loss": 0.4089,
      "step": 20
    },
    {
      "epoch": 0.12725344644750794,
      "grad_norm": 0.28463462503464637,
      "learning_rate": 9.85729110226596e-06,
      "loss": 0.3616,
      "step": 30
    },
    {
      "epoch": 0.16967126193001061,
      "grad_norm": 0.297538920144775,
      "learning_rate": 9.642632564563576e-06,
      "loss": 0.3331,
      "step": 40
    },
    {
      "epoch": 0.21208907741251326,
      "grad_norm": 0.2991153079080703,
      "learning_rate": 9.33598501898256e-06,
      "loss": 0.3282,
      "step": 50
    },
    {
      "epoch": 0.2545068928950159,
      "grad_norm": 0.2618563036667496,
      "learning_rate": 8.943424372477455e-06,
      "loss": 0.305,
      "step": 60
    },
    {
      "epoch": 0.29692470837751855,
      "grad_norm": 0.2945290407529977,
      "learning_rate": 8.472728812079436e-06,
      "loss": 0.3293,
      "step": 70
    },
    {
      "epoch": 0.33934252386002123,
      "grad_norm": 0.2686931795972466,
      "learning_rate": 7.933224688089059e-06,
      "loss": 0.3093,
      "step": 80
    },
    {
      "epoch": 0.38176033934252385,
      "grad_norm": 0.24826628745526108,
      "learning_rate": 7.3356017219999236e-06,
      "loss": 0.2981,
      "step": 90
    },
    {
      "epoch": 0.4241781548250265,
      "grad_norm": 0.2770179971302181,
      "learning_rate": 6.691701200618925e-06,
      "loss": 0.2925,
      "step": 100
    },
    {
      "epoch": 0.46659597030752914,
      "grad_norm": 0.2581961736174019,
      "learning_rate": 6.014281353099601e-06,
      "loss": 0.2999,
      "step": 110
    },
    {
      "epoch": 0.5090137857900318,
      "grad_norm": 0.2686100147543252,
      "learning_rate": 5.3167645597022855e-06,
      "loss": 0.3111,
      "step": 120
    },
    {
      "epoch": 0.5514316012725344,
      "grad_norm": 1.1286877967357856,
      "learning_rate": 4.612971401080521e-06,
      "loss": 0.2747,
      "step": 130
    },
    {
      "epoch": 0.5938494167550371,
      "grad_norm": 0.27687544341997167,
      "learning_rate": 3.916846817634618e-06,
      "loss": 0.2929,
      "step": 140
    },
    {
      "epoch": 0.6362672322375398,
      "grad_norm": 0.27713605473267083,
      "learning_rate": 3.2421838048042516e-06,
      "loss": 0.291,
      "step": 150
    },
    {
      "epoch": 0.6786850477200425,
      "grad_norm": 0.2658744274079011,
      "learning_rate": 2.602350118994782e-06,
      "loss": 0.2846,
      "step": 160
    },
    {
      "epoch": 0.721102863202545,
      "grad_norm": 0.2771317958447051,
      "learning_rate": 2.0100234091793778e-06,
      "loss": 0.2883,
      "step": 170
    },
    {
      "epoch": 0.7635206786850477,
      "grad_norm": 0.26274869656822974,
      "learning_rate": 1.4769400222728974e-06,
      "loss": 0.2904,
      "step": 180
    },
    {
      "epoch": 0.8059384941675504,
      "grad_norm": 0.25909723594124506,
      "learning_rate": 1.0136624594416828e-06,
      "loss": 0.2808,
      "step": 190
    },
    {
      "epoch": 0.848356309650053,
      "grad_norm": 0.2815262998286053,
      "learning_rate": 6.29370090964262e-07,
      "loss": 0.2844,
      "step": 200
    },
    {
      "epoch": 0.8907741251325557,
      "grad_norm": 0.27545396785421056,
      "learning_rate": 3.3167727641356064e-07,
      "loss": 0.273,
      "step": 210
    },
    {
      "epoch": 0.9331919406150583,
      "grad_norm": 0.27726375004859716,
      "learning_rate": 1.2648249392289925e-07,
      "loss": 0.2713,
      "step": 220
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 0.26279284026638045,
      "learning_rate": 1.785146788478298e-08,
      "loss": 0.2891,
      "step": 230
    },
    {
      "epoch": 0.9968186638388123,
      "step": 235,
      "total_flos": 412229836996608.0,
      "train_loss": 0.3199658251823263,
      "train_runtime": 20135.6606,
      "train_samples_per_second": 1.498,
      "train_steps_per_second": 0.012
    }
  ],
  "logging_steps": 10,
  "max_steps": 235,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 412229836996608.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}