{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.007590564927794751,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.795282463897376e-05,
      "eval_loss": 1.6482040882110596,
      "eval_runtime": 124.9716,
      "eval_samples_per_second": 88.78,
      "eval_steps_per_second": 44.394,
      "step": 1
    },
    {
      "epoch": 0.00037952824638973757,
      "grad_norm": 7.470829486846924,
      "learning_rate": 0.00019967573081342103,
      "loss": 4.9266,
      "step": 10
    },
    {
      "epoch": 0.0007590564927794751,
      "grad_norm": 5.788771629333496,
      "learning_rate": 0.0001970941817426052,
      "loss": 1.8729,
      "step": 20
    },
    {
      "epoch": 0.0011385847391692126,
      "grad_norm": 6.233973979949951,
      "learning_rate": 0.00019199794436588243,
      "loss": 1.0846,
      "step": 30
    },
    {
      "epoch": 0.0015181129855589503,
      "grad_norm": 5.445612907409668,
      "learning_rate": 0.0001845190085543795,
      "loss": 0.5393,
      "step": 40
    },
    {
      "epoch": 0.0018976412319486877,
      "grad_norm": 5.85573148727417,
      "learning_rate": 0.00017485107481711012,
      "loss": 0.5118,
      "step": 50
    },
    {
      "epoch": 0.0018976412319486877,
      "eval_loss": 0.10395737737417221,
      "eval_runtime": 124.7323,
      "eval_samples_per_second": 88.951,
      "eval_steps_per_second": 44.479,
      "step": 50
    },
    {
      "epoch": 0.002277169478338425,
      "grad_norm": 4.383733749389648,
      "learning_rate": 0.00016324453755953773,
      "loss": 0.4487,
      "step": 60
    },
    {
      "epoch": 0.002656697724728163,
      "grad_norm": 2.5093071460723877,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.306,
      "step": 70
    },
    {
      "epoch": 0.0030362259711179005,
      "grad_norm": 6.711397647857666,
      "learning_rate": 0.00013546048870425356,
      "loss": 0.3747,
      "step": 80
    },
    {
      "epoch": 0.003415754217507638,
      "grad_norm": 2.3839523792266846,
      "learning_rate": 0.00012000256937760445,
      "loss": 0.3563,
      "step": 90
    },
    {
      "epoch": 0.0037952824638973755,
      "grad_norm": 2.5638654232025146,
      "learning_rate": 0.00010402659401094152,
      "loss": 0.3081,
      "step": 100
    },
    {
      "epoch": 0.0037952824638973755,
      "eval_loss": 0.064681276679039,
      "eval_runtime": 125.0521,
      "eval_samples_per_second": 88.723,
      "eval_steps_per_second": 44.365,
      "step": 100
    },
    {
      "epoch": 0.004174810710287113,
      "grad_norm": 5.275169849395752,
      "learning_rate": 8.79463319744677e-05,
      "loss": 0.1927,
      "step": 110
    },
    {
      "epoch": 0.00455433895667685,
      "grad_norm": 3.027852773666382,
      "learning_rate": 7.217825360835473e-05,
      "loss": 0.2492,
      "step": 120
    },
    {
      "epoch": 0.0049338672030665885,
      "grad_norm": 22.369112014770508,
      "learning_rate": 5.713074385969457e-05,
      "loss": 0.3438,
      "step": 130
    },
    {
      "epoch": 0.005313395449456326,
      "grad_norm": 5.542289733886719,
      "learning_rate": 4.3193525326884435e-05,
      "loss": 0.2183,
      "step": 140
    },
    {
      "epoch": 0.005692923695846063,
      "grad_norm": 0.9455445408821106,
      "learning_rate": 3.072756464904006e-05,
      "loss": 0.1863,
      "step": 150
    },
    {
      "epoch": 0.005692923695846063,
      "eval_loss": 0.05306173115968704,
      "eval_runtime": 126.2787,
      "eval_samples_per_second": 87.861,
      "eval_steps_per_second": 43.935,
      "step": 150
    },
    {
      "epoch": 0.006072451942235801,
      "grad_norm": 2.1838719844818115,
      "learning_rate": 2.0055723659649904e-05,
      "loss": 0.1598,
      "step": 160
    },
    {
      "epoch": 0.006451980188625538,
      "grad_norm": 0.6331111788749695,
      "learning_rate": 1.1454397434679021e-05,
      "loss": 0.2209,
      "step": 170
    },
    {
      "epoch": 0.006831508435015276,
      "grad_norm": 2.03953218460083,
      "learning_rate": 5.146355805285452e-06,
      "loss": 0.1126,
      "step": 180
    },
    {
      "epoch": 0.007211036681405014,
      "grad_norm": 3.976135492324829,
      "learning_rate": 1.2949737362087156e-06,
      "loss": 0.2813,
      "step": 190
    },
    {
      "epoch": 0.007590564927794751,
      "grad_norm": 1.570008635520935,
      "learning_rate": 0.0,
      "loss": 0.1904,
      "step": 200
    },
    {
      "epoch": 0.007590564927794751,
      "eval_loss": 0.05086271092295647,
      "eval_runtime": 124.6253,
      "eval_samples_per_second": 89.027,
      "eval_steps_per_second": 44.517,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1505146542489600.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}