{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9904761904761905,
  "eval_steps": 500,
  "global_step": 195,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0761904761904762,
      "grad_norm": 0.26280027627944946,
      "learning_rate": 4.9918932703355256e-05,
      "loss": 0.315,
      "num_input_tokens_seen": 211280,
      "step": 5
    },
    {
      "epoch": 0.1523809523809524,
      "grad_norm": 0.22838161885738373,
      "learning_rate": 4.967625656594782e-05,
      "loss": 0.241,
      "num_input_tokens_seen": 419136,
      "step": 10
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 0.22524158656597137,
      "learning_rate": 4.92735454356513e-05,
      "loss": 0.1709,
      "num_input_tokens_seen": 631632,
      "step": 15
    },
    {
      "epoch": 0.3047619047619048,
      "grad_norm": 0.14552432298660278,
      "learning_rate": 4.8713411048678635e-05,
      "loss": 0.1225,
      "num_input_tokens_seen": 836896,
      "step": 20
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.12247142195701599,
      "learning_rate": 4.799948609147061e-05,
      "loss": 0.0945,
      "num_input_tokens_seen": 1042416,
      "step": 25
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.12574797868728638,
      "learning_rate": 4.713640064133025e-05,
      "loss": 0.0871,
      "num_input_tokens_seen": 1244384,
      "step": 30
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.09612642973661423,
      "learning_rate": 4.6129752138594874e-05,
      "loss": 0.0634,
      "num_input_tokens_seen": 1445168,
      "step": 35
    },
    {
      "epoch": 0.6095238095238096,
      "grad_norm": 0.10900659114122391,
      "learning_rate": 4.498606908508754e-05,
      "loss": 0.0783,
      "num_input_tokens_seen": 1651760,
      "step": 40
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 0.12594197690486908,
      "learning_rate": 4.371276870427753e-05,
      "loss": 0.0632,
      "num_input_tokens_seen": 1849920,
      "step": 45
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 0.12295418232679367,
      "learning_rate": 4.231810883773999e-05,
      "loss": 0.0713,
      "num_input_tokens_seen": 2056592,
      "step": 50
    },
    {
      "epoch": 0.8380952380952381,
      "grad_norm": 0.07784571498632431,
      "learning_rate": 4.0811134389884433e-05,
      "loss": 0.0589,
      "num_input_tokens_seen": 2265056,
      "step": 55
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 0.10686610639095306,
      "learning_rate": 3.920161866827889e-05,
      "loss": 0.0635,
      "num_input_tokens_seen": 2471328,
      "step": 60
    },
    {
      "epoch": 0.9904761904761905,
      "grad_norm": 0.13862575590610504,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0583,
      "num_input_tokens_seen": 2676640,
      "step": 65
    },
    {
      "epoch": 1.0761904761904761,
      "grad_norm": 0.11633503437042236,
      "learning_rate": 3.5717314035076355e-05,
      "loss": 0.0686,
      "num_input_tokens_seen": 2897712,
      "step": 70
    },
    {
      "epoch": 1.1523809523809523,
      "grad_norm": 0.10034333169460297,
      "learning_rate": 3.386512217606339e-05,
      "loss": 0.0538,
      "num_input_tokens_seen": 3089808,
      "step": 75
    },
    {
      "epoch": 1.2285714285714286,
      "grad_norm": 0.10412129759788513,
      "learning_rate": 3.195543659791132e-05,
      "loss": 0.0546,
      "num_input_tokens_seen": 3296576,
      "step": 80
    },
    {
      "epoch": 1.3047619047619048,
      "grad_norm": 0.10257629305124283,
      "learning_rate": 3.0000642344401113e-05,
      "loss": 0.0447,
      "num_input_tokens_seen": 3508416,
      "step": 85
    },
    {
      "epoch": 1.380952380952381,
      "grad_norm": 0.14629635214805603,
      "learning_rate": 2.8013417006383076e-05,
      "loss": 0.0536,
      "num_input_tokens_seen": 3706512,
      "step": 90
    },
    {
      "epoch": 1.457142857142857,
      "grad_norm": 0.0958041399717331,
      "learning_rate": 2.600664850273538e-05,
      "loss": 0.0489,
      "num_input_tokens_seen": 3913120,
      "step": 95
    },
    {
      "epoch": 1.5333333333333332,
      "grad_norm": 0.11812577396631241,
      "learning_rate": 2.399335149726463e-05,
      "loss": 0.0441,
      "num_input_tokens_seen": 4121248,
      "step": 100
    },
    {
      "epoch": 1.6095238095238096,
      "grad_norm": 0.11522660404443741,
      "learning_rate": 2.1986582993616926e-05,
      "loss": 0.0479,
      "num_input_tokens_seen": 4327824,
      "step": 105
    },
    {
      "epoch": 1.6857142857142857,
      "grad_norm": 0.1051577553153038,
      "learning_rate": 1.9999357655598893e-05,
      "loss": 0.0441,
      "num_input_tokens_seen": 4537936,
      "step": 110
    },
    {
      "epoch": 1.7619047619047619,
      "grad_norm": 0.14891363680362701,
      "learning_rate": 1.8044563402088684e-05,
      "loss": 0.043,
      "num_input_tokens_seen": 4744432,
      "step": 115
    },
    {
      "epoch": 1.8380952380952382,
      "grad_norm": 0.08558758348226547,
      "learning_rate": 1.613487782393661e-05,
      "loss": 0.0425,
      "num_input_tokens_seen": 4955296,
      "step": 120
    },
    {
      "epoch": 1.9142857142857141,
      "grad_norm": 0.1266234815120697,
      "learning_rate": 1.4282685964923642e-05,
      "loss": 0.0437,
      "num_input_tokens_seen": 5154784,
      "step": 125
    },
    {
      "epoch": 1.9904761904761905,
      "grad_norm": 0.09191666543483734,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.0391,
      "num_input_tokens_seen": 5358160,
      "step": 130
    },
    {
      "epoch": 2.0761904761904764,
      "grad_norm": 0.08223439753055573,
      "learning_rate": 1.0798381331721109e-05,
      "loss": 0.0466,
      "num_input_tokens_seen": 5577808,
      "step": 135
    },
    {
      "epoch": 2.1523809523809523,
      "grad_norm": 0.08920733630657196,
      "learning_rate": 9.18886561011557e-06,
      "loss": 0.0452,
      "num_input_tokens_seen": 5775552,
      "step": 140
    },
    {
      "epoch": 2.2285714285714286,
      "grad_norm": 0.08723399043083191,
      "learning_rate": 7.681891162260015e-06,
      "loss": 0.0386,
      "num_input_tokens_seen": 5986112,
      "step": 145
    },
    {
      "epoch": 2.3047619047619046,
      "grad_norm": 0.09175682067871094,
      "learning_rate": 6.28723129572247e-06,
      "loss": 0.0507,
      "num_input_tokens_seen": 6185568,
      "step": 150
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.13733746111392975,
      "learning_rate": 5.013930914912476e-06,
      "loss": 0.0471,
      "num_input_tokens_seen": 6389392,
      "step": 155
    },
    {
      "epoch": 2.4571428571428573,
      "grad_norm": 0.09620795398950577,
      "learning_rate": 3.8702478614051355e-06,
      "loss": 0.0384,
      "num_input_tokens_seen": 6594256,
      "step": 160
    },
    {
      "epoch": 2.533333333333333,
      "grad_norm": 0.0873759537935257,
      "learning_rate": 2.8635993586697553e-06,
      "loss": 0.036,
      "num_input_tokens_seen": 6802240,
      "step": 165
    },
    {
      "epoch": 2.6095238095238096,
      "grad_norm": 0.10035382211208344,
      "learning_rate": 2.0005139085293945e-06,
      "loss": 0.0383,
      "num_input_tokens_seen": 7004816,
      "step": 170
    },
    {
      "epoch": 2.685714285714286,
      "grad_norm": 0.1140541359782219,
      "learning_rate": 1.286588951321363e-06,
      "loss": 0.0416,
      "num_input_tokens_seen": 7208608,
      "step": 175
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.09188826382160187,
      "learning_rate": 7.264545643486997e-07,
      "loss": 0.04,
      "num_input_tokens_seen": 7418320,
      "step": 180
    },
    {
      "epoch": 2.8380952380952382,
      "grad_norm": 0.10470282286405563,
      "learning_rate": 3.237434340521789e-07,
      "loss": 0.0423,
      "num_input_tokens_seen": 7633248,
      "step": 185
    },
    {
      "epoch": 2.914285714285714,
      "grad_norm": 0.09629818797111511,
      "learning_rate": 8.106729664475176e-08,
      "loss": 0.0379,
      "num_input_tokens_seen": 7838576,
      "step": 190
    },
    {
      "epoch": 2.9904761904761905,
      "grad_norm": 0.10630781948566437,
      "learning_rate": 0.0,
      "loss": 0.047,
      "num_input_tokens_seen": 8039616,
      "step": 195
    },
    {
      "epoch": 2.9904761904761905,
      "num_input_tokens_seen": 8039616,
      "step": 195,
      "total_flos": 3.746792787278561e+17,
      "train_loss": 0.068369100185541,
      "train_runtime": 4403.2508,
      "train_samples_per_second": 0.715,
      "train_steps_per_second": 0.044
    }
  ],
  "logging_steps": 5,
  "max_steps": 195,
  "num_input_tokens_seen": 8039616,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.746792787278561e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}