{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.16039296275875897,
  "eval_steps": 500,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010024560172422436,
      "grad_norm": 0.7648569941520691,
      "learning_rate": 2.9699260180042907e-05,
      "loss": 0.0563,
      "step": 500
    },
    {
      "epoch": 0.02004912034484487,
      "grad_norm": 0.5078855752944946,
      "learning_rate": 2.939852036008581e-05,
      "loss": 0.051,
      "step": 1000
    },
    {
      "epoch": 0.030073680517267304,
      "grad_norm": 0.6789458394050598,
      "learning_rate": 2.909778054012872e-05,
      "loss": 0.0481,
      "step": 1500
    },
    {
      "epoch": 0.04009824068968974,
      "grad_norm": 0.561828076839447,
      "learning_rate": 2.879704072017162e-05,
      "loss": 0.0475,
      "step": 2000
    },
    {
      "epoch": 0.050122800862112175,
      "grad_norm": 0.7291754484176636,
      "learning_rate": 2.8496300900214528e-05,
      "loss": 0.0461,
      "step": 2500
    },
    {
      "epoch": 0.06014736103453461,
      "grad_norm": 0.5085094571113586,
      "learning_rate": 2.8195561080257434e-05,
      "loss": 0.0447,
      "step": 3000
    },
    {
      "epoch": 0.07017192120695705,
      "grad_norm": 0.5000583529472351,
      "learning_rate": 2.789482126030034e-05,
      "loss": 0.0438,
      "step": 3500
    },
    {
      "epoch": 0.08019648137937949,
      "grad_norm": 0.36143258213996887,
      "learning_rate": 2.7594081440343246e-05,
      "loss": 0.0427,
      "step": 4000
    },
    {
      "epoch": 0.09022104155180191,
      "grad_norm": 0.6115249991416931,
      "learning_rate": 2.7293341620386152e-05,
      "loss": 0.0422,
      "step": 4500
    },
    {
      "epoch": 0.10024560172422435,
      "grad_norm": 0.28902167081832886,
      "learning_rate": 2.6992601800429055e-05,
      "loss": 0.0418,
      "step": 5000
    },
    {
      "epoch": 0.11027016189664679,
      "grad_norm": 0.3481534421443939,
      "learning_rate": 2.669186198047196e-05,
      "loss": 0.0413,
      "step": 5500
    },
    {
      "epoch": 0.12029472206906922,
      "grad_norm": 0.4140373468399048,
      "learning_rate": 2.6391122160514867e-05,
      "loss": 0.0403,
      "step": 6000
    },
    {
      "epoch": 0.13031928224149164,
      "grad_norm": 0.3342398703098297,
      "learning_rate": 2.6090382340557773e-05,
      "loss": 0.0395,
      "step": 6500
    },
    {
      "epoch": 0.1403438424139141,
      "grad_norm": 0.32196730375289917,
      "learning_rate": 2.578964252060068e-05,
      "loss": 0.039,
      "step": 7000
    },
    {
      "epoch": 0.15036840258633652,
      "grad_norm": 0.3885316252708435,
      "learning_rate": 2.548890270064358e-05,
      "loss": 0.0381,
      "step": 7500
    },
    {
      "epoch": 0.16039296275875897,
      "grad_norm": 0.37349772453308105,
      "learning_rate": 2.518816288068649e-05,
      "loss": 0.0378,
      "step": 8000
    }
  ],
  "logging_steps": 500,
  "max_steps": 49877,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}