ShieldAgent / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 448,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 17.865875244140625,
      "learning_rate": 1.9910714285714287e-05,
      "loss": 1.0838,
      "step": 20
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 3.3213462829589844,
      "learning_rate": 1.9151785714285714e-05,
      "loss": 0.7877,
      "step": 40
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 2.8315253257751465,
      "learning_rate": 1.8258928571428574e-05,
      "loss": 0.6396,
      "step": 60
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 2.6161699295043945,
      "learning_rate": 1.736607142857143e-05,
      "loss": 0.5952,
      "step": 80
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 2.6655831336975098,
      "learning_rate": 1.6473214285714288e-05,
      "loss": 0.5743,
      "step": 100
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 2.081516981124878,
      "learning_rate": 1.5580357142857145e-05,
      "loss": 0.4802,
      "step": 120
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.047880172729492,
      "learning_rate": 1.4687500000000001e-05,
      "loss": 0.3406,
      "step": 140
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 1.9429481029510498,
      "learning_rate": 1.379464285714286e-05,
      "loss": 0.347,
      "step": 160
    },
    {
      "epoch": 1.6071428571428572,
      "grad_norm": 2.0169007778167725,
      "learning_rate": 1.2901785714285715e-05,
      "loss": 0.333,
      "step": 180
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 1.9162710905075073,
      "learning_rate": 1.2008928571428573e-05,
      "loss": 0.3454,
      "step": 200
    },
    {
      "epoch": 1.9642857142857144,
      "grad_norm": 1.9847056865692139,
      "learning_rate": 1.1116071428571428e-05,
      "loss": 0.3392,
      "step": 220
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 1.6575144529342651,
      "learning_rate": 1.0223214285714287e-05,
      "loss": 0.1765,
      "step": 240
    },
    {
      "epoch": 2.3214285714285716,
      "grad_norm": 1.6254658699035645,
      "learning_rate": 9.330357142857143e-06,
      "loss": 0.1455,
      "step": 260
    },
    {
      "epoch": 2.5,
      "grad_norm": 1.5523484945297241,
      "learning_rate": 8.4375e-06,
      "loss": 0.1407,
      "step": 280
    },
    {
      "epoch": 2.678571428571429,
      "grad_norm": 1.5673795938491821,
      "learning_rate": 7.544642857142858e-06,
      "loss": 0.1397,
      "step": 300
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 1.509041428565979,
      "learning_rate": 6.6517857142857144e-06,
      "loss": 0.1371,
      "step": 320
    },
    {
      "epoch": 3.0357142857142856,
      "grad_norm": 1.0347487926483154,
      "learning_rate": 5.758928571428571e-06,
      "loss": 0.1147,
      "step": 340
    },
    {
      "epoch": 3.2142857142857144,
      "grad_norm": 1.1131216287612915,
      "learning_rate": 4.866071428571429e-06,
      "loss": 0.0511,
      "step": 360
    },
    {
      "epoch": 3.392857142857143,
      "grad_norm": 1.2071995735168457,
      "learning_rate": 3.9732142857142855e-06,
      "loss": 0.0523,
      "step": 380
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 1.0390278100967407,
      "learning_rate": 3.080357142857143e-06,
      "loss": 0.05,
      "step": 400
    },
    {
      "epoch": 3.75,
      "grad_norm": 0.982015073299408,
      "learning_rate": 2.1875000000000002e-06,
      "loss": 0.0473,
      "step": 420
    },
    {
      "epoch": 3.928571428571429,
      "grad_norm": 0.9728031158447266,
      "learning_rate": 1.2946428571428574e-06,
      "loss": 0.0437,
      "step": 440
    }
  ],
  "logging_steps": 20,
  "max_steps": 448,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 76855084482560.0,
  "train_batch_size": 12,
  "trial_name": null,
  "trial_params": null
}
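The state above can be inspected programmatically with standard tooling. Below is a minimal Python sketch, assuming the file has been downloaded locally as `trainer_state.json` (the path is illustrative, not part of the original upload); it loads the Trainer state and prints the logged loss, learning-rate, and grad-norm trajectory:

```python
import json

# Path is an assumption for illustration; point it at the trainer_state.json
# inside the downloaded ShieldAgent checkpoint folder.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  "
      f"num_train_epochs={state['num_train_epochs']}  "
      f"train_batch_size={state['train_batch_size']}")

# One logged row every `logging_steps` (= 20) optimizer steps.
print(f"{'step':>6} {'epoch':>8} {'loss':>8} {'learning_rate':>14} {'grad_norm':>10}")
for entry in state["log_history"]:
    if "loss" in entry:  # training rows; eval rows (if any) use eval_* keys
        print(f"{entry['step']:>6} {entry['epoch']:>8.3f} {entry['loss']:>8.4f} "
              f"{entry['learning_rate']:>14.3e} {entry['grad_norm']:>10.3f}")
```

Read this way, the log shows the training loss falling from 1.0838 at step 20 to 0.0437 at step 440 while the learning rate decays from roughly 2e-05 toward zero over the 448 steps; no evaluation entries appear in log_history, consistent with eval_steps (500) exceeding max_steps (448).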