xlmr-base-toxicity-classifier / trainer_state.json
{
  "best_metric": 0.2546990215778351,
  "best_model_checkpoint": "xlm-roberta-base-all-finetuned-toxicity-classification/checkpoint-2532",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 7596,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "learning_rate": 1.8683517640863615e-05,
      "loss": 0.2799,
      "step": 500
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.7367035281727228e-05,
      "loss": 0.2876,
      "step": 1000
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.6050552922590838e-05,
      "loss": 0.2671,
      "step": 1500
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.473407056345445e-05,
      "loss": 0.2639,
      "step": 2000
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.3417588204318064e-05,
      "loss": 0.2675,
      "step": 2500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.2546990215778351,
      "eval_runtime": 35.0277,
      "eval_samples_per_second": 128.47,
      "eval_steps_per_second": 8.051,
      "step": 2532
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.2101105845181676e-05,
      "loss": 0.2072,
      "step": 3000
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.0784623486045287e-05,
      "loss": 0.2035,
      "step": 3500
    },
    {
      "epoch": 1.58,
      "learning_rate": 9.4681411269089e-06,
      "loss": 0.1987,
      "step": 4000
    },
    {
      "epoch": 1.78,
      "learning_rate": 8.151658767772512e-06,
      "loss": 0.1938,
      "step": 4500
    },
    {
      "epoch": 1.97,
      "learning_rate": 6.835176408636125e-06,
      "loss": 0.189,
      "step": 5000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.29669827222824097,
      "eval_runtime": 35.0287,
      "eval_samples_per_second": 128.466,
      "eval_steps_per_second": 8.051,
      "step": 5064
    },
    {
      "epoch": 2.17,
      "learning_rate": 5.518694049499738e-06,
      "loss": 0.1412,
      "step": 5500
    },
    {
      "epoch": 2.37,
      "learning_rate": 4.20221169036335e-06,
      "loss": 0.1467,
      "step": 6000
    },
    {
      "epoch": 2.57,
      "learning_rate": 2.885729331226962e-06,
      "loss": 0.1397,
      "step": 6500
    },
    {
      "epoch": 2.76,
      "learning_rate": 1.5692469720905742e-06,
      "loss": 0.1476,
      "step": 7000
    },
    {
      "epoch": 2.96,
      "learning_rate": 2.527646129541864e-07,
      "loss": 0.1383,
      "step": 7500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.4095066785812378,
      "eval_runtime": 35.0202,
      "eval_samples_per_second": 128.497,
      "eval_steps_per_second": 8.053,
      "step": 7596
    }
  ],
  "logging_steps": 500,
  "max_steps": 7596,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3.196799322624e+16,
  "trial_name": null,
  "trial_params": null
}
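
The state above can be inspected programmatically. The sketch below is a minimal example, assuming the file has been downloaded locally as trainer_state.json; it loads the state with Python's standard json module and prints the recorded best checkpoint together with the per-epoch evaluation losses.

import json

# Minimal sketch: summarize the Trainer state shown above.
# Assumption: the JSON has been saved locally as "trainer_state.json".
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

print("best eval_loss: ", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# log_history mixes training entries (keyed by "loss") and evaluation
# entries (keyed by "eval_loss"); split them to read the learning curve.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("final training loss:", train_logs[-1]["loss"])
for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_loss = {e['eval_loss']:.4f}")

Note that the evaluation loss rises after the first epoch (0.2547 → 0.2967 → 0.4095) while the training loss keeps falling, which is why checkpoint-2532, written at the end of epoch 1, is recorded as best_model_checkpoint.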