{
  "best_metric": 0.3207553029060364,
  "best_model_checkpoint": "/content/drive/MyDrive/YoutubeTranscriptQADataset/code/models/xlmr_large/checkpoint/checkpoint-10163",
  "epoch": 2.0,
  "global_step": 20326,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 9.9508019285644e-06,
      "loss": 1.2245,
      "step": 500
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.9016038571288e-06,
      "loss": 0.6119,
      "step": 1000
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.852405785693202e-06,
      "loss": 0.5168,
      "step": 1500
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.803207714257602e-06,
      "loss": 0.4754,
      "step": 2000
    },
    {
      "epoch": 0.25,
      "learning_rate": 9.754009642822002e-06,
      "loss": 0.4414,
      "step": 2500
    },
    {
      "epoch": 0.3,
      "learning_rate": 9.704811571386402e-06,
      "loss": 0.4249,
      "step": 3000
    },
    {
      "epoch": 0.34,
      "learning_rate": 9.655613499950802e-06,
      "loss": 0.4322,
      "step": 3500
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.606415428515204e-06,
      "loss": 0.4398,
      "step": 4000
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.557217357079604e-06,
      "loss": 0.4151,
      "step": 4500
    },
    {
      "epoch": 0.49,
      "learning_rate": 9.508019285644004e-06,
      "loss": 0.4015,
      "step": 5000
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.458821214208404e-06,
      "loss": 0.3581,
      "step": 5500
    },
    {
      "epoch": 0.59,
      "learning_rate": 9.409623142772804e-06,
      "loss": 0.407,
      "step": 6000
    },
    {
      "epoch": 0.64,
      "learning_rate": 9.360425071337204e-06,
      "loss": 0.4068,
      "step": 6500
    },
    {
      "epoch": 0.69,
      "learning_rate": 9.311226999901604e-06,
      "loss": 0.3947,
      "step": 7000
    },
    {
      "epoch": 0.74,
      "learning_rate": 9.262028928466004e-06,
      "loss": 0.3646,
      "step": 7500
    },
    {
      "epoch": 0.79,
      "learning_rate": 9.212830857030404e-06,
      "loss": 0.346,
      "step": 8000
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.163632785594806e-06,
      "loss": 0.3644,
      "step": 8500
    },
    {
      "epoch": 0.89,
      "learning_rate": 9.114434714159206e-06,
      "loss": 0.3436,
      "step": 9000
    },
    {
      "epoch": 0.93,
      "learning_rate": 9.065236642723606e-06,
      "loss": 0.3528,
      "step": 9500
    },
    {
      "epoch": 0.98,
      "learning_rate": 9.016038571288006e-06,
      "loss": 0.3225,
      "step": 10000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.3207553029060364,
      "eval_runtime": 236.2565,
      "eval_samples_per_second": 42.661,
      "eval_steps_per_second": 5.333,
      "step": 10163
    },
    {
      "epoch": 1.03,
      "learning_rate": 8.966840499852406e-06,
      "loss": 0.28,
      "step": 10500
    },
    {
      "epoch": 1.08,
      "learning_rate": 8.917642428416807e-06,
      "loss": 0.2658,
      "step": 11000
    },
    {
      "epoch": 1.13,
      "learning_rate": 8.868444356981207e-06,
      "loss": 0.2642,
      "step": 11500
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.819246285545607e-06,
      "loss": 0.2493,
      "step": 12000
    },
    {
      "epoch": 1.23,
      "learning_rate": 8.770048214110007e-06,
      "loss": 0.2754,
      "step": 12500
    },
    {
      "epoch": 1.28,
      "learning_rate": 8.720850142674407e-06,
      "loss": 0.2482,
      "step": 13000
    },
    {
      "epoch": 1.33,
      "learning_rate": 8.671652071238809e-06,
      "loss": 0.2614,
      "step": 13500
    },
    {
      "epoch": 1.38,
      "learning_rate": 8.622453999803209e-06,
      "loss": 0.2638,
      "step": 14000
    },
    {
      "epoch": 1.43,
      "learning_rate": 8.573255928367609e-06,
      "loss": 0.2586,
      "step": 14500
    },
    {
      "epoch": 1.48,
      "learning_rate": 8.524057856932009e-06,
      "loss": 0.2842,
      "step": 15000
    },
    {
      "epoch": 1.53,
      "learning_rate": 8.474859785496409e-06,
      "loss": 0.246,
      "step": 15500
    },
    {
      "epoch": 1.57,
      "learning_rate": 8.42566171406081e-06,
      "loss": 0.2538,
      "step": 16000
    },
    {
      "epoch": 1.62,
      "learning_rate": 8.37646364262521e-06,
      "loss": 0.2609,
      "step": 16500
    },
    {
      "epoch": 1.67,
      "learning_rate": 8.32726557118961e-06,
      "loss": 0.2317,
      "step": 17000
    },
    {
      "epoch": 1.72,
      "learning_rate": 8.27806749975401e-06,
      "loss": 0.2855,
      "step": 17500
    },
    {
      "epoch": 1.77,
      "learning_rate": 8.22886942831841e-06,
      "loss": 0.2564,
      "step": 18000
    },
    {
      "epoch": 1.82,
      "learning_rate": 8.17967135688281e-06,
      "loss": 0.2693,
      "step": 18500
    },
    {
      "epoch": 1.87,
      "learning_rate": 8.130473285447212e-06,
      "loss": 0.2361,
      "step": 19000
    },
    {
      "epoch": 1.92,
      "learning_rate": 8.081275214011612e-06,
      "loss": 0.2829,
      "step": 19500
    },
    {
      "epoch": 1.97,
      "learning_rate": 8.032077142576012e-06,
      "loss": 0.2411,
      "step": 20000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3713413178920746,
      "eval_runtime": 236.2518,
      "eval_samples_per_second": 42.662,
      "eval_steps_per_second": 5.333,
      "step": 20326
    }
  ],
  "max_steps": 101630,
  "num_train_epochs": 10,
  "total_flos": 1.510077569200128e+17,
  "trial_name": null,
  "trial_params": null
}