Training in progress, step 200, checkpoint
{
"best_metric": NaN,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.008051448757560814,
"eval_steps": 100,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 4.025724378780407e-05,
"eval_loss": NaN,
"eval_runtime": 1418.5159,
"eval_samples_per_second": 7.374,
"eval_steps_per_second": 3.687,
"step": 1
},
{
"epoch": 0.0004025724378780407,
"grad_norm": NaN,
"learning_rate": 1e-05,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.0008051448757560814,
"grad_norm": NaN,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.001207717313634122,
"grad_norm": NaN,
"learning_rate": 3e-05,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.0016102897515121628,
"grad_norm": NaN,
"learning_rate": 4e-05,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.0020128621893902034,
"grad_norm": NaN,
"learning_rate": 5e-05,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.002415434627268244,
"grad_norm": NaN,
"learning_rate": 6e-05,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.0028180070651462846,
"grad_norm": NaN,
"learning_rate": 7e-05,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.0032205795030243256,
"grad_norm": NaN,
"learning_rate": 8e-05,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.0036231519409023662,
"grad_norm": NaN,
"learning_rate": 9e-05,
"loss": 0.0,
"step": 90
},
{
"epoch": 0.004025724378780407,
"grad_norm": NaN,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.004025724378780407,
"eval_loss": NaN,
"eval_runtime": 1421.3319,
"eval_samples_per_second": 7.359,
"eval_steps_per_second": 3.68,
"step": 100
},
{
"epoch": 0.004428296816658447,
"grad_norm": NaN,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0,
"step": 110
},
{
"epoch": 0.004830869254536488,
"grad_norm": NaN,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0,
"step": 120
},
{
"epoch": 0.005233441692414529,
"grad_norm": NaN,
"learning_rate": 7.938926261462366e-05,
"loss": 0.0,
"step": 130
},
{
"epoch": 0.005636014130292569,
"grad_norm": NaN,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0,
"step": 140
},
{
"epoch": 0.00603858656817061,
"grad_norm": NaN,
"learning_rate": 5e-05,
"loss": 0.0,
"step": 150
},
{
"epoch": 0.006441159006048651,
"grad_norm": NaN,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0,
"step": 160
},
{
"epoch": 0.006843731443926692,
"grad_norm": NaN,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0,
"step": 170
},
{
"epoch": 0.0072463038818047325,
"grad_norm": NaN,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0,
"step": 180
},
{
"epoch": 0.007648876319682773,
"grad_norm": NaN,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.0,
"step": 190
},
{
"epoch": 0.008051448757560814,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 0.0,
"step": 200
},
{
"epoch": 0.008051448757560814,
"eval_loss": NaN,
"eval_runtime": 1419.7108,
"eval_samples_per_second": 7.368,
"eval_steps_per_second": 3.684,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.45128942174208e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}