{
"best_metric": 2.953709840774536,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.01811922449719152,
"eval_steps": 100,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 9.05961224859576e-05,
"eval_loss": 5.916714191436768,
"eval_runtime": 778.909,
"eval_samples_per_second": 5.967,
"eval_steps_per_second": 2.984,
"step": 1
},
{
"epoch": 0.000905961224859576,
"grad_norm": 7.96359395980835,
"learning_rate": 1e-05,
"loss": 5.5292,
"step": 10
},
{
"epoch": 0.001811922449719152,
"grad_norm": 7.275904655456543,
"learning_rate": 2e-05,
"loss": 4.9062,
"step": 20
},
{
"epoch": 0.002717883674578728,
"grad_norm": 6.696507453918457,
"learning_rate": 3e-05,
"loss": 3.6854,
"step": 30
},
{
"epoch": 0.003623844899438304,
"grad_norm": 8.084734916687012,
"learning_rate": 4e-05,
"loss": 3.3666,
"step": 40
},
{
"epoch": 0.00452980612429788,
"grad_norm": 5.354772090911865,
"learning_rate": 5e-05,
"loss": 3.0034,
"step": 50
},
{
"epoch": 0.005435767349157456,
"grad_norm": 5.084025859832764,
"learning_rate": 6e-05,
"loss": 3.0904,
"step": 60
},
{
"epoch": 0.006341728574017032,
"grad_norm": 6.143799781799316,
"learning_rate": 7e-05,
"loss": 3.0085,
"step": 70
},
{
"epoch": 0.007247689798876608,
"grad_norm": 4.883930206298828,
"learning_rate": 8e-05,
"loss": 3.0715,
"step": 80
},
{
"epoch": 0.008153651023736184,
"grad_norm": 5.013826847076416,
"learning_rate": 9e-05,
"loss": 2.8267,
"step": 90
},
{
"epoch": 0.00905961224859576,
"grad_norm": 4.358269214630127,
"learning_rate": 0.0001,
"loss": 3.1339,
"step": 100
},
{
"epoch": 0.00905961224859576,
"eval_loss": 3.037921667098999,
"eval_runtime": 785.1635,
"eval_samples_per_second": 5.92,
"eval_steps_per_second": 2.96,
"step": 100
},
{
"epoch": 0.009965573473455336,
"grad_norm": 4.184726238250732,
"learning_rate": 9.755282581475769e-05,
"loss": 3.1687,
"step": 110
},
{
"epoch": 0.010871534698314912,
"grad_norm": 5.467559814453125,
"learning_rate": 9.045084971874738e-05,
"loss": 3.0523,
"step": 120
},
{
"epoch": 0.011777495923174489,
"grad_norm": 4.539832592010498,
"learning_rate": 7.938926261462366e-05,
"loss": 2.9123,
"step": 130
},
{
"epoch": 0.012683457148034065,
"grad_norm": 6.541698932647705,
"learning_rate": 6.545084971874738e-05,
"loss": 3.0405,
"step": 140
},
{
"epoch": 0.01358941837289364,
"grad_norm": 4.431535243988037,
"learning_rate": 5e-05,
"loss": 3.1148,
"step": 150
},
{
"epoch": 0.014495379597753216,
"grad_norm": 4.524734020233154,
"learning_rate": 3.4549150281252636e-05,
"loss": 3.0427,
"step": 160
},
{
"epoch": 0.015401340822612792,
"grad_norm": 4.3518385887146,
"learning_rate": 2.061073738537635e-05,
"loss": 2.9297,
"step": 170
},
{
"epoch": 0.01630730204747237,
"grad_norm": 3.8135385513305664,
"learning_rate": 9.549150281252633e-06,
"loss": 2.9592,
"step": 180
},
{
"epoch": 0.017213263272331945,
"grad_norm": 3.840271472930908,
"learning_rate": 2.4471741852423237e-06,
"loss": 2.7632,
"step": 190
},
{
"epoch": 0.01811922449719152,
"grad_norm": 4.834849834442139,
"learning_rate": 0.0,
"loss": 2.9525,
"step": 200
},
{
"epoch": 0.01811922449719152,
"eval_loss": 2.953709840774536,
"eval_runtime": 784.593,
"eval_samples_per_second": 5.924,
"eval_steps_per_second": 2.962,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.541498927579136e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}