{
  "best_global_step": 1376,
  "best_metric": 0.008029412478208542,
  "best_model_checkpoint": "projects/PetBERT_annonymisation/data/case_sensitive/model/checkpoint-1376",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 3440,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_f1": 0.010973861448813302,
      "eval_loss": 2.4063162803649902,
      "eval_precision": 0.09108980806958722,
      "eval_recall": 0.10473207318294019,
      "eval_runtime": 23.7722,
      "eval_samples_per_second": 139.407,
      "eval_steps_per_second": 4.375,
      "step": 0
    },
    {
      "epoch": 0.7267441860465116,
      "grad_norm": 0.2666139304637909,
      "learning_rate": 4.996373546511628e-05,
      "loss": 0.0518,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.9198586422350306,
      "eval_loss": 0.010010140016674995,
      "eval_precision": 0.9136306910527949,
      "eval_recall": 0.9281047546302535,
      "eval_runtime": 24.8489,
      "eval_samples_per_second": 133.366,
      "eval_steps_per_second": 4.185,
      "step": 688
    },
    {
      "epoch": 1.4534883720930232,
      "grad_norm": 0.11741874366998672,
      "learning_rate": 4.992739825581396e-05,
      "loss": 0.0036,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.9300254346144702,
      "eval_loss": 0.008029412478208542,
      "eval_precision": 0.9130114164496201,
      "eval_recall": 0.9489397415434179,
      "eval_runtime": 25.1412,
      "eval_samples_per_second": 131.815,
      "eval_steps_per_second": 4.137,
      "step": 1376
    },
    {
      "epoch": 2.1802325581395348,
      "grad_norm": 0.3377048373222351,
      "learning_rate": 4.989106104651163e-05,
      "loss": 0.0026,
      "step": 1500
    },
    {
      "epoch": 2.9069767441860463,
      "grad_norm": 0.2929118573665619,
      "learning_rate": 4.985472383720931e-05,
      "loss": 0.0011,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.9402795283929368,
      "eval_loss": 0.01124291867017746,
      "eval_precision": 0.9422754585534239,
      "eval_recall": 0.9386206401984958,
      "eval_runtime": 24.8815,
      "eval_samples_per_second": 133.191,
      "eval_steps_per_second": 4.18,
      "step": 2064
    },
    {
      "epoch": 3.633720930232558,
      "grad_norm": 0.002858501160517335,
      "learning_rate": 4.981838662790698e-05,
      "loss": 0.0012,
      "step": 2500
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.9321076846329125,
      "eval_loss": 0.011350538581609726,
      "eval_precision": 0.9585579423470411,
      "eval_recall": 0.9092514508781232,
      "eval_runtime": 24.8844,
      "eval_samples_per_second": 133.176,
      "eval_steps_per_second": 4.179,
      "step": 2752
    },
    {
      "epoch": 4.3604651162790695,
      "grad_norm": 0.3254820704460144,
      "learning_rate": 4.978204941860465e-05,
      "loss": 0.001,
      "step": 3000
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.9375959937426187,
      "eval_loss": 0.012251886539161205,
      "eval_precision": 0.9347887957568951,
      "eval_recall": 0.9407576824824448,
      "eval_runtime": 24.848,
      "eval_samples_per_second": 133.371,
      "eval_steps_per_second": 4.185,
      "step": 3440
    }
  ],
  "logging_steps": 500,
  "max_steps": 688000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1000,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.874236880509952e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}