Training in progress, epoch 7, checkpoint
- last-checkpoint/model.safetensors +1 -1
- last-checkpoint/optimizer.pt +1 -1
- last-checkpoint/rng_state_0.pth +1 -1
- last-checkpoint/rng_state_1.pth +1 -1
- last-checkpoint/rng_state_2.pth +1 -1
- last-checkpoint/rng_state_3.pth +1 -1
- last-checkpoint/rng_state_4.pth +1 -1
- last-checkpoint/rng_state_5.pth +1 -1
- last-checkpoint/rng_state_6.pth +1 -1
- last-checkpoint/rng_state_7.pth +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +100 -5
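
The files in this checkpoint follow the layout that the transformers Trainer writes: model weights (model.safetensors), optimizer and LR-scheduler state (optimizer.pt, scheduler.pt), one RNG state per process (rng_state_0.pth through rng_state_7.pth, so 8 ranks), and the bookkeeping file trainer_state.json. Below is a minimal sketch of loading the checkpointed weights for scoring; the AutoModelForSequenceClassification class and the local path are assumptions (the checkpoint appears to be a cross-encoder classifier judging by its precision/recall/F2 metrics, and config/tokenizer files are assumed to sit alongside the weights even though this commit does not touch them).

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Hypothetical local copy of the checkpoint directory updated by this commit.
ckpt_dir = "last-checkpoint"

# Assumes config.json and tokenizer files are present next to model.safetensors.
tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)
model = AutoModelForSequenceClassification.from_pretrained(ckpt_dir)
model.eval()

# Cross-encoder style scoring of a (query, passage) pair -- placeholder inputs.
inputs = tokenizer("example query", "example passage",
                   return_tensors="pt", truncation=True)
with torch.no_grad():
    print(model(**inputs).logits)
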
last-checkpoint/model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:91eb96d3922be157532116be345e306074157b7a7a4a7335c0238c11a13104fb
 size 711443456
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1914f1fa557a8fa584d003645654c4d6913c76b6a764548c9acfbff31bb2d3d0
 size 1423007994
last-checkpoint/rng_state_0.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c46246640d1543a5d18433713b555d6e3842e91778e76a1852735bc0ed173fe2
 size 15984
last-checkpoint/rng_state_1.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f5ed2125154aeb523313b13badf0ffb177c19ce0c77a62f1b348e5f1cb4c3b09
 size 15984
last-checkpoint/rng_state_2.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6d5d3bc9f42a0ece76532c1de7bc707e6df336aca81bca6f2066c30c05126a2f
 size 15984
last-checkpoint/rng_state_3.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ecbdd4a506b7c616a788f2418e754c6a53c2bcc7a63cff96be4697ca6bc0706a
 size 15984
last-checkpoint/rng_state_4.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f2100657e88a6589af92eae97d1bbf8683394c289c642981d0c2937312744e3b
 size 15984
last-checkpoint/rng_state_5.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7a771fb324acc9bce75a763b43a2389416b47091b1973e7a8b137672d65216c7
 size 15984
last-checkpoint/rng_state_6.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8efe5d627912fd093b28bfa11c19eed1c8663d5c4427be33733700365128d5f9
 size 15984
last-checkpoint/rng_state_7.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:162222fe11954a835fdb6646ebeec9ede29106cf49f7358b96266185f60e2fe4
 size 15984
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:df2fb6fb1693a9f822931c64464c1e0bfe205ebfb9fe72b6c70c024b845afee2
 size 1064
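
All of the files above are stored through Git LFS, so each diff only rewrites the pointer file: the new "oid sha256:" line records the hash of the replacement blob and "size" its byte length. A small sketch for checking a locally pulled file against the oid in its pointer, using the model.safetensors hash from this commit (the local path is an assumption):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large blobs (the optimizer state is ~1.4 GB) fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid recorded in the updated pointer for model.safetensors (see the diff above).
expected = "91eb96d3922be157532116be345e306074157b7a7a4a7335c0238c11a13104fb"
assert sha256_of("last-checkpoint/model.safetensors") == expected
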
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.
-  "best_model_checkpoint": "checkpoint/cross_encoder_20250522_full_data/checkpoint-
-  "epoch": 
+  "best_metric": 0.7979517910648003,
+  "best_model_checkpoint": "checkpoint/cross_encoder_20250522_full_data/checkpoint-16219",
+  "epoch": 7.0,
   "eval_steps": 500,
-  "global_step": 
+  "global_step": 16219,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -563,6 +563,101 @@
       "eval_samples_per_second": 808.07,
       "eval_steps_per_second": 6.313,
       "step": 13902
+    },
+    {
+      "epoch": 6.04,
+      "grad_norm": 1.9424314498901367,
+      "learning_rate": 4.0591573080173995e-06,
+      "loss": 0.3873,
+      "step": 14000
+    },
+    {
+      "epoch": 6.13,
+      "grad_norm": 2.831543445587158,
+      "learning_rate": 3.911643687576664e-06,
+      "loss": 0.3807,
+      "step": 14200
+    },
+    {
+      "epoch": 6.21,
+      "grad_norm": 2.7500486373901367,
+      "learning_rate": 3.7651180761199505e-06,
+      "loss": 0.3801,
+      "step": 14400
+    },
+    {
+      "epoch": 6.3,
+      "grad_norm": 4.015429496765137,
+      "learning_rate": 3.619713489479354e-06,
+      "loss": 0.3824,
+      "step": 14600
+    },
+    {
+      "epoch": 6.39,
+      "grad_norm": 2.4831278324127197,
+      "learning_rate": 3.4755619258215407e-06,
+      "loss": 0.3808,
+      "step": 14800
+    },
+    {
+      "epoch": 6.47,
+      "grad_norm": 7.442523002624512,
+      "learning_rate": 3.3327942458199193e-06,
+      "loss": 0.385,
+      "step": 15000
+    },
+    {
+      "epoch": 6.56,
+      "grad_norm": 4.242701530456543,
+      "learning_rate": 3.1915400538594333e-06,
+      "loss": 0.3832,
+      "step": 15200
+    },
+    {
+      "epoch": 6.65,
+      "grad_norm": 5.000258922576904,
+      "learning_rate": 3.0519275803818014e-06,
+      "loss": 0.3805,
+      "step": 15400
+    },
+    {
+      "epoch": 6.73,
+      "grad_norm": 2.0560362339019775,
+      "learning_rate": 2.914083565478024e-06,
+      "loss": 0.381,
+      "step": 15600
+    },
+    {
+      "epoch": 6.82,
+      "grad_norm": 1.8777313232421875,
+      "learning_rate": 2.7781331438338317e-06,
+      "loss": 0.3831,
+      "step": 15800
+    },
+    {
+      "epoch": 6.91,
+      "grad_norm": 1.0822844505310059,
+      "learning_rate": 2.6441997311325196e-06,
+      "loss": 0.3797,
+      "step": 16000
+    },
+    {
+      "epoch": 6.99,
+      "grad_norm": 3.2328083515167236,
+      "learning_rate": 2.5124049120182916e-06,
+      "loss": 0.383,
+      "step": 16200
+    },
+    {
+      "epoch": 7.0,
+      "eval_f2": 0.7979517910648003,
+      "eval_loss": 0.4335618019104004,
+      "eval_precision": 0.5632793509486378,
+      "eval_recall": 0.8907248402555911,
+      "eval_runtime": 148.5629,
+      "eval_samples_per_second": 809.031,
+      "eval_steps_per_second": 6.321,
+      "step": 16219
     }
   ],
   "logging_steps": 200,
@@ -570,7 +665,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 10,
   "save_steps": 500,
-  "total_flos": 
+  "total_flos": 5.462269789659464e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
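
The entries added to trainer_state.json above live in its "log_history" array: a logging record every 200 optimizer steps (loss, learning_rate, grad_norm) plus the end-of-epoch evaluation record whose eval_f2 of 0.7979517910648003 makes checkpoint-16219 the new best model. A short sketch, assuming the checkpoint has been downloaded locally, for pulling that history back out:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# Top-level fields updated by this commit.
print(state["best_metric"], state["best_model_checkpoint"], state["global_step"])

# Training records carry "loss"; evaluation records carry "eval_f2".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_f2" in e]

for e in train_logs[-3:]:
    print(e["step"], e["loss"], e["learning_rate"])
print("latest eval:", eval_logs[-1]["step"], eval_logs[-1]["eval_f2"])
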