tyzhu committed on
Commit 411979e · verified · 1 Parent(s): 1175513

Training in progress, epoch 1, checkpoint

checkpoint-56/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<|endoftext|>": 50256
+ }
checkpoint-56/config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "gpt2-xl",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "do_sample": true,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "max_length": 50,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 1600,
+   "n_head": 25,
+   "n_inner": null,
+   "n_layer": 48,
+   "n_positions": 1024,
+   "output_past": true,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
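For reference, a configuration saved like this can normally be loaded straight from the checkpoint directory with transformers; a minimal sketch, assuming checkpoint-56/ has been downloaded locally (the path is illustrative):

```python
from transformers import AutoConfig, AutoModelForCausalLM

# Architecture settings recorded above: 48 layers, 1600-dim embeddings,
# 25 attention heads, 50257-token vocabulary (GPT2-XL).
config = AutoConfig.from_pretrained("checkpoint-56")
print(config.n_layer, config.n_embd, config.n_head)

# Restoring the full model reads pytorch_model.bin (~6.2 GB in float32),
# so this needs a machine with enough memory.
model = AutoModelForCausalLM.from_pretrained("checkpoint-56")
```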
checkpoint-56/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "do_sample": true,
+   "eos_token_id": 50256,
+   "max_length": 50,
+   "transformers_version": "4.34.0"
+ }
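These defaults are picked up automatically by generate(); a short sketch of loading and passing them explicitly, with a made-up prompt and the same assumed local path:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("checkpoint-56")
model = AutoModelForCausalLM.from_pretrained("checkpoint-56")

# do_sample=True and max_length=50 come from generation_config.json;
# passing the config explicitly just makes those defaults visible.
gen_config = GenerationConfig.from_pretrained("checkpoint-56")
inputs = tokenizer("Once upon a time", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=gen_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```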
checkpoint-56/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-56/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:624ad7cb5a55ed3d908aa94ad8c04bc4238d7ed78d04e9a8b432665608bb0d17
+ size 12461385454
checkpoint-56/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88675366c236726998242da0776f76b194e94f1a510a3ebc1f65e4176908faa4
+ size 6230637102
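The weight file in the diff is only a Git LFS pointer (a sha256 hash plus the size, roughly 6.2 GB here); the object itself lives in LFS storage. One way to resolve it is huggingface_hub, sketched below with a placeholder repository id:

```python
from huggingface_hub import hf_hub_download

# "tyzhu/<repo-name>" is a placeholder; substitute the actual repository id.
weights_path = hf_hub_download(
    repo_id="tyzhu/<repo-name>",
    filename="checkpoint-56/pytorch_model.bin",
)
print(weights_path)  # local cache path of the downloaded LFS object
```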
checkpoint-56/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b9387d4e40eb73db345e8351ca744eb6af5443bec3381b382000fca3f9015cc
+ size 14244
checkpoint-56/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8d23a8d533e4044d404c3867715711e71802a5adf225f4c1473f95c1c71209e
+ size 1064
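optimizer.pt, scheduler.pt and rng_state.pth (together with trainer_state.json further down) are the pieces the Trainer reads back when a run is resumed; a hedged sketch, where the dataset and hyperparameters are placeholders because the real ones are not part of this diff:

```python
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

model = AutoModelForCausalLM.from_pretrained("checkpoint-56")

# Placeholder arguments; the values actually used for this run are stored
# in training_args.bin.
args = TrainingArguments(output_dir="out", num_train_epochs=10, save_steps=500)

# Placeholder: the original run's training dataset is not in this diff.
train_dataset = ...

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)

# resume_from_checkpoint restores optimizer, scheduler and RNG state and
# continues from global_step 56 rather than starting over.
trainer.train(resume_from_checkpoint="checkpoint-56")
```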
checkpoint-56/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-56/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-56/tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
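The tokenizer files in this checkpoint (vocab.json, merges.txt, tokenizer.json, this config and special_tokens_map.json) load as one unit; a minimal sketch, again assuming a local checkpoint-56/ directory:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-56")

# GPT-2 reuses <|endoftext|> (id 50256) as BOS, EOS and UNK, and
# special_tokens_map.json above also assigns it as the padding token.
print(tokenizer.eos_token, tokenizer.eos_token_id)

ids = tokenizer("Hello world")["input_ids"]
print(ids, tokenizer.decode(ids))
```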
checkpoint-56/trainer_state.json ADDED
@@ -0,0 +1,105 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 28,
+   "global_step": 56,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.11,
+       "learning_rate": 3e-05,
+       "loss": 3.5167,
+       "step": 6
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 3e-05,
+       "loss": 2.7034,
+       "step": 12
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 3e-05,
+       "loss": 2.5409,
+       "step": 18
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 3e-05,
+       "loss": 2.4931,
+       "step": 24
+     },
+     {
+       "epoch": 0.5,
+       "eval_accuracy": 0.5748751060189339,
+       "eval_loss": 2.390355348587036,
+       "eval_runtime": 25.7729,
+       "eval_samples_per_second": 34.261,
+       "eval_steps_per_second": 2.173,
+       "step": 28
+     },
+     {
+       "epoch": 0.5,
+       "eval_bleu": 2.6290615444017718,
+       "eval_exact_match": 0.0,
+       "eval_prefix_exact_match": 0.0,
+       "step": 28
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 3e-05,
+       "loss": 2.4944,
+       "step": 30
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 3e-05,
+       "loss": 2.4832,
+       "step": 36
+     },
+     {
+       "epoch": 0.75,
+       "learning_rate": 3e-05,
+       "loss": 2.4336,
+       "step": 42
+     },
+     {
+       "epoch": 0.86,
+       "learning_rate": 3e-05,
+       "loss": 2.5101,
+       "step": 48
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 3e-05,
+       "loss": 2.4699,
+       "step": 54
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.6049526162776446,
+       "eval_loss": 2.101423740386963,
+       "eval_runtime": 23.9781,
+       "eval_samples_per_second": 36.825,
+       "eval_steps_per_second": 2.335,
+       "step": 56
+     },
+     {
+       "epoch": 1.0,
+       "eval_bleu": 2.731416805620766,
+       "eval_exact_match": 0.0,
+       "eval_prefix_exact_match": 0.0,
+       "step": 56
+     }
+   ],
+   "logging_steps": 6,
+   "max_steps": 560,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 1360515013171200.0,
+   "trial_name": null,
+   "trial_params": null
+ }
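Since trainer_state.json is plain JSON, the loss curve and eval metrics logged above can be pulled out with a few lines; a small sketch:

```python
import json

with open("checkpoint-56/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_*" keys.
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(train_log)  # [(6, 3.5167), (12, 2.7034), ..., (54, 2.4699)]
print(eval_log)   # roughly [(28, 2.390), (56, 2.101)]; eval loss falls over epoch 1
```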
checkpoint-56/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b959b77797bfa32386725e4d78839c39c28940487e69e41fb564f8bb31a78b3
+ size 4664
checkpoint-56/vocab.json ADDED
The diff for this file is too large to render. See raw diff