lmeninato committed on
Commit b8ac573 · 1 Parent(s): d64dc52

initial commit

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50):
  1. .gitattributes +11 -0
  2. README.md +78 -0
  3. checkpoint-4125/config.json +170 -0
  4. checkpoint-4125/generation_config.json +5 -0
  5. checkpoint-4125/optimizer.pt +3 -0
  6. checkpoint-4125/pytorch_model.bin +3 -0
  7. checkpoint-4125/rng_state.pth +3 -0
  8. checkpoint-4125/scheduler.pt +3 -0
  9. checkpoint-4125/special_tokens_map.json +7 -0
  10. checkpoint-4125/tokenizer_config.json +15 -0
  11. checkpoint-4125/trainer_state.json +196 -0
  12. checkpoint-4125/training_args.bin +3 -0
  13. checkpoint-4125/vocab.txt +0 -0
  14. checkpoint-4500/config.json +170 -0
  15. checkpoint-4500/generation_config.json +5 -0
  16. checkpoint-4500/optimizer.pt +3 -0
  17. checkpoint-4500/pytorch_model.bin +3 -0
  18. checkpoint-4500/rng_state.pth +3 -0
  19. checkpoint-4500/scheduler.pt +3 -0
  20. checkpoint-4500/special_tokens_map.json +7 -0
  21. checkpoint-4500/tokenizer_config.json +15 -0
  22. checkpoint-4500/trainer_state.json +214 -0
  23. checkpoint-4500/training_args.bin +3 -0
  24. checkpoint-4500/vocab.txt +0 -0
  25. checkpoint-4875/config.json +170 -0
  26. checkpoint-4875/generation_config.json +5 -0
  27. checkpoint-4875/optimizer.pt +3 -0
  28. checkpoint-4875/pytorch_model.bin +3 -0
  29. checkpoint-4875/rng_state.pth +3 -0
  30. checkpoint-4875/scheduler.pt +3 -0
  31. checkpoint-4875/special_tokens_map.json +7 -0
  32. checkpoint-4875/tokenizer_config.json +15 -0
  33. checkpoint-4875/trainer_state.json +226 -0
  34. checkpoint-4875/training_args.bin +3 -0
  35. checkpoint-4875/vocab.txt +0 -0
  36. checkpoint-5250/config.json +170 -0
  37. checkpoint-5250/generation_config.json +5 -0
  38. checkpoint-5250/optimizer.pt +3 -0
  39. checkpoint-5250/pytorch_model.bin +3 -0
  40. checkpoint-5250/rng_state.pth +3 -0
  41. checkpoint-5250/scheduler.pt +3 -0
  42. checkpoint-5250/special_tokens_map.json +7 -0
  43. checkpoint-5250/tokenizer_config.json +15 -0
  44. checkpoint-5250/trainer_state.json +244 -0
  45. checkpoint-5250/training_args.bin +3 -0
  46. checkpoint-5250/vocab.txt +0 -0
  47. checkpoint-5625/config.json +170 -0
  48. checkpoint-5625/generation_config.json +5 -0
  49. checkpoint-5625/optimizer.pt +3 -0
  50. checkpoint-5625/pytorch_model.bin +3 -0
.gitattributes CHANGED
@@ -32,3 +32,14 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4125/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4500/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4500/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4875/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5250/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5625/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4125/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4875/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5250/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5625/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,78 @@
+ ---
+ tags:
+ - generated_from_trainer
+ metrics:
+ - bleu
+ - rouge
+ model-index:
+ - name: bert-small-codesearchnet-python
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # bert-small-codesearchnet-python
+
+ This model is a fine-tuned version of [prajjwal1/bert-small](https://huggingface.co/prajjwal1/bert-small) on the CodeSearchNet Python dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0582
+ - Bleu: 0.0347
+ - Rouge1: 0.6428
+ - Rouge2: 0.6252
+ - Avg Length: 17.891
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (see the configuration sketch below):
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 10
+ - total_train_batch_size: 80
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 15
+
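The hyperparameters above correspond, roughly, to a `Seq2SeqTrainingArguments` setup like the following. This is a hedged reconstruction rather than the original training script: the output path, the model and dataset objects, and the metric callback are illustrative placeholders.

```python
from transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer

# Approximate mapping of the hyperparameters listed above (Transformers 4.28).
training_args = Seq2SeqTrainingArguments(
    output_dir="bert-small-codesearchnet-python",  # placeholder path
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=10,   # effective train batch size: 8 * 10 = 80
    num_train_epochs=15,
    lr_scheduler_type="linear",
    seed=42,
    evaluation_strategy="epoch",      # assumption: evaluation was run once per epoch
    predict_with_generate=True,       # needed for BLEU/ROUGE on generated text
)

trainer = Seq2SeqTrainer(
    model=model,                      # placeholder: the EncoderDecoderModel being fine-tuned
    args=training_args,
    train_dataset=train_dataset,      # placeholder: tokenized CodeSearchNet Python split
    eval_dataset=eval_dataset,        # placeholder
    compute_metrics=compute_metrics,  # placeholder: BLEU / ROUGE / average-length metrics
)
trainer.train()
```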
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Bleu   | Rouge1 | Rouge2 | Avg Length |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:----------:|
+ | No log        | 1.0   | 375  | 1.2151          | 0.0    | 0.0928 | 0.0083 | 10.684     |
+ | 1.9359        | 2.0   | 750  | 1.0291          | 0.0032 | 0.1752 | 0.0338 | 15.0624    |
+ | 0.9422        | 3.0   | 1125 | 0.9173          | 0.0061 | 0.2506 | 0.0711 | 17.9358    |
+ | 0.776         | 4.0   | 1500 | 0.8058          | 0.0088 | 0.3321 | 0.1409 | 18.3724    |
+ | 0.776         | 5.0   | 1875 | 0.6915          | 0.0123 | 0.4044 | 0.2267 | 18.564     |
+ | 0.6218        | 6.0   | 2250 | 0.5281          | 0.0193 | 0.5382 | 0.4097 | 17.5586    |
+ | 0.4363        | 7.0   | 2625 | 0.1897          | 0.0333 | 0.6311 | 0.6002 | 17.8768    |
+ | 0.1518        | 8.0   | 3000 | 0.0834          | 0.0346 | 0.6413 | 0.621  | 17.879     |
+ | 0.1518        | 9.0   | 3375 | 0.0587          | 0.0349 | 0.6439 | 0.6268 | 17.8886    |
+ | 0.0579        | 10.0  | 3750 | 0.0547          | 0.0348 | 0.6443 | 0.6276 | 17.885     |
+ | 0.0437        | 11.0  | 4125 | 0.0525          | 0.0348 | 0.6442 | 0.6278 | 17.8766    |
+ | 0.0365        | 12.0  | 4500 | 0.0550          | 0.0347 | 0.6436 | 0.6266 | 17.8876    |
+ | 0.0365        | 13.0  | 4875 | 0.0545          | 0.0347 | 0.6439 | 0.627  | 17.876     |
+ | 0.032         | 14.0  | 5250 | 0.0539          | 0.0347 | 0.644  | 0.6268 | 17.8822    |
+ | 0.0288        | 15.0  | 5625 | 0.0582          | 0.0347 | 0.6428 | 0.6252 | 17.891     |
+
+
+ ### Framework versions
+
+ - Transformers 4.28.1
+ - Pytorch 2.0.0+cu118
+ - Datasets 2.12.0
+ - Tokenizers 0.13.3
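For completeness, a minimal usage sketch. The repository id below is assumed from the commit author (`lmeninato`) and the model name in this card, and the code snippet fed to the model is purely illustrative; the checkpoint itself is an `EncoderDecoderModel` with a standard BERT tokenizer, as the config files later in this commit show.

```python
import torch
from transformers import AutoTokenizer, EncoderDecoderModel

# Assumed repo id: the commit author plus the model name in this card.
repo_id = "lmeninato/bert-small-codesearchnet-python"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = EncoderDecoderModel.from_pretrained(repo_id)
model.eval()

# Illustrative input: a Python function to summarise.
code = "def add(a, b):\n    return a + b"
input_ids = tokenizer(code, return_tensors="pt", truncation=True, max_length=512).input_ids

with torch.no_grad():
    # decoder_start_token_id (101, i.e. [CLS]) and pad_token_id (0) come from the
    # saved generation_config.json; max_length=20 mirrors the saved model config.
    output_ids = model.generate(input_ids, max_length=20)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```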
checkpoint-4125/config.json ADDED
@@ -0,0 +1,170 @@
+ {
+   "_commit_hash": null,
+   "architectures": [
+     "EncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "prajjwal1/bert-small",
+     "add_cross_attention": true,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 512,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 2048,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "bert",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 8,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 4,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.28.1",
+     "type_vocab_size": 2,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 30522
+   },
+   "decoder_start_token_id": 101,
+   "encoder": {
+     "_name_or_path": "prajjwal1/bert-small",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 512,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 2048,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "bert",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 8,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 4,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.28.1",
+     "type_vocab_size": 2,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 30522
+   },
+   "is_encoder_decoder": true,
+   "model_type": "encoder-decoder",
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": null
+ }
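Both the `encoder` and `decoder` blocks above point at `prajjwal1/bert-small`, with `is_decoder` and `add_cross_attention` enabled only on the decoder side. That is the shape of config produced by warm-starting a seq2seq model from two BERT checkpoints; a hedged sketch of that construction (not necessarily the author's exact code):

```python
from transformers import BertTokenizer, EncoderDecoderModel

# Warm-start a seq2seq model from two copies of bert-small; the decoder copy is
# reconfigured with is_decoder=True and add_cross_attention=True, as in the config above.
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "prajjwal1/bert-small", "prajjwal1/bert-small"
)

tokenizer = BertTokenizer.from_pretrained("prajjwal1/bert-small")

# Generation-related ids that this config and generation_config.json fix:
model.config.decoder_start_token_id = tokenizer.cls_token_id  # 101
model.config.pad_token_id = tokenizer.pad_token_id            # 0
```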
checkpoint-4125/generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "decoder_start_token_id": 101,
+   "pad_token_id": 0,
+   "transformers_version": "4.28.1"
+ }
checkpoint-4125/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df9ec8cfdbd32e9b4c758f22c1396265bb7afb2613b1cf81e9ba6ae3c0885b77
+ size 1147141
checkpoint-4125/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6add994a47482e8c7e3421e149cca1b0810fa08c8433a6980d94b84444ea7c02
+ size 247135097
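What the repository stores for `pytorch_model.bin` is a Git LFS pointer, not the roughly 247 MB weight file itself: a spec-version line, the SHA-256 of the real object, and its size in bytes. A small sketch of parsing such a pointer (the file path is illustrative):

```python
def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version / oid / size fields."""
    fields = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    fields["size"] = int(fields["size"])            # object size in bytes
    fields["oid"] = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    return fields

# Example (illustrative path):
# read_lfs_pointer("checkpoint-4125/pytorch_model.bin")
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "6add99...", "size": 247135097}
```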
checkpoint-4125/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b677ec431d9cf8b4aa3161c37d3c0908a69b7aaf145214135f952c66b1d7fb5d
+ size 14575
checkpoint-4125/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a07b14bea4a2000a0edbbb046d156c975117fac458b68dc3ac9997be295822b
+ size 881
checkpoint-4125/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-4125/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
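The tokenizer files above describe the stock uncased BERT WordPiece tokenizer (30,522-token vocab, lower-casing enabled) rather than anything code-specific. A quick hedged check, assuming the checkpoint directory has been downloaded locally:

```python
from transformers import AutoTokenizer

# Load the tokenizer saved with this checkpoint (local path is illustrative).
tok = AutoTokenizer.from_pretrained("checkpoint-4125")

# Python identifiers are split into generic WordPiece sub-tokens from the BERT vocab.
print(tok.tokenize("def fibonacci(n):"))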
checkpoint-4125/trainer_state.json ADDED
@@ -0,0 +1,196 @@
1
+ {
2
+ "best_metric": 0.05250174552202225,
3
+ "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/models/prajjwal1/bert-small-codesearchnet-python/checkpoint-4125",
4
+ "epoch": 11.0,
5
+ "global_step": 4125,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "eval_avg_length": 10.684,
13
+ "eval_bleu": 0.0,
14
+ "eval_loss": 1.2151237726211548,
15
+ "eval_rouge1": 0.0928,
16
+ "eval_rouge2": 0.0083,
17
+ "eval_runtime": 138.1107,
18
+ "eval_samples_per_second": 36.203,
19
+ "eval_steps_per_second": 4.525,
20
+ "step": 375
21
+ },
22
+ {
23
+ "epoch": 1.33,
24
+ "learning_rate": 2.354011121497024e-05,
25
+ "loss": 1.9359,
26
+ "step": 500
27
+ },
28
+ {
29
+ "epoch": 2.0,
30
+ "eval_avg_length": 15.0624,
31
+ "eval_bleu": 0.0032,
32
+ "eval_loss": 1.0291130542755127,
33
+ "eval_rouge1": 0.1752,
34
+ "eval_rouge2": 0.0338,
35
+ "eval_runtime": 131.019,
36
+ "eval_samples_per_second": 38.162,
37
+ "eval_steps_per_second": 4.77,
38
+ "step": 750
39
+ },
40
+ {
41
+ "epoch": 2.67,
42
+ "learning_rate": 4.7082288801902905e-05,
43
+ "loss": 0.9422,
44
+ "step": 1000
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_avg_length": 17.9358,
49
+ "eval_bleu": 0.0061,
50
+ "eval_loss": 0.9172993898391724,
51
+ "eval_rouge1": 0.2506,
52
+ "eval_rouge2": 0.0711,
53
+ "eval_runtime": 129.41,
54
+ "eval_samples_per_second": 38.637,
55
+ "eval_steps_per_second": 4.83,
56
+ "step": 1125
57
+ },
58
+ {
59
+ "epoch": 4.0,
60
+ "learning_rate": 7.063511293381453e-05,
61
+ "loss": 0.776,
62
+ "step": 1500
63
+ },
64
+ {
65
+ "epoch": 4.0,
66
+ "eval_avg_length": 18.3724,
67
+ "eval_bleu": 0.0088,
68
+ "eval_loss": 0.8057555556297302,
69
+ "eval_rouge1": 0.3321,
70
+ "eval_rouge2": 0.1409,
71
+ "eval_runtime": 125.5689,
72
+ "eval_samples_per_second": 39.819,
73
+ "eval_steps_per_second": 4.977,
74
+ "step": 1500
75
+ },
76
+ {
77
+ "epoch": 5.0,
78
+ "eval_avg_length": 18.564,
79
+ "eval_bleu": 0.0123,
80
+ "eval_loss": 0.6914781928062439,
81
+ "eval_rouge1": 0.4044,
82
+ "eval_rouge2": 0.2267,
83
+ "eval_runtime": 125.7562,
84
+ "eval_samples_per_second": 39.759,
85
+ "eval_steps_per_second": 4.97,
86
+ "step": 1875
87
+ },
88
+ {
89
+ "epoch": 5.33,
90
+ "learning_rate": 9.420605056220666e-05,
91
+ "loss": 0.6218,
92
+ "step": 2000
93
+ },
94
+ {
95
+ "epoch": 6.0,
96
+ "eval_avg_length": 17.5586,
97
+ "eval_bleu": 0.0193,
98
+ "eval_loss": 0.5281431674957275,
99
+ "eval_rouge1": 0.5382,
100
+ "eval_rouge2": 0.4097,
101
+ "eval_runtime": 124.9018,
102
+ "eval_samples_per_second": 40.031,
103
+ "eval_steps_per_second": 5.004,
104
+ "step": 2250
105
+ },
106
+ {
107
+ "epoch": 6.67,
108
+ "learning_rate": 0.00011781098874052987,
109
+ "loss": 0.4363,
110
+ "step": 2500
111
+ },
112
+ {
113
+ "epoch": 7.0,
114
+ "eval_avg_length": 17.8768,
115
+ "eval_bleu": 0.0333,
116
+ "eval_loss": 0.18967217206954956,
117
+ "eval_rouge1": 0.6311,
118
+ "eval_rouge2": 0.6002,
119
+ "eval_runtime": 127.7329,
120
+ "eval_samples_per_second": 39.144,
121
+ "eval_steps_per_second": 4.893,
122
+ "step": 2625
123
+ },
124
+ {
125
+ "epoch": 8.0,
126
+ "learning_rate": 0.0001414699072483927,
127
+ "loss": 0.1518,
128
+ "step": 3000
129
+ },
130
+ {
131
+ "epoch": 8.0,
132
+ "eval_avg_length": 17.879,
133
+ "eval_bleu": 0.0346,
134
+ "eval_loss": 0.08337126672267914,
135
+ "eval_rouge1": 0.6413,
136
+ "eval_rouge2": 0.621,
137
+ "eval_runtime": 127.6119,
138
+ "eval_samples_per_second": 39.181,
139
+ "eval_steps_per_second": 4.898,
140
+ "step": 3000
141
+ },
142
+ {
143
+ "epoch": 9.0,
144
+ "eval_avg_length": 17.8886,
145
+ "eval_bleu": 0.0349,
146
+ "eval_loss": 0.058715466409921646,
147
+ "eval_rouge1": 0.6439,
148
+ "eval_rouge2": 0.6268,
149
+ "eval_runtime": 128.3455,
150
+ "eval_samples_per_second": 38.957,
151
+ "eval_steps_per_second": 4.87,
152
+ "step": 3375
153
+ },
154
+ {
155
+ "epoch": 9.33,
156
+ "learning_rate": 0.000165146600920707,
157
+ "loss": 0.0579,
158
+ "step": 3500
159
+ },
160
+ {
161
+ "epoch": 10.0,
162
+ "eval_avg_length": 17.885,
163
+ "eval_bleu": 0.0348,
164
+ "eval_loss": 0.054685767740011215,
165
+ "eval_rouge1": 0.6443,
166
+ "eval_rouge2": 0.6276,
167
+ "eval_runtime": 125.2586,
168
+ "eval_samples_per_second": 39.917,
169
+ "eval_steps_per_second": 4.99,
170
+ "step": 3750
171
+ },
172
+ {
173
+ "epoch": 10.67,
174
+ "learning_rate": 0.00018887515761889517,
175
+ "loss": 0.0437,
176
+ "step": 4000
177
+ },
178
+ {
179
+ "epoch": 11.0,
180
+ "eval_avg_length": 17.8766,
181
+ "eval_bleu": 0.0348,
182
+ "eval_loss": 0.05250174552202225,
183
+ "eval_rouge1": 0.6442,
184
+ "eval_rouge2": 0.6278,
185
+ "eval_runtime": 125.5302,
186
+ "eval_samples_per_second": 39.831,
187
+ "eval_steps_per_second": 4.979,
188
+ "step": 4125
189
+ }
190
+ ],
191
+ "max_steps": 5625,
192
+ "num_train_epochs": 15,
193
+ "total_flos": 1.51985788416e+16,
194
+ "trial_name": null,
195
+ "trial_params": null
196
+ }
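`trainer_state.json` records the best metric seen so far and a `log_history` array that interleaves optimizer logs (`loss`, `learning_rate`) with per-epoch evaluation entries (`eval_*` keys). A short sketch of pulling the evaluation curve out of a checkpoint like this one:

```python
import json

# Load the Trainer state saved alongside the checkpoint.
with open("checkpoint-4125/trainer_state.json") as fh:
    state = json.load(fh)

# Keep only evaluation records; plain training-loss records lack the eval_* keys.
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

for entry in eval_logs:
    print(f"epoch {entry['epoch']:>4}: "
          f"loss={entry['eval_loss']:.4f}, "
          f"rouge2={entry['eval_rouge2']:.4f}")

print("best metric:", state["best_metric"])          # 0.0525 (eval_loss)
print("best checkpoint:", state["best_model_checkpoint"])
```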
checkpoint-4125/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8924762bcb0b0be937da03bc6254f6f33585885549c84b07f1eb1ac6331ec3f4
+ size 3963
checkpoint-4125/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4500/config.json ADDED
@@ -0,0 +1,170 @@
1
+ {
2
+ "_commit_hash": null,
3
+ "architectures": [
4
+ "EncoderDecoderModel"
5
+ ],
6
+ "decoder": {
7
+ "_name_or_path": "prajjwal1/bert-small",
8
+ "add_cross_attention": true,
9
+ "architectures": null,
10
+ "attention_probs_dropout_prob": 0.1,
11
+ "bad_words_ids": null,
12
+ "begin_suppress_tokens": null,
13
+ "bos_token_id": null,
14
+ "chunk_size_feed_forward": 0,
15
+ "classifier_dropout": null,
16
+ "cross_attention_hidden_size": null,
17
+ "decoder_start_token_id": null,
18
+ "diversity_penalty": 0.0,
19
+ "do_sample": false,
20
+ "early_stopping": false,
21
+ "encoder_no_repeat_ngram_size": 0,
22
+ "eos_token_id": null,
23
+ "exponential_decay_length_penalty": null,
24
+ "finetuning_task": null,
25
+ "forced_bos_token_id": null,
26
+ "forced_eos_token_id": null,
27
+ "hidden_act": "gelu",
28
+ "hidden_dropout_prob": 0.1,
29
+ "hidden_size": 512,
30
+ "id2label": {
31
+ "0": "LABEL_0",
32
+ "1": "LABEL_1"
33
+ },
34
+ "initializer_range": 0.02,
35
+ "intermediate_size": 2048,
36
+ "is_decoder": true,
37
+ "is_encoder_decoder": false,
38
+ "label2id": {
39
+ "LABEL_0": 0,
40
+ "LABEL_1": 1
41
+ },
42
+ "layer_norm_eps": 1e-12,
43
+ "length_penalty": 1.0,
44
+ "max_length": 20,
45
+ "max_position_embeddings": 512,
46
+ "min_length": 0,
47
+ "model_type": "bert",
48
+ "no_repeat_ngram_size": 0,
49
+ "num_attention_heads": 8,
50
+ "num_beam_groups": 1,
51
+ "num_beams": 1,
52
+ "num_hidden_layers": 4,
53
+ "num_return_sequences": 1,
54
+ "output_attentions": false,
55
+ "output_hidden_states": false,
56
+ "output_scores": false,
57
+ "pad_token_id": 0,
58
+ "position_embedding_type": "absolute",
59
+ "prefix": null,
60
+ "problem_type": null,
61
+ "pruned_heads": {},
62
+ "remove_invalid_values": false,
63
+ "repetition_penalty": 1.0,
64
+ "return_dict": true,
65
+ "return_dict_in_generate": false,
66
+ "sep_token_id": null,
67
+ "suppress_tokens": null,
68
+ "task_specific_params": null,
69
+ "temperature": 1.0,
70
+ "tf_legacy_loss": false,
71
+ "tie_encoder_decoder": false,
72
+ "tie_word_embeddings": true,
73
+ "tokenizer_class": null,
74
+ "top_k": 50,
75
+ "top_p": 1.0,
76
+ "torch_dtype": null,
77
+ "torchscript": false,
78
+ "transformers_version": "4.28.1",
79
+ "type_vocab_size": 2,
80
+ "typical_p": 1.0,
81
+ "use_bfloat16": false,
82
+ "use_cache": true,
83
+ "vocab_size": 30522
84
+ },
85
+ "decoder_start_token_id": 101,
86
+ "encoder": {
87
+ "_name_or_path": "prajjwal1/bert-small",
88
+ "add_cross_attention": false,
89
+ "architectures": null,
90
+ "attention_probs_dropout_prob": 0.1,
91
+ "bad_words_ids": null,
92
+ "begin_suppress_tokens": null,
93
+ "bos_token_id": null,
94
+ "chunk_size_feed_forward": 0,
95
+ "classifier_dropout": null,
96
+ "cross_attention_hidden_size": null,
97
+ "decoder_start_token_id": null,
98
+ "diversity_penalty": 0.0,
99
+ "do_sample": false,
100
+ "early_stopping": false,
101
+ "encoder_no_repeat_ngram_size": 0,
102
+ "eos_token_id": null,
103
+ "exponential_decay_length_penalty": null,
104
+ "finetuning_task": null,
105
+ "forced_bos_token_id": null,
106
+ "forced_eos_token_id": null,
107
+ "hidden_act": "gelu",
108
+ "hidden_dropout_prob": 0.1,
109
+ "hidden_size": 512,
110
+ "id2label": {
111
+ "0": "LABEL_0",
112
+ "1": "LABEL_1"
113
+ },
114
+ "initializer_range": 0.02,
115
+ "intermediate_size": 2048,
116
+ "is_decoder": false,
117
+ "is_encoder_decoder": false,
118
+ "label2id": {
119
+ "LABEL_0": 0,
120
+ "LABEL_1": 1
121
+ },
122
+ "layer_norm_eps": 1e-12,
123
+ "length_penalty": 1.0,
124
+ "max_length": 20,
125
+ "max_position_embeddings": 512,
126
+ "min_length": 0,
127
+ "model_type": "bert",
128
+ "no_repeat_ngram_size": 0,
129
+ "num_attention_heads": 8,
130
+ "num_beam_groups": 1,
131
+ "num_beams": 1,
132
+ "num_hidden_layers": 4,
133
+ "num_return_sequences": 1,
134
+ "output_attentions": false,
135
+ "output_hidden_states": false,
136
+ "output_scores": false,
137
+ "pad_token_id": 0,
138
+ "position_embedding_type": "absolute",
139
+ "prefix": null,
140
+ "problem_type": null,
141
+ "pruned_heads": {},
142
+ "remove_invalid_values": false,
143
+ "repetition_penalty": 1.0,
144
+ "return_dict": true,
145
+ "return_dict_in_generate": false,
146
+ "sep_token_id": null,
147
+ "suppress_tokens": null,
148
+ "task_specific_params": null,
149
+ "temperature": 1.0,
150
+ "tf_legacy_loss": false,
151
+ "tie_encoder_decoder": false,
152
+ "tie_word_embeddings": true,
153
+ "tokenizer_class": null,
154
+ "top_k": 50,
155
+ "top_p": 1.0,
156
+ "torch_dtype": null,
157
+ "torchscript": false,
158
+ "transformers_version": "4.28.1",
159
+ "type_vocab_size": 2,
160
+ "typical_p": 1.0,
161
+ "use_bfloat16": false,
162
+ "use_cache": true,
163
+ "vocab_size": 30522
164
+ },
165
+ "is_encoder_decoder": true,
166
+ "model_type": "encoder-decoder",
167
+ "pad_token_id": 0,
168
+ "torch_dtype": "float32",
169
+ "transformers_version": null
170
+ }
checkpoint-4500/generation_config.json ADDED
@@ -0,0 +1,5 @@
1
+ {
2
+ "decoder_start_token_id": 101,
3
+ "pad_token_id": 0,
4
+ "transformers_version": "4.28.1"
5
+ }
checkpoint-4500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d5ea6a5de6ad34f1af0b505d66a6360677ff24ec488016c7479fb93fc9207d1
3
+ size 1147141
checkpoint-4500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13fa7b593334f28926096172f420153a19d327fea4c2bc603ac7e667a0215a0c
3
+ size 247135097
checkpoint-4500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f941e670fea78950a92708359e7dd44b27e56bc1ec98e01e9c31c3931bfb4813
3
+ size 14575
checkpoint-4500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a5729adc3c127b0467258bfcda5cd70513c83184f85b340afeee53a74967187
3
+ size 881
checkpoint-4500/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
checkpoint-4500/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "clean_up_tokenization_spaces": true,
3
+ "cls_token": "[CLS]",
4
+ "do_basic_tokenize": true,
5
+ "do_lower_case": true,
6
+ "mask_token": "[MASK]",
7
+ "model_max_length": 1000000000000000019884624838656,
8
+ "never_split": null,
9
+ "pad_token": "[PAD]",
10
+ "sep_token": "[SEP]",
11
+ "strip_accents": null,
12
+ "tokenize_chinese_chars": true,
13
+ "tokenizer_class": "BertTokenizer",
14
+ "unk_token": "[UNK]"
15
+ }
checkpoint-4500/trainer_state.json ADDED
@@ -0,0 +1,214 @@
1
+ {
2
+ "best_metric": 0.05250174552202225,
3
+ "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/models/prajjwal1/bert-small-codesearchnet-python/checkpoint-4125",
4
+ "epoch": 12.0,
5
+ "global_step": 4500,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "eval_avg_length": 10.684,
13
+ "eval_bleu": 0.0,
14
+ "eval_loss": 1.2151237726211548,
15
+ "eval_rouge1": 0.0928,
16
+ "eval_rouge2": 0.0083,
17
+ "eval_runtime": 138.1107,
18
+ "eval_samples_per_second": 36.203,
19
+ "eval_steps_per_second": 4.525,
20
+ "step": 375
21
+ },
22
+ {
23
+ "epoch": 1.33,
24
+ "learning_rate": 2.354011121497024e-05,
25
+ "loss": 1.9359,
26
+ "step": 500
27
+ },
28
+ {
29
+ "epoch": 2.0,
30
+ "eval_avg_length": 15.0624,
31
+ "eval_bleu": 0.0032,
32
+ "eval_loss": 1.0291130542755127,
33
+ "eval_rouge1": 0.1752,
34
+ "eval_rouge2": 0.0338,
35
+ "eval_runtime": 131.019,
36
+ "eval_samples_per_second": 38.162,
37
+ "eval_steps_per_second": 4.77,
38
+ "step": 750
39
+ },
40
+ {
41
+ "epoch": 2.67,
42
+ "learning_rate": 4.7082288801902905e-05,
43
+ "loss": 0.9422,
44
+ "step": 1000
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_avg_length": 17.9358,
49
+ "eval_bleu": 0.0061,
50
+ "eval_loss": 0.9172993898391724,
51
+ "eval_rouge1": 0.2506,
52
+ "eval_rouge2": 0.0711,
53
+ "eval_runtime": 129.41,
54
+ "eval_samples_per_second": 38.637,
55
+ "eval_steps_per_second": 4.83,
56
+ "step": 1125
57
+ },
58
+ {
59
+ "epoch": 4.0,
60
+ "learning_rate": 7.063511293381453e-05,
61
+ "loss": 0.776,
62
+ "step": 1500
63
+ },
64
+ {
65
+ "epoch": 4.0,
66
+ "eval_avg_length": 18.3724,
67
+ "eval_bleu": 0.0088,
68
+ "eval_loss": 0.8057555556297302,
69
+ "eval_rouge1": 0.3321,
70
+ "eval_rouge2": 0.1409,
71
+ "eval_runtime": 125.5689,
72
+ "eval_samples_per_second": 39.819,
73
+ "eval_steps_per_second": 4.977,
74
+ "step": 1500
75
+ },
76
+ {
77
+ "epoch": 5.0,
78
+ "eval_avg_length": 18.564,
79
+ "eval_bleu": 0.0123,
80
+ "eval_loss": 0.6914781928062439,
81
+ "eval_rouge1": 0.4044,
82
+ "eval_rouge2": 0.2267,
83
+ "eval_runtime": 125.7562,
84
+ "eval_samples_per_second": 39.759,
85
+ "eval_steps_per_second": 4.97,
86
+ "step": 1875
87
+ },
88
+ {
89
+ "epoch": 5.33,
90
+ "learning_rate": 9.420605056220666e-05,
91
+ "loss": 0.6218,
92
+ "step": 2000
93
+ },
94
+ {
95
+ "epoch": 6.0,
96
+ "eval_avg_length": 17.5586,
97
+ "eval_bleu": 0.0193,
98
+ "eval_loss": 0.5281431674957275,
99
+ "eval_rouge1": 0.5382,
100
+ "eval_rouge2": 0.4097,
101
+ "eval_runtime": 124.9018,
102
+ "eval_samples_per_second": 40.031,
103
+ "eval_steps_per_second": 5.004,
104
+ "step": 2250
105
+ },
106
+ {
107
+ "epoch": 6.67,
108
+ "learning_rate": 0.00011781098874052987,
109
+ "loss": 0.4363,
110
+ "step": 2500
111
+ },
112
+ {
113
+ "epoch": 7.0,
114
+ "eval_avg_length": 17.8768,
115
+ "eval_bleu": 0.0333,
116
+ "eval_loss": 0.18967217206954956,
117
+ "eval_rouge1": 0.6311,
118
+ "eval_rouge2": 0.6002,
119
+ "eval_runtime": 127.7329,
120
+ "eval_samples_per_second": 39.144,
121
+ "eval_steps_per_second": 4.893,
122
+ "step": 2625
123
+ },
124
+ {
125
+ "epoch": 8.0,
126
+ "learning_rate": 0.0001414699072483927,
127
+ "loss": 0.1518,
128
+ "step": 3000
129
+ },
130
+ {
131
+ "epoch": 8.0,
132
+ "eval_avg_length": 17.879,
133
+ "eval_bleu": 0.0346,
134
+ "eval_loss": 0.08337126672267914,
135
+ "eval_rouge1": 0.6413,
136
+ "eval_rouge2": 0.621,
137
+ "eval_runtime": 127.6119,
138
+ "eval_samples_per_second": 39.181,
139
+ "eval_steps_per_second": 4.898,
140
+ "step": 3000
141
+ },
142
+ {
143
+ "epoch": 9.0,
144
+ "eval_avg_length": 17.8886,
145
+ "eval_bleu": 0.0349,
146
+ "eval_loss": 0.058715466409921646,
147
+ "eval_rouge1": 0.6439,
148
+ "eval_rouge2": 0.6268,
149
+ "eval_runtime": 128.3455,
150
+ "eval_samples_per_second": 38.957,
151
+ "eval_steps_per_second": 4.87,
152
+ "step": 3375
153
+ },
154
+ {
155
+ "epoch": 9.33,
156
+ "learning_rate": 0.000165146600920707,
157
+ "loss": 0.0579,
158
+ "step": 3500
159
+ },
160
+ {
161
+ "epoch": 10.0,
162
+ "eval_avg_length": 17.885,
163
+ "eval_bleu": 0.0348,
164
+ "eval_loss": 0.054685767740011215,
165
+ "eval_rouge1": 0.6443,
166
+ "eval_rouge2": 0.6276,
167
+ "eval_runtime": 125.2586,
168
+ "eval_samples_per_second": 39.917,
169
+ "eval_steps_per_second": 4.99,
170
+ "step": 3750
171
+ },
172
+ {
173
+ "epoch": 10.67,
174
+ "learning_rate": 0.00018887515761889517,
175
+ "loss": 0.0437,
176
+ "step": 4000
177
+ },
178
+ {
179
+ "epoch": 11.0,
180
+ "eval_avg_length": 17.8766,
181
+ "eval_bleu": 0.0348,
182
+ "eval_loss": 0.05250174552202225,
183
+ "eval_rouge1": 0.6442,
184
+ "eval_rouge2": 0.6278,
185
+ "eval_runtime": 125.5302,
186
+ "eval_samples_per_second": 39.831,
187
+ "eval_steps_per_second": 4.979,
188
+ "step": 4125
189
+ },
190
+ {
191
+ "epoch": 12.0,
192
+ "learning_rate": 0.0002126803301507607,
193
+ "loss": 0.0365,
194
+ "step": 4500
195
+ },
196
+ {
197
+ "epoch": 12.0,
198
+ "eval_avg_length": 17.8876,
199
+ "eval_bleu": 0.0347,
200
+ "eval_loss": 0.05503207445144653,
201
+ "eval_rouge1": 0.6436,
202
+ "eval_rouge2": 0.6266,
203
+ "eval_runtime": 126.9255,
204
+ "eval_samples_per_second": 39.393,
205
+ "eval_steps_per_second": 4.924,
206
+ "step": 4500
207
+ }
208
+ ],
209
+ "max_steps": 5625,
210
+ "num_train_epochs": 15,
211
+ "total_flos": 1.65802678272e+16,
212
+ "trial_name": null,
213
+ "trial_params": null
214
+ }
checkpoint-4500/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8924762bcb0b0be937da03bc6254f6f33585885549c84b07f1eb1ac6331ec3f4
3
+ size 3963
checkpoint-4500/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4875/config.json ADDED
@@ -0,0 +1,170 @@
1
+ {
2
+ "_commit_hash": null,
3
+ "architectures": [
4
+ "EncoderDecoderModel"
5
+ ],
6
+ "decoder": {
7
+ "_name_or_path": "prajjwal1/bert-small",
8
+ "add_cross_attention": true,
9
+ "architectures": null,
10
+ "attention_probs_dropout_prob": 0.1,
11
+ "bad_words_ids": null,
12
+ "begin_suppress_tokens": null,
13
+ "bos_token_id": null,
14
+ "chunk_size_feed_forward": 0,
15
+ "classifier_dropout": null,
16
+ "cross_attention_hidden_size": null,
17
+ "decoder_start_token_id": null,
18
+ "diversity_penalty": 0.0,
19
+ "do_sample": false,
20
+ "early_stopping": false,
21
+ "encoder_no_repeat_ngram_size": 0,
22
+ "eos_token_id": null,
23
+ "exponential_decay_length_penalty": null,
24
+ "finetuning_task": null,
25
+ "forced_bos_token_id": null,
26
+ "forced_eos_token_id": null,
27
+ "hidden_act": "gelu",
28
+ "hidden_dropout_prob": 0.1,
29
+ "hidden_size": 512,
30
+ "id2label": {
31
+ "0": "LABEL_0",
32
+ "1": "LABEL_1"
33
+ },
34
+ "initializer_range": 0.02,
35
+ "intermediate_size": 2048,
36
+ "is_decoder": true,
37
+ "is_encoder_decoder": false,
38
+ "label2id": {
39
+ "LABEL_0": 0,
40
+ "LABEL_1": 1
41
+ },
42
+ "layer_norm_eps": 1e-12,
43
+ "length_penalty": 1.0,
44
+ "max_length": 20,
45
+ "max_position_embeddings": 512,
46
+ "min_length": 0,
47
+ "model_type": "bert",
48
+ "no_repeat_ngram_size": 0,
49
+ "num_attention_heads": 8,
50
+ "num_beam_groups": 1,
51
+ "num_beams": 1,
52
+ "num_hidden_layers": 4,
53
+ "num_return_sequences": 1,
54
+ "output_attentions": false,
55
+ "output_hidden_states": false,
56
+ "output_scores": false,
57
+ "pad_token_id": 0,
58
+ "position_embedding_type": "absolute",
59
+ "prefix": null,
60
+ "problem_type": null,
61
+ "pruned_heads": {},
62
+ "remove_invalid_values": false,
63
+ "repetition_penalty": 1.0,
64
+ "return_dict": true,
65
+ "return_dict_in_generate": false,
66
+ "sep_token_id": null,
67
+ "suppress_tokens": null,
68
+ "task_specific_params": null,
69
+ "temperature": 1.0,
70
+ "tf_legacy_loss": false,
71
+ "tie_encoder_decoder": false,
72
+ "tie_word_embeddings": true,
73
+ "tokenizer_class": null,
74
+ "top_k": 50,
75
+ "top_p": 1.0,
76
+ "torch_dtype": null,
77
+ "torchscript": false,
78
+ "transformers_version": "4.28.1",
79
+ "type_vocab_size": 2,
80
+ "typical_p": 1.0,
81
+ "use_bfloat16": false,
82
+ "use_cache": true,
83
+ "vocab_size": 30522
84
+ },
85
+ "decoder_start_token_id": 101,
86
+ "encoder": {
87
+ "_name_or_path": "prajjwal1/bert-small",
88
+ "add_cross_attention": false,
89
+ "architectures": null,
90
+ "attention_probs_dropout_prob": 0.1,
91
+ "bad_words_ids": null,
92
+ "begin_suppress_tokens": null,
93
+ "bos_token_id": null,
94
+ "chunk_size_feed_forward": 0,
95
+ "classifier_dropout": null,
96
+ "cross_attention_hidden_size": null,
97
+ "decoder_start_token_id": null,
98
+ "diversity_penalty": 0.0,
99
+ "do_sample": false,
100
+ "early_stopping": false,
101
+ "encoder_no_repeat_ngram_size": 0,
102
+ "eos_token_id": null,
103
+ "exponential_decay_length_penalty": null,
104
+ "finetuning_task": null,
105
+ "forced_bos_token_id": null,
106
+ "forced_eos_token_id": null,
107
+ "hidden_act": "gelu",
108
+ "hidden_dropout_prob": 0.1,
109
+ "hidden_size": 512,
110
+ "id2label": {
111
+ "0": "LABEL_0",
112
+ "1": "LABEL_1"
113
+ },
114
+ "initializer_range": 0.02,
115
+ "intermediate_size": 2048,
116
+ "is_decoder": false,
117
+ "is_encoder_decoder": false,
118
+ "label2id": {
119
+ "LABEL_0": 0,
120
+ "LABEL_1": 1
121
+ },
122
+ "layer_norm_eps": 1e-12,
123
+ "length_penalty": 1.0,
124
+ "max_length": 20,
125
+ "max_position_embeddings": 512,
126
+ "min_length": 0,
127
+ "model_type": "bert",
128
+ "no_repeat_ngram_size": 0,
129
+ "num_attention_heads": 8,
130
+ "num_beam_groups": 1,
131
+ "num_beams": 1,
132
+ "num_hidden_layers": 4,
133
+ "num_return_sequences": 1,
134
+ "output_attentions": false,
135
+ "output_hidden_states": false,
136
+ "output_scores": false,
137
+ "pad_token_id": 0,
138
+ "position_embedding_type": "absolute",
139
+ "prefix": null,
140
+ "problem_type": null,
141
+ "pruned_heads": {},
142
+ "remove_invalid_values": false,
143
+ "repetition_penalty": 1.0,
144
+ "return_dict": true,
145
+ "return_dict_in_generate": false,
146
+ "sep_token_id": null,
147
+ "suppress_tokens": null,
148
+ "task_specific_params": null,
149
+ "temperature": 1.0,
150
+ "tf_legacy_loss": false,
151
+ "tie_encoder_decoder": false,
152
+ "tie_word_embeddings": true,
153
+ "tokenizer_class": null,
154
+ "top_k": 50,
155
+ "top_p": 1.0,
156
+ "torch_dtype": null,
157
+ "torchscript": false,
158
+ "transformers_version": "4.28.1",
159
+ "type_vocab_size": 2,
160
+ "typical_p": 1.0,
161
+ "use_bfloat16": false,
162
+ "use_cache": true,
163
+ "vocab_size": 30522
164
+ },
165
+ "is_encoder_decoder": true,
166
+ "model_type": "encoder-decoder",
167
+ "pad_token_id": 0,
168
+ "torch_dtype": "float32",
169
+ "transformers_version": null
170
+ }
checkpoint-4875/generation_config.json ADDED
@@ -0,0 +1,5 @@
1
+ {
2
+ "decoder_start_token_id": 101,
3
+ "pad_token_id": 0,
4
+ "transformers_version": "4.28.1"
5
+ }
checkpoint-4875/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05bcdfd3017fa4b8a17cfacac393946d80eeb32e7835583f88bd88157961c44d
3
+ size 1147141
checkpoint-4875/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63f9df95c2bcf40e414ebe0e5f51d3cd06a1a0e6692d21b3f7d1ba1949eb2b70
3
+ size 247135097
checkpoint-4875/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e67ac94251fa16f7e59fcf27fd4f50dcb5d44e2274e6a817873f4feb5d53fd3f
3
+ size 14575
checkpoint-4875/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3901fca1149ae706d2670fde9397a97084c3018dfdfc961cb1769bcedbef4cc1
3
+ size 881
checkpoint-4875/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
checkpoint-4875/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "clean_up_tokenization_spaces": true,
3
+ "cls_token": "[CLS]",
4
+ "do_basic_tokenize": true,
5
+ "do_lower_case": true,
6
+ "mask_token": "[MASK]",
7
+ "model_max_length": 1000000000000000019884624838656,
8
+ "never_split": null,
9
+ "pad_token": "[PAD]",
10
+ "sep_token": "[SEP]",
11
+ "strip_accents": null,
12
+ "tokenize_chinese_chars": true,
13
+ "tokenizer_class": "BertTokenizer",
14
+ "unk_token": "[UNK]"
15
+ }
checkpoint-4875/trainer_state.json ADDED
@@ -0,0 +1,226 @@
1
+ {
2
+ "best_metric": 0.05250174552202225,
3
+ "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/models/prajjwal1/bert-small-codesearchnet-python/checkpoint-4125",
4
+ "epoch": 13.0,
5
+ "global_step": 4875,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "eval_avg_length": 10.684,
13
+ "eval_bleu": 0.0,
14
+ "eval_loss": 1.2151237726211548,
15
+ "eval_rouge1": 0.0928,
16
+ "eval_rouge2": 0.0083,
17
+ "eval_runtime": 138.1107,
18
+ "eval_samples_per_second": 36.203,
19
+ "eval_steps_per_second": 4.525,
20
+ "step": 375
21
+ },
22
+ {
23
+ "epoch": 1.33,
24
+ "learning_rate": 2.354011121497024e-05,
25
+ "loss": 1.9359,
26
+ "step": 500
27
+ },
28
+ {
29
+ "epoch": 2.0,
30
+ "eval_avg_length": 15.0624,
31
+ "eval_bleu": 0.0032,
32
+ "eval_loss": 1.0291130542755127,
33
+ "eval_rouge1": 0.1752,
34
+ "eval_rouge2": 0.0338,
35
+ "eval_runtime": 131.019,
36
+ "eval_samples_per_second": 38.162,
37
+ "eval_steps_per_second": 4.77,
38
+ "step": 750
39
+ },
40
+ {
41
+ "epoch": 2.67,
42
+ "learning_rate": 4.7082288801902905e-05,
43
+ "loss": 0.9422,
44
+ "step": 1000
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_avg_length": 17.9358,
49
+ "eval_bleu": 0.0061,
50
+ "eval_loss": 0.9172993898391724,
51
+ "eval_rouge1": 0.2506,
52
+ "eval_rouge2": 0.0711,
53
+ "eval_runtime": 129.41,
54
+ "eval_samples_per_second": 38.637,
55
+ "eval_steps_per_second": 4.83,
56
+ "step": 1125
57
+ },
58
+ {
59
+ "epoch": 4.0,
60
+ "learning_rate": 7.063511293381453e-05,
61
+ "loss": 0.776,
62
+ "step": 1500
63
+ },
64
+ {
65
+ "epoch": 4.0,
66
+ "eval_avg_length": 18.3724,
67
+ "eval_bleu": 0.0088,
68
+ "eval_loss": 0.8057555556297302,
69
+ "eval_rouge1": 0.3321,
70
+ "eval_rouge2": 0.1409,
71
+ "eval_runtime": 125.5689,
72
+ "eval_samples_per_second": 39.819,
73
+ "eval_steps_per_second": 4.977,
74
+ "step": 1500
75
+ },
76
+ {
77
+ "epoch": 5.0,
78
+ "eval_avg_length": 18.564,
79
+ "eval_bleu": 0.0123,
80
+ "eval_loss": 0.6914781928062439,
81
+ "eval_rouge1": 0.4044,
82
+ "eval_rouge2": 0.2267,
83
+ "eval_runtime": 125.7562,
84
+ "eval_samples_per_second": 39.759,
85
+ "eval_steps_per_second": 4.97,
86
+ "step": 1875
87
+ },
88
+ {
89
+ "epoch": 5.33,
90
+ "learning_rate": 9.420605056220666e-05,
91
+ "loss": 0.6218,
92
+ "step": 2000
93
+ },
94
+ {
95
+ "epoch": 6.0,
96
+ "eval_avg_length": 17.5586,
97
+ "eval_bleu": 0.0193,
98
+ "eval_loss": 0.5281431674957275,
99
+ "eval_rouge1": 0.5382,
100
+ "eval_rouge2": 0.4097,
101
+ "eval_runtime": 124.9018,
102
+ "eval_samples_per_second": 40.031,
103
+ "eval_steps_per_second": 5.004,
104
+ "step": 2250
105
+ },
106
+ {
107
+ "epoch": 6.67,
108
+ "learning_rate": 0.00011781098874052987,
109
+ "loss": 0.4363,
110
+ "step": 2500
111
+ },
112
+ {
113
+ "epoch": 7.0,
114
+ "eval_avg_length": 17.8768,
115
+ "eval_bleu": 0.0333,
116
+ "eval_loss": 0.18967217206954956,
117
+ "eval_rouge1": 0.6311,
118
+ "eval_rouge2": 0.6002,
119
+ "eval_runtime": 127.7329,
120
+ "eval_samples_per_second": 39.144,
121
+ "eval_steps_per_second": 4.893,
122
+ "step": 2625
123
+ },
124
+ {
125
+ "epoch": 8.0,
126
+ "learning_rate": 0.0001414699072483927,
127
+ "loss": 0.1518,
128
+ "step": 3000
129
+ },
130
+ {
131
+ "epoch": 8.0,
132
+ "eval_avg_length": 17.879,
133
+ "eval_bleu": 0.0346,
134
+ "eval_loss": 0.08337126672267914,
135
+ "eval_rouge1": 0.6413,
136
+ "eval_rouge2": 0.621,
137
+ "eval_runtime": 127.6119,
138
+ "eval_samples_per_second": 39.181,
139
+ "eval_steps_per_second": 4.898,
140
+ "step": 3000
141
+ },
142
+ {
143
+ "epoch": 9.0,
144
+ "eval_avg_length": 17.8886,
145
+ "eval_bleu": 0.0349,
146
+ "eval_loss": 0.058715466409921646,
147
+ "eval_rouge1": 0.6439,
148
+ "eval_rouge2": 0.6268,
149
+ "eval_runtime": 128.3455,
150
+ "eval_samples_per_second": 38.957,
151
+ "eval_steps_per_second": 4.87,
152
+ "step": 3375
153
+ },
154
+ {
155
+ "epoch": 9.33,
156
+ "learning_rate": 0.000165146600920707,
157
+ "loss": 0.0579,
158
+ "step": 3500
159
+ },
160
+ {
161
+ "epoch": 10.0,
162
+ "eval_avg_length": 17.885,
163
+ "eval_bleu": 0.0348,
164
+ "eval_loss": 0.054685767740011215,
165
+ "eval_rouge1": 0.6443,
166
+ "eval_rouge2": 0.6276,
167
+ "eval_runtime": 125.2586,
168
+ "eval_samples_per_second": 39.917,
169
+ "eval_steps_per_second": 4.99,
170
+ "step": 3750
171
+ },
172
+ {
173
+ "epoch": 10.67,
174
+ "learning_rate": 0.00018887515761889517,
175
+ "loss": 0.0437,
176
+ "step": 4000
177
+ },
178
+ {
179
+ "epoch": 11.0,
180
+ "eval_avg_length": 17.8766,
181
+ "eval_bleu": 0.0348,
182
+ "eval_loss": 0.05250174552202225,
183
+ "eval_rouge1": 0.6442,
184
+ "eval_rouge2": 0.6278,
185
+ "eval_runtime": 125.5302,
186
+ "eval_samples_per_second": 39.831,
187
+ "eval_steps_per_second": 4.979,
188
+ "step": 4125
189
+ },
190
+ {
191
+ "epoch": 12.0,
192
+ "learning_rate": 0.0002126803301507607,
193
+ "loss": 0.0365,
194
+ "step": 4500
195
+ },
196
+ {
197
+ "epoch": 12.0,
198
+ "eval_avg_length": 17.8876,
199
+ "eval_bleu": 0.0347,
200
+ "eval_loss": 0.05503207445144653,
201
+ "eval_rouge1": 0.6436,
202
+ "eval_rouge2": 0.6266,
203
+ "eval_runtime": 126.9255,
204
+ "eval_samples_per_second": 39.393,
205
+ "eval_steps_per_second": 4.924,
206
+ "step": 4500
207
+ },
208
+ {
209
+ "epoch": 13.0,
210
+ "eval_avg_length": 17.876,
211
+ "eval_bleu": 0.0347,
212
+ "eval_loss": 0.05446252599358559,
213
+ "eval_rouge1": 0.6439,
214
+ "eval_rouge2": 0.627,
215
+ "eval_runtime": 126.1815,
216
+ "eval_samples_per_second": 39.625,
217
+ "eval_steps_per_second": 4.953,
218
+ "step": 4875
219
+ }
220
+ ],
221
+ "max_steps": 5625,
222
+ "num_train_epochs": 15,
223
+ "total_flos": 1.79619568128e+16,
224
+ "trial_name": null,
225
+ "trial_params": null
226
+ }
checkpoint-4875/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8924762bcb0b0be937da03bc6254f6f33585885549c84b07f1eb1ac6331ec3f4
3
+ size 3963
checkpoint-4875/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-5250/config.json ADDED
@@ -0,0 +1,170 @@
1
+ {
2
+ "_commit_hash": null,
3
+ "architectures": [
4
+ "EncoderDecoderModel"
5
+ ],
6
+ "decoder": {
7
+ "_name_or_path": "prajjwal1/bert-small",
8
+ "add_cross_attention": true,
9
+ "architectures": null,
10
+ "attention_probs_dropout_prob": 0.1,
11
+ "bad_words_ids": null,
12
+ "begin_suppress_tokens": null,
13
+ "bos_token_id": null,
14
+ "chunk_size_feed_forward": 0,
15
+ "classifier_dropout": null,
16
+ "cross_attention_hidden_size": null,
17
+ "decoder_start_token_id": null,
18
+ "diversity_penalty": 0.0,
19
+ "do_sample": false,
20
+ "early_stopping": false,
21
+ "encoder_no_repeat_ngram_size": 0,
22
+ "eos_token_id": null,
23
+ "exponential_decay_length_penalty": null,
24
+ "finetuning_task": null,
25
+ "forced_bos_token_id": null,
26
+ "forced_eos_token_id": null,
27
+ "hidden_act": "gelu",
28
+ "hidden_dropout_prob": 0.1,
29
+ "hidden_size": 512,
30
+ "id2label": {
31
+ "0": "LABEL_0",
32
+ "1": "LABEL_1"
33
+ },
34
+ "initializer_range": 0.02,
35
+ "intermediate_size": 2048,
36
+ "is_decoder": true,
37
+ "is_encoder_decoder": false,
38
+ "label2id": {
39
+ "LABEL_0": 0,
40
+ "LABEL_1": 1
41
+ },
42
+ "layer_norm_eps": 1e-12,
43
+ "length_penalty": 1.0,
44
+ "max_length": 20,
45
+ "max_position_embeddings": 512,
46
+ "min_length": 0,
47
+ "model_type": "bert",
48
+ "no_repeat_ngram_size": 0,
49
+ "num_attention_heads": 8,
50
+ "num_beam_groups": 1,
51
+ "num_beams": 1,
52
+ "num_hidden_layers": 4,
53
+ "num_return_sequences": 1,
54
+ "output_attentions": false,
55
+ "output_hidden_states": false,
56
+ "output_scores": false,
57
+ "pad_token_id": 0,
58
+ "position_embedding_type": "absolute",
59
+ "prefix": null,
60
+ "problem_type": null,
61
+ "pruned_heads": {},
62
+ "remove_invalid_values": false,
63
+ "repetition_penalty": 1.0,
64
+ "return_dict": true,
65
+ "return_dict_in_generate": false,
66
+ "sep_token_id": null,
67
+ "suppress_tokens": null,
68
+ "task_specific_params": null,
69
+ "temperature": 1.0,
70
+ "tf_legacy_loss": false,
71
+ "tie_encoder_decoder": false,
72
+ "tie_word_embeddings": true,
73
+ "tokenizer_class": null,
74
+ "top_k": 50,
75
+ "top_p": 1.0,
76
+ "torch_dtype": null,
77
+ "torchscript": false,
78
+ "transformers_version": "4.28.1",
79
+ "type_vocab_size": 2,
80
+ "typical_p": 1.0,
81
+ "use_bfloat16": false,
82
+ "use_cache": true,
83
+ "vocab_size": 30522
84
+ },
85
+ "decoder_start_token_id": 101,
86
+ "encoder": {
87
+ "_name_or_path": "prajjwal1/bert-small",
88
+ "add_cross_attention": false,
89
+ "architectures": null,
90
+ "attention_probs_dropout_prob": 0.1,
91
+ "bad_words_ids": null,
92
+ "begin_suppress_tokens": null,
93
+ "bos_token_id": null,
94
+ "chunk_size_feed_forward": 0,
95
+ "classifier_dropout": null,
96
+ "cross_attention_hidden_size": null,
97
+ "decoder_start_token_id": null,
98
+ "diversity_penalty": 0.0,
99
+ "do_sample": false,
100
+ "early_stopping": false,
101
+ "encoder_no_repeat_ngram_size": 0,
102
+ "eos_token_id": null,
103
+ "exponential_decay_length_penalty": null,
104
+ "finetuning_task": null,
105
+ "forced_bos_token_id": null,
106
+ "forced_eos_token_id": null,
107
+ "hidden_act": "gelu",
108
+ "hidden_dropout_prob": 0.1,
109
+ "hidden_size": 512,
110
+ "id2label": {
111
+ "0": "LABEL_0",
112
+ "1": "LABEL_1"
113
+ },
114
+ "initializer_range": 0.02,
115
+ "intermediate_size": 2048,
116
+ "is_decoder": false,
117
+ "is_encoder_decoder": false,
118
+ "label2id": {
119
+ "LABEL_0": 0,
120
+ "LABEL_1": 1
121
+ },
122
+ "layer_norm_eps": 1e-12,
123
+ "length_penalty": 1.0,
124
+ "max_length": 20,
125
+ "max_position_embeddings": 512,
126
+ "min_length": 0,
127
+ "model_type": "bert",
128
+ "no_repeat_ngram_size": 0,
129
+ "num_attention_heads": 8,
130
+ "num_beam_groups": 1,
131
+ "num_beams": 1,
132
+ "num_hidden_layers": 4,
133
+ "num_return_sequences": 1,
134
+ "output_attentions": false,
135
+ "output_hidden_states": false,
136
+ "output_scores": false,
137
+ "pad_token_id": 0,
138
+ "position_embedding_type": "absolute",
139
+ "prefix": null,
140
+ "problem_type": null,
141
+ "pruned_heads": {},
142
+ "remove_invalid_values": false,
143
+ "repetition_penalty": 1.0,
144
+ "return_dict": true,
145
+ "return_dict_in_generate": false,
146
+ "sep_token_id": null,
147
+ "suppress_tokens": null,
148
+ "task_specific_params": null,
149
+ "temperature": 1.0,
150
+ "tf_legacy_loss": false,
151
+ "tie_encoder_decoder": false,
152
+ "tie_word_embeddings": true,
153
+ "tokenizer_class": null,
154
+ "top_k": 50,
155
+ "top_p": 1.0,
156
+ "torch_dtype": null,
157
+ "torchscript": false,
158
+ "transformers_version": "4.28.1",
159
+ "type_vocab_size": 2,
160
+ "typical_p": 1.0,
161
+ "use_bfloat16": false,
162
+ "use_cache": true,
163
+ "vocab_size": 30522
164
+ },
165
+ "is_encoder_decoder": true,
166
+ "model_type": "encoder-decoder",
167
+ "pad_token_id": 0,
168
+ "torch_dtype": "float32",
169
+ "transformers_version": null
170
+ }
checkpoint-5250/generation_config.json ADDED
@@ -0,0 +1,5 @@
1
+ {
2
+ "decoder_start_token_id": 101,
3
+ "pad_token_id": 0,
4
+ "transformers_version": "4.28.1"
5
+ }
checkpoint-5250/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ed4e4c11ece6f0138eaffa1d6762ab695478ec3d1f8e2d44c7848212eba7e63
3
+ size 1147141
checkpoint-5250/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27fb4935dde282af80b68d7a534c687d2a1529e4cda5cf4ac1712dfca2336157
3
+ size 247135097
checkpoint-5250/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6551df537d27cc9e49b96669fef869ec9b4587a8896024d0c570d2420d00dec
3
+ size 14575
checkpoint-5250/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53a2fe9b040c69a8b2fc8ba5281e4c6dd46cd7071717e430395711b27cb0ff45
3
+ size 881
checkpoint-5250/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
checkpoint-5250/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "clean_up_tokenization_spaces": true,
3
+ "cls_token": "[CLS]",
4
+ "do_basic_tokenize": true,
5
+ "do_lower_case": true,
6
+ "mask_token": "[MASK]",
7
+ "model_max_length": 1000000000000000019884624838656,
8
+ "never_split": null,
9
+ "pad_token": "[PAD]",
10
+ "sep_token": "[SEP]",
11
+ "strip_accents": null,
12
+ "tokenize_chinese_chars": true,
13
+ "tokenizer_class": "BertTokenizer",
14
+ "unk_token": "[UNK]"
15
+ }
checkpoint-5250/trainer_state.json ADDED
@@ -0,0 +1,244 @@
+ {
+ "best_metric": 0.05250174552202225,
+ "best_model_checkpoint": "/content/drive/MyDrive/Colab Notebooks/models/prajjwal1/bert-small-codesearchnet-python/checkpoint-4125",
+ "epoch": 14.0,
+ "global_step": 5250,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_avg_length": 10.684,
+ "eval_bleu": 0.0,
+ "eval_loss": 1.2151237726211548,
+ "eval_rouge1": 0.0928,
+ "eval_rouge2": 0.0083,
+ "eval_runtime": 138.1107,
+ "eval_samples_per_second": 36.203,
+ "eval_steps_per_second": 4.525,
+ "step": 375
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.354011121497024e-05,
+ "loss": 1.9359,
+ "step": 500
+ },
+ {
+ "epoch": 2.0,
+ "eval_avg_length": 15.0624,
+ "eval_bleu": 0.0032,
+ "eval_loss": 1.0291130542755127,
+ "eval_rouge1": 0.1752,
+ "eval_rouge2": 0.0338,
+ "eval_runtime": 131.019,
+ "eval_samples_per_second": 38.162,
+ "eval_steps_per_second": 4.77,
+ "step": 750
+ },
+ {
+ "epoch": 2.67,
+ "learning_rate": 4.7082288801902905e-05,
+ "loss": 0.9422,
+ "step": 1000
+ },
+ {
+ "epoch": 3.0,
+ "eval_avg_length": 17.9358,
+ "eval_bleu": 0.0061,
+ "eval_loss": 0.9172993898391724,
+ "eval_rouge1": 0.2506,
+ "eval_rouge2": 0.0711,
+ "eval_runtime": 129.41,
+ "eval_samples_per_second": 38.637,
+ "eval_steps_per_second": 4.83,
+ "step": 1125
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 7.063511293381453e-05,
+ "loss": 0.776,
+ "step": 1500
+ },
+ {
+ "epoch": 4.0,
+ "eval_avg_length": 18.3724,
+ "eval_bleu": 0.0088,
+ "eval_loss": 0.8057555556297302,
+ "eval_rouge1": 0.3321,
+ "eval_rouge2": 0.1409,
+ "eval_runtime": 125.5689,
+ "eval_samples_per_second": 39.819,
+ "eval_steps_per_second": 4.977,
+ "step": 1500
+ },
+ {
+ "epoch": 5.0,
+ "eval_avg_length": 18.564,
+ "eval_bleu": 0.0123,
+ "eval_loss": 0.6914781928062439,
+ "eval_rouge1": 0.4044,
+ "eval_rouge2": 0.2267,
+ "eval_runtime": 125.7562,
+ "eval_samples_per_second": 39.759,
+ "eval_steps_per_second": 4.97,
+ "step": 1875
+ },
+ {
+ "epoch": 5.33,
+ "learning_rate": 9.420605056220666e-05,
+ "loss": 0.6218,
+ "step": 2000
+ },
+ {
+ "epoch": 6.0,
+ "eval_avg_length": 17.5586,
+ "eval_bleu": 0.0193,
+ "eval_loss": 0.5281431674957275,
+ "eval_rouge1": 0.5382,
+ "eval_rouge2": 0.4097,
+ "eval_runtime": 124.9018,
+ "eval_samples_per_second": 40.031,
+ "eval_steps_per_second": 5.004,
+ "step": 2250
+ },
+ {
+ "epoch": 6.67,
+ "learning_rate": 0.00011781098874052987,
+ "loss": 0.4363,
+ "step": 2500
+ },
+ {
+ "epoch": 7.0,
+ "eval_avg_length": 17.8768,
+ "eval_bleu": 0.0333,
+ "eval_loss": 0.18967217206954956,
+ "eval_rouge1": 0.6311,
+ "eval_rouge2": 0.6002,
+ "eval_runtime": 127.7329,
+ "eval_samples_per_second": 39.144,
+ "eval_steps_per_second": 4.893,
+ "step": 2625
+ },
+ {
+ "epoch": 8.0,
+ "learning_rate": 0.0001414699072483927,
+ "loss": 0.1518,
+ "step": 3000
+ },
+ {
+ "epoch": 8.0,
+ "eval_avg_length": 17.879,
+ "eval_bleu": 0.0346,
+ "eval_loss": 0.08337126672267914,
+ "eval_rouge1": 0.6413,
+ "eval_rouge2": 0.621,
+ "eval_runtime": 127.6119,
+ "eval_samples_per_second": 39.181,
+ "eval_steps_per_second": 4.898,
+ "step": 3000
+ },
+ {
+ "epoch": 9.0,
+ "eval_avg_length": 17.8886,
+ "eval_bleu": 0.0349,
+ "eval_loss": 0.058715466409921646,
+ "eval_rouge1": 0.6439,
+ "eval_rouge2": 0.6268,
+ "eval_runtime": 128.3455,
+ "eval_samples_per_second": 38.957,
+ "eval_steps_per_second": 4.87,
+ "step": 3375
+ },
+ {
+ "epoch": 9.33,
+ "learning_rate": 0.000165146600920707,
+ "loss": 0.0579,
+ "step": 3500
+ },
+ {
+ "epoch": 10.0,
+ "eval_avg_length": 17.885,
+ "eval_bleu": 0.0348,
+ "eval_loss": 0.054685767740011215,
+ "eval_rouge1": 0.6443,
+ "eval_rouge2": 0.6276,
+ "eval_runtime": 125.2586,
+ "eval_samples_per_second": 39.917,
+ "eval_steps_per_second": 4.99,
+ "step": 3750
+ },
+ {
+ "epoch": 10.67,
+ "learning_rate": 0.00018887515761889517,
+ "loss": 0.0437,
+ "step": 4000
+ },
+ {
+ "epoch": 11.0,
+ "eval_avg_length": 17.8766,
+ "eval_bleu": 0.0348,
+ "eval_loss": 0.05250174552202225,
+ "eval_rouge1": 0.6442,
+ "eval_rouge2": 0.6278,
+ "eval_runtime": 125.5302,
+ "eval_samples_per_second": 39.831,
+ "eval_steps_per_second": 4.979,
+ "step": 4125
+ },
+ {
+ "epoch": 12.0,
+ "learning_rate": 0.0002126803301507607,
+ "loss": 0.0365,
+ "step": 4500
+ },
+ {
+ "epoch": 12.0,
+ "eval_avg_length": 17.8876,
+ "eval_bleu": 0.0347,
+ "eval_loss": 0.05503207445144653,
+ "eval_rouge1": 0.6436,
+ "eval_rouge2": 0.6266,
+ "eval_runtime": 126.9255,
+ "eval_samples_per_second": 39.393,
+ "eval_steps_per_second": 4.924,
+ "step": 4500
+ },
+ {
+ "epoch": 13.0,
+ "eval_avg_length": 17.876,
+ "eval_bleu": 0.0347,
+ "eval_loss": 0.05446252599358559,
+ "eval_rouge1": 0.6439,
+ "eval_rouge2": 0.627,
+ "eval_runtime": 126.1815,
+ "eval_samples_per_second": 39.625,
+ "eval_steps_per_second": 4.953,
+ "step": 4875
+ },
+ {
+ "epoch": 13.33,
+ "learning_rate": 0.0002366175758652389,
+ "loss": 0.032,
+ "step": 5000
+ },
+ {
+ "epoch": 14.0,
+ "eval_avg_length": 17.8822,
+ "eval_bleu": 0.0347,
+ "eval_loss": 0.05387634411454201,
+ "eval_rouge1": 0.644,
+ "eval_rouge2": 0.6268,
+ "eval_runtime": 126.6924,
+ "eval_samples_per_second": 39.466,
+ "eval_steps_per_second": 4.933,
+ "step": 5250
+ }
+ ],
+ "max_steps": 5625,
+ "num_train_epochs": 15,
+ "total_flos": 1.93436457984e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
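The trainer state above follows the standard Hugging Face `Trainer` layout: `best_metric` and `best_model_checkpoint` track the lowest `eval_loss` seen so far (0.0525 at checkpoint-4125), and `log_history` interleaves training-loss records with per-epoch evaluation records. A minimal sketch of inspecting the file offline; the local path and printing logic are illustrative and not part of this commit:

```python
import json

# Read a checkpoint's trainer_state.json (path is illustrative).
with open("checkpoint-5250/trainer_state.json") as f:
    state = json.load(f)

print("best eval_loss:", state["best_metric"])             # 0.05250174552202225
print("best checkpoint:", state["best_model_checkpoint"])  # .../checkpoint-4125

# log_history mixes training-loss records with evaluation records;
# keep only the entries that carry eval_loss.
eval_curve = [(rec["step"], rec["eval_loss"])
              for rec in state["log_history"] if "eval_loss" in rec]
for step, loss in eval_curve:
    print(f"step {step:>5}: eval_loss {loss:.4f}")
```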
checkpoint-5250/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8924762bcb0b0be937da03bc6254f6f33585885549c84b07f1eb1ac6331ec3f4
+ size 3963
checkpoint-5250/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-5625/config.json ADDED
@@ -0,0 +1,170 @@
+ {
+ "_commit_hash": null,
+ "architectures": [
+ "EncoderDecoderModel"
+ ],
+ "decoder": {
+ "_name_or_path": "prajjwal1/bert-small",
+ "add_cross_attention": true,
+ "architectures": null,
+ "attention_probs_dropout_prob": 0.1,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "classifier_dropout": null,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 512,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 2048,
+ "is_decoder": true,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 512,
+ "min_length": 0,
+ "model_type": "bert",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 8,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 4,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.28.1",
+ "type_vocab_size": 2,
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_cache": true,
+ "vocab_size": 30522
+ },
+ "decoder_start_token_id": 101,
+ "encoder": {
+ "_name_or_path": "prajjwal1/bert-small",
+ "add_cross_attention": false,
+ "architectures": null,
+ "attention_probs_dropout_prob": 0.1,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "classifier_dropout": null,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 512,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 2048,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 512,
+ "min_length": 0,
+ "model_type": "bert",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 8,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 4,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.28.1",
+ "type_vocab_size": 2,
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_cache": true,
+ "vocab_size": 30522
+ },
+ "is_encoder_decoder": true,
+ "model_type": "encoder-decoder",
+ "pad_token_id": 0,
+ "torch_dtype": "float32",
+ "transformers_version": null
+ }
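This config describes an `EncoderDecoderModel` whose encoder and decoder are both warm-started from `prajjwal1/bert-small` (4 layers, hidden size 512, 8 attention heads), with cross-attention added on the decoder side and `decoder_start_token_id` 101 ([CLS]). A minimal loading sketch, assuming the checkpoint directory has been downloaded locally; the path is illustrative and not part of this commit:

```python
from transformers import AutoTokenizer, EncoderDecoderModel

ckpt = "checkpoint-5625"  # illustrative local path to this checkpoint directory

# The directory ships a BERT WordPiece vocab (vocab.txt) plus tokenizer_config.json,
# so the tokenizer can be loaded straight from the checkpoint.
tokenizer = AutoTokenizer.from_pretrained(ckpt)

# config.json marks the model as encoder-decoder, so EncoderDecoderModel
# restores both halves from pytorch_model.bin.
model = EncoderDecoderModel.from_pretrained(ckpt)
model.eval()
```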
checkpoint-5625/generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "decoder_start_token_id": 101,
+ "pad_token_id": 0,
+ "transformers_version": "4.28.1"
+ }
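The generation defaults mirror the top-level config: decoding starts from token 101 ([CLS] in the BERT vocabulary) and pads with token 0, and `generate` picks both up from generation_config.json automatically. A self-contained sketch of running generation on this checkpoint; the input snippet, beam settings, and local path are illustrative, not part of this commit:

```python
from transformers import AutoTokenizer, EncoderDecoderModel

ckpt = "checkpoint-5625"  # illustrative local path
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = EncoderDecoderModel.from_pretrained(ckpt)

code = "def add(a, b):\n    return a + b"  # illustrative Python input
inputs = tokenizer(code, return_tensors="pt", truncation=True, max_length=512)

# decoder_start_token_id=101 ([CLS]) and pad_token_id=0 are read from generation_config.json.
out = model.generate(**inputs, max_length=32, num_beams=4)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```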
checkpoint-5625/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c303963638abcbaf4bebf5adaf3c2c155ae641f5b8da2a5c11fbc0181cbaf1e1
+ size 1147141
checkpoint-5625/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12d5dedbfe110a74156952f0616d802f350cd81281cca5db99c874732404e267
+ size 247135097