Ethan Sim committed
Commit f4001dc · 1 Parent(s): 440c9ac

stage best unsampled glossary fine adapt wce model

config.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_name_or_path": "Helsinki-NLP/opus-mt-en-fr",
+ "_num_labels": 3,
+ "activation_dropout": 0.0,
+ "activation_function": "swish",
+ "add_bias_logits": false,
+ "add_final_layer_norm": false,
+ "architectures": [
+ "MarianMTModel"
+ ],
+ "attention_dropout": 0.0,
+ "bad_words_ids": [
+ [
+ 59513
+ ]
+ ],
+ "bos_token_id": 0,
+ "classif_dropout": 0.0,
+ "classifier_dropout": 0.0,
+ "d_model": 512,
+ "decoder_attention_heads": 8,
+ "decoder_ffn_dim": 2048,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 59513,
+ "decoder_vocab_size": 59514,
+ "dropout": 0.1,
+ "encoder_attention_heads": 8,
+ "encoder_ffn_dim": 2048,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 0,
+ "forced_eos_token_id": 0,
+ "gradient_checkpointing": false,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2"
+ },
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_2": 2
+ },
+ "max_length": 512,
+ "max_position_embeddings": 512,
+ "model_type": "marian",
+ "normalize_before": false,
+ "normalize_embedding": false,
+ "num_beams": 4,
+ "num_hidden_layers": 6,
+ "pad_token_id": 59513,
+ "scale_embedding": true,
+ "share_encoder_decoder_embeddings": true,
+ "static_position_embeddings": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.28.0",
+ "use_cache": true,
+ "vocab_size": 59514
+ }
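
For reference, config.json describes a standard 6-encoder/6-decoder-layer MarianMT model (d_model 512, shared 59,514-token vocabulary) fine-tuned from Helsinki-NLP/opus-mt-en-fr. A minimal loading sketch, assuming transformers >= 4.28 and a placeholder repository id (the actual Hub path of this checkpoint is not shown in the commit):

    from transformers import MarianConfig, MarianMTModel

    repo_id = "your-namespace/your-en-fr-checkpoint"  # placeholder, not the real repo id

    config = MarianConfig.from_pretrained(repo_id)   # parses the config.json added above
    model = MarianMTModel.from_pretrained(repo_id)   # loads pytorch_model.bin against that config

    print(config.encoder_layers, config.decoder_layers, config.d_model)  # 6 6 512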
generation_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "bad_words_ids": [
+ [
+ 59513
+ ]
+ ],
+ "bos_token_id": 0,
+ "decoder_start_token_id": 59513,
+ "eos_token_id": 0,
+ "forced_eos_token_id": 0,
+ "max_length": 512,
+ "num_beams": 4,
+ "pad_token_id": 59513,
+ "transformers_version": "4.28.0"
+ }
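
generation_config.json holds the decoding defaults (4-beam search, max_length 512, pad/decoder-start id 59513, forced EOS id 0) that generate() reads automatically. A hedged usage sketch with the same placeholder repository id:

    from transformers import MarianMTModel, MarianTokenizer

    repo_id = "your-namespace/your-en-fr-checkpoint"  # placeholder
    tokenizer = MarianTokenizer.from_pretrained(repo_id)
    model = MarianMTModel.from_pretrained(repo_id)

    batch = tokenizer(["This is a test sentence."], return_tensors="pt")
    outputs = model.generate(**batch)  # num_beams=4, max_length=512 come from generation_config.json
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))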
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:798bcd5a03038f2dbc7b283d0b4f6cdf5caf2bdd81ccf0dfb01a644f9a64f274
+ size 597026309
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e255482a74a9085ba82922f26f8859ffc90e6721e3894cffbdd99e354123d1db
+ size 298763205
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2eb8608536ff2f06f043c578921ef710ce4d295e077da17100e3e91b5050e6ed
+ size 14575
scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f995fcf21d7c7551be1a3db97443d6f8a7fdfaa0e74ccd9e8616d6b4fc94f56c
+ size 557
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e3349122e2399c65deb4183b16094c6d030d1768282a8e017f61e944aead1de
+ size 627
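
optimizer.pt, rng_state.pth, scaler.pt and scheduler.pt (together with pytorch_model.bin above and training_args.bin below) are the usual Hugging Face Trainer checkpoint files, stored here as Git LFS pointers. A small sketch of fetching the resolved files, assuming the repository id is known; the downloaded directory could then be passed to Seq2SeqTrainer.train(resume_from_checkpoint=...) to continue the run:

    from huggingface_hub import snapshot_download

    repo_id = "your-namespace/your-en-fr-checkpoint"  # placeholder
    local_dir = snapshot_download(repo_id=repo_id)    # resolves the LFS pointers to the real files
    print(local_dir)  # contains optimizer.pt, scheduler.pt, scaler.pt, rng_state.pth, ...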
source.spm ADDED
Binary file (778 kB).
 
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "eos_token": "</s>",
+ "pad_token": "<pad>",
+ "unk_token": "<unk>"
+ }
target.spm ADDED
Binary file (802 kB).
 
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "</s>",
+ "model_max_length": 512,
+ "pad_token": "<pad>",
+ "separate_vocabs": false,
+ "source_lang": "en",
+ "sp_model_kwargs": {},
+ "target_lang": "fr",
+ "tokenizer_class": "MarianTokenizer",
+ "unk_token": "<unk>"
+ }
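
tokenizer_config.json, source.spm, target.spm and special_tokens_map.json define the MarianTokenizer: English/French SentencePiece models over one shared 59,514-token vocabulary, with </s> at id 0, <pad> at id 59513 and a 512-token limit. A brief sketch of preparing a source/target pair, again with a placeholder repository id:

    from transformers import MarianTokenizer

    repo_id = "your-namespace/your-en-fr-checkpoint"  # placeholder
    tokenizer = MarianTokenizer.from_pretrained(repo_id)  # loads source.spm / target.spm

    # English input is tokenized with source.spm, the French target with target.spm.
    batch = tokenizer("an English sentence", text_target="une phrase en français", return_tensors="pt")
    print(batch["input_ids"], batch["labels"])
    print(tokenizer.pad_token_id, tokenizer.eos_token_id)  # 59513 0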
trainer_state.json ADDED
@@ -0,0 +1,336 @@
+ {
+ "best_metric": 42.7722,
+ "best_model_checkpoint": "opus_base_adapt_wce_gloss_unsampled_precision_3_ubweight_1.25/checkpoint-80000",
+ "epoch": 3.943217665615142,
+ "global_step": 80000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9753918572555207e-05,
+ "loss": 0.2076,
+ "step": 4000
+ },
+ {
+ "epoch": 0.2,
+ "eval_bleu": 35.6766,
+ "eval_gen_len": 39.8754,
+ "eval_loss": 0.10901036113500595,
+ "eval_runtime": 190.0619,
+ "eval_samples_per_second": 5.488,
+ "eval_steps_per_second": 0.174,
+ "step": 4000
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.950746746845426e-05,
+ "loss": 0.1743,
+ "step": 8000
+ },
+ {
+ "epoch": 0.39,
+ "eval_bleu": 37.712,
+ "eval_gen_len": 37.931,
+ "eval_loss": 0.10654148459434509,
+ "eval_runtime": 187.1025,
+ "eval_samples_per_second": 5.574,
+ "eval_steps_per_second": 0.176,
+ "step": 8000
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.9261016364353314e-05,
+ "loss": 0.1672,
+ "step": 12000
+ },
+ {
+ "epoch": 0.59,
+ "eval_bleu": 41.1339,
+ "eval_gen_len": 34.9118,
+ "eval_loss": 0.10531440377235413,
+ "eval_runtime": 148.6317,
+ "eval_samples_per_second": 7.017,
+ "eval_steps_per_second": 0.222,
+ "step": 12000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.9014565260252367e-05,
+ "loss": 0.1618,
+ "step": 16000
+ },
+ {
+ "epoch": 0.79,
+ "eval_bleu": 41.355,
+ "eval_gen_len": 35.2416,
+ "eval_loss": 0.10421621054410934,
+ "eval_runtime": 124.1237,
+ "eval_samples_per_second": 8.403,
+ "eval_steps_per_second": 0.266,
+ "step": 16000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.876811415615142e-05,
+ "loss": 0.1594,
+ "step": 20000
+ },
+ {
+ "epoch": 0.99,
+ "eval_bleu": 41.6651,
+ "eval_gen_len": 35.4228,
+ "eval_loss": 0.10366757214069366,
+ "eval_runtime": 159.7806,
+ "eval_samples_per_second": 6.528,
+ "eval_steps_per_second": 0.207,
+ "step": 20000
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 1.8521786277602524e-05,
+ "loss": 0.1477,
+ "step": 24000
+ },
+ {
+ "epoch": 1.18,
+ "eval_bleu": 41.9522,
+ "eval_gen_len": 35.0451,
+ "eval_loss": 0.10360125452280045,
+ "eval_runtime": 138.0407,
+ "eval_samples_per_second": 7.556,
+ "eval_steps_per_second": 0.239,
+ "step": 24000
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 1.827545839905363e-05,
+ "loss": 0.1475,
+ "step": 28000
+ },
+ {
+ "epoch": 1.38,
+ "eval_bleu": 40.2121,
+ "eval_gen_len": 36.4669,
+ "eval_loss": 0.10278471559286118,
+ "eval_runtime": 128.7397,
+ "eval_samples_per_second": 8.102,
+ "eval_steps_per_second": 0.256,
+ "step": 28000
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 1.8029130520504733e-05,
+ "loss": 0.147,
+ "step": 32000
+ },
+ {
+ "epoch": 1.58,
+ "eval_bleu": 41.9375,
+ "eval_gen_len": 35.2301,
+ "eval_loss": 0.10222012549638748,
+ "eval_runtime": 132.4166,
+ "eval_samples_per_second": 7.877,
+ "eval_steps_per_second": 0.249,
+ "step": 32000
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 1.7782802641955836e-05,
+ "loss": 0.1456,
+ "step": 36000
+ },
+ {
+ "epoch": 1.77,
+ "eval_bleu": 40.9742,
+ "eval_gen_len": 37.1965,
+ "eval_loss": 0.10183104127645493,
+ "eval_runtime": 119.5559,
+ "eval_samples_per_second": 8.724,
+ "eval_steps_per_second": 0.276,
+ "step": 36000
+ },
+ {
+ "epoch": 1.97,
+ "learning_rate": 1.7536474763406942e-05,
+ "loss": 0.1448,
+ "step": 40000
+ },
+ {
+ "epoch": 1.97,
+ "eval_bleu": 41.7528,
+ "eval_gen_len": 35.7248,
+ "eval_loss": 0.10195966809988022,
+ "eval_runtime": 123.0307,
+ "eval_samples_per_second": 8.478,
+ "eval_steps_per_second": 0.268,
+ "step": 40000
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 1.7290146884858045e-05,
+ "loss": 0.1372,
+ "step": 44000
+ },
+ {
+ "epoch": 2.17,
+ "eval_bleu": 42.1909,
+ "eval_gen_len": 35.2924,
+ "eval_loss": 0.1022593304514885,
+ "eval_runtime": 159.0132,
+ "eval_samples_per_second": 6.559,
+ "eval_steps_per_second": 0.208,
+ "step": 44000
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 1.7043757393533123e-05,
+ "loss": 0.1363,
+ "step": 48000
+ },
+ {
+ "epoch": 2.37,
+ "eval_bleu": 42.6162,
+ "eval_gen_len": 34.9866,
+ "eval_loss": 0.10216603428125381,
+ "eval_runtime": 116.2982,
+ "eval_samples_per_second": 8.968,
+ "eval_steps_per_second": 0.284,
+ "step": 48000
+ },
+ {
+ "epoch": 2.56,
+ "learning_rate": 1.679742951498423e-05,
+ "loss": 0.1352,
+ "step": 52000
+ },
+ {
+ "epoch": 2.56,
+ "eval_bleu": 42.4525,
+ "eval_gen_len": 34.8888,
+ "eval_loss": 0.10181364417076111,
+ "eval_runtime": 134.9256,
+ "eval_samples_per_second": 7.73,
+ "eval_steps_per_second": 0.245,
+ "step": 52000
+ },
+ {
+ "epoch": 2.76,
+ "learning_rate": 1.6551101636435333e-05,
+ "loss": 0.1355,
+ "step": 56000
+ },
+ {
+ "epoch": 2.76,
+ "eval_bleu": 41.9729,
+ "eval_gen_len": 35.9051,
+ "eval_loss": 0.10166899114847183,
+ "eval_runtime": 135.9018,
+ "eval_samples_per_second": 7.675,
+ "eval_steps_per_second": 0.243,
+ "step": 56000
+ },
+ {
+ "epoch": 2.96,
+ "learning_rate": 1.6304773757886436e-05,
+ "loss": 0.1358,
+ "step": 60000
+ },
+ {
+ "epoch": 2.96,
+ "eval_bleu": 42.3275,
+ "eval_gen_len": 34.8514,
+ "eval_loss": 0.10106752812862396,
+ "eval_runtime": 160.3679,
+ "eval_samples_per_second": 6.504,
+ "eval_steps_per_second": 0.206,
+ "step": 60000
+ },
+ {
+ "epoch": 3.15,
+ "learning_rate": 1.6058445879337542e-05,
+ "loss": 0.1294,
+ "step": 64000
+ },
+ {
+ "epoch": 3.15,
+ "eval_bleu": 42.2988,
+ "eval_gen_len": 34.8188,
+ "eval_loss": 0.10183101147413254,
+ "eval_runtime": 137.4561,
+ "eval_samples_per_second": 7.588,
+ "eval_steps_per_second": 0.24,
+ "step": 64000
+ },
+ {
+ "epoch": 3.35,
+ "learning_rate": 1.5812118000788645e-05,
+ "loss": 0.1289,
+ "step": 68000
+ },
+ {
+ "epoch": 3.35,
+ "eval_bleu": 42.7527,
+ "eval_gen_len": 34.7766,
+ "eval_loss": 0.10201847553253174,
+ "eval_runtime": 136.3856,
+ "eval_samples_per_second": 7.647,
+ "eval_steps_per_second": 0.242,
+ "step": 68000
+ },
+ {
+ "epoch": 3.55,
+ "learning_rate": 1.5565728509463723e-05,
+ "loss": 0.1277,
+ "step": 72000
+ },
+ {
+ "epoch": 3.55,
+ "eval_bleu": 42.3528,
+ "eval_gen_len": 35.2416,
+ "eval_loss": 0.10199479013681412,
+ "eval_runtime": 110.1197,
+ "eval_samples_per_second": 9.472,
+ "eval_steps_per_second": 0.3,
+ "step": 72000
+ },
+ {
+ "epoch": 3.75,
+ "learning_rate": 1.5319400630914826e-05,
+ "loss": 0.1282,
+ "step": 76000
+ },
+ {
+ "epoch": 3.75,
+ "eval_bleu": 42.4538,
+ "eval_gen_len": 35.4746,
+ "eval_loss": 0.10173474997282028,
+ "eval_runtime": 126.659,
+ "eval_samples_per_second": 8.235,
+ "eval_steps_per_second": 0.261,
+ "step": 76000
+ },
+ {
+ "epoch": 3.94,
+ "learning_rate": 1.5073072752365931e-05,
+ "loss": 0.129,
+ "step": 80000
+ },
+ {
+ "epoch": 3.94,
+ "eval_bleu": 42.7722,
+ "eval_gen_len": 34.8581,
+ "eval_loss": 0.10149160027503967,
+ "eval_runtime": 151.6781,
+ "eval_samples_per_second": 6.876,
+ "eval_steps_per_second": 0.218,
+ "step": 80000
+ }
+ ],
+ "max_steps": 324608,
+ "num_train_epochs": 16,
+ "total_flos": 1.3796012702564352e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
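
trainer_state.json records a run that reached eval BLEU 42.7722 at step 80,000 (epoch ~3.94 of a planned 16 epochs / 324,608 steps), and best_model_checkpoint points at checkpoint-80000, which is consistent with checkpoint selection on eval BLEU; the exact training arguments live only in the binary training_args.bin and are not shown here. A small sketch for recovering the best evaluation entry from this file:

    import json

    # Path is relative to a local clone of this repository / checkpoint directory.
    with open("trainer_state.json") as f:
        state = json.load(f)

    evals = [entry for entry in state["log_history"] if "eval_bleu" in entry]
    best = max(evals, key=lambda entry: entry["eval_bleu"])
    print(best["step"], best["eval_bleu"])  # 80000 42.7722, matching best_metric above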
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95403e3b8a17ad59b55b7a5c77ea0c35560a3f8a0621ae589da0ae3348a38c98
+ size 3835
vocab.json ADDED
The diff for this file is too large to render.