ngxtnhi committed
Commit c136d04 · verified · 1 Parent(s): f9644a1

Upload folder using huggingface_hub

Files changed (7)
  1. config.json +28 -0
  2. model.safetensors +3 -0
  3. optimizer.pt +3 -0
  4. rng_state.pth +3 -0
  5. scheduler.pt +3 -0
  6. trainer_state.json +432 -0
  7. training_args.bin +3 -0
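
The commit message above says this folder was pushed with huggingface_hub. A minimal sketch of such an upload call; the local folder path and target repo id below are placeholders, not values taken from this page:

```python
# Sketch only: how a checkpoint folder is typically pushed with huggingface_hub.
# folder_path and repo_id are placeholders, not values taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./XLM-V_64-multi-outputs/checkpoint-21000",  # hypothetical local checkpoint dir
    repo_id="ngxtnhi/xlm-v-base-finetuned",                   # placeholder repo id
    commit_message="Upload folder using huggingface_hub",
)
```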
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "facebook/xlm-v-base",
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.2",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 901629
+ }
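
This config describes facebook/xlm-v-base fine-tuned as a single-label XLMRobertaForSequenceClassification model. A loading sketch, assuming the checkpoint folder (or Hub repo id) is available; "path/to/checkpoint" is a placeholder, and loading the tokenizer from the base model is an assumption since no tokenizer files are part of this commit:

```python
# Sketch only: loading this fine-tuned classifier with transformers.
# "path/to/checkpoint" is a placeholder for a local clone or the Hub repo id;
# the tokenizer is taken from the base model because no tokenizer files are in this commit.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-v-base")
model = AutoModelForSequenceClassification.from_pretrained("path/to/checkpoint")

inputs = tokenizer("Example sentence to classify.", return_tensors="pt")
logits = model(**inputs).logits            # shape: (1, num_labels)
prediction = logits.argmax(dim=-1).item()  # single-label prediction, per problem_type
```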
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20f6dd918c36fadd3498b85134f99868826accf3b9dd34e3d6983d0e4e50b640
+ size 3114003416
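
The three lines above are a Git LFS pointer (spec version, sha256 oid, byte size of roughly 3.1 GB), not the weights themselves; the actual file lives in LFS storage. A sketch of resolving it through huggingface_hub; the repo id is a placeholder:

```python
# Sketch only: fetching the real file behind the LFS pointer; repo_id is a placeholder.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(repo_id="ngxtnhi/xlm-v-base-finetuned", filename="model.safetensors")
print(weights_path)  # local cache path of the ~3.1 GB safetensors file
```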
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcecf1d62854b12d3769f1047756eda61aca276178c2fa57d2f052e6a69a78e5
+ size 6228131858
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a36fedb56c18b8956a1a960fd61eee9346a81b45d97785ce8389eb6293135c9a
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ddeb483ab2976b44726760e4177d1f2b2dcb72ebf25afba0c989632df7e861e
+ size 1064
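
optimizer.pt, rng_state.pth, and scheduler.pt are the extra state the transformers Trainer writes so a run can be resumed exactly (via `trainer.train(resume_from_checkpoint=...)`). A small self-contained sketch that only inspects two of these files; the local paths are hypothetical copies of what this commit uploads:

```python
# Sketch only: inspecting the resume state saved next to the weights.
# The file names match this commit, but the local paths are hypothetical copies.
import torch

scheduler_state = torch.load("scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("rng_state.pth", map_location="cpu", weights_only=False)
print(scheduler_state.keys())  # learning-rate scheduler state dict
print(rng_state.keys())        # python / numpy / torch RNG snapshots used on resume
```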
trainer_state.json ADDED
@@ -0,0 +1,432 @@
+ {
+ "best_metric": 0.7947742682321812,
+ "best_model_checkpoint": "./XLM-V_64-multi-outputs/checkpoint-15000",
+ "epoch": 14.8619957537155,
+ "eval_steps": 1000,
+ "global_step": 21000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.7077140835102619,
+ "grad_norm": 1.493615746498108,
+ "learning_rate": 9.433962264150943e-07,
+ "loss": 0.6936,
+ "step": 1000
+ },
+ {
+ "epoch": 0.7077140835102619,
+ "eval_accuracy": 0.5659663447177138,
+ "eval_f1": 0.6995657867530498,
+ "eval_loss": 0.682445764541626,
+ "eval_precision": 0.5400659785037778,
+ "eval_recall": 0.9927621283255086,
+ "eval_runtime": 57.8197,
+ "eval_samples_per_second": 173.695,
+ "eval_steps_per_second": 2.715,
+ "step": 1000
+ },
+ {
+ "epoch": 1.4154281670205238,
+ "grad_norm": 3.4982151985168457,
+ "learning_rate": 1.8867924528301887e-06,
+ "loss": 0.672,
+ "step": 2000
+ },
+ {
+ "epoch": 1.4154281670205238,
+ "eval_accuracy": 0.6204321417903017,
+ "eval_f1": 0.7176714560805807,
+ "eval_loss": 0.6340225338935852,
+ "eval_precision": 0.5774731823599524,
+ "eval_recall": 0.9477699530516432,
+ "eval_runtime": 57.5505,
+ "eval_samples_per_second": 174.508,
+ "eval_steps_per_second": 2.728,
+ "step": 2000
+ },
+ {
+ "epoch": 2.1231422505307855,
+ "grad_norm": 12.846685409545898,
+ "learning_rate": 1.907732634338139e-06,
+ "loss": 0.6537,
+ "step": 3000
+ },
+ {
+ "epoch": 2.1231422505307855,
+ "eval_accuracy": 0.6434332370805537,
+ "eval_f1": 0.7224676431837557,
+ "eval_loss": 0.6225757002830505,
+ "eval_precision": 0.5982543960980619,
+ "eval_recall": 0.9117762128325508,
+ "eval_runtime": 57.5731,
+ "eval_samples_per_second": 174.439,
+ "eval_steps_per_second": 2.727,
+ "step": 3000
+ },
+ {
+ "epoch": 2.8308563340410475,
+ "grad_norm": 4.933294296264648,
+ "learning_rate": 1.8028833551769331e-06,
+ "loss": 0.6499,
+ "step": 4000
+ },
+ {
+ "epoch": 2.8308563340410475,
+ "eval_accuracy": 0.6594643034949716,
+ "eval_f1": 0.743858597962852,
+ "eval_loss": 0.604403555393219,
+ "eval_precision": 0.6026699029126213,
+ "eval_recall": 0.9714397496087637,
+ "eval_runtime": 57.5924,
+ "eval_samples_per_second": 174.381,
+ "eval_steps_per_second": 2.726,
+ "step": 4000
+ },
+ {
+ "epoch": 3.538570417551309,
+ "grad_norm": 9.257452011108398,
+ "learning_rate": 1.6980340760157273e-06,
+ "loss": 0.6389,
+ "step": 5000
+ },
+ {
+ "epoch": 3.538570417551309,
+ "eval_accuracy": 0.660460021905805,
+ "eval_f1": 0.6269146608315098,
+ "eval_loss": 0.6047727465629578,
+ "eval_precision": 0.7112711022840119,
+ "eval_recall": 0.5604460093896714,
+ "eval_runtime": 57.6043,
+ "eval_samples_per_second": 174.344,
+ "eval_steps_per_second": 2.725,
+ "step": 5000
+ },
+ {
+ "epoch": 4.246284501061571,
+ "grad_norm": 8.192702293395996,
+ "learning_rate": 1.5931847968545215e-06,
+ "loss": 0.6187,
+ "step": 6000
+ },
+ {
+ "epoch": 4.246284501061571,
+ "eval_accuracy": 0.6978990341531415,
+ "eval_f1": 0.7157046476761619,
+ "eval_loss": 0.5651090741157532,
+ "eval_precision": 0.6868705035971223,
+ "eval_recall": 0.7470657276995305,
+ "eval_runtime": 57.5852,
+ "eval_samples_per_second": 174.402,
+ "eval_steps_per_second": 2.726,
+ "step": 6000
+ },
+ {
+ "epoch": 4.953998584571833,
+ "grad_norm": 49.664493560791016,
+ "learning_rate": 1.488335517693316e-06,
+ "loss": 0.5962,
+ "step": 7000
+ },
+ {
+ "epoch": 4.953998584571833,
+ "eval_accuracy": 0.7021806233197252,
+ "eval_f1": 0.7534009398961168,
+ "eval_loss": 0.5552804470062256,
+ "eval_precision": 0.6511329628046174,
+ "eval_recall": 0.8937793427230047,
+ "eval_runtime": 57.4997,
+ "eval_samples_per_second": 174.662,
+ "eval_steps_per_second": 2.73,
+ "step": 7000
+ },
+ {
+ "epoch": 5.661712668082095,
+ "grad_norm": 7.591745376586914,
+ "learning_rate": 1.38348623853211e-06,
+ "loss": 0.5716,
+ "step": 8000
+ },
+ {
+ "epoch": 5.661712668082095,
+ "eval_accuracy": 0.7096485114009758,
+ "eval_f1": 0.7601184600197434,
+ "eval_loss": 0.5378949046134949,
+ "eval_precision": 0.65587734241908,
+ "eval_recall": 0.903755868544601,
+ "eval_runtime": 57.5168,
+ "eval_samples_per_second": 174.61,
+ "eval_steps_per_second": 2.73,
+ "step": 8000
+ },
+ {
+ "epoch": 6.369426751592357,
+ "grad_norm": 7.424753189086914,
+ "learning_rate": 1.2786369593709043e-06,
+ "loss": 0.531,
+ "step": 9000
+ },
+ {
+ "epoch": 6.369426751592357,
+ "eval_accuracy": 0.7241860001991437,
+ "eval_f1": 0.746800731261426,
+ "eval_loss": 0.4708074629306793,
+ "eval_precision": 0.7009265614275909,
+ "eval_recall": 0.7991001564945227,
+ "eval_runtime": 57.6158,
+ "eval_samples_per_second": 174.31,
+ "eval_steps_per_second": 2.725,
+ "step": 9000
+ },
+ {
+ "epoch": 7.077140835102618,
+ "grad_norm": 9.683096885681152,
+ "learning_rate": 1.1737876802096983e-06,
+ "loss": 0.4858,
+ "step": 10000
+ },
+ {
+ "epoch": 7.077140835102618,
+ "eval_accuracy": 0.7385243453151449,
+ "eval_f1": 0.7664947536902009,
+ "eval_loss": 0.44091591238975525,
+ "eval_precision": 0.7026410172807304,
+ "eval_recall": 0.843114241001565,
+ "eval_runtime": 57.5121,
+ "eval_samples_per_second": 174.624,
+ "eval_steps_per_second": 2.73,
+ "step": 10000
+ },
+ {
+ "epoch": 7.78485491861288,
+ "grad_norm": 25.252531051635742,
+ "learning_rate": 1.0689384010484928e-06,
+ "loss": 0.4577,
+ "step": 11000
+ },
+ {
+ "epoch": 7.78485491861288,
+ "eval_accuracy": 0.7428059344817286,
+ "eval_f1": 0.7718399434678915,
+ "eval_loss": 0.42118868231773376,
+ "eval_precision": 0.7036559832501208,
+ "eval_recall": 0.8546557120500783,
+ "eval_runtime": 57.4963,
+ "eval_samples_per_second": 174.672,
+ "eval_steps_per_second": 2.731,
+ "step": 11000
+ },
+ {
+ "epoch": 8.492569002123142,
+ "grad_norm": 9.067980766296387,
+ "learning_rate": 9.64089121887287e-07,
+ "loss": 0.4404,
+ "step": 12000
+ },
+ {
+ "epoch": 8.492569002123142,
+ "eval_accuracy": 0.7539579806830629,
+ "eval_f1": 0.7588091752074183,
+ "eval_loss": 0.40373048186302185,
+ "eval_precision": 0.75725696473797,
+ "eval_recall": 0.7603677621283255,
+ "eval_runtime": 57.7852,
+ "eval_samples_per_second": 173.799,
+ "eval_steps_per_second": 2.717,
+ "step": 12000
+ },
+ {
+ "epoch": 9.200283085633403,
+ "grad_norm": 6.412005424499512,
+ "learning_rate": 8.592398427260812e-07,
+ "loss": 0.42,
+ "step": 13000
+ },
+ {
+ "epoch": 9.200283085633403,
+ "eval_accuracy": 0.7534601214776461,
+ "eval_f1": 0.7862938028655274,
+ "eval_loss": 0.394222617149353,
+ "eval_precision": 0.7035835650293482,
+ "eval_recall": 0.8910406885758998,
+ "eval_runtime": 57.6787,
+ "eval_samples_per_second": 174.12,
+ "eval_steps_per_second": 2.722,
+ "step": 13000
+ },
+ {
+ "epoch": 9.907997169143666,
+ "grad_norm": 9.248723983764648,
+ "learning_rate": 7.543905635648754e-07,
+ "loss": 0.4061,
+ "step": 14000
+ },
+ {
+ "epoch": 9.907997169143666,
+ "eval_accuracy": 0.7524644030668127,
+ "eval_f1": 0.7793752218672346,
+ "eval_loss": 0.3883645236492157,
+ "eval_precision": 0.7132878492527616,
+ "eval_recall": 0.8589593114241002,
+ "eval_runtime": 57.5171,
+ "eval_samples_per_second": 174.609,
+ "eval_steps_per_second": 2.73,
+ "step": 14000
+ },
+ {
+ "epoch": 10.615711252653927,
+ "grad_norm": 8.524171829223633,
+ "learning_rate": 6.495412844036698e-07,
+ "loss": 0.4007,
+ "step": 15000
+ },
+ {
+ "epoch": 10.615711252653927,
+ "eval_accuracy": 0.7528626904311461,
+ "eval_f1": 0.7947742682321812,
+ "eval_loss": 0.38538095355033875,
+ "eval_precision": 0.6883414494414208,
+ "eval_recall": 0.9401408450704225,
+ "eval_runtime": 57.8102,
+ "eval_samples_per_second": 173.724,
+ "eval_steps_per_second": 2.716,
+ "step": 15000
+ },
+ {
+ "epoch": 11.32342533616419,
+ "grad_norm": 4.774406909942627,
+ "learning_rate": 5.44692005242464e-07,
+ "loss": 0.3936,
+ "step": 16000
+ },
+ {
+ "epoch": 11.32342533616419,
+ "eval_accuracy": 0.7545554117295629,
+ "eval_f1": 0.793083186434987,
+ "eval_loss": 0.38386669754981995,
+ "eval_precision": 0.6946037347448905,
+ "eval_recall": 0.9241001564945227,
+ "eval_runtime": 57.8533,
+ "eval_samples_per_second": 173.594,
+ "eval_steps_per_second": 2.714,
+ "step": 16000
+ },
+ {
+ "epoch": 12.031139419674451,
+ "grad_norm": 4.318003177642822,
+ "learning_rate": 4.398427260812582e-07,
+ "loss": 0.3915,
+ "step": 17000
+ },
+ {
+ "epoch": 12.031139419674451,
+ "eval_accuracy": 0.7562481330279797,
+ "eval_f1": 0.7931034482758621,
+ "eval_loss": 0.3860309422016144,
+ "eval_precision": 0.6982142857142857,
+ "eval_recall": 0.9178403755868545,
+ "eval_runtime": 57.8089,
+ "eval_samples_per_second": 173.728,
+ "eval_steps_per_second": 2.716,
+ "step": 17000
+ },
+ {
+ "epoch": 12.738853503184714,
+ "grad_norm": 15.07564926147461,
+ "learning_rate": 3.3499344692005245e-07,
+ "loss": 0.3888,
+ "step": 18000
+ },
+ {
+ "epoch": 12.738853503184714,
+ "eval_accuracy": 0.7568455640744798,
+ "eval_f1": 0.7816523605150214,
+ "eval_loss": 0.3812848627567291,
+ "eval_precision": 0.7198616600790514,
+ "eval_recall": 0.8550469483568075,
+ "eval_runtime": 57.8126,
+ "eval_samples_per_second": 173.716,
+ "eval_steps_per_second": 2.716,
+ "step": 18000
+ },
+ {
+ "epoch": 13.446567586694975,
+ "grad_norm": 10.069337844848633,
+ "learning_rate": 2.3014416775884665e-07,
+ "loss": 0.3832,
+ "step": 19000
+ },
+ {
+ "epoch": 13.446567586694975,
+ "eval_accuracy": 0.7569451359155631,
+ "eval_f1": 0.7900576244947106,
+ "eval_loss": 0.37964996695518494,
+ "eval_precision": 0.7049884881043745,
+ "eval_recall": 0.8984741784037559,
+ "eval_runtime": 57.6196,
+ "eval_samples_per_second": 174.298,
+ "eval_steps_per_second": 2.725,
+ "step": 19000
+ },
+ {
+ "epoch": 14.154281670205236,
+ "grad_norm": 8.055023193359375,
+ "learning_rate": 1.252948885976409e-07,
+ "loss": 0.383,
+ "step": 20000
+ },
+ {
+ "epoch": 14.154281670205236,
+ "eval_accuracy": 0.7573434232798965,
+ "eval_f1": 0.7904016513288037,
+ "eval_loss": 0.3802996873855591,
+ "eval_precision": 0.7052954719877207,
+ "eval_recall": 0.8988654147104851,
+ "eval_runtime": 57.777,
+ "eval_samples_per_second": 173.824,
+ "eval_steps_per_second": 2.717,
+ "step": 20000
+ },
+ {
+ "epoch": 14.8619957537155,
+ "grad_norm": 2.661689281463623,
+ "learning_rate": 2.0445609436435123e-08,
+ "loss": 0.3823,
+ "step": 21000
+ },
+ {
+ "epoch": 14.8619957537155,
+ "eval_accuracy": 0.7576421388031465,
+ "eval_f1": 0.7906056434962148,
+ "eval_loss": 0.37897399067878723,
+ "eval_precision": 0.7056203931203932,
+ "eval_recall": 0.8988654147104851,
+ "eval_runtime": 57.6891,
+ "eval_samples_per_second": 174.088,
+ "eval_steps_per_second": 2.721,
+ "step": 21000
+ }
+ ],
+ "logging_steps": 1000,
+ "max_steps": 21195,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 15,
+ "save_steps": 1000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3.173409489802339e+17,
+ "train_batch_size": 64,
+ "trial_name": null,
+ "trial_params": null
+ }
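
trainer_state.json records the full training and evaluation log: 21000 of 21195 steps across roughly 15 epochs, with the best eval_f1 of about 0.7948 reached at checkpoint-15000. A small sketch of reading that state back; the local path is a hypothetical copy of the file added in this commit:

```python
# Sketch only: reading the saved trainer state back to find the best checkpoint.
# The local file is assumed to be a copy of the trainer_state.json in this commit.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_model_checkpoint"])  # ./XLM-V_64-multi-outputs/checkpoint-15000
print(state["best_metric"])            # 0.7947742682321812

# Evaluation entries in log_history are the dicts that carry eval_* keys.
evals = [e for e in state["log_history"] if "eval_f1" in e]
best = max(evals, key=lambda e: e["eval_f1"])
print(best["step"], best["eval_f1"])   # 15000 0.7947742682321812 (matches best_metric)
```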
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12b63819340b3d7edb21d08f611fa95a9314421ddb7bb063c3cba8c43ccdc949
+ size 5048