malexandersalazar committed
Commit bc77471 · verified · 1 parent: 296338a

Upload folder using huggingface_hub

Files changed (7):
  1. config.json +28 -0
  2. model.safetensors +3 -0
  3. optimizer.pt +3 -0
  4. rng_state.pth +3 -0
  5. scheduler.pt +3 -0
  6. trainer_state.json +561 -0
  7. training_args.bin +3 -0
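
The commit message says the checkpoint folder was pushed with huggingface_hub. For reference, a minimal upload sketch of that kind follows; the local folder path and the repo id are assumptions inferred from the checkpoint path recorded in trainer_state.json, not something this commit states.

# Upload sketch (hypothetical local path and repo id), using huggingface_hub's HfApi.upload_folder.
from huggingface_hub import HfApi

api = HfApi()  # authentication comes from `huggingface-cli login` or the HF_TOKEN env var
api.upload_folder(
    folder_path="data/models/xlm-roberta-large-cls-toxicity/config_5/checkpoint-28610",  # assumed checkpoint dir
    repo_id="malexandersalazar/xlm-roberta-large-cls-toxicity",  # assumed target repo
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)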
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "FacebookAI/xlm-roberta-large",
+   "architectures": [
+     "XLMRobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
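
The config describes the FacebookAI/xlm-roberta-large encoder (24 layers, hidden size 1024, 16 attention heads, 250,002-token vocabulary) with an XLMRobertaForSequenceClassification head; since no num_labels or id2label mapping is stored, transformers will default the head to two labels. A loading sketch, assuming a hypothetical repo id for this checkpoint and reusing the base model's tokenizer (no tokenizer files are part of this commit):

# Loading sketch; the repo id is an assumption, not taken from this diff.
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

repo_id = "malexandersalazar/xlm-roberta-large-cls-toxicity"  # hypothetical

config = AutoConfig.from_pretrained(repo_id)
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # xlm-roberta 1024 24

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-large")  # base tokenizer, unchanged by fine-tuning
model = AutoModelForSequenceClassification.from_pretrained(repo_id)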
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0353fad2ef29b49f1308e7a9a870616fa310ddee171204b465189d94848aa6f8
+ size 2239618672
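
The three-line bodies for model.safetensors and the other binary files are Git LFS pointer files (spec version, sha256 object id, byte size); the actual ~2.24 GB weights live in LFS storage and are resolved on download. A download sketch with huggingface_hub, again assuming a hypothetical repo id:

# hf_hub_download resolves the LFS pointer and returns a local cache path to the real file.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="malexandersalazar/xlm-roberta-large-cls-toxicity",  # hypothetical
    filename="model.safetensors",
)
print(weights_path)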
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed9f4bff3d0768c91f785e05335294b516bab85a8890ed272d375074d5bee6a8
+ size 4479472785
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a50e9cb5ffe6b61fcaacf8a8b8825c880a87fdde4b3b810a440b76998c770c95
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fe56ca14f7fb09ab9f2892706755ed252c9eb9cedd4a2e20dc6c5662b347f21
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,561 @@
+ {
+   "best_metric": 0.9842267397484589,
+   "best_model_checkpoint": "/home/alexanders/Workspaces/malexandersalazar/xlm-roberta-large-cls-toxicity/data/models/xlm-roberta-large-cls-toxicity/config_5/checkpoint-28610",
+   "epoch": 9.999737922599808,
+   "eval_steps": 500,
+   "global_step": 28610,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.1747182667947934,
+       "grad_norm": 12.17603588104248,
+       "learning_rate": 6.934153502027121e-07,
+       "loss": 0.7119,
+       "step": 500
+     },
+     {
+       "epoch": 0.3494365335895868,
+       "grad_norm": 13.519432067871094,
+       "learning_rate": 1.3910247448622957e-06,
+       "loss": 0.3407,
+       "step": 1000
+     },
+     {
+       "epoch": 0.5241548003843802,
+       "grad_norm": 11.993674278259277,
+       "learning_rate": 2.090032154340836e-06,
+       "loss": 0.1725,
+       "step": 1500
+     },
+     {
+       "epoch": 0.6988730671791736,
+       "grad_norm": 13.306364059448242,
+       "learning_rate": 2.789039563819377e-06,
+       "loss": 0.1498,
+       "step": 2000
+     },
+     {
+       "epoch": 0.8735913339739669,
+       "grad_norm": 13.724279403686523,
+       "learning_rate": 3.488046973297917e-06,
+       "loss": 0.1368,
+       "step": 2500
+     },
+     {
+       "epoch": 0.9997379225998078,
+       "eval_accuracy": 0.9578529000698812,
+       "eval_f1": 0.9580195344026164,
+       "eval_loss": 0.14260601997375488,
+       "eval_precision": 0.9585943807196626,
+       "eval_recall": 0.9578529000698812,
+       "eval_runtime": 68.6764,
+       "eval_samples_per_second": 333.389,
+       "eval_steps_per_second": 20.837,
+       "step": 2861
+     },
+     {
+       "epoch": 1.0485716781689525,
+       "grad_norm": 15.129600524902344,
+       "learning_rate": 4.187054382776457e-06,
+       "loss": 0.1225,
+       "step": 3000
+     },
+     {
+       "epoch": 1.223289944963746,
+       "grad_norm": 18.516948699951172,
+       "learning_rate": 4.884663777436041e-06,
+       "loss": 0.111,
+       "step": 3500
+     },
+     {
+       "epoch": 1.3980082117585393,
+       "grad_norm": 3.170327663421631,
+       "learning_rate": 5.583671186914582e-06,
+       "loss": 0.109,
+       "step": 4000
+     },
+     {
+       "epoch": 1.5727264785533328,
+       "grad_norm": 9.642043113708496,
+       "learning_rate": 6.281280581574165e-06,
+       "loss": 0.1045,
+       "step": 4500
+     },
+     {
+       "epoch": 1.747444745348126,
+       "grad_norm": 12.047298431396484,
+       "learning_rate": 6.980287991052706e-06,
+       "loss": 0.0981,
+       "step": 5000
+     },
+     {
+       "epoch": 1.9221630121429194,
+       "grad_norm": 11.591704368591309,
+       "learning_rate": 7.679295400531246e-06,
+       "loss": 0.0983,
+       "step": 5500
+     },
+     {
+       "epoch": 1.9997379225998078,
+       "eval_accuracy": 0.9699074074074074,
+       "eval_f1": 0.9698689694737291,
+       "eval_loss": 0.09155124425888062,
+       "eval_precision": 0.9698681669937823,
+       "eval_recall": 0.9699074074074074,
+       "eval_runtime": 68.6299,
+       "eval_samples_per_second": 333.616,
+       "eval_steps_per_second": 20.851,
+       "step": 5722
+     },
+     {
+       "epoch": 2.097143356337905,
+       "grad_norm": 7.699872970581055,
+       "learning_rate": 8.378302810009785e-06,
+       "loss": 0.0818,
+       "step": 6000
+     },
+     {
+       "epoch": 2.2718616231326987,
+       "grad_norm": 2.9679672718048096,
+       "learning_rate": 9.077310219488328e-06,
+       "loss": 0.078,
+       "step": 6500
+     },
+     {
+       "epoch": 2.446579889927492,
+       "grad_norm": 5.794170379638672,
+       "learning_rate": 9.776317628966868e-06,
+       "loss": 0.0769,
+       "step": 7000
+     },
+     {
+       "epoch": 2.6212981567222853,
+       "grad_norm": 7.152242183685303,
+       "learning_rate": 9.842009600596542e-06,
+       "loss": 0.0719,
+       "step": 7500
+     },
+     {
+       "epoch": 2.7960164235170786,
+       "grad_norm": 3.8340728282928467,
+       "learning_rate": 9.608985412685837e-06,
+       "loss": 0.0708,
+       "step": 8000
+     },
+     {
+       "epoch": 2.970734690311872,
+       "grad_norm": 13.128349304199219,
+       "learning_rate": 9.375961224775132e-06,
+       "loss": 0.0748,
+       "step": 8500
+     },
+     {
+       "epoch": 2.9997379225998078,
+       "eval_accuracy": 0.9731394129979035,
+       "eval_f1": 0.9731494346588756,
+       "eval_loss": 0.09937231242656708,
+       "eval_precision": 0.9731628120366744,
+       "eval_recall": 0.9731394129979035,
+       "eval_runtime": 68.6513,
+       "eval_samples_per_second": 333.512,
+       "eval_steps_per_second": 20.844,
+       "step": 8583
+     },
+     {
+       "epoch": 3.145715034506858,
+       "grad_norm": 20.52392578125,
+       "learning_rate": 9.142937036864427e-06,
+       "loss": 0.0504,
+       "step": 9000
+     },
+     {
+       "epoch": 3.320433301301651,
+       "grad_norm": 33.67174530029297,
+       "learning_rate": 8.909912848953722e-06,
+       "loss": 0.049,
+       "step": 9500
+     },
+     {
+       "epoch": 3.4951515680964444,
+       "grad_norm": 0.8490325212478638,
+       "learning_rate": 8.676888661043017e-06,
+       "loss": 0.0464,
+       "step": 10000
+     },
+     {
+       "epoch": 3.6698698348912377,
+       "grad_norm": 13.017010688781738,
+       "learning_rate": 8.443864473132312e-06,
+       "loss": 0.0463,
+       "step": 10500
+     },
+     {
+       "epoch": 3.844588101686031,
+       "grad_norm": 0.6549157500267029,
+       "learning_rate": 8.210840285221606e-06,
+       "loss": 0.049,
+       "step": 11000
+     },
+     {
+       "epoch": 3.9997379225998078,
+       "eval_accuracy": 0.9785552061495457,
+       "eval_f1": 0.9785386610066998,
+       "eval_loss": 0.0886269360780716,
+       "eval_precision": 0.9785361579407956,
+       "eval_recall": 0.9785552061495457,
+       "eval_runtime": 68.6459,
+       "eval_samples_per_second": 333.538,
+       "eval_steps_per_second": 20.846,
+       "step": 11444
+     },
+     {
+       "epoch": 4.019568445881017,
+       "grad_norm": 40.50947952270508,
+       "learning_rate": 7.977816097310901e-06,
+       "loss": 0.0454,
+       "step": 11500
+     },
+     {
+       "epoch": 4.19428671267581,
+       "grad_norm": 1.1528542041778564,
+       "learning_rate": 7.744791909400196e-06,
+       "loss": 0.029,
+       "step": 12000
+     },
+     {
+       "epoch": 4.369004979470604,
+       "grad_norm": 42.926326751708984,
+       "learning_rate": 7.512233769865313e-06,
+       "loss": 0.0313,
+       "step": 12500
+     },
+     {
+       "epoch": 4.543723246265397,
+       "grad_norm": 13.328317642211914,
+       "learning_rate": 7.2792095819546074e-06,
+       "loss": 0.0326,
+       "step": 13000
+     },
+     {
+       "epoch": 4.71844151306019,
+       "grad_norm": 0.08160242438316345,
+       "learning_rate": 7.0461853940439014e-06,
+       "loss": 0.0293,
+       "step": 13500
+     },
+     {
+       "epoch": 4.893159779854984,
+       "grad_norm": 21.219942092895508,
+       "learning_rate": 6.813161206133197e-06,
+       "loss": 0.0281,
+       "step": 14000
+     },
+     {
+       "epoch": 4.999737922599808,
+       "eval_accuracy": 0.9791229909154437,
+       "eval_f1": 0.9791175700337718,
+       "eval_loss": 0.12131603062152863,
+       "eval_precision": 0.9791137606071209,
+       "eval_recall": 0.9791229909154437,
+       "eval_runtime": 68.6286,
+       "eval_samples_per_second": 333.622,
+       "eval_steps_per_second": 20.851,
+       "step": 14305
+     },
+     {
+       "epoch": 5.068140124049969,
+       "grad_norm": 8.010854721069336,
+       "learning_rate": 6.580137018222492e-06,
+       "loss": 0.0292,
+       "step": 14500
+     },
+     {
+       "epoch": 5.242858390844763,
+       "grad_norm": 4.4188337326049805,
+       "learning_rate": 6.347112830311787e-06,
+       "loss": 0.0215,
+       "step": 15000
+     },
+     {
+       "epoch": 5.4175766576395565,
+       "grad_norm": 65.55230712890625,
+       "learning_rate": 6.1145546907769025e-06,
+       "loss": 0.021,
+       "step": 15500
+     },
+     {
+       "epoch": 5.592294924434349,
+       "grad_norm": 0.20221485197544098,
+       "learning_rate": 5.881530502866198e-06,
+       "loss": 0.0225,
+       "step": 16000
+     },
+     {
+       "epoch": 5.767013191229143,
+       "grad_norm": 65.68180084228516,
+       "learning_rate": 5.648506314955493e-06,
+       "loss": 0.0199,
+       "step": 16500
+     },
+     {
+       "epoch": 5.941731458023936,
+       "grad_norm": 0.33296695351600647,
+       "learning_rate": 5.415482127044788e-06,
+       "loss": 0.0211,
+       "step": 17000
+     },
+     {
+       "epoch": 5.999737922599808,
+       "eval_accuracy": 0.9804769392033543,
+       "eval_f1": 0.98049806714974,
+       "eval_loss": 0.10157082974910736,
+       "eval_precision": 0.9805481970936862,
+       "eval_recall": 0.9804769392033543,
+       "eval_runtime": 68.666,
+       "eval_samples_per_second": 333.44,
+       "eval_steps_per_second": 20.84,
+       "step": 17166
+     },
+     {
+       "epoch": 6.116711802218922,
+       "grad_norm": 0.5054177045822144,
+       "learning_rate": 5.182457939134083e-06,
+       "loss": 0.0149,
+       "step": 17500
+     },
+     {
+       "epoch": 6.291430069013716,
+       "grad_norm": 0.08331651240587234,
+       "learning_rate": 4.9494337512233775e-06,
+       "loss": 0.0141,
+       "step": 18000
+     },
+     {
+       "epoch": 6.466148335808509,
+       "grad_norm": 0.04097016155719757,
+       "learning_rate": 4.716409563312672e-06,
+       "loss": 0.0107,
+       "step": 18500
+     },
+     {
+       "epoch": 6.640866602603302,
+       "grad_norm": 0.009394422173500061,
+       "learning_rate": 4.483385375401967e-06,
+       "loss": 0.0142,
+       "step": 19000
+     },
+     {
+       "epoch": 6.815584869398096,
+       "grad_norm": 55.671607971191406,
+       "learning_rate": 4.250361187491262e-06,
+       "loss": 0.0112,
+       "step": 19500
+     },
+     {
+       "epoch": 6.990303136192889,
+       "grad_norm": 37.646053314208984,
+       "learning_rate": 4.018269096332199e-06,
+       "loss": 0.0143,
+       "step": 20000
+     },
+     {
+       "epoch": 6.999737922599808,
+       "eval_accuracy": 0.9821366177498253,
+       "eval_f1": 0.9821391542473246,
+       "eval_loss": 0.12774688005447388,
+       "eval_precision": 0.9821421791956497,
+       "eval_recall": 0.9821366177498253,
+       "eval_runtime": 68.6239,
+       "eval_samples_per_second": 333.645,
+       "eval_steps_per_second": 20.853,
+       "step": 20027
+     },
+     {
+       "epoch": 7.165283480387875,
+       "grad_norm": 4.926445007324219,
+       "learning_rate": 3.7852449084214942e-06,
+       "loss": 0.0079,
+       "step": 20500
+     },
+     {
+       "epoch": 7.340001747182668,
+       "grad_norm": 8.786434173583984,
+       "learning_rate": 3.5522207205107895e-06,
+       "loss": 0.0089,
+       "step": 21000
+     },
+     {
+       "epoch": 7.5147200139774615,
+       "grad_norm": 0.1356939673423767,
+       "learning_rate": 3.319196532600084e-06,
+       "loss": 0.0074,
+       "step": 21500
+     },
+     {
+       "epoch": 7.689438280772254,
+       "grad_norm": 16.782020568847656,
+       "learning_rate": 3.0861723446893788e-06,
+       "loss": 0.0096,
+       "step": 22000
+     },
+     {
+       "epoch": 7.864156547567048,
+       "grad_norm": 0.0004071469884365797,
+       "learning_rate": 2.853148156778674e-06,
+       "loss": 0.007,
+       "step": 22500
+     },
+     {
+       "epoch": 7.999737922599808,
+       "eval_accuracy": 0.9819619147449337,
+       "eval_f1": 0.9819917414068494,
+       "eval_loss": 0.14060455560684204,
+       "eval_precision": 0.9820904217450387,
+       "eval_recall": 0.9819619147449337,
+       "eval_runtime": 68.6348,
+       "eval_samples_per_second": 333.592,
+       "eval_steps_per_second": 20.849,
+       "step": 22888
+     },
+     {
+       "epoch": 8.039136891762034,
+       "grad_norm": 0.003943814896047115,
+       "learning_rate": 2.6201239688679685e-06,
+       "loss": 0.008,
+       "step": 23000
+     },
+     {
+       "epoch": 8.213855158556827,
+       "grad_norm": 12.356912612915039,
+       "learning_rate": 2.3870997809572637e-06,
+       "loss": 0.0051,
+       "step": 23500
+     },
+     {
+       "epoch": 8.38857342535162,
+       "grad_norm": 0.018336663022637367,
+       "learning_rate": 2.1540755930465586e-06,
+       "loss": 0.0053,
+       "step": 24000
+     },
+     {
+       "epoch": 8.563291692146414,
+       "grad_norm": 0.031037895008921623,
+       "learning_rate": 1.9210514051358534e-06,
+       "loss": 0.0048,
+       "step": 24500
+     },
+     {
+       "epoch": 8.738009958941207,
+       "grad_norm": 0.0018096828134730458,
+       "learning_rate": 1.6884932656009695e-06,
+       "loss": 0.0041,
+       "step": 25000
+     },
+     {
+       "epoch": 8.912728225736,
+       "grad_norm": 0.006106176879256964,
+       "learning_rate": 1.455935126066086e-06,
+       "loss": 0.0043,
+       "step": 25500
+     },
+     {
+       "epoch": 8.999737922599808,
+       "eval_accuracy": 0.9835779175401816,
+       "eval_f1": 0.9835697887213877,
+       "eval_loss": 0.15101368725299835,
+       "eval_precision": 0.9835675135482098,
+       "eval_recall": 0.9835779175401816,
+       "eval_runtime": 68.619,
+       "eval_samples_per_second": 333.668,
+       "eval_steps_per_second": 20.854,
+       "step": 25749
+     },
+     {
+       "epoch": 9.087708569930987,
+       "grad_norm": 0.00021469616331160069,
+       "learning_rate": 1.2229109381553806e-06,
+       "loss": 0.0036,
+       "step": 26000
+     },
+     {
+       "epoch": 9.26242683672578,
+       "grad_norm": 0.008365228772163391,
+       "learning_rate": 9.898867502446755e-07,
+       "loss": 0.0027,
+       "step": 26500
+     },
+     {
+       "epoch": 9.437145103520573,
+       "grad_norm": 0.0006792581407353282,
+       "learning_rate": 7.568625623339704e-07,
+       "loss": 0.0028,
+       "step": 27000
+     },
+     {
+       "epoch": 9.611863370315366,
+       "grad_norm": 0.004307614639401436,
+       "learning_rate": 5.238383744232652e-07,
+       "loss": 0.0021,
+       "step": 27500
+     },
+     {
+       "epoch": 9.78658163711016,
+       "grad_norm": 0.0005787264672107995,
+       "learning_rate": 2.9081418651256006e-07,
+       "loss": 0.0031,
+       "step": 28000
+     },
+     {
+       "epoch": 9.961299903904953,
+       "grad_norm": 0.008150073699653149,
+       "learning_rate": 5.7789998601854876e-08,
+       "loss": 0.0019,
+       "step": 28500
+     },
+     {
+       "epoch": 9.999737922599808,
+       "eval_accuracy": 0.9842330538085255,
+       "eval_f1": 0.9842291652151357,
+       "eval_loss": 0.14871150255203247,
+       "eval_precision": 0.9842267397484589,
+       "eval_recall": 0.9842330538085255,
+       "eval_runtime": 68.5176,
+       "eval_samples_per_second": 334.162,
+       "eval_steps_per_second": 20.885,
+       "step": 28610
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 28610,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.001
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 1
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.7066719997407396e+18,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
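
trainer_state.json records the Trainer's state at this checkpoint: 28,610 steps over 10 epochs at train batch size 16, with an evaluation logged at the end of each epoch. Eval F1 climbs from about 0.958 after epoch 1 to 0.9842 after epoch 10, and the stored best_metric equals the final epoch's eval_precision, which suggests precision was the model-selection metric; training stopped at max_steps with the EarlyStoppingCallback patience counter at 1. A small sketch for pulling the per-epoch eval rows out of the file (assumes a local copy of trainer_state.json):

# Summarize the evaluation entries recorded in log_history.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_f1" in entry:  # eval entries carry eval_* keys; training entries carry loss/learning_rate
        print(f"step {entry['step']:>6}  eval_loss={entry['eval_loss']:.4f}  eval_f1={entry['eval_f1']:.4f}")

print("best_metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])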
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f628c5e446ae54cdc2d20f466a5d879e7f13d5508ffaa08829c41eea5d0fde3
+ size 5560
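
training_args.bin is the pickled TrainingArguments object that the Trainer saves alongside each checkpoint. A sketch for inspecting a local copy; unpickling executes code, so only do this for files you trust, and transformers must be importable so the class can be reconstructed:

# Inspect the saved TrainingArguments (requires torch and transformers installed).
import torch

args = torch.load("training_args.bin", weights_only=False)  # weights_only=False: this is a pickled Python object, not tensors
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)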