exdysa TimeRobber committed on
Commit
e7df351
0 Parent(s):

Duplicate from bigscience/mt0-small

Browse files

Co-authored-by: Thomas Wang <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.npy filter=lfs diff=lfs merge=lfs -text
14
+ *.npz filter=lfs diff=lfs merge=lfs -text
15
+ *.onnx filter=lfs diff=lfs merge=lfs -text
16
+ *.ot filter=lfs diff=lfs merge=lfs -text
17
+ *.parquet filter=lfs diff=lfs merge=lfs -text
18
+ *.pb filter=lfs diff=lfs merge=lfs -text
19
+ *.pickle filter=lfs diff=lfs merge=lfs -text
20
+ *.pkl filter=lfs diff=lfs merge=lfs -text
21
+ *.pt filter=lfs diff=lfs merge=lfs -text
22
+ *.pth filter=lfs diff=lfs merge=lfs -text
23
+ *.rar filter=lfs diff=lfs merge=lfs -text
24
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
25
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
26
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
27
+ *.tflite filter=lfs diff=lfs merge=lfs -text
28
+ *.tgz filter=lfs diff=lfs merge=lfs -text
29
+ *.wasm filter=lfs diff=lfs merge=lfs -text
30
+ *.xz filter=lfs diff=lfs merge=lfs -text
31
+ *.zip filter=lfs diff=lfs merge=lfs -text
32
+ *.zst filter=lfs diff=lfs merge=lfs -text
33
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
34
+ onnx/tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,902 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ datasets:
3
+ - bigscience/xP3
4
+ - mc4
5
+ license: apache-2.0
6
+ language:
7
+ - af
8
+ - am
9
+ - ar
10
+ - az
11
+ - be
12
+ - bg
13
+ - bn
14
+ - ca
15
+ - ceb
16
+ - co
17
+ - cs
18
+ - cy
19
+ - da
20
+ - de
21
+ - el
22
+ - en
23
+ - eo
24
+ - es
25
+ - et
26
+ - eu
27
+ - fa
28
+ - fi
29
+ - fil
30
+ - fr
31
+ - fy
32
+ - ga
33
+ - gd
34
+ - gl
35
+ - gu
36
+ - ha
37
+ - haw
38
+ - hi
39
+ - hmn
40
+ - ht
41
+ - hu
42
+ - hy
43
+ - ig
44
+ - is
45
+ - it
46
+ - iw
47
+ - ja
48
+ - jv
49
+ - ka
50
+ - kk
51
+ - km
52
+ - kn
53
+ - ko
54
+ - ku
55
+ - ky
56
+ - la
57
+ - lb
58
+ - lo
59
+ - lt
60
+ - lv
61
+ - mg
62
+ - mi
63
+ - mk
64
+ - ml
65
+ - mn
66
+ - mr
67
+ - ms
68
+ - mt
69
+ - my
70
+ - ne
71
+ - nl
72
+ - no
73
+ - ny
74
+ - pa
75
+ - pl
76
+ - ps
77
+ - pt
78
+ - ro
79
+ - ru
80
+ - sd
81
+ - si
82
+ - sk
83
+ - sl
84
+ - sm
85
+ - sn
86
+ - so
87
+ - sq
88
+ - sr
89
+ - st
90
+ - su
91
+ - sv
92
+ - sw
93
+ - ta
94
+ - te
95
+ - tg
96
+ - th
97
+ - tr
98
+ - uk
99
+ - und
100
+ - ur
101
+ - uz
102
+ - vi
103
+ - xh
104
+ - yi
105
+ - yo
106
+ - zh
107
+ - zu
108
+ pipeline_tag: text2text-generation
109
+ widget:
110
+ - text: "一个传奇的开端,一个不灭的神话,这不仅仅是一部电影,而是作为一个走进新时代的标签,永远彪炳史册。Would you rate the previous review as positive, neutral or negative?"
111
+ example_title: "zh-en sentiment"
112
+ - text: "一个传奇的开端,一个不灭的神话,这不仅仅是一部电影,而是作为一个走进新时代的标签,永远彪炳史册。你认为这句话的立场是赞扬、中立还是批评?"
113
+ example_title: "zh-zh sentiment"
114
+ - text: "Suggest at least five related search terms to \"Mạng neural nhân tạo\"."
115
+ example_title: "vi-en query"
116
+ - text: "Proposez au moins cinq mots clés concernant «Réseau de neurones artificiels»."
117
+ example_title: "fr-fr query"
118
+ - text: "Explain in a sentence in Telugu what is backpropagation in neural networks."
119
+ example_title: "te-en qa"
120
+ - text: "Why is the sky blue?"
121
+ example_title: "en-en qa"
122
+ - text: "Write a fairy tale about a troll saving a princess from a dangerous dragon. The fairy tale is a masterpiece that has achieved praise worldwide and its moral is \"Heroes Come in All Shapes and Sizes\". Story (in Spanish):"
123
+ example_title: "es-en fable"
124
+ - text: "Write a fable about wood elves living in a forest that is suddenly invaded by ogres. The fable is a masterpiece that has achieved praise worldwide and its moral is \"Violence is the last refuge of the incompetent\". Fable (in Hindi):"
125
+ example_title: "hi-en fable"
126
+ model-index:
127
+ - name: mt0-small
128
+ results:
129
+ - task:
130
+ type: Coreference resolution
131
+ dataset:
132
+ type: winogrande
133
+ name: Winogrande XL (xl)
134
+ config: xl
135
+ split: validation
136
+ revision: a80f460359d1e9a67c006011c94de42a8759430c
137
+ metrics:
138
+ - type: Accuracy
139
+ value: 50.51
140
+ - task:
141
+ type: Coreference resolution
142
+ dataset:
143
+ type: Muennighoff/xwinograd
144
+ name: XWinograd (en)
145
+ config: en
146
+ split: test
147
+ revision: 9dd5ea5505fad86b7bedad667955577815300cee
148
+ metrics:
149
+ - type: Accuracy
150
+ value: 51.31
151
+ - task:
152
+ type: Coreference resolution
153
+ dataset:
154
+ type: Muennighoff/xwinograd
155
+ name: XWinograd (fr)
156
+ config: fr
157
+ split: test
158
+ revision: 9dd5ea5505fad86b7bedad667955577815300cee
159
+ metrics:
160
+ - type: Accuracy
161
+ value: 54.22
162
+ - task:
163
+ type: Coreference resolution
164
+ dataset:
165
+ type: Muennighoff/xwinograd
166
+ name: XWinograd (jp)
167
+ config: jp
168
+ split: test
169
+ revision: 9dd5ea5505fad86b7bedad667955577815300cee
170
+ metrics:
171
+ - type: Accuracy
172
+ value: 52.45
173
+ - task:
174
+ type: Coreference resolution
175
+ dataset:
176
+ type: Muennighoff/xwinograd
177
+ name: XWinograd (pt)
178
+ config: pt
179
+ split: test
180
+ revision: 9dd5ea5505fad86b7bedad667955577815300cee
181
+ metrics:
182
+ - type: Accuracy
183
+ value: 51.71
184
+ - task:
185
+ type: Coreference resolution
186
+ dataset:
187
+ type: Muennighoff/xwinograd
188
+ name: XWinograd (ru)
189
+ config: ru
190
+ split: test
191
+ revision: 9dd5ea5505fad86b7bedad667955577815300cee
192
+ metrics:
193
+ - type: Accuracy
194
+ value: 54.29
195
+ - task:
196
+ type: Coreference resolution
197
+ dataset:
198
+ type: Muennighoff/xwinograd
199
+ name: XWinograd (zh)
200
+ config: zh
201
+ split: test
202
+ revision: 9dd5ea5505fad86b7bedad667955577815300cee
203
+ metrics:
204
+ - type: Accuracy
205
+ value: 54.17
206
+ - task:
207
+ type: Natural language inference
208
+ dataset:
209
+ type: anli
210
+ name: ANLI (r1)
211
+ config: r1
212
+ split: validation
213
+ revision: 9dbd830a06fea8b1c49d6e5ef2004a08d9f45094
214
+ metrics:
215
+ - type: Accuracy
216
+ value: 34.7
217
+ - task:
218
+ type: Natural language inference
219
+ dataset:
220
+ type: anli
221
+ name: ANLI (r2)
222
+ config: r2
223
+ split: validation
224
+ revision: 9dbd830a06fea8b1c49d6e5ef2004a08d9f45094
225
+ metrics:
226
+ - type: Accuracy
227
+ value: 34.0
228
+ - task:
229
+ type: Natural language inference
230
+ dataset:
231
+ type: anli
232
+ name: ANLI (r3)
233
+ config: r3
234
+ split: validation
235
+ revision: 9dbd830a06fea8b1c49d6e5ef2004a08d9f45094
236
+ metrics:
237
+ - type: Accuracy
238
+ value: 33.83
239
+ - task:
240
+ type: Natural language inference
241
+ dataset:
242
+ type: super_glue
243
+ name: SuperGLUE (cb)
244
+ config: cb
245
+ split: validation
246
+ revision: 9e12063561e7e6c79099feb6d5a493142584e9e2
247
+ metrics:
248
+ - type: Accuracy
249
+ value: 50.0
250
+ - task:
251
+ type: Natural language inference
252
+ dataset:
253
+ type: super_glue
254
+ name: SuperGLUE (rte)
255
+ config: rte
256
+ split: validation
257
+ revision: 9e12063561e7e6c79099feb6d5a493142584e9e2
258
+ metrics:
259
+ - type: Accuracy
260
+ value: 61.01
261
+ - task:
262
+ type: Natural language inference
263
+ dataset:
264
+ type: xnli
265
+ name: XNLI (ar)
266
+ config: ar
267
+ split: validation
268
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
269
+ metrics:
270
+ - type: Accuracy
271
+ value: 37.43
272
+ - task:
273
+ type: Natural language inference
274
+ dataset:
275
+ type: xnli
276
+ name: XNLI (bg)
277
+ config: bg
278
+ split: validation
279
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
280
+ metrics:
281
+ - type: Accuracy
282
+ value: 37.55
283
+ - task:
284
+ type: Natural language inference
285
+ dataset:
286
+ type: xnli
287
+ name: XNLI (de)
288
+ config: de
289
+ split: validation
290
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
291
+ metrics:
292
+ - type: Accuracy
293
+ value: 35.78
294
+ - task:
295
+ type: Natural language inference
296
+ dataset:
297
+ type: xnli
298
+ name: XNLI (el)
299
+ config: el
300
+ split: validation
301
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
302
+ metrics:
303
+ - type: Accuracy
304
+ value: 37.43
305
+ - task:
306
+ type: Natural language inference
307
+ dataset:
308
+ type: xnli
309
+ name: XNLI (en)
310
+ config: en
311
+ split: validation
312
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
313
+ metrics:
314
+ - type: Accuracy
315
+ value: 38.47
316
+ - task:
317
+ type: Natural language inference
318
+ dataset:
319
+ type: xnli
320
+ name: XNLI (es)
321
+ config: es
322
+ split: validation
323
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
324
+ metrics:
325
+ - type: Accuracy
326
+ value: 36.75
327
+ - task:
328
+ type: Natural language inference
329
+ dataset:
330
+ type: xnli
331
+ name: XNLI (fr)
332
+ config: fr
333
+ split: validation
334
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
335
+ metrics:
336
+ - type: Accuracy
337
+ value: 37.15
338
+ - task:
339
+ type: Natural language inference
340
+ dataset:
341
+ type: xnli
342
+ name: XNLI (hi)
343
+ config: hi
344
+ split: validation
345
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
346
+ metrics:
347
+ - type: Accuracy
348
+ value: 35.38
349
+ - task:
350
+ type: Natural language inference
351
+ dataset:
352
+ type: xnli
353
+ name: XNLI (ru)
354
+ config: ru
355
+ split: validation
356
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
357
+ metrics:
358
+ - type: Accuracy
359
+ value: 37.35
360
+ - task:
361
+ type: Natural language inference
362
+ dataset:
363
+ type: xnli
364
+ name: XNLI (sw)
365
+ config: sw
366
+ split: validation
367
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
368
+ metrics:
369
+ - type: Accuracy
370
+ value: 35.18
371
+ - task:
372
+ type: Natural language inference
373
+ dataset:
374
+ type: xnli
375
+ name: XNLI (th)
376
+ config: th
377
+ split: validation
378
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
379
+ metrics:
380
+ - type: Accuracy
381
+ value: 37.55
382
+ - task:
383
+ type: Natural language inference
384
+ dataset:
385
+ type: xnli
386
+ name: XNLI (tr)
387
+ config: tr
388
+ split: validation
389
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
390
+ metrics:
391
+ - type: Accuracy
392
+ value: 36.51
393
+ - task:
394
+ type: Natural language inference
395
+ dataset:
396
+ type: xnli
397
+ name: XNLI (ur)
398
+ config: ur
399
+ split: validation
400
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
401
+ metrics:
402
+ - type: Accuracy
403
+ value: 35.78
404
+ - task:
405
+ type: Natural language inference
406
+ dataset:
407
+ type: xnli
408
+ name: XNLI (vi)
409
+ config: vi
410
+ split: validation
411
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
412
+ metrics:
413
+ - type: Accuracy
414
+ value: 36.95
415
+ - task:
416
+ type: Natural language inference
417
+ dataset:
418
+ type: xnli
419
+ name: XNLI (zh)
420
+ config: zh
421
+ split: validation
422
+ revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16
423
+ metrics:
424
+ - type: Accuracy
425
+ value: 37.07
426
+ - task:
427
+ type: Sentence completion
428
+ dataset:
429
+ type: story_cloze
430
+ name: StoryCloze (2016)
431
+ config: "2016"
432
+ split: validation
433
+ revision: e724c6f8cdf7c7a2fb229d862226e15b023ee4db
434
+ metrics:
435
+ - type: Accuracy
436
+ value: 54.36
437
+ - task:
438
+ type: Sentence completion
439
+ dataset:
440
+ type: super_glue
441
+ name: SuperGLUE (copa)
442
+ config: copa
443
+ split: validation
444
+ revision: 9e12063561e7e6c79099feb6d5a493142584e9e2
445
+ metrics:
446
+ - type: Accuracy
447
+ value: 57.0
448
+ - task:
449
+ type: Sentence completion
450
+ dataset:
451
+ type: xcopa
452
+ name: XCOPA (et)
453
+ config: et
454
+ split: validation
455
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
456
+ metrics:
457
+ - type: Accuracy
458
+ value: 57.0
459
+ - task:
460
+ type: Sentence completion
461
+ dataset:
462
+ type: xcopa
463
+ name: XCOPA (ht)
464
+ config: ht
465
+ split: validation
466
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
467
+ metrics:
468
+ - type: Accuracy
469
+ value: 60.0
470
+ - task:
471
+ type: Sentence completion
472
+ dataset:
473
+ type: xcopa
474
+ name: XCOPA (id)
475
+ config: id
476
+ split: validation
477
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
478
+ metrics:
479
+ - type: Accuracy
480
+ value: 59.0
481
+ - task:
482
+ type: Sentence completion
483
+ dataset:
484
+ type: xcopa
485
+ name: XCOPA (it)
486
+ config: it
487
+ split: validation
488
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
489
+ metrics:
490
+ - type: Accuracy
491
+ value: 59.0
492
+ - task:
493
+ type: Sentence completion
494
+ dataset:
495
+ type: xcopa
496
+ name: XCOPA (qu)
497
+ config: qu
498
+ split: validation
499
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
500
+ metrics:
501
+ - type: Accuracy
502
+ value: 54.0
503
+ - task:
504
+ type: Sentence completion
505
+ dataset:
506
+ type: xcopa
507
+ name: XCOPA (sw)
508
+ config: sw
509
+ split: validation
510
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
511
+ metrics:
512
+ - type: Accuracy
513
+ value: 55.0
514
+ - task:
515
+ type: Sentence completion
516
+ dataset:
517
+ type: xcopa
518
+ name: XCOPA (ta)
519
+ config: ta
520
+ split: validation
521
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
522
+ metrics:
523
+ - type: Accuracy
524
+ value: 59.0
525
+ - task:
526
+ type: Sentence completion
527
+ dataset:
528
+ type: xcopa
529
+ name: XCOPA (th)
530
+ config: th
531
+ split: validation
532
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
533
+ metrics:
534
+ - type: Accuracy
535
+ value: 65.0
536
+ - task:
537
+ type: Sentence completion
538
+ dataset:
539
+ type: xcopa
540
+ name: XCOPA (tr)
541
+ config: tr
542
+ split: validation
543
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
544
+ metrics:
545
+ - type: Accuracy
546
+ value: 58.0
547
+ - task:
548
+ type: Sentence completion
549
+ dataset:
550
+ type: xcopa
551
+ name: XCOPA (vi)
552
+ config: vi
553
+ split: validation
554
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
555
+ metrics:
556
+ - type: Accuracy
557
+ value: 54.0
558
+ - task:
559
+ type: Sentence completion
560
+ dataset:
561
+ type: xcopa
562
+ name: XCOPA (zh)
563
+ config: zh
564
+ split: validation
565
+ revision: 37f73c60fb123111fa5af5f9b705d0b3747fd187
566
+ metrics:
567
+ - type: Accuracy
568
+ value: 56.0
569
+ - task:
570
+ type: Sentence completion
571
+ dataset:
572
+ type: Muennighoff/xstory_cloze
573
+ name: XStoryCloze (ar)
574
+ config: ar
575
+ split: validation
576
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
577
+ metrics:
578
+ - type: Accuracy
579
+ value: 48.78
580
+ - task:
581
+ type: Sentence completion
582
+ dataset:
583
+ type: Muennighoff/xstory_cloze
584
+ name: XStoryCloze (es)
585
+ config: es
586
+ split: validation
587
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
588
+ metrics:
589
+ - type: Accuracy
590
+ value: 55.2
591
+ - task:
592
+ type: Sentence completion
593
+ dataset:
594
+ type: Muennighoff/xstory_cloze
595
+ name: XStoryCloze (eu)
596
+ config: eu
597
+ split: validation
598
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
599
+ metrics:
600
+ - type: Accuracy
601
+ value: 52.95
602
+ - task:
603
+ type: Sentence completion
604
+ dataset:
605
+ type: Muennighoff/xstory_cloze
606
+ name: XStoryCloze (hi)
607
+ config: hi
608
+ split: validation
609
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
610
+ metrics:
611
+ - type: Accuracy
612
+ value: 53.01
613
+ - task:
614
+ type: Sentence completion
615
+ dataset:
616
+ type: Muennighoff/xstory_cloze
617
+ name: XStoryCloze (id)
618
+ config: id
619
+ split: validation
620
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
621
+ metrics:
622
+ - type: Accuracy
623
+ value: 53.08
624
+ - task:
625
+ type: Sentence completion
626
+ dataset:
627
+ type: Muennighoff/xstory_cloze
628
+ name: XStoryCloze (my)
629
+ config: my
630
+ split: validation
631
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
632
+ metrics:
633
+ - type: Accuracy
634
+ value: 51.82
635
+ - task:
636
+ type: Sentence completion
637
+ dataset:
638
+ type: Muennighoff/xstory_cloze
639
+ name: XStoryCloze (ru)
640
+ config: ru
641
+ split: validation
642
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
643
+ metrics:
644
+ - type: Accuracy
645
+ value: 49.7
646
+ - task:
647
+ type: Sentence completion
648
+ dataset:
649
+ type: Muennighoff/xstory_cloze
650
+ name: XStoryCloze (sw)
651
+ config: sw
652
+ split: validation
653
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
654
+ metrics:
655
+ - type: Accuracy
656
+ value: 54.53
657
+ - task:
658
+ type: Sentence completion
659
+ dataset:
660
+ type: Muennighoff/xstory_cloze
661
+ name: XStoryCloze (te)
662
+ config: te
663
+ split: validation
664
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
665
+ metrics:
666
+ - type: Accuracy
667
+ value: 53.67
668
+ - task:
669
+ type: Sentence completion
670
+ dataset:
671
+ type: Muennighoff/xstory_cloze
672
+ name: XStoryCloze (zh)
673
+ config: zh
674
+ split: validation
675
+ revision: 8bb76e594b68147f1a430e86829d07189622b90d
676
+ metrics:
677
+ - type: Accuracy
678
+ value: 57.78
679
+ ---
680
+
681
+ ![xmtf](https://github.com/bigscience-workshop/xmtf/blob/master/xmtf_banner.png?raw=true)
682
+
683
+ # Table of Contents
684
+
685
+ 1. [Model Summary](#model-summary)
686
+ 2. [Use](#use)
687
+ 3. [Limitations](#limitations)
688
+ 4. [Training](#training)
689
+ 5. [Evaluation](#evaluation)
690
+ 6. [Citation](#citation)
691
+
692
+ # Model Summary
693
+
694
+ > We present BLOOMZ & mT0, a family of models capable of following human instructions in dozens of languages zero-shot. We finetune BLOOM & mT5 pretrained multilingual language models on our crosslingual task mixture (xP3) and find our resulting models capable of crosslingual generalization to unseen tasks & languages.
695
+
696
+ - **Repository:** [bigscience-workshop/xmtf](https://github.com/bigscience-workshop/xmtf)
697
+ - **Paper:** [Crosslingual Generalization through Multitask Finetuning](https://arxiv.org/abs/2211.01786)
698
+ - **Point of Contact:** [Niklas Muennighoff](mailto:[email protected])
699
+ - **Languages:** Refer to [mc4](https://huggingface.co/datasets/mc4) for pretraining & [xP3](https://huggingface.co/datasets/bigscience/xP3) for finetuning language proportions. It understands both pretraining & finetuning languages.
700
+ - **BLOOMZ & mT0 Model Family:**
701
+
702
+ <div class="max-w-full overflow-auto">
703
+ <table>
704
+ <tr>
705
+ <th colspan="12">Multitask finetuned on <a style="font-weight:bold" href=https://huggingface.co/datasets/bigscience/xP3>xP3</a>. Recommended for prompting in English.</th>
706
+ </tr>
707
+ <tr>
708
+ <td>Parameters</td>
709
+ <td>300M</td>
710
+ <td>580M</td>
711
+ <td>1.2B</td>
712
+ <td>3.7B</td>
713
+ <td>13B</td>
714
+ <td>560M</td>
715
+ <td>1.1B</td>
716
+ <td>1.7B</td>
717
+ <td>3B</td>
718
+ <td>7.1B</td>
719
+ <td>176B</td>
720
+ </tr>
721
+ <tr>
722
+ <td>Finetuned Model</td>
723
+ <td><a href=https://huggingface.co/bigscience/mt0-small>mt0-small</a></td>
724
+ <td><a href=https://huggingface.co/bigscience/mt0-base>mt0-base</a></td>
725
+ <td><a href=https://huggingface.co/bigscience/mt0-large>mt0-large</a></td>
726
+ <td><a href=https://huggingface.co/bigscience/mt0-xl>mt0-xl</a></td>
727
+ <td><a href=https://huggingface.co/bigscience/mt0-xxl>mt0-xxl</a></td>
728
+ <td><a href=https://huggingface.co/bigscience/bloomz-560m>bloomz-560m</a></td>
729
+ <td><a href=https://huggingface.co/bigscience/bloomz-1b1>bloomz-1b1</a></td>
730
+ <td><a href=https://huggingface.co/bigscience/bloomz-1b7>bloomz-1b7</a></td>
731
+ <td><a href=https://huggingface.co/bigscience/bloomz-3b>bloomz-3b</a></td>
732
+ <td><a href=https://huggingface.co/bigscience/bloomz-7b1>bloomz-7b1</a></td>
733
+ <td><a href=https://huggingface.co/bigscience/bloomz>bloomz</a></td>
734
+ </tr>
736
+ <tr>
737
+ <th colspan="12">Multitask finetuned on <a style="font-weight:bold" href=https://huggingface.co/datasets/bigscience/xP3mt>xP3mt</a>. Recommended for prompting in non-English.</th>
738
+ </tr>
739
+ <tr>
740
+ <td>Finetuned Model</td>
741
+ <td></td>
742
+ <td></td>
743
+ <td></td>
744
+ <td></td>
745
+ <td><a href=https://huggingface.co/bigscience/mt0-xxl-mt>mt0-xxl-mt</a></td>
746
+ <td></td>
747
+ <td></td>
748
+ <td></td>
749
+ <td></td>
750
+ <td><a href=https://huggingface.co/bigscience/bloomz-7b1-mt>bloomz-7b1-mt</a></td>
751
+ <td><a href=https://huggingface.co/bigscience/bloomz-mt>bloomz-mt</a></td>
752
+ </tr>
753
+ <tr>
+ <th colspan="12">Multitask finetuned on <a style="font-weight:bold" href=https://huggingface.co/datasets/Muennighoff/P3>P3</a>. Released for research purposes only. Strictly inferior to above models!</th>
754
+ </tr>
755
+ <tr>
756
+ <td>Finetuned Model</td>
757
+ <td></td>
758
+ <td></td>
759
+ <td></td>
760
+ <td></td>
761
+ <td><a href=https://huggingface.co/bigscience/mt0-xxl-p3>mt0-xxl-p3</a></td>
762
+ <td></td>
763
+ <td></td>
764
+ <td></td>
765
+ <td></td>
766
+ <td><a href=https://huggingface.co/bigscience/bloomz-7b1-p3>bloomz-7b1-p3</a></td>
767
+ <td><a href=https://huggingface.co/bigscience/bloomz-p3>bloomz-p3</a></td>
768
+ </tr>
769
+ <tr>
+ <th colspan="12">Original pretrained checkpoints. Not recommended.</th>
+ </tr>
770
+ <tr>
771
+ <td>Pretrained Model</td>
772
+ <td><a href=https://huggingface.co/google/mt5-small>mt5-small</a></td>
773
+ <td><a href=https://huggingface.co/google/mt5-base>mt5-base</a></td>
774
+ <td><a href=https://huggingface.co/google/mt5-large>mt5-large</a></td>
775
+ <td><a href=https://huggingface.co/google/mt5-xl>mt5-xl</a></td>
776
+ <td><a href=https://huggingface.co/google/mt5-xxl>mt5-xxl</a></td>
777
+ <td><a href=https://huggingface.co/bigscience/bloom-560m>bloom-560m</a></td>
778
+ <td><a href=https://huggingface.co/bigscience/bloom-1b1>bloom-1b1</a></td>
779
+ <td><a href=https://huggingface.co/bigscience/bloom-1b7>bloom-1b7</a></td>
780
+ <td><a href=https://huggingface.co/bigscience/bloom-3b>bloom-3b</a></td>
781
+ <td><a href=https://huggingface.co/bigscience/bloom-7b1>bloom-7b1</a></td>
782
+ <td><a href=https://huggingface.co/bigscience/bloom>bloom</a></td>
783
+ </tr>
784
+ </table>
785
+ </div>
786
+
787
+
788
+ # Use
789
+
790
+ ## Intended use
791
+
792
+ We recommend using the model to perform tasks expressed in natural language. For example, given the prompt "*Translate to English: Je t’aime.*", the model will most likely answer "*I love you.*". Some prompt ideas from our paper:
793
+ - 一个传奇的开端,一个不灭的神话,这不仅仅是一部电影,而是作为一个走进新时代的标签,永远彪炳史册。你认为这句话的立场是赞扬、中立还是批评?
794
+ - Suggest at least five related search terms to "Mạng neural nhân tạo".
795
+ - Write a fairy tale about a troll saving a princess from a dangerous dragon. The fairy tale is a masterpiece that has achieved praise worldwide and its moral is "Heroes Come in All Shapes and Sizes". Story (in Spanish):
796
+ - Explain in a sentence in Telugu what is backpropagation in neural networks.
797
+
798
+ **Feel free to share your generations in the Community tab!**
799
+
800
+ ## How to use
801
+
802
+ ### CPU
803
+
804
+ <details>
805
+ <summary> Click to expand </summary>
806
+
807
+ ```python
808
+ # pip install -q transformers
809
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
810
+
811
+ checkpoint = "bigscience/mt0-small"
812
+
813
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint)
814
+ model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
815
+
816
+ inputs = tokenizer.encode("Translate to English: Je t’aime.", return_tensors="pt")
817
+ outputs = model.generate(inputs)
818
+ print(tokenizer.decode(outputs[0]))
819
+ ```
820
+
821
+ </details>
822
+
823
+ ### GPU
824
+
825
+ <details>
826
+ <summary> Click to expand </summary>
827
+
828
+ ```python
829
+ # pip install -q transformers accelerate
830
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
831
+
832
+ checkpoint = "bigscience/mt0-small"
833
+
834
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint)
835
+ model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint, torch_dtype="auto", device_map="auto")
836
+
837
+ inputs = tokenizer.encode("Translate to English: Je t’aime.", return_tensors="pt").to("cuda")
838
+ outputs = model.generate(inputs)
839
+ print(tokenizer.decode(outputs[0]))
840
+ ```
841
+
842
+ </details>
843
+
844
+ ### GPU in 8bit
845
+
846
+ <details>
847
+ <summary> Click to expand </summary>
848
+
849
+ ```python
850
+ # pip install -q transformers accelerate bitsandbytes
851
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
852
+
853
+ checkpoint = "bigscience/mt0-small"
854
+
855
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint)
856
+ model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint, device_map="auto", load_in_8bit=True)
857
+
858
+ inputs = tokenizer.encode("Translate to English: Je t’aime.", return_tensors="pt").to("cuda")
859
+ outputs = model.generate(inputs)
860
+ print(tokenizer.decode(outputs[0]))
861
+ ```
862
+
863
+ </details>
864
+
865
+ <!-- Necessary for whitespace -->
866
+ ###
867
+
868
+ # Limitations
869
+
870
+ **Prompt Engineering:** The performance may vary depending on the prompt. For BLOOMZ models, we recommend making it very clear when the input stops to avoid the model trying to continue it. For example, the prompt "*Translate to English: Je t'aime*" without the full stop (.) at the end, may result in the model trying to continue the French sentence. Better prompts are e.g. "*Translate to English: Je t'aime.*", "*Translate to English: Je t'aime. Translation:*" "*What is "Je t'aime." in English?*", where it is clear for the model when it should answer. Further, we recommend providing the model as much context as possible. For example, if you want it to answer in Telugu, then tell the model, e.g. "*Explain in a sentence in Telugu what is backpropagation in neural networks.*".
871
+
872
+ # Training
873
+
874
+ ## Model
875
+
876
+ - **Architecture:** Same as [mt5-small](https://huggingface.co/google/mt5-small), also refer to the `config.json` file
877
+ - **Finetuning steps:** 25000
878
+ - **Finetuning tokens:** 4.62 billion
879
+ - **Precision:** bfloat16
880
+
881
+ ## Hardware
882
+
883
+ - **TPUs:** TPUv4-64
884
+
885
+ ## Software
886
+
887
+ - **Orchestration:** [T5X](https://github.com/google-research/t5x)
888
+ - **Neural networks:** [Jax](https://github.com/google/jax)
889
+
890
+ # Evaluation
891
+
892
+ We refer to Table 7 from our [paper](https://arxiv.org/abs/2211.01786) & [bigscience/evaluation-results](https://huggingface.co/datasets/bigscience/evaluation-results) for zero-shot results on unseen tasks. The sidebar reports zero-shot performance of the best prompt per dataset config.
893
+
894
+ # Citation
895
+ ```bibtex
896
+ @article{muennighoff2022crosslingual,
897
+ title={Crosslingual generalization through multitask finetuning},
898
+ author={Muennighoff, Niklas and Wang, Thomas and Sutawika, Lintang and Roberts, Adam and Biderman, Stella and Scao, Teven Le and Bari, M Saiful and Shen, Sheng and Yong, Zheng-Xin and Schoelkopf, Hailey and others},
899
+ journal={arXiv preprint arXiv:2211.01786},
900
+ year={2022}
901
+ }
902
+ ```
config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/mt5-small",
3
+ "architectures": [
4
+ "MT5ForConditionalGeneration"
5
+ ],
6
+ "d_ff": 1024,
7
+ "d_kv": 64,
8
+ "d_model": 512,
9
+ "decoder_start_token_id": 0,
10
+ "dense_act_fn": "gelu_new",
11
+ "dropout_rate": 0.1,
12
+ "eos_token_id": 1,
13
+ "feed_forward_proj": "gated-gelu",
14
+ "initializer_factor": 1.0,
15
+ "is_encoder_decoder": true,
16
+ "is_gated_act": true,
17
+ "layer_norm_epsilon": 1e-06,
18
+ "model_type": "mt5",
19
+ "num_decoder_layers": 8,
20
+ "num_heads": 6,
21
+ "num_layers": 8,
22
+ "pad_token_id": 0,
23
+ "relative_attention_max_distance": 128,
24
+ "relative_attention_num_buckets": 32,
25
+ "tie_word_embeddings": false,
26
+ "tokenizer_class": "T5Tokenizer",
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.23.1",
29
+ "use_cache": true,
30
+ "vocab_size": 250112
31
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e3d320de3e92f254a1d03b0a5771ac960e3e151f9ee00bfbb3d0deb9da42daa
3
+ size 1200729516
onnx/config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bigscience/mt0-small",
3
+ "architectures": [
4
+ "MT5ForConditionalGeneration"
5
+ ],
6
+ "d_ff": 1024,
7
+ "d_kv": 64,
8
+ "d_model": 512,
9
+ "decoder_start_token_id": 0,
10
+ "dense_act_fn": "gelu_new",
11
+ "dropout_rate": 0.1,
12
+ "eos_token_id": 1,
13
+ "feed_forward_proj": "gated-gelu",
14
+ "initializer_factor": 1.0,
15
+ "is_encoder_decoder": true,
16
+ "is_gated_act": true,
17
+ "layer_norm_epsilon": 1e-06,
18
+ "model_type": "mt5",
19
+ "num_decoder_layers": 8,
20
+ "num_heads": 6,
21
+ "num_layers": 8,
22
+ "pad_token_id": 0,
23
+ "relative_attention_max_distance": 128,
24
+ "relative_attention_num_buckets": 32,
25
+ "tie_word_embeddings": false,
26
+ "tokenizer_class": "T5Tokenizer",
27
+ "transformers_version": "4.30.2",
28
+ "use_cache": true,
29
+ "vocab_size": 250112
30
+ }
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:763b6326c43df7c1ac4032991009f20210bc217cdd6e6861ef491bbababbc768
3
+ size 1125416109
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:390f8de9e630c728013dc0a583a8ad27fb0a1fc3faf95bdd320748570f9abe2f
3
+ size 1125646795
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a9cc37d8e83d44ba89b125c1537d5567f2797b95a5044a7fe1eee1199d23e4c
3
+ size 1112816423
onnx/encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b92e6ee2bc3733a0a55232680419852673a5fa6b3bec4a85c132a7c4873d94a9
3
+ size 587887587
onnx/generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "decoder_start_token_id": 0,
4
+ "eos_token_id": 1,
5
+ "pad_token_id": 0,
6
+ "transformers_version": "4.30.2"
7
+ }
onnx/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "eos_token": "</s>",
3
+ "pad_token": "<pad>",
4
+ "unk_token": "<unk>"
5
+ }
onnx/spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
3
+ size 4309802
onnx/tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99cc999819aaabf74898a252863b10d86fbcd86e8b3f65c118ff334ff85c5ea5
3
+ size 16315121
onnx/tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": null,
3
+ "clean_up_tokenization_spaces": true,
4
+ "eos_token": "</s>",
5
+ "extra_ids": 0,
6
+ "model_max_length": 1000000000000000019884624838656,
7
+ "pad_token": "<pad>",
8
+ "sp_model_kwargs": {},
9
+ "tokenizer_class": "T5Tokenizer",
10
+ "unk_token": "<unk>"
11
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f423353c85760b90fc82bf74a641791aa0884dbd6202326613bd9d850deed11
3
+ size 1200768069
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "eos_token": "</s>",
3
+ "pad_token": "<pad>",
4
+ "unk_token": "<unk>"
5
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
3
+ size 4309802
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93c3578052e1605d8332eb961bc08d72e246071974e4cc54aa6991826b802aa5
3
+ size 16330369
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": null,
3
+ "eos_token": "</s>",
4
+ "extra_ids": 0,
5
+ "name_or_path": "google/mt5-large",
6
+ "pad_token": "<pad>",
7
+ "sp_model_kwargs": {},
8
+ "special_tokens_map_file": "/home/patrick/.cache/torch/transformers/685ac0ca8568ec593a48b61b0a3c272beee9bc194a3c7241d15dcadb5f875e53.f76030f3ec1b96a8199b2593390c610e76ca8028ef3d24680000619ffb646276",
9
+ "tokenizer_class": "T5Tokenizer",
10
+ "unk_token": "<unk>"
11
+ }