diff --git "a/data/dataset_card/dataset_cards_zh.csv" "b/data/dataset_card/dataset_cards_zh.csv" new file mode 100644--- /dev/null +++ "b/data/dataset_card/dataset_cards_zh.csv" @@ -0,0 +1,65767 @@ +dataset_id,yaml_metadata,markdown_content +ceval/ceval-exam,"{""license"": ""cc-by-nc-sa-4.0"", ""task_categories"": [""text-classification"", ""multiple-choice"", ""question-answering""], ""language"": [""zh""], ""pretty_name"": ""C-Eval"", ""size_categories"": [""10K: `` + +### Data Splits + +For each configuration subset, the data is split into ""train"", ""validation"" and ""test"" sets, each containing the +following number of examples: + +| | Train | Validation | Test | +|:-------------|--------:|-------------:|-------:| +| ace | 100 | 100 | 100 | +| af | 5000 | 1000 | 1000 | +| als | 100 | 100 | 100 | +| am | 100 | 100 | 100 | +| an | 1000 | 1000 | 1000 | +| ang | 100 | 100 | 100 | +| ar | 20000 | 10000 | 10000 | +| arc | 100 | 100 | 100 | +| arz | 100 | 100 | 100 | +| as | 100 | 100 | 100 | +| ast | 1000 | 1000 | 1000 | +| ay | 100 | 100 | 100 | +| az | 10000 | 1000 | 1000 | +| ba | 100 | 100 | 100 | +| bar | 100 | 100 | 100 | +| bat-smg | 100 | 100 | 100 | +| be | 15000 | 1000 | 1000 | +| be-x-old | 5000 | 1000 | 1000 | +| bg | 20000 | 10000 | 10000 | +| bh | 100 | 100 | 100 | +| bn | 10000 | 1000 | 1000 | +| bo | 100 | 100 | 100 | +| br | 1000 | 1000 | 1000 | +| bs | 15000 | 1000 | 1000 | +| ca | 20000 | 10000 | 10000 | +| cbk-zam | 100 | 100 | 100 | +| cdo | 100 | 100 | 100 | +| ce | 100 | 100 | 100 | +| ceb | 100 | 100 | 100 | +| ckb | 1000 | 1000 | 1000 | +| co | 100 | 100 | 100 | +| crh | 100 | 100 | 100 | +| cs | 20000 | 10000 | 10000 | +| csb | 100 | 100 | 100 | +| cv | 100 | 100 | 100 | +| cy | 10000 | 1000 | 1000 | +| da | 20000 | 10000 | 10000 | +| de | 20000 | 10000 | 10000 | +| diq | 100 | 100 | 100 | +| dv | 100 | 100 | 100 | +| el | 20000 | 10000 | 10000 | +| eml | 100 | 100 | 100 | +| en | 20000 | 10000 | 10000 | +| eo | 15000 | 10000 | 10000 | +| es | 20000 | 10000 | 10000 | +| et | 15000 | 10000 | 10000 | +| eu | 10000 | 10000 | 10000 | +| ext | 100 | 100 | 100 | +| fa | 20000 | 10000 | 10000 | +| fi | 20000 | 10000 | 10000 | +| fiu-vro | 100 | 100 | 100 | +| fo | 100 | 100 | 100 | +| fr | 20000 | 10000 | 10000 | +| frr | 100 | 100 | 100 | +| fur | 100 | 100 | 100 | +| fy | 1000 | 1000 | 1000 | +| ga | 1000 | 1000 | 1000 | +| gan | 100 | 100 | 100 | +| gd | 100 | 100 | 100 | +| gl | 15000 | 10000 | 10000 | +| gn | 100 | 100 | 100 | +| gu | 100 | 100 | 100 | +| hak | 100 | 100 | 100 | +| he | 20000 | 10000 | 10000 | +| hi | 5000 | 1000 | 1000 | +| hr | 20000 | 10000 | 10000 | +| hsb | 100 | 100 | 100 | +| hu | 20000 | 10000 | 10000 | +| hy | 15000 | 1000 | 1000 | +| ia | 100 | 100 | 100 | +| id | 20000 | 10000 | 10000 | +| ig | 100 | 100 | 100 | +| ilo | 100 | 100 | 100 | +| io | 100 | 100 | 100 | +| is | 1000 | 1000 | 1000 | +| it | 20000 | 10000 | 10000 | +| ja | 20000 | 10000 | 10000 | +| jbo | 100 | 100 | 100 | +| jv | 100 | 100 | 100 | +| ka | 10000 | 10000 | 10000 | +| kk | 1000 | 1000 | 1000 | +| km | 100 | 100 | 100 | +| kn | 100 | 100 | 100 | +| ko | 20000 | 10000 | 10000 | +| ksh | 100 | 100 | 100 | +| ku | 100 | 100 | 100 | +| ky | 100 | 100 | 100 | +| la | 5000 | 1000 | 1000 | +| lb | 5000 | 1000 | 1000 | +| li | 100 | 100 | 100 | +| lij | 100 | 100 | 100 | +| lmo | 100 | 100 | 100 | +| ln | 100 | 100 | 100 | +| lt | 10000 | 10000 | 10000 | +| lv | 10000 | 10000 | 10000 | +| map-bms | 100 | 100 | 100 | +| mg | 100 | 100 | 100 | +| mhr | 100 | 100 | 100 | +| 
mi | 100 | 100 | 100 | +| min | 100 | 100 | 100 | +| mk | 10000 | 1000 | 1000 | +| ml | 10000 | 1000 | 1000 | +| mn | 100 | 100 | 100 | +| mr | 5000 | 1000 | 1000 | +| ms | 20000 | 1000 | 1000 | +| mt | 100 | 100 | 100 | +| mwl | 100 | 100 | 100 | +| my | 100 | 100 | 100 | +| mzn | 100 | 100 | 100 | +| nap | 100 | 100 | 100 | +| nds | 100 | 100 | 100 | +| ne | 100 | 100 | 100 | +| nl | 20000 | 10000 | 10000 | +| nn | 20000 | 1000 | 1000 | +| no | 20000 | 10000 | 10000 | +| nov | 100 | 100 | 100 | +| oc | 100 | 100 | 100 | +| or | 100 | 100 | 100 | +| os | 100 | 100 | 100 | +| pa | 100 | 100 | 100 | +| pdc | 100 | 100 | 100 | +| pl | 20000 | 10000 | 10000 | +| pms | 100 | 100 | 100 | +| pnb | 100 | 100 | 100 | +| ps | 100 | 100 | 100 | +| pt | 20000 | 10000 | 10000 | +| qu | 100 | 100 | 100 | +| rm | 100 | 100 | 100 | +| ro | 20000 | 10000 | 10000 | +| ru | 20000 | 10000 | 10000 | +| rw | 100 | 100 | 100 | +| sa | 100 | 100 | 100 | +| sah | 100 | 100 | 100 | +| scn | 100 | 100 | 100 | +| sco | 100 | 100 | 100 | +| sd | 100 | 100 | 100 | +| sh | 20000 | 10000 | 10000 | +| si | 100 | 100 | 100 | +| simple | 20000 | 1000 | 1000 | +| sk | 20000 | 10000 | 10000 | +| sl | 15000 | 10000 | 10000 | +| so | 100 | 100 | 100 | +| sq | 5000 | 1000 | 1000 | +| sr | 20000 | 10000 | 10000 | +| su | 100 | 100 | 100 | +| sv | 20000 | 10000 | 10000 | +| sw | 1000 | 1000 | 1000 | +| szl | 100 | 100 | 100 | +| ta | 15000 | 1000 | 1000 | +| te | 1000 | 1000 | 1000 | +| tg | 100 | 100 | 100 | +| th | 20000 | 10000 | 10000 | +| tk | 100 | 100 | 100 | +| tl | 10000 | 1000 | 1000 | +| tr | 20000 | 10000 | 10000 | +| tt | 1000 | 1000 | 1000 | +| ug | 100 | 100 | 100 | +| uk | 20000 | 10000 | 10000 | +| ur | 20000 | 1000 | 1000 | +| uz | 1000 | 1000 | 1000 | +| vec | 100 | 100 | 100 | +| vep | 100 | 100 | 100 | +| vi | 20000 | 10000 | 10000 | +| vls | 100 | 100 | 100 | +| vo | 100 | 100 | 100 | +| wa | 100 | 100 | 100 | +| war | 100 | 100 | 100 | +| wuu | 100 | 100 | 100 | +| xmf | 100 | 100 | 100 | +| yi | 100 | 100 | 100 | +| yo | 100 | 100 | 100 | +| zea | 100 | 100 | 100 | +| zh | 20000 | 10000 | 10000 | +| zh-classical | 100 | 100 | 100 | +| zh-min-nan | 100 | 100 | 100 | +| zh-yue | 20000 | 10000 | 10000 | + +## Dataset Creation + +### Curation Rationale + +[More Information Needed] + +### Source Data + +#### Initial Data Collection and Normalization + +[More Information Needed] + +#### Who are the source language producers? + +[More Information Needed] + +### Annotations + +#### Annotation process + +[More Information Needed] + +#### Who are the annotators? 
+ +[More Information Needed] + +### Personal and Sensitive Information + +[More Information Needed] + +## Considerations for Using the Data + +### Social Impact of Dataset + +[More Information Needed] + +### Discussion of Biases + +[More Information Needed] + +### Other Known Limitations + +[More Information Needed] + +## Additional Information + +### Dataset Curators + +[More Information Needed] + +### Licensing Information + +[More Information Needed] + +### Citation Information + +The original 282 datasets are associated with this article + +``` +@inproceedings{pan-etal-2017-cross, + title = ""Cross-lingual Name Tagging and Linking for 282 Languages"", + author = ""Pan, Xiaoman and + Zhang, Boliang and + May, Jonathan and + Nothman, Joel and + Knight, Kevin and + Ji, Heng"", + booktitle = ""Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)"", + month = jul, + year = ""2017"", + address = ""Vancouver, Canada"", + publisher = ""Association for Computational Linguistics"", + url = ""https://www.aclweb.org/anthology/P17-1178"", + doi = ""10.18653/v1/P17-1178"", + pages = ""1946--1958"", + abstract = ""The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating {``}silver-standard{''} annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and on non-Wikipedia data."", +} +``` + +while the 176 languages supported in this version are associated with the following article + +``` +@inproceedings{rahimi-etal-2019-massively, + title = ""Massively Multilingual Transfer for {NER}"", + author = ""Rahimi, Afshin and + Li, Yuan and + Cohn, Trevor"", + booktitle = ""Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics"", + month = jul, + year = ""2019"", + address = ""Florence, Italy"", + publisher = ""Association for Computational Linguistics"", + url = ""https://www.aclweb.org/anthology/P19-1015"", + pages = ""151--164"", +} +``` + + +### Contributions + +Thanks to [@lewtun](https://github.com/lewtun) and [@rabeehk](https://github.com/rabeehk) for adding this dataset." 
+facebook/xnli,"{""language"": [""ar"", ""bg"", ""de"", ""el"", ""en"", ""es"", ""fr"", ""hi"", ""ru"", ""sw"", ""th"", ""tr"", ""ur"", ""vi"", ""zh""], ""paperswithcode_id"": ""xnli"", ""pretty_name"": ""Cross-lingual Natural Language Inference"", ""dataset_info"": [{""config_name"": ""all_languages"", ""features"": [{""name"": ""premise"", ""dtype"": {""translation"": {""languages"": [""ar"", ""bg"", ""de"", ""el"", ""en"", ""es"", ""fr"", ""hi"", ""ru"", ""sw"", ""th"", ""tr"", ""ur"", ""vi"", ""zh""]}}}, {""name"": ""hypothesis"", ""dtype"": {""translation_variable_languages"": {""languages"": [""ar"", ""bg"", ""de"", ""el"", ""en"", ""es"", ""fr"", ""hi"", ""ru"", ""sw"", ""th"", ""tr"", ""ur"", ""vi"", ""zh""], ""num_languages"": 15}}}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 1581471691, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 19387432, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 9566179, ""num_examples"": 2490}], ""download_size"": 963942271, ""dataset_size"": 1610425302}, {""config_name"": ""ar"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 107399614, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 1294553, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 633001, ""num_examples"": 2490}], ""download_size"": 59215902, ""dataset_size"": 109327168}, {""config_name"": ""bg"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 125973225, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 1573034, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 774061, ""num_examples"": 2490}], ""download_size"": 66117878, ""dataset_size"": 128320320}, {""config_name"": ""de"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 84684140, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 996488, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 494604, ""num_examples"": 2490}], ""download_size"": 55973883, ""dataset_size"": 86175232}, {""config_name"": ""el"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 139753358, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 1704785, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 841226, ""num_examples"": 2490}], ""download_size"": 74551247, ""dataset_size"": 142299369}, {""config_name"": ""en"", ""features"": [{""name"": ""premise"", ""dtype"": 
""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 74444026, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 875134, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 433463, ""num_examples"": 2490}], ""download_size"": 50627367, ""dataset_size"": 75752623}, {""config_name"": ""es"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 81383284, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 969813, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 478422, ""num_examples"": 2490}], ""download_size"": 53677157, ""dataset_size"": 82831519}, {""config_name"": ""fr"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 85808779, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 1029239, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 510104, ""num_examples"": 2490}], ""download_size"": 55968680, ""dataset_size"": 87348122}, {""config_name"": ""hi"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 170593964, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 2073073, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 1023915, ""num_examples"": 2490}], ""download_size"": 70908548, ""dataset_size"": 173690952}, {""config_name"": ""ru"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 129859615, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 1603466, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 786442, ""num_examples"": 2490}], ""download_size"": 70702606, ""dataset_size"": 132249523}, {""config_name"": ""sw"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 69285725, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 871651, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 429850, ""num_examples"": 2490}], ""download_size"": 45564152, ""dataset_size"": 70587226}, {""config_name"": ""th"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": 
""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 176062892, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 2147015, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 1061160, ""num_examples"": 2490}], ""download_size"": 77222045, ""dataset_size"": 179271067}, {""config_name"": ""tr"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 71637140, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 934934, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 459308, ""num_examples"": 2490}], ""download_size"": 48509680, ""dataset_size"": 73031382}, {""config_name"": ""ur"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 96441486, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 1416241, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 699952, ""num_examples"": 2490}], ""download_size"": 46682785, ""dataset_size"": 98557679}, {""config_name"": ""vi"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 101417430, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 1190217, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 590680, ""num_examples"": 2490}], ""download_size"": 57690058, ""dataset_size"": 103198327}, {""config_name"": ""zh"", ""features"": [{""name"": ""premise"", ""dtype"": ""string""}, {""name"": ""hypothesis"", ""dtype"": ""string""}, {""name"": ""label"", ""dtype"": {""class_label"": {""names"": {""0"": ""entailment"", ""1"": ""neutral"", ""2"": ""contradiction""}}}}], ""splits"": [{""name"": ""train"", ""num_bytes"": 72224841, ""num_examples"": 392702}, {""name"": ""test"", ""num_bytes"": 777929, ""num_examples"": 5010}, {""name"": ""validation"", ""num_bytes"": 384851, ""num_examples"": 2490}], ""download_size"": 48269855, ""dataset_size"": 73387621}], ""configs"": [{""config_name"": ""all_languages"", ""data_files"": [{""split"": ""train"", ""path"": ""all_languages/train-*""}, {""split"": ""test"", ""path"": ""all_languages/test-*""}, {""split"": ""validation"", ""path"": ""all_languages/validation-*""}]}, {""config_name"": ""ar"", ""data_files"": [{""split"": ""train"", ""path"": ""ar/train-*""}, {""split"": ""test"", ""path"": ""ar/test-*""}, {""split"": ""validation"", ""path"": ""ar/validation-*""}]}, {""config_name"": ""bg"", ""data_files"": [{""split"": ""train"", ""path"": ""bg/train-*""}, {""split"": ""test"", ""path"": ""bg/test-*""}, {""split"": ""validation"", ""path"": ""bg/validation-*""}]}, {""config_name"": ""de"", ""data_files"": [{""split"": ""train"", ""path"": ""de/train-*""}, {""split"": ""test"", ""path"": ""de/test-*""}, {""split"": ""validation"", ""path"": ""de/validation-*""}]}, {""config_name"": ""el"", ""data_files"": 
[{""split"": ""train"", ""path"": ""el/train-*""}, {""split"": ""test"", ""path"": ""el/test-*""}, {""split"": ""validation"", ""path"": ""el/validation-*""}]}, {""config_name"": ""en"", ""data_files"": [{""split"": ""train"", ""path"": ""en/train-*""}, {""split"": ""test"", ""path"": ""en/test-*""}, {""split"": ""validation"", ""path"": ""en/validation-*""}]}, {""config_name"": ""es"", ""data_files"": [{""split"": ""train"", ""path"": ""es/train-*""}, {""split"": ""test"", ""path"": ""es/test-*""}, {""split"": ""validation"", ""path"": ""es/validation-*""}]}, {""config_name"": ""fr"", ""data_files"": [{""split"": ""train"", ""path"": ""fr/train-*""}, {""split"": ""test"", ""path"": ""fr/test-*""}, {""split"": ""validation"", ""path"": ""fr/validation-*""}]}, {""config_name"": ""hi"", ""data_files"": [{""split"": ""train"", ""path"": ""hi/train-*""}, {""split"": ""test"", ""path"": ""hi/test-*""}, {""split"": ""validation"", ""path"": ""hi/validation-*""}]}, {""config_name"": ""ru"", ""data_files"": [{""split"": ""train"", ""path"": ""ru/train-*""}, {""split"": ""test"", ""path"": ""ru/test-*""}, {""split"": ""validation"", ""path"": ""ru/validation-*""}]}, {""config_name"": ""sw"", ""data_files"": [{""split"": ""train"", ""path"": ""sw/train-*""}, {""split"": ""test"", ""path"": ""sw/test-*""}, {""split"": ""validation"", ""path"": ""sw/validation-*""}]}, {""config_name"": ""th"", ""data_files"": [{""split"": ""train"", ""path"": ""th/train-*""}, {""split"": ""test"", ""path"": ""th/test-*""}, {""split"": ""validation"", ""path"": ""th/validation-*""}]}, {""config_name"": ""tr"", ""data_files"": [{""split"": ""train"", ""path"": ""tr/train-*""}, {""split"": ""test"", ""path"": ""tr/test-*""}, {""split"": ""validation"", ""path"": ""tr/validation-*""}]}, {""config_name"": ""ur"", ""data_files"": [{""split"": ""train"", ""path"": ""ur/train-*""}, {""split"": ""test"", ""path"": ""ur/test-*""}, {""split"": ""validation"", ""path"": ""ur/validation-*""}]}, {""config_name"": ""vi"", ""data_files"": [{""split"": ""train"", ""path"": ""vi/train-*""}, {""split"": ""test"", ""path"": ""vi/test-*""}, {""split"": ""validation"", ""path"": ""vi/validation-*""}]}, {""config_name"": ""zh"", ""data_files"": [{""split"": ""train"", ""path"": ""zh/train-*""}, {""split"": ""test"", ""path"": ""zh/test-*""}, {""split"": ""validation"", ""path"": ""zh/validation-*""}]}]}","# Dataset Card for ""xnli"" + +## Table of Contents +- [Dataset Description](#dataset-description) + - [Dataset Summary](#dataset-summary) + - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) + - [Languages](#languages) +- [Dataset Structure](#dataset-structure) + - [Data Instances](#data-instances) + - [Data Fields](#data-fields) + - [Data Splits](#data-splits) +- [Dataset Creation](#dataset-creation) + - [Curation Rationale](#curation-rationale) + - [Source Data](#source-data) + - [Annotations](#annotations) + - [Personal and Sensitive Information](#personal-and-sensitive-information) +- [Considerations for Using the Data](#considerations-for-using-the-data) + - [Social Impact of Dataset](#social-impact-of-dataset) + - [Discussion of Biases](#discussion-of-biases) + - [Other Known Limitations](#other-known-limitations) +- [Additional Information](#additional-information) + - [Dataset Curators](#dataset-curators) + - [Licensing Information](#licensing-information) + - [Citation Information](#citation-information) + - [Contributions](#contributions) + +## Dataset Description + +- **Homepage:** 
[https://www.nyu.edu/projects/bowman/xnli/](https://www.nyu.edu/projects/bowman/xnli/) +- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) +- **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) +- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) +- **Size of downloaded dataset files:** 7.74 GB +- **Size of the generated dataset:** 3.23 GB +- **Total amount of disk used:** 10.97 GB + +### Dataset Summary + +XNLI is a subset of a few thousand examples from MNLI which has been translated +into 14 different languages (some of them relatively low-resource). As with MNLI, the goal is +to predict textual entailment (does sentence A imply, contradict, or neither imply nor contradict sentence +B?) as a classification task (given two sentences, predict one of three +labels). + +### Supported Tasks and Leaderboards + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +### Languages + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +## Dataset Structure + +### Data Instances + +#### all_languages + +- **Size of downloaded dataset files:** 483.96 MB +- **Size of the generated dataset:** 1.61 GB +- **Total amount of disk used:** 2.09 GB + +An example of 'train' looks as follows. +``` +This example was too long and was cropped: + +{ + ""hypothesis"": ""{\""language\"": [\""ar\"", \""bg\"", \""de\"", \""el\"", \""en\"", \""es\"", \""fr\"", \""hi\"", \""ru\"", \""sw\"", \""th\"", \""tr\"", \""ur\"", \""vi\"", \""zh\""], \""translation\"": [\""احد اع..."", + ""label"": 0, + ""premise"": ""{\""ar\"": \""واحدة من رقابنا ستقوم بتنفيذ تعليماتك كلها بكل دقة\"", \""bg\"": \""един от нашите номера ще ви даде инструкции .\"", \""de\"": \""Eine ..."" +} +``` + +#### ar + +- **Size of downloaded dataset files:** 483.96 MB +- **Size of the generated dataset:** 109.32 MB +- **Total amount of disk used:** 593.29 MB + +An example of 'validation' looks as follows. +``` +{ + ""hypothesis"": ""اتصل بأمه حالما أوصلته حافلة المدرسية."", + ""label"": 1, + ""premise"": ""وقال، ماما، لقد عدت للمنزل."" +} +``` + +#### bg + +- **Size of downloaded dataset files:** 483.96 MB +- **Size of the generated dataset:** 128.32 MB +- **Total amount of disk used:** 612.28 MB + +An example of 'train' looks as follows. +``` +This example was too long and was cropped: + +{ + ""hypothesis"": ""\""губиш нещата на следното ниво , ако хората си припомнят .\""..."", + ""label"": 0, + ""premise"": ""\""по време на сезона и предполагам , че на твоето ниво ще ги загубиш на следващото ниво , ако те решат да си припомнят отбора на ..."" +} +``` + +#### de + +- **Size of downloaded dataset files:** 483.96 MB +- **Size of the generated dataset:** 86.17 MB +- **Total amount of disk used:** 570.14 MB + +An example of 'train' looks as follows. 
+``` +This example was too long and was cropped: + +{ + ""hypothesis"": ""Man verliert die Dinge auf die folgende Ebene , wenn sich die Leute erinnern ."", + ""label"": 0, + ""premise"": ""\""Du weißt , während der Saison und ich schätze , auf deiner Ebene verlierst du sie auf die nächste Ebene , wenn sie sich entschl..."" +} +``` + +#### el + +- **Size of downloaded dataset files:** 483.96 MB +- **Size of the generated dataset:** 142.30 MB +- **Total amount of disk used:** 626.26 MB + +An example of 'validation' looks as follows. +``` +This example was too long and was cropped: + +{ + ""hypothesis"": ""\""Τηλεφώνησε στη μαμά του μόλις το σχολικό λεωφορείο τον άφησε.\""..."", + ""label"": 1, + ""premise"": ""Και είπε, Μαμά, έφτασα στο σπίτι."" +} +``` + +### Data Fields + +The data fields are the same among all splits. + +#### all_languages +- `premise`: a multilingual `string` variable, with possible languages including `ar`, `bg`, `de`, `el`, `en`. +- `hypothesis`: a multilingual `string` variable, with possible languages including `ar`, `bg`, `de`, `el`, `en`. +- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). + +#### ar +- `premise`: a `string` feature. +- `hypothesis`: a `string` feature. +- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). + +#### bg +- `premise`: a `string` feature. +- `hypothesis`: a `string` feature. +- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). + +#### de +- `premise`: a `string` feature. +- `hypothesis`: a `string` feature. +- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). + +#### el +- `premise`: a `string` feature. +- `hypothesis`: a `string` feature. +- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). + +### Data Splits + +| name |train |validation|test| +|-------------|-----:|---------:|---:| +|all_languages|392702| 2490|5010| +|ar |392702| 2490|5010| +|bg |392702| 2490|5010| +|de |392702| 2490|5010| +|el |392702| 2490|5010| + +## Dataset Creation + +### Curation Rationale + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +### Source Data + +#### Initial Data Collection and Normalization + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +#### Who are the source language producers? + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +### Annotations + +#### Annotation process + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +#### Who are the annotators? 
+ +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +### Personal and Sensitive Information + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +## Considerations for Using the Data + +### Social Impact of Dataset + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +### Discussion of Biases + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +### Other Known Limitations + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +## Additional Information + +### Dataset Curators + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +### Licensing Information + +[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) + +### Citation Information + +``` +@InProceedings{conneau2018xnli, + author = {Conneau, Alexis + and Rinott, Ruty + and Lample, Guillaume + and Williams, Adina + and Bowman, Samuel R. + and Schwenk, Holger + and Stoyanov, Veselin}, + title = {XNLI: Evaluating Cross-lingual Sentence Representations}, + booktitle = {Proceedings of the 2018 Conference on Empirical Methods + in Natural Language Processing}, + year = {2018}, + publisher = {Association for Computational Linguistics}, + location = {Brussels, Belgium}, +} +``` + + +### Contributions + +Thanks to [@lewtun](https://github.com/lewtun), [@mariamabarham](https://github.com/mariamabarham), [@thomwolf](https://github.com/thomwolf), [@lhoestq](https://github.com/lhoestq), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset." +google/xtreme,"{""annotations_creators"": [""found""], ""language_creators"": [""found""], ""language"": [""af"", ""ar"", ""bg"", ""bn"", ""de"", ""el"", ""en"", ""es"", ""et"", ""eu"", ""fa"", ""fi"", ""fr"", ""he"", ""hi"", ""hu"", ""id"", ""it"", ""ja"", ""jv"", ""ka"", ""kk"", ""ko"", ""ml"", ""mr"", ""ms"", ""my"", ""nl"", ""pt"", ""ru"", ""sw"", ""ta"", ""te"", ""th"", ""tl"", ""tr"", ""ur"", ""vi"", ""yo"", ""zh""], ""license"": [""apache-2.0"", ""cc-by-4.0"", ""cc-by-2.0"", ""cc-by-sa-4.0"", ""other"", ""cc-by-nc-4.0""], ""multilinguality"": [""multilingual"", ""translation""], ""size_categories"": [""n<1K"", ""1K Note: In order to avoid discrepancies caused by different tokenizers, we use the word count (using Python's split function) to calculate the average length of English datasets and code datasets, and use the character count to calculate the average length of Chinese datasets. 
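+ +# How to use +A minimal sketch of loading one task with the Hugging Face `datasets` library is shown below. It assumes the dataset id on the Hub is `THUDM/LongBench`, that each task above is exposed as a lowercase config name (e.g. `hotpotqa`), and that the data lives in a `test` split; check the repository for the exact config and field names. +```python +from datasets import load_dataset + +# Load one task subset; the config name is assumed to match the lowercase task name. +data = load_dataset(""THUDM/LongBench"", ""hotpotqa"", split=""test"") +print(data.column_names) +print(data[0]) +``` 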
+ +# Task description +| Task | Task Description | +| :---------------- | :----------------------------------------------------------- | +| HotpotQA | Answer related questions based on multiple given documents | +| 2WikiMultihopQA | Answer related questions based on multiple given documents | +| MuSiQue | Answer related questions based on multiple given documents | +| DuReader | Answer related Chinese questions based on multiple retrieved documents | +| MultiFieldQA-en | Answer English questions based on a long article drawn from a relatively diverse range of fields | +| MultiFieldQA-zh | Answer Chinese questions based on a long article drawn from a relatively diverse range of fields | +| NarrativeQA | Answer questions based on stories or scripts, including understanding of important elements such as characters, plots, themes, etc. | +| Qasper | Answer questions based on an NLP research paper, with questions proposed and answered by NLP practitioners | +| GovReport | A summarization task that requires summarizing government work reports | +| MultiNews | A multi-document summarization task that requires summarizing multiple news articles | +| QMSum | A summarization task that requires summarizing meeting records based on user queries | +| VCSUM | A summarization task that requires summarizing Chinese meeting records | +| SAMSum | A dialogue summarization task, providing several few-shot examples | +| TriviaQA | Single document question answering task, providing several few-shot examples | +| NQ | Single document question answering task, providing several few-shot examples | +| TREC | A classification task that requires categorizing questions; includes 50 categories in total | +| LSHT | A Chinese classification task that requires categorizing news; includes 24 categories in total | +| PassageRetrieval-en | Given 30 English Wikipedia paragraphs, determine which paragraph the given summary corresponds to | +| PassageCount | Determine the total number of different paragraphs in a given repetitive article | +| PassageRetrieval-zh | Given several Chinese paragraphs from the C4 dataset, determine which paragraph the given abstract corresponds to | +| LCC | Given a long piece of code, predict the next line of code | +| RepoBench-P | Given code in multiple files within a GitHub repository (including cross-file dependencies), predict the next line of code | + +# Task construction +> Note: For all tasks constructed from existing datasets, we use data from the validation or test set of the existing dataset (except for VCSUM). + +- The tasks of [HotpotQA](https://hotpotqa.github.io/), [2WikiMultihopQA](https://aclanthology.org/2020.coling-main.580/), [MuSiQue](https://arxiv.org/abs/2108.00573), and [DuReader](https://github.com/baidu/DuReader) are built based on the original datasets and processed to be suitable for long-context evaluation. Specifically, for questions in the validation set, we select the evidence passage that contains the answer and several distracting articles. These articles together with the original question constitute the input of the tasks. +- The tasks of MultiFieldQA-zh and MultiFieldQA-en consist of long articles from about 10 sources, including LaTeX papers, judicial documents, government work reports, and PDF documents indexed by Google. For each long article, we invite several PhD and master's students to annotate, i.e., to ask questions based on the long article and give the correct answers. 
To better automate evaluation, we ask the annotators to propose questions with definitive answers wherever possible. +- The tasks of [NarrativeQA](https://arxiv.org/pdf/1712.07040.pdf), [Qasper](https://arxiv.org/pdf/2105.03011.pdf), [GovReport](https://arxiv.org/pdf/2104.02112.pdf), [QMSum](https://arxiv.org/pdf/2104.05938.pdf) and [MultiNews](https://aclanthology.org/P19-1102.pdf) directly use the data provided by the original papers. In the specific construction, we use the template provided by [ZeroSCROLLS](https://www.zero.scrolls-benchmark.com/) to convert the corresponding data into pure text input. +- The [VCSUM](https://arxiv.org/abs/2305.05280) task is built based on the original dataset, and we design a corresponding template to convert the corresponding data into pure text input. +- The [TriviaQA](https://nlp.cs.washington.edu/triviaqa/) task is constructed in the manner of [CoLT5](https://arxiv.org/abs/2303.09752), which provides several examples of question answering based on documents, and requires the language model to answer related questions based on new documents. +- The tasks of [SAMSum](https://aclanthology.org/D19-5409.pdf), [TREC](https://aclanthology.org/C02-1150.pdf) and [LSHT](http://tcci.ccf.org.cn/conference/2014/dldoc/evatask6.pdf) are built based on the original datasets. For each question in the validation set, we sample several instances from the training set to form few-shot examples. These examples together with the questions in the validation set constitute the input for this task. +- The PassageRetrieval-en task is constructed based on English Wikipedia. For each piece of data, we randomly sample 30 paragraphs from English Wikipedia and select one for summarization (using GPT-3.5-Turbo). This task requires the model to give the original paragraph name to which the summary corresponds. +- The PassageCount task is constructed based on English Wikipedia. For each piece of data, we randomly sample several passages from English Wikipedia, repeat each paragraph at random several times, and finally shuffle the paragraphs (see the sketch after this list). This task requires the model to determine the total number of different paragraphs in the given context. +- The PassageRetrieval-zh task is constructed based on [C4](https://arxiv.org/abs/1910.10683). For each piece of data, we randomly sample several Chinese paragraphs from C4 and select one of them for summarization (using GPT-3.5-Turbo). This task requires the model to give the original paragraph name to which the summary corresponds. +- For the [LCC](https://arxiv.org/abs/2306.14893) task, we sample from the original code completion dataset. In the [RepoBench-P](https://arxiv.org/abs/2306.03091) task, we select the most challenging XF-F (Cross-File-First) setting from the original dataset and refer to the Oracle-Filled scenario in the paper. For each original piece of data, we randomly extract multiple cross-file code snippets, including the gold cross-file code snippet, and concatenate them as input, requiring the model to effectively use cross-file code for completion. 
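+ +As a concrete illustration, the PassageCount recipe described above can be written out as follows. This is an illustrative sketch only: the function name, the repeat bound and the paragraph separator are made up here, and the exact sampling sizes used in the benchmark are not specified in this card. +```python +import random + +def build_passage_count_example(passages, max_repeats=3, seed=0): +    # `passages` is assumed to hold distinct paragraphs sampled from English Wikipedia. +    rng = random.Random(seed) +    repeated = [] +    for p in passages: +        # Repeat each paragraph a random number of times. +        repeated.extend([p] * rng.randint(1, max_repeats)) +    rng.shuffle(repeated)  # shuffle the paragraph order +    context = ""\n\n"".join(repeated) +    answer = len(passages)  # the number of *different* paragraphs +    return context, answer +``` 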
+ +# LongBench-E statistics +| Task | Task Type | \#data in 0-4k | \#data in 4-8k | \#data in 8k+ | +| :--------- | :-----------: | :-----------: | :---------: | :-------------: | +| HotpotQA | Multi-doc QA | 100 |100 |100 | +| 2WikiMultihopQA| Multi-doc QA | 100 |100 |100 | +| MultiFieldQA-en| Single-doc QA | 67 |70 |13 | +| Qasper| Single-doc QA | 100 |100 |24 | +| GovReport| Summarization | 100 |100 |100 | +| MultiNews| Summarization | 100 |100 |94 | +| TriviaQA| Few shot | 100 |100 |100 | +| SAMSum| Few shot | 100 |100 |100 | +| TREC| Few shot | 100 |100 |100 | +| PassageRetrieval-en| Synthetic | 100 |100 |100 | +| PassageCount| Synthetic | 100 |100 |100 | +| LCC| Code | 100 |100 |100 | +| RepoBench-P| Code | 100 |100 |100 | + +# Citation +``` +@misc{bai2023longbench, + title={LongBench: A Bilingual, Multitask Benchmark for Long Context Understanding}, + author={Yushi Bai and Xin Lv and Jiajie Zhang and Hongchang Lyu and Jiankai Tang and Zhidian Huang and Zhengxiao Du and Xiao Liu and Aohan Zeng and Lei Hou and Yuxiao Dong and Jie Tang and Juanzi Li}, + year={2023}, + eprint={2308.14508}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +```" +clips/mqa,"{""annotations_creators"": [""no-annotation""], ""language_creators"": [""other""], ""language"": [""ca"", ""en"", ""de"", ""es"", ""fr"", ""ru"", ""ja"", ""it"", ""zh"", ""pt"", ""nl"", ""tr"", ""pl"", ""vi"", ""ar"", ""id"", ""uk"", ""ro"", ""no"", ""th"", ""sv"", ""el"", ""fi"", ""he"", ""da"", ""cs"", ""ko"", ""fa"", ""hi"", ""hu"", ""sk"", ""lt"", ""et"", ""hr"", ""is"", ""lv"", ""ms"", ""bg"", ""sr"", ""ca""], ""license"": [""cc0-1.0""], ""multilinguality"": [""multilingual""], ""pretty_name"": ""MQA - a Multilingual FAQ and CQA Dataset"", ""size_categories"": [""unknown""], ""source_datasets"": [""original""], ""task_categories"": [""question-answering""], ""task_ids"": [""multiple-choice-qa""]}","# MQA +MQA is a Multilingual corpus of Questions and Answers parsed from the [Common Crawl](https://commoncrawl.org/). Questions are divided into two types: *Frequently Asked Questions (FAQ)* and *Community Question Answering (CQA)*. +```python +from datasets import load_dataset +all_data = load_dataset(""clips/mqa"", language=""en"") +{ + ""name"": ""the title of the question (if any)"", + ""text"": ""the body of the question (if any)"", + ""answers"": [{ + ""text"": ""the text of the answer"", + ""is_accepted"": ""true|false"" + }] +} +faq_data = load_dataset(""clips/mqa"", scope=""faq"", language=""en"") +cqa_data = load_dataset(""clips/mqa"", scope=""cqa"", language=""en"") +``` + +## Languages +We collected around **234M pairs** of questions and answers in **39 languages**. To download a language-specific subset you need to specify the language key as the configuration. See below for an example. 
+```python +load_dataset(""clips/mqa"", language=""en"") # replace ""en"" by any language listed below +``` + +| Language | FAQ | CQA | +|:-----------|------------:|-----------:| +| en | 174,696,414 | 14,082,180 | +| de | 17,796,992 | 1,094,606 | +| es | 14,967,582 | 845,836 | +| fr | 13,096,727 | 1,299,359 | +| ru | 12,435,022 | 1,715,131 | +| it | 6,850,573 | 455,027 | +| ja | 6,369,706 | 2,089,952 | +| zh | 5,940,796 | 579,596 | +| pt | 5,851,286 | 373,982 | +| nl | 4,882,511 | 503,376 | +| tr | 3,893,964 | 370,975 | +| pl | 3,766,531 | 70,559 | +| vi | 2,795,227 | 96,528 | +| id | 2,253,070 | 200,441 | +| ar | 2,211,795 | 805,661 | +| uk | 2,090,611 | 27,260 | +| el | 1,758,618 | 17,167 | +| no | 1,752,820 | 11,786 | +| sv | 1,733,582 | 20,024 | +| fi | 1,717,221 | 41,371 | +| ro | 1,689,471 | 93,222 | +| th | 1,685,463 | 73,204 | +| da | 1,554,581 | 16,398 | +| he | 1,422,449 | 88,435 | +| ko | 1,361,901 | 49,061 | +| cs | 1,224,312 | 143,863 | +| hu | 878,385 | 27,639 | +| fa | 787,420 | 118,805 | +| sk | 785,101 | 4,615 | +| lt | 672,105 | 301 | +| et | 547,208 | 441 | +| hi | 516,342 | 205,645 | +| hr | 458,958 | 11,677 | +| is | 437,748 | 37 | +| lv | 428,002 | 88 | +| ms | 230,568 | 7,460 | +| bg | 198,671 | 5,320 | +| sr | 110,270 | 3,980 | +| ca | 100,201 | 1,914 | + +## FAQ vs. CQA +You can download the *Frequently Asked Questions* (FAQ) or the *Community Question Answering* (CQA) part of the dataset. + +```python +faq = load_dataset(""clips/mqa"", scope=""faq"") +cqa = load_dataset(""clips/mqa"", scope=""cqa"") +all = load_dataset(""clips/mqa"", scope=""all"") +``` +Although FAQ and CQA questions share the same structure, CQA questions can have multiple answers for a given questions, while FAQ questions have a single answer. FAQ questions typically only have a title (`name` key), while CQA have a title and a body (`name` and `text`). + +## Nesting and Data Fields +You can specify three different nesting level: `question`, `page` and `domain`. +#### Question +```python +load_dataset(""clips/mqa"", level=""question"") # default +``` +The default level is the question object: +- **name**: the title of the question(if any) in markdown format +- **text**: the body of the question (if any) in markdown format +- **answers**: a list of answers + - **text**: the title of the answer (if any) in markdown format + - **name**: the body of the answer in markdown format + - **is_accepted**: true if the answer is selected. + +#### Page +This level returns a list of questions present on the same page. This is mostly useful for FAQs since CQAs already have one question per page. +```python +load_dataset(""clips/mqa"", level=""page"") +``` + +#### Domain +This level returns a list of pages present on the web domain. This is a good way to cope with FAQs duplication by sampling one page per domain at each epoch. + +```python +load_dataset(""clips/mqa"", level=""domain"") +``` + +## Source Data + +This section was adapted from the source data description of [OSCAR](https://huggingface.co/datasets/oscar#source-data) + +Common Crawl is a non-profit foundation which produces and maintains an open repository of web crawled data that is both accessible and analysable. Common Crawl's complete web archive consists of petabytes of data collected over 8 years of web crawling. The repository contains raw web page HTML data (WARC files), metdata extracts (WAT files) and plain text extracts (WET files). The organisation's crawlers has always respected nofollow and robots.txt policies. 
+ +To construct MQA, we used the WARC files of Common Crawl. + +## People +This dataset was developed by [Maxime De Bruyn](https://maximedb.vercel.app), Ehsan Lotfi, Jeska Buhmann and Walter Daelemans. + +## Licensing Information +``` +These data are released under this licensing scheme. +We do not own any of the text from which these data has been extracted. +We license the actual packaging of these data under the Creative Commons CC0 license (""no rights reserved"") http://creativecommons.org/publicdomain/zero/1.0/ + +Should you consider that our data contains material that is owned by you and should therefore not be reproduced here, please: +* Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted. +* Clearly identify the copyrighted work claimed to be infringed. +* Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material. + +We will comply to legitimate requests by removing the affected sources from the next release of the corpus. +``` + +## Citation Information +``` +@inproceedings{de-bruyn-etal-2021-mfaq, + title = ""{MFAQ}: a Multilingual {FAQ} Dataset"", + author = ""De Bruyn, Maxime and + Lotfi, Ehsan and + Buhmann, Jeska and + Daelemans, Walter"", + booktitle = ""Proceedings of the 3rd Workshop on Machine Reading for Question Answering"", + month = nov, + year = ""2021"", + address = ""Punta Cana, Dominican Republic"", + publisher = ""Association for Computational Linguistics"", + url = ""https://aclanthology.org/2021.mrqa-1.1"", + pages = ""1--13"", +} + +```" +wikimedia/wikipedia,"{""language"": [""ab"", ""ace"", ""ady"", ""af"", ""alt"", ""am"", ""ami"", ""an"", ""ang"", ""anp"", ""ar"", ""arc"", ""ary"", ""arz"", ""as"", ""ast"", ""atj"", ""av"", ""avk"", ""awa"", ""ay"", ""az"", ""azb"", ""ba"", ""ban"", ""bar"", ""bbc"", ""bcl"", ""be"", ""bg"", ""bh"", ""bi"", ""bjn"", ""blk"", ""bm"", ""bn"", ""bo"", ""bpy"", ""br"", ""bs"", ""bug"", ""bxr"", ""ca"", ""cbk"", ""cdo"", ""ce"", ""ceb"", ""ch"", ""chr"", ""chy"", ""ckb"", ""co"", ""cr"", ""crh"", ""cs"", ""csb"", ""cu"", ""cv"", ""cy"", ""da"", ""dag"", ""de"", ""dga"", ""din"", ""diq"", ""dsb"", ""dty"", ""dv"", ""dz"", ""ee"", ""el"", ""eml"", ""en"", ""eo"", ""es"", ""et"", ""eu"", ""ext"", ""fa"", ""fat"", ""ff"", ""fi"", ""fj"", ""fo"", ""fon"", ""fr"", ""frp"", ""frr"", ""fur"", ""fy"", ""ga"", ""gag"", ""gan"", ""gcr"", ""gd"", ""gl"", ""glk"", ""gn"", ""gom"", ""gor"", ""got"", ""gpe"", ""gsw"", ""gu"", ""guc"", ""gur"", ""guw"", ""gv"", ""ha"", ""hak"", ""haw"", ""hbs"", ""he"", ""hi"", ""hif"", ""hr"", ""hsb"", ""ht"", ""hu"", ""hy"", ""hyw"", ""ia"", ""id"", ""ie"", ""ig"", ""ik"", ""ilo"", ""inh"", ""io"", ""is"", ""it"", ""iu"", ""ja"", ""jam"", ""jbo"", ""jv"", ""ka"", ""kaa"", ""kab"", ""kbd"", ""kbp"", ""kcg"", ""kg"", ""ki"", ""kk"", ""kl"", ""km"", ""kn"", ""ko"", ""koi"", ""krc"", ""ks"", ""ksh"", ""ku"", ""kv"", ""kw"", ""ky"", ""la"", ""lad"", ""lb"", ""lbe"", ""lez"", ""lfn"", ""lg"", ""li"", ""lij"", ""lld"", ""lmo"", ""ln"", ""lo"", ""lt"", ""ltg"", ""lv"", ""lzh"", ""mad"", ""mai"", ""map"", ""mdf"", ""mg"", ""mhr"", ""mi"", ""min"", ""mk"", ""ml"", ""mn"", ""mni"", ""mnw"", ""mr"", ""mrj"", ""ms"", ""mt"", ""mwl"", ""my"", ""myv"", ""mzn"", ""nah"", ""nan"", ""nap"", ""nds"", ""ne"", ""new"", ""nia"", ""nl"", ""nn"", ""no"", ""nov"", ""nqo"", ""nrf"", ""nso"", ""nv"", ""ny"", ""oc"", ""olo"", ""om"", ""or"", ""os"", ""pa"", ""pag"", ""pam"", 
""pap"", ""pcd"", ""pcm"", ""pdc"", ""pfl"", ""pi"", ""pih"", ""pl"", ""pms"", ""pnb"", ""pnt"", ""ps"", ""pt"", ""pwn"", ""qu"", ""rm"", ""rmy"", ""rn"", ""ro"", ""ru"", ""rue"", ""rup"", ""rw"", ""sa"", ""sah"", ""sat"", ""sc"", ""scn"", ""sco"", ""sd"", ""se"", ""sg"", ""sgs"", ""shi"", ""shn"", ""si"", ""sk"", ""skr"", ""sl"", ""sm"", ""smn"", ""sn"", ""so"", ""sq"", ""sr"", ""srn"", ""ss"", ""st"", ""stq"", ""su"", ""sv"", ""sw"", ""szl"", ""szy"", ""ta"", ""tay"", ""tcy"", ""te"", ""tet"", ""tg"", ""th"", ""ti"", ""tk"", ""tl"", ""tly"", ""tn"", ""to"", ""tpi"", ""tr"", ""trv"", ""ts"", ""tt"", ""tum"", ""tw"", ""ty"", ""tyv"", ""udm"", ""ug"", ""uk"", ""ur"", ""uz"", ""ve"", ""vec"", ""vep"", ""vi"", ""vls"", ""vo"", ""vro"", ""wa"", ""war"", ""wo"", ""wuu"", ""xal"", ""xh"", ""xmf"", ""yi"", ""yo"", ""yue"", ""za"", ""zea"", ""zgh"", ""zh"", ""zu""], ""license"": [""cc-by-sa-3.0"", ""gfdl""], ""size_categories"": [""n<1K"", ""1K + + + +### Supported Tasks and Leaderboards + +The dataset is generally used for Language Modeling. + +### Languages + +You can find the list of languages here: https://meta.wikimedia.org/wiki/List_of_Wikipedias + +## Dataset Structure + +### Data Instances + +An example looks as follows: +``` +{'id': '1', + 'url': 'https://simple.wikipedia.org/wiki/April', + 'title': 'April', + 'text': 'April is the fourth month...' +} +``` + +### Data Fields + +The data fields are the same among all configurations: +- `id` (`str`): ID of the article. +- `url` (`str`): URL of the article. +- `title` (`str`): Title of the article. +- `text` (`str`): Text content of the article. + +### Data Splits + +All configurations contain a single `train` split. + +## Dataset Creation + +### Curation Rationale + +[More Information Needed] + +### Source Data + +#### Initial Data Collection and Normalization + +The dataset is built from the Wikipedia dumps: https://dumps.wikimedia.org + +You can find the full list of languages and dates here: https://dumps.wikimedia.org/backup-index.html + +The articles have been parsed using the [`mwparserfromhell`](https://mwparserfromhell.readthedocs.io) tool. + +When uploading the data files for the 20231101 dump, we noticed that the Wikimedia Dumps website does not contain this date dump +for the ""bbc"", ""dga"", nor ""zgh"" Wikipedias. We have reported the issue to the Wikimedia Phabricator: https://phabricator.wikimedia.org/T351761 + +#### Who are the source language producers? + +[More Information Needed] + +### Annotations + +#### Annotation process + +[More Information Needed] + +#### Who are the annotators? + +[More Information Needed] + +### Personal and Sensitive Information + +[More Information Needed] + +## Considerations for Using the Data + +### Social Impact of Dataset + +[More Information Needed] + +### Discussion of Biases + +[More Information Needed] + +### Other Known Limitations + +[More Information Needed] + +## Additional Information + +### Dataset Curators + +[More Information Needed] + +### Licensing Information + +Copyright licensing information: https://dumps.wikimedia.org/legal.html + +All original textual content is licensed under the [GNU Free Documentation License](https://www.gnu.org/licenses/fdl-1.3.html) (GFDL) +and the [Creative Commons Attribution-Share-Alike 3.0 License](https://creativecommons.org/licenses/by-sa/3.0/). +Some text may be available only under the Creative Commons license; see their [Terms of Use](https://foundation.wikimedia.org/wiki/Policy:Terms_of_Use) for details. 
+Text written by some authors may be released under additional licenses or into the public domain. + +### Citation Information + +``` +@ONLINE{wikidump, + author = ""Wikimedia Foundation"", + title = ""Wikimedia Downloads"", + url = ""https://dumps.wikimedia.org"" +} +```" +MBZUAI/Bactrian-X,"{""license"": ""cc-by-nc-4.0"", ""task_categories"": [""text-generation""], ""language"": [""af"", ""ar"", ""az"", ""bn"", ""cs"", ""de"", ""en"", ""es"", ""et"", ""fi"", ""fr"", ""gl"", ""gu"", ""he"", ""hi"", ""hr"", ""id"", ""it"", ""ja"", ""ka"", ""kk"", ""km"", ""ko"", ""lt"", ""lv"", ""mk"", ""ml"", ""mn"", ""mr"", ""my"", ""ne"", ""nl"", ""pl"", ""ps"", ""pt"", ""ro"", ""ru"", ""si"", ""sl"", ""sv"", ""sw"", ""ta"", ""te"", ""th"", ""tl"", ""tr"", ""uk"", ""ur"", ""vi"", ""xh"", ""zh""], ""tags"": [""instruction-finetuning"", ""multilingual""], ""pretty_name"": ""Bactrian-X""}","# Dataset Card for ""Bactrian-X"" + +## Table of Contents +- [Dataset Description](#a-dataset-description) + - [Dataset Summary](#dataset-summary) + - [Languages](#languages) +- [Dataset Structure](#b-dataset-structure) + - [Data Fields](#data-fields) + - [Data Instances](#data-instances) + - [Data in 52 Languages](#data-in-52-languages) +- [Dataset Creation](#c-dataset-creation) +- [Considerations for Using the Data](#d-considerations-for-using-the-data) +- [Additional Information](#e-additional-information) + + +## A. Dataset Description + +- **Homepage:** https://github.com/mbzuai-nlp/Bactrian-X +- **Repository:** https://huggingface.co/datasets/MBZUAI/Bactrian-X +- **Paper:** to be released soon + +### Dataset Summary + + 
+ + +The Bactrian-X dataset is a collection of 3.4M instruction-response pairs in 52 languages, obtained by translating 67K English instructions ([alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca) + [dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data)) into 51 languages using the Google Translate API. The translated instructions are then fed to ChatGPT (`gpt-3.5-turbo`) to obtain its natural responses, resulting in 3.4M instruction-response pairs in 52 languages (52 languages x 67k instances = 3.4M instances). + +### Languages + +We follow the 52 languages of [mBART-50](https://arxiv.org/abs/2008.00401), with details given [below](#data-in-52-languages). + +## B. Dataset Structure + +### Data Fields + +The data fields are as follows: + +* `instruction`: describes the task the model should perform. Each of the 67K instructions is unique. +* `input`: optional context or input for the task. For example, when the instruction is ""Summarize the following article"", the input is the article. Around 40% of the examples have an input. +* `output`: the answer to the instruction as generated by `gpt-3.5-turbo`. +* `id`: the unique index for each data piece in a specific language. The same `id` across splits corresponds to the same original English data. + +### Data Instances + +An example in `en` looks as follows. + +```json +{ +""instruction"": ""What is the main idea of this text?"", +""input"": ""The robotics industry has become one of the fastest growing sectors in today’s economy. Robotics technology is being applied in various industries and applications, such as manufacturing, healthcare, and defense."", +""output"": ""The main idea of the text is that the robotics industry is rapidly growing and its technology is being used in many different industries and applications."", +""id"": ""alpaca-107"" +} +``` + +An example in `ar` looks as follows. +```json +{ +""instruction"": ""قم بالوصول إلى إعدادات المتصفح الافتراضية على جهاز الكمبيوتر."", +""input"": """", +""output"": ""لا يمكن لي كمساعد افتراضي القيام بهذه المهمة كونها تحتاج إلى الوصول إلى الجهاز المادي. يرجى الرجوع إلى دليل المستخدم للكمبيوتر الخاص بك أو البحث على الإنترنت لمعرفة كيفية الوصول إلى إعدادات المتصفح الافتراضية على جهاز الكمبيوتر الخاص بك."", +""id"": ""alpaca-41997"" +} +``` + +An example in `id` looks as follows. +```json +{ +""instruction"": ""Berapa banyak angka penting yang harus digunakan untuk nilai 0,000187?"", +""input"": """", +""output"": ""Ada tiga angka penting dalam nilai 0,000187."", +""id"": ""alpaca-40382"" +} +``` + +An example in `de` looks as follows. +```json +{ +""instruction"": ""Komponieren Sie ein Musikstück mit einem Titel, der Trauer bezeichnet."", +""input"": """", +""output"": ""\""Schmerzendes Herz\""\n\nDas Stück beginnt mit einer langsamen, traurigen Melodie, gespielt von einem Solo-Violoncello. Die Harmonien, die von einem Klavier begleitet werden, sind durcheinander und dissonant, um ein Gefühl von Schmerz und Trauer zu vermitteln.\n\nIm Laufe des Stücks treten andere Instrumente ein, wie Streicher, Flöte und Oboe, um die Melodie zu unterstützen und zu intensivieren. Der Rhythmus bleibt langsam und schwerfällig, um die Schwere des Schmerzes darzustellen.\n\nIn der Mitte des Stücks gibt es einen Höhepunkt, wo die Melodie und Harmonien sich verdichten und ein Gefühl von Überwältigung erzeugen. 
Der Solo-Violoncello spielt eine traurige und schnelle Phrase, die die Verzweiflung und Isolation des Protagonisten ausdrückt.\n\nDas Stück endet mit einer Wiederholung der ersten Melodie, gespielt von einem Solo-Klavier, während die anderen Instrumente auslaufen. Die Harmonien werden jedoch weicher und sanfter, um einen Hauch von Hoffnung zu vermitteln, dass der Schmerz langsam vergeht."", +""id"": ""alpaca-33619"" +} +``` + +An example in `fr` looks as follows. +```json +{ +""instruction"": ""Les Ceanothus ont-ils des racines peu profondes ?"", +""input"": """", +""output"": ""En tant qu'IA, je ne peux pas répondre avec une certitude absolue, mais voici des informations générales sur les racines de Ceanothus : \n\nLes Ceanothus ont tendance à avoir un système racinaire étendu et peu profond. Les racines peuvent s'étaler jusqu'à 4 à 6 fois la taille de la plante et s'étendre à environ 60 à 90 cm de profondeur. Cependant, cela peut varier en fonction du type de sol et de la variété de Ceanothus. Certaines variétés de Ceanothus pourraient avoir des racines plus profondes si les conditions s'y prêtent."", +""id"": ""dolly-10940"" +} +``` + + +### Data in 52 Languages + +| No | Languages | Code | Train | +| ---|---------------- | ----- | ----- | +| 1 | [Afrikaans](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/af) | af_ZA | 67017 | +| 2 | [Arabic](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ar) | ar_AR | 67017 | +| 3 | [Azerbaijani](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/az) | az_AZ | 67017 | +| 4 | [Bengali](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/bn) | bn_IN | 67017 | +| 5 | [Czech](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/cs) | cs_CZ | 67017 | +| 6 | [German](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/de) | de_DE | 67017 | +| 7 | [English](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/en) | en_XX | 67017 | +| 8 | [Spanish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/es) | es_XX | 67017 | +| 9 | [Estonian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/et) | et_EE | 67017 | +| 10 | [Persian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/fa) | fa_IR | 67017 | +| 11 | [Finnish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/fi) | fi_FI | 67017 | +| 12 | [French](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/fr) | fr_XX | 67017 | +| 13 | [Galician](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/gl) | gl_ES | 67017 | +| 14 | [Gujarati](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/gu) | gu_IN | 67017 | +| 15 | [Hebrew](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/he) | he_IL | 67017 | +| 16 | [Hindi](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/hi) | hi_IN | 67017 | +| 17 | [Croatian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/hr) | hr_HR | 67017 | +| 18 | [Indonesian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/id) | id_ID | 67017 | +| 19 | [Italian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/it) | it_IT | 67017 | +| 20 | [Japanese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ja) | ja_XX | 67017 | +| 21 | 
[Georgian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ka) | ka_GE | 67017 | +| 22 | [Kazakh](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/kk) | kk_KZ | 67017 | +| 23 | [Khmer](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/km) | km_KH | 67017 | +| 24 | [Korean](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ko) | ko_KR | 67017 | +| 25 | [Lithuanian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/lt) | lt_LT | 67017 | +| 26 | [Latvian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/lv) | lv_LV | 67017 | +| 27 | [Macedonian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/mk) | mk_MK | 67017 | +| 28 | [Malayalam](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ml) | ml_IN | 67017 | +| 29 | [Mongolian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/mn) | mn_MN | 67017 | +| 30 | [Marathi](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/mr) | mr_IN | 67017 | +| 31 | [Burmese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/my) | my_MM | 67017 | +| 32 | [Nepali](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ne) | ne_NP | 67017 | +| 33 | [Dutch](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/nl) | nl_XX | 67017 | +| 34 | [Polish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/pl) | pl_PL | 67017 | +| 35 | [Pashto](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ps) | ps_AF | 67017 | +| 36 | [Portuguese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/pt) | pt_XX | 67017 | +| 37 | [Romanian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ro) | ro_RO | 67017 | +| 38 | [Russian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ru) | ru_RU | 67017 | +| 39 | [Sinhala](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/si) | si_LK | 67017 | +| 40 | [Slovene](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/sl) | sl_SI | 67017 | +| 41 | [Swedish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/sv) | sv_SE | 67017 | +| 42 | [Swahili](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/sw) | sw_KE | 67017 | +| 43 | [Tamil](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ta) | ta_IN | 67017 | +| 44 | [Telugu](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/te) | te_IN | 67017 | +| 45 | [Thai](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/th) | th_TH | 67017 | +| 46 | [Tagalog](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/tl) | tl_XX | 67017 | +| 47 | [Turkish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/tr) | tr_TR | 67017 | +| 48 | [Ukrainian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/uk) | uk_UA | 67017 | +| 49 | [Urdu](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ur) | ur_PK | 67017 | +| 50 | [Vietnamese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/vi) | vi_VN | 67017 | +| 51 | [Xhosa](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/xh) | xh_ZA | 67017 | +| 52 | 
[Chinese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/zh) | zh_CN | 67017 | + +## C. Dataset Creation + +1. English Instructions: The English instructions are obtained from [alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca) and [dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data). +2. Instruction Translation: The instructions (and inputs) are translated into 51 languages using the Google Translate API (conducted in April 2023). +3. Output Generation: We generate outputs with `gpt-3.5-turbo` for each language (conducted in April 2023). + +## D. Considerations for Using the Data + +### Social Impact of Dataset + +NLP for everyone: this dataset helps to democratize cutting-edge instruction-following models across 52 languages. It also enables the first experiments with multilingual LoRA-based LLaMA models. + +### Discussion of Biases + +(1) Translation bias; (2) Potential English-culture bias in the translated dataset. + +### Other Known Limitations + +The `Bactrian-X` data is generated by a language model (`gpt-3.5-turbo`) and inevitably contains some errors or biases. We encourage users to use this data with caution and to propose new methods to filter or improve the imperfections. + +## E. Additional Information + +### Dataset Curators + +[Haonan Li](https://haonan-li.github.io/) and [Fajri Koto](http://www.fajrikoto.com) + +### Licensing Information + +The dataset is available under the [Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/legalcode) license. + +### Citation Information + + +``` +@misc{li2023bactrianx, + title={Bactrian-X : A Multilingual Replicable Instruction-Following Model with Low-Rank Adaptation}, + author={Haonan Li and Fajri Koto and Minghao Wu and Alham Fikri Aji and Timothy Baldwin}, + year={2023}, + eprint={2305.15011}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Contributions + +Thanks to [@haonan-li](https://github.com/haonan-li), [@fajri91](https://github.com/fajri91) for adding this dataset." +csebuetnlp/xlsum,"{""annotations_creators"": [""found""], ""language_creators"": [""found""], ""language"": [""am"", ""ar"", ""az"", ""bn"", ""my"", ""zh"", ""en"", ""fr"", ""gu"", ""ha"", ""hi"", ""ig"", ""id"", ""ja"", ""rn"", ""ko"", ""ky"", ""mr"", ""ne"", ""om"", ""ps"", ""fa"", ""pcm"", ""pt"", ""pa"", ""ru"", ""gd"", ""sr"", ""si"", ""so"", ""es"", ""sw"", ""ta"", ""te"", ""th"", ""ti"", ""tr"", ""uk"", ""ur"", ""uz"", ""vi"", ""cy"", ""yo""], ""license"": [""cc-by-nc-sa-4.0""], ""multilinguality"": [""multilingual""], ""size_categories"": [""1M \n Q: \n A: \n B: \n C: \n D: \n Answer: ```. We perform prediction by picking the answer within `[A, B, C, D]` that has the highest probability relative to the others (a minimal code sketch of this scoring procedure is given below, after the Training Set section). +- **Few-shot in-context learning (translated examples)** ^ + - Same as above, except the samples from the training set are translated to the target language so that the examples and evaluation data are in the same language. The training samples can be human or machine-translated. + + +#### With finetuning +- **English finetune & multilingual evaluation** + - The model is finetuned to the task using the English training set, typically with a sequence classification head. Then the model is evaluated in all the target languages individually. For the results presented in the paper, we used [the HuggingFace library](https://huggingface.co/docs/transformers/en/model_doc/xlm-roberta#transformers.XLMRobertaForMultipleChoice). 
+- **English finetune & cross-lingual evaluation** + - Same as above, except the model is evaluated in a cross-lingual setting, where for each question, the passage & answers could be provided in a different language. For example, the passage could be in language `x`, the question in language `y`, and the answers in language `z`. +- **Translate-train** ^ + - For each target language, the model is individually finetuned on training samples that have been machine-translated from English to that language. Each model is then evaluated in the respective target language. +- **Translate-train-all** + - Similar to above, except here the model is trained on translated samples from all target languages at once. The single finetuned model is then evaluated on all target languages. +- **Translate-train-all & cross-lingual evaluation** + - Same as above, except the single finetuned model is evaluated in a cross-lingual setting, where for each question, the passage & answers could be provided in a different language. +- **Translate-test** + - The model is finetuned using the English training data, the evaluation dataset is machine-translated to English, and the model is evaluated on the English translation. + - This setting is primarily a reflection of the quality of the machine translation system, but is useful for comparison to multilingual models. + +In addition, there are 83 further languages in FLORES-200 for which questions were not translated for Belebele. Since the passages exist in those target languages, machine-translating the questions & answers may enable decent evaluation of machine reading comprehension in those languages. + +## Training Set + +As discussed in the paper, we also provide an assembled training set, available at the [github repo](https://github.com/facebookresearch/belebele). + +The Belebele dataset is intended to be used only as a test set, and not for training or validation. Therefore, for models that require additional task-specific training, we instead propose using an assembled training set consisting of samples from pre-existing multiple-choice QA datasets in English. We considered diverse datasets and determined the most compatible to be [RACE](https://www.cs.cmu.edu/~glai1/data/race/), [SciQ](https://allenai.org/data/sciq), [MultiRC](https://cogcomp.seas.upenn.edu/multirc/), [MCTest](https://mattr1.github.io/mctest/), [MCScript2.0](https://aclanthology.org/S19-1012/), and [ReClor](https://whyu.me/reclor/). + +For each of the six datasets, we unpack and restructure the passages and questions from their respective formats. We then filter out less suitable samples (e.g. questions with multiple correct answers). In the end, the dataset comprises 67.5k training samples and 3.7k development samples, more than half of which are from RACE. We provide a script (`assemble_training_set.py`) to reconstruct this dataset for anyone to perform task finetuning. + +Since the training set is a joint sample of other datasets, it is governed by a different license. We do not claim any of that work or datasets to be our own. See the Licenses section in the README of https://github.com/facebookresearch/belebele. 
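+ +For concreteness, below is a minimal sketch of the prompt-based scoring procedure described in the evaluation settings above (shown here without in-context examples): a sample is formatted into the `P: ... Q: ... Answer:` template, and the answer letter with the highest next-token probability is picked. The dataset id (`facebook/belebele`), the split name, the field names, and the model are illustrative assumptions, not details prescribed by this card. + +```python +# Hedged sketch only: dataset id, split, field names, and model are assumptions. +import torch +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoTokenizer + +tokenizer = AutoTokenizer.from_pretrained(""gpt2"")  # placeholder model +model = AutoModelForCausalLM.from_pretrained(""gpt2"").eval() + +belebele = load_dataset(""facebook/belebele"", split=""eng_Latn"")  # assumed id/split + +def predict(sample): +    # Fill the multiple-choice template from the evaluation-settings section. +    prompt = ( +        f""P: {sample['flores_passage']} \n"" +        f""Q: {sample['question']} \n"" +        f""A: {sample['mc_answer1']} \n"" +        f""B: {sample['mc_answer2']} \n"" +        f""C: {sample['mc_answer3']} \n"" +        f""D: {sample['mc_answer4']} \n"" +        ""Answer:"" +    ) +    inputs = tokenizer(prompt, return_tensors=""pt"") +    with torch.no_grad(): +        logits = model(**inputs).logits[0, -1]  # next-token logits +    letters = [""A"", ""B"", ""C"", ""D""] +    ids = [tokenizer.encode("" "" + letter)[-1] for letter in letters] +    scores = [logits[i].item() for i in ids] +    return letters[scores.index(max(scores))] + +print(predict(belebele[0])) +``` + +In the few-shot settings, the five in-context examples would simply be prepended to the prompt in the same format. 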
+ +## Languages in Belebele + +FLORES-200 Code | English Name | Script | Family +---|---|---|--- +acm_Arab | Mesopotamian Arabic | Arab | Afro-Asiatic +afr_Latn | Afrikaans | Latn | Germanic +als_Latn | Tosk Albanian | Latn | Paleo-Balkanic +amh_Ethi | Amharic | Ethi | Afro-Asiatic +apc_Arab | North Levantine Arabic | Arab | Afro-Asiatic +arb_Arab | Modern Standard Arabic | Arab | Afro-Asiatic +arb_Latn | Modern Standard Arabic (Romanized) | Latn | Afro-Asiatic +ars_Arab | Najdi Arabic | Arab | Afro-Asiatic +ary_arab | Moroccan Arabic | Arab | Afro-Asiatic +arz_Arab | Egyptian Arabic | Arab | Afro-Asiatic +asm_Beng | Assamese | Beng | Indo-Aryan +azj_Latn | North Azerbaijani | Latn | Turkic +bam_Latn | Bambara | Latn | Mande +ben_Beng | Bengali | Beng | Indo-Aryan +ben_Latn | Bengali (Romanized) | Latn | Indo-Aryan +bod_Tibt | Standard Tibetan | Tibt | Sino-Tibetan +bul_Cyrl | Bulgarian | Cyrl | Balto-Slavic +cat_Latn | Catalan | Latn | Romance +ceb_Latn | Cebuano | Latn | Austronesian +ces_Latn | Czech | Latn | Balto-Slavic +ckb_Arab | Central Kurdish | Arab | Iranian +dan_Latn | Danish | Latn | Germanic +deu_Latn | German | Latn | Germanic +ell_Grek | Greek | Grek | Hellenic +eng_Latn | English | Latn | Germanic +est_Latn | Estonian | Latn | Uralic +eus_Latn | Basque | Latn | Basque +fin_Latn | Finnish | Latn | Uralic +fra_Latn | French | Latn | Romance +fuv_Latn | Nigerian Fulfulde | Latn | Atlantic-Congo +gaz_Latn | West Central Oromo | Latn | Afro-Asiatic +grn_Latn | Guarani | Latn | Tupian +guj_Gujr | Gujarati | Gujr | Indo-Aryan +hat_Latn | Haitian Creole | Latn | Atlantic-Congo +hau_Latn | Hausa | Latn | Afro-Asiatic +heb_Hebr | Hebrew | Hebr | Afro-Asiatic +hin_Deva | Hindi | Deva | Indo-Aryan +hin_Latn | Hindi (Romanized) | Latn | Indo-Aryan +hrv_Latn | Croatian | Latn | Balto-Slavic +hun_Latn | Hungarian | Latn | Uralic +hye_Armn | Armenian | Armn | Armenian +ibo_Latn | Igbo | Latn | Atlantic-Congo +ilo_Latn | Ilocano | Latn | Austronesian +ind_Latn | Indonesian | Latn | Austronesian +isl_Latn | Icelandic | Latn | Germanic +ita_Latn | Italian | Latn | Romance +jav_Latn | Javanese | Latn | Austronesian +jpn_Jpan | Japanese | Jpan | Japonic +kac_Latn | Jingpho | Latn | Sino-Tibetan +kan_Knda | Kannada | Knda | Dravidian +kat_Geor | Georgian | Geor | kartvelian +kaz_Cyrl | Kazakh | Cyrl | Turkic +kea_Latn | Kabuverdianu | Latn | Portuguese Creole +khk_Cyrl | Halh Mongolian | Cyrl | Mongolic +khm_Khmr | Khmer | Khmr | Austroasiatic +kin_Latn | Kinyarwanda | Latn | Atlantic-Congo +kir_Cyrl | Kyrgyz | Cyrl | Turkic +kor_Hang | Korean | Hang | Koreanic +lao_Laoo | Lao | Laoo | Kra-Dai +lin_Latn | Lingala | Latn | Atlantic-Congo +lit_Latn | Lithuanian | Latn | Balto-Slavic +lug_Latn | Ganda | Latn | Atlantic-Congo +luo_Latn | Luo | Latn | Nilo-Saharan +lvs_Latn | Standard Latvian | Latn | Balto-Slavic +mal_Mlym | Malayalam | Mlym | Dravidian +mar_Deva | Marathi | Deva | Indo-Aryan +mkd_Cyrl | Macedonian | Cyrl | Balto-Slavic +mlt_Latn | Maltese | Latn | Afro-Asiatic +mri_Latn | Maori | Latn | Austronesian +mya_Mymr | Burmese | Mymr | Sino-Tibetan +nld_Latn | Dutch | Latn | Germanic +nob_Latn | Norwegian Bokmål | Latn | Germanic +npi_Deva | Nepali | Deva | Indo-Aryan +npi_Latn | Nepali (Romanized) | Latn | Indo-Aryan +nso_Latn | Northern Sotho | Latn | Atlantic-Congo +nya_Latn | Nyanja | Latn | Afro-Asiatic +ory_Orya | Odia | Orya | Indo-Aryan +pan_Guru | Eastern Panjabi | Guru | Indo-Aryan +pbt_Arab | Southern Pashto | Arab | Indo-Aryan +pes_Arab | Western Persian | Arab | Iranian 
+plt_Latn | Plateau Malagasy | Latn | Austronesian +pol_Latn | Polish | Latn | Balto-Slavic +por_Latn | Portuguese | Latn | Romance +ron_Latn | Romanian | Latn | Romance +rus_Cyrl | Russian | Cyrl | Balto-Slavic +shn_Mymr | Shan | Mymr | Kra-Dai +sin_Latn | Sinhala (Romanized) | Latn | Indo-Aryan +sin_Sinh | Sinhala | Sinh | Indo-Aryan +slk_Latn | Slovak | Latn | Balto-Slavic +slv_Latn | Slovenian | Latn | Balto-Slavic +sna_Latn | Shona | Latn | Atlantic-Congo +snd_Arab | Sindhi | Arab | Indo-Aryan +som_Latn | Somali | Latn | Afro-Asiatic +sot_Latn | Southern Sotho | Latn | Atlantic-Congo +spa_Latn | Spanish | Latn | Romance +srp_Cyrl | Serbian | Cyrl | Balto-Slavic +ssw_Latn | Swati | Latn | Atlantic-Congo +sun_Latn | Sundanese | Latn | Austronesian +swe_Latn | Swedish | Latn | Germanic +swh_Latn | Swahili | Latn | Atlantic-Congo +tam_Taml | Tamil | Taml | Dravidian +tel_Telu | Telugu | Telu | Dravidian +tgk_Cyrl | Tajik | Cyrl | Iranian +tgl_Latn | Tagalog | Latn | Austronesian +tha_Thai | Thai | Thai | Kra-Dai +tir_Ethi | Tigrinya | Ethi | Afro-Asiatic +tsn_Latn | Tswana | Latn | Atlantic-Congo +tso_Latn | Tsonga | Latn | Atlantic-Congo +tur_Latn | Turkish | Latn | Turkic +ukr_Cyrl | Ukrainian | Cyrl | Balto-Slavic +urd_Arab | Urdu | Arab | Indo-Aryan +urd_Latn | Urdu (Romanized) | Latn | Indo-Aryan +uzn_Latn | Northern Uzbek | Latn | Turkic +vie_Latn | Vietnamese | Latn | Austroasiatic +war_Latn | Waray | Latn | Austronesian +wol_Latn | Wolof | Latn | Atlantic-Congo +xho_Latn | Xhosa | Latn | Atlantic-Congo +yor_Latn | Yoruba | Latn | Atlantic-Congo +zho_Hans | Chinese (Simplified) | Hans | Sino-Tibetan +zho_Hant | Chinese (Traditional) | Hant | Sino-Tibetan +zsm_Latn | Standard Malay | Latn | Austronesian +zul_Latn | Zulu | Latn | Atlantic-Congo" +Helsinki-NLP/news_commentary,"{""annotations_creators"": [""found""], ""language_creators"": [""found""], ""language"": [""ar"", ""cs"", ""de"", ""en"", ""es"", ""fr"", ""it"", ""ja"", ""nl"", ""pt"", ""ru"", ""zh""], ""license"": [""unknown""], ""multilinguality"": [""multilingual""], ""size_categories"": [""10K We initially collected a starting set of a thousand problems and natural language solutions by hiring freelance contractors on Upwork (upwork.com). We then worked with Surge AI (surgehq.ai), an NLP data labeling platform, to scale up our data collection. After collecting the full dataset, we asked workers to re-solve all problems, with no workers re-solving problems they originally wrote. We checked whether their final answers agreed with the original solutions, and any problems that produced disagreements were either repaired or discarded. We then performed another round of agreement checks on a smaller subset of problems, finding that 1.7% of problems still produce disagreements among contractors. We estimate this to be the fraction of problems that contain breaking errors or ambiguities. It is possible that a larger percentage of problems contain subtle errors. + +#### Who are the source language producers? + +[Needs More Information] + +### Annotations + +#### Annotation process + +[Needs More Information] + +#### Who are the annotators? 
+ +Surge AI (surgehq.ai) + +### Personal and Sensitive Information + +[Needs More Information] + +## Considerations for Using the Data + +### Social Impact of Dataset + +[Needs More Information] + +### Discussion of Biases + +[Needs More Information] + +### Other Known Limitations + +[Needs More Information] + +## Additional Information + +### Dataset Curators + +[Needs More Information] + +### Licensing Information + +The GSM8K dataset is licensed under the [MIT License](https://opensource.org/licenses/MIT). + +### Citation Information + +```bibtex +@article{cobbe2021gsm8k, + title={Training Verifiers to Solve Math Word Problems}, + author={Cobbe, Karl and Kosaraju, Vineet and Bavarian, Mohammad and Chen, Mark and Jun, Heewoo and Kaiser, Lukasz and Plappert, Matthias and Tworek, Jerry and Hilton, Jacob and Nakano, Reiichiro and Hesse, Christopher and Schulman, John}, + journal={arXiv preprint arXiv:2110.14168}, + year={2021} +} +@misc{shi2022language, + title={Language Models are Multilingual Chain-of-Thought Reasoners}, + author={Freda Shi and Mirac Suzgun and Markus Freitag and Xuezhi Wang and Suraj Srivats and Soroush Vosoughi and Hyung Won Chung and Yi Tay and Sebastian Ruder and Denny Zhou and Dipanjan Das and Jason Wei}, + year={2022}, + eprint={2210.03057}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Contributions + +Thanks to [@juletx](https://github.com/juletx) for adding this dataset." +mozilla-foundation/common_voice_17_0,"{""pretty_name"": ""Common Voice Corpus 17.0"", ""annotations_creators"": [""crowdsourced""], ""language_creators"": [""crowdsourced""], ""language"": [""ab"", ""af"", ""am"", ""ar"", ""as"", ""ast"", ""az"", ""ba"", ""bas"", ""be"", ""bg"", ""bn"", ""br"", ""ca"", ""ckb"", ""cnh"", ""cs"", ""cv"", ""cy"", ""da"", ""de"", ""dv"", ""dyu"", ""el"", ""en"", ""eo"", ""es"", ""et"", ""eu"", ""fa"", ""fi"", ""fr"", ""fy"", ""ga"", ""gl"", ""gn"", ""ha"", ""he"", ""hi"", ""hsb"", ""ht"", ""hu"", ""hy"", ""ia"", ""id"", ""ig"", ""is"", ""it"", ""ja"", ""ka"", ""kab"", ""kk"", ""kmr"", ""ko"", ""ky"", ""lg"", ""lij"", ""lo"", ""lt"", ""ltg"", ""lv"", ""mdf"", ""mhr"", ""mk"", ""ml"", ""mn"", ""mr"", ""mrj"", ""mt"", ""myv"", ""nan"", ""ne"", ""nhi"", ""nl"", ""nn"", ""nso"", ""oc"", ""or"", ""os"", ""pa"", ""pl"", ""ps"", ""pt"", ""quy"", ""rm"", ""ro"", ""ru"", ""rw"", ""sah"", ""sat"", ""sc"", ""sk"", ""skr"", ""sl"", ""sq"", ""sr"", ""sv"", ""sw"", ""ta"", ""te"", ""th"", ""ti"", ""tig"", ""tk"", ""tok"", ""tr"", ""tt"", ""tw"", ""ug"", ""uk"", ""ur"", ""uz"", ""vi"", ""vot"", ""yi"", ""yo"", ""yue"", ""zgh"", ""zh"", ""zu"", ""zza""], ""language_bcp47"": [""zh-CN"", ""zh-HK"", ""zh-TW"", ""sv-SE"", ""rm-sursilv"", ""rm-vallader"", ""pa-IN"", ""nn-NO"", ""ne-NP"", ""nan-tw"", ""hy-AM"", ""ga-IE"", ""fy-NL""], ""license"": [""cc0-1.0""], ""multilinguality"": [""multilingual""], ""source_datasets"": [""extended|common_voice""], ""paperswithcode_id"": ""common-voice"", ""extra_gated_prompt"": ""By clicking on \u201cAccess repository\u201d below, you also agree to not attempt to determine the identity of speakers in the Common Voice dataset.""}","# Dataset Card for Common Voice Corpus 17.0 + +## Table of Contents +- [Dataset Description](#dataset-description) + - [Dataset Summary](#dataset-summary) + - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) + - [Languages](#languages) +- [Dataset Structure](#dataset-structure) + - [Data Instances](#data-instances) + - [Data Fields](#data-fields) + - [Data Splits](#data-splits) +- 
[Dataset Creation](#dataset-creation) + - [Curation Rationale](#curation-rationale) + - [Source Data](#source-data) + - [Annotations](#annotations) + - [Personal and Sensitive Information](#personal-and-sensitive-information) +- [Considerations for Using the Data](#considerations-for-using-the-data) + - [Social Impact of Dataset](#social-impact-of-dataset) + - [Discussion of Biases](#discussion-of-biases) + - [Other Known Limitations](#other-known-limitations) +- [Additional Information](#additional-information) + - [Dataset Curators](#dataset-curators) + - [Licensing Information](#licensing-information) + - [Citation Information](#citation-information) + - [Contributions](#contributions) + +## Dataset Description + +- **Homepage:** https://commonvoice.mozilla.org/en/datasets +- **Repository:** https://github.com/common-voice/common-voice +- **Paper:** https://arxiv.org/abs/1912.06670 +- **Leaderboard:** https://paperswithcode.com/dataset/common-voice +- **Point of Contact:** [Vaibhav Srivastav](mailto:vaibhav@huggingface.co) + +### Dataset Summary + +The Common Voice dataset consists of a unique MP3 and corresponding text file. +Many of the 31175 recorded hours in the dataset also include demographic metadata like age, sex, and accent +that can help improve the accuracy of speech recognition engines. + +The dataset currently consists of 20408 validated hours in 124 languages, but more voices and languages are always added. +Take a look at the [Languages](https://commonvoice.mozilla.org/en/languages) page to request a language or start contributing. + +You can donate to this non-profit, donation-funded project here (https://commonvoice.mozilla.org/?form=common-voice) + +### Supported Tasks and Leaderboards + +The results for models trained on the Common Voice datasets are available via the +[🤗 Speech Bench](https://huggingface.co/spaces/huggingface/hf-speech-bench) + +### Languages + +``` +Abkhaz, Afrikaans, Albanian, Amharic, Arabic, Armenian, Assamese, Asturian, Azerbaijani, Basaa, Bashkir, Basque, Belarusian, Bengali, Breton, Bulgarian, Cantonese, Catalan, Central Kurdish, Chinese (China), Chinese (Hong Kong), Chinese (Taiwan), Chuvash, Czech, Danish, Dhivehi, Dioula, Dutch, English, Erzya, Esperanto, Estonian, Finnish, French, Frisian, Galician, Georgian, German, Greek, Guarani, Haitian, Hakha Chin, Hausa, Hebrew, Hill Mari, Hindi, Hungarian, Icelandic, Igbo, Indonesian, Interlingua, Irish, Italian, Japanese, Kabyle, Kazakh, Kinyarwanda, Korean, Kurmanji Kurdish, Kyrgyz, Lao, Latgalian, Latvian, Ligurian, Lithuanian, Luganda, Macedonian, Malayalam, Maltese, Marathi, Meadow Mari, Moksha, Mongolian, Nepali, Northern Sotho, Norwegian Nynorsk, Occitan, Odia, Ossetian, Pashto, Persian, Polish, Portuguese, Punjabi, Quechua Chanka, Romanian, Romansh Sursilvan, Romansh Vallader, Russian, Sakha, Santali (Ol Chiki), Saraiki, Sardinian, Serbian, Slovak, Slovenian, Sorbian, Upper, Spanish, Swahili, Swedish, Taiwanese (Minnan), Tamazight, Tamil, Tatar, Telugu, Thai, Tigre, Tigrinya, Toki Pona, Turkish, Turkmen, Twi, Ukrainian, Urdu, Uyghur, Uzbek, Vietnamese, Votic, Welsh, Western Sierra Puebla Nahuatl, Yiddish, Yoruba, Zaza, Zulu +``` + +## How to use + +The `datasets` library allows you to load and pre-process your dataset in pure Python, at scale. The dataset can be downloaded and prepared in one call to your local drive by using the `load_dataset` function. 
+ +For example, to download the Hindi config, simply specify the corresponding language config name (i.e., ""hi"" for Hindi): +```python +from datasets import load_dataset + +cv_17 = load_dataset(""mozilla-foundation/common_voice_17_0"", ""hi"", split=""train"") +``` + +Using the datasets library, you can also stream the dataset on-the-fly by adding a `streaming=True` argument to the `load_dataset` function call. Loading a dataset in streaming mode loads individual samples of the dataset at a time, rather than downloading the entire dataset to disk. +```python +from datasets import load_dataset + +cv_17 = load_dataset(""mozilla-foundation/common_voice_17_0"", ""hi"", split=""train"", streaming=True) + +print(next(iter(cv_17))) +``` + +*Bonus*: create a [PyTorch dataloader](https://huggingface.co/docs/datasets/use_with_pytorch) directly with your own datasets (local/streamed). + +### Local + +```python +from datasets import load_dataset +from torch.utils.data import BatchSampler, DataLoader, RandomSampler + +cv_17 = load_dataset(""mozilla-foundation/common_voice_17_0"", ""hi"", split=""train"") + +batch_sampler = BatchSampler(RandomSampler(cv_17), batch_size=32, drop_last=False) +dataloader = DataLoader(cv_17, batch_sampler=batch_sampler) +``` + +### Streaming + +```python +from datasets import load_dataset +from torch.utils.data import DataLoader + +cv_17 = load_dataset(""mozilla-foundation/common_voice_17_0"", ""hi"", split=""train"", streaming=True) +dataloader = DataLoader(cv_17, batch_size=32) +``` + +To find out more about loading and preparing audio datasets, head over to [hf.co/blog/audio-datasets](https://huggingface.co/blog/audio-datasets). + +### Example scripts + +Train your own CTC or Seq2Seq Automatic Speech Recognition models on Common Voice 17 with `transformers` - [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition). + +## Dataset Structure + +### Data Instances + +A typical data point comprises the `path` to the audio file and its `sentence`. +Additional fields include `accent`, `age`, `client_id`, `up_votes`, `down_votes`, `gender`, `locale` and `segment`. + +```python +{ + 'client_id': 'd59478fbc1ee646a28a3c652a119379939123784d99131b865a89f8b21c81f69276c48bd574b81267d9d1a77b83b43e6d475a6cfc79c232ddbca946ae9c7afc5', + 'path': 'et/clips/common_voice_et_18318995.mp3', + 'audio': { + 'path': 'et/clips/common_voice_et_18318995.mp3', + 'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32), + 'sampling_rate': 48000 + }, + 'sentence': 'Tasub kokku saada inimestega, keda tunned juba ammust ajast saati.', + 'up_votes': 2, + 'down_votes': 0, + 'age': 'twenties', + 'gender': 'male', + 'accent': '', + 'locale': 'et', + 'segment': '' +} +``` + +### Data Fields + +`client_id` (`string`): An id for which client (voice) made the recording + +`path` (`string`): The path to the audio file + +`audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column (`dataset[0][""audio""]`), the audio file is automatically decoded and resampled to `dataset.features[""audio""].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `""audio""` column, *i.e.* `dataset[0][""audio""]` should **always** be preferred over `dataset[""audio""][0]`. 
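+ +For instance, the following minimal snippet (the ""hi"" config and ""train"" split are just examples) decodes a single sample without triggering decoding of the entire column: + +```python +from datasets import load_dataset + +cv_17 = load_dataset(""mozilla-foundation/common_voice_17_0"", ""hi"", split=""train"") + +sample = cv_17[0][""audio""]  # decodes and resamples only this one file +# cv_17[""audio""][0] would instead decode every audio file in the split first +``` 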
+ +`sentence` (`string`): The sentence the user was prompted to speak + +`up_votes` (`int64`): How many upvotes the audio file has received from reviewers + +`down_votes` (`int64`): How many downvotes the audio file has received from reviewers + +`age` (`string`): The age of the speaker (e.g. `teens`, `twenties`, `fifties`) + +`gender` (`string`): The gender of the speaker + +`accent` (`string`): Accent of the speaker + +`locale` (`string`): The locale of the speaker + +`segment` (`string`): Usually an empty field + +### Data Splits + +The speech material has been subdivided into portions for dev, train, test, validated, invalidated, reported and other. + +The validated data is data that has been validated by reviewers and received upvotes indicating that the data is of high quality. + +The invalidated data is data that has been invalidated by reviewers and received downvotes indicating that the data is of low quality. + +The reported data is data that has been reported for various reasons. + +The other data is data that has not yet been reviewed. + +The dev, test and train splits all contain data that has been reviewed and deemed of high quality. + +## Data Preprocessing Recommended by Hugging Face + +The following are data preprocessing steps advised by the Hugging Face team. They are accompanied by an example code snippet that shows how to put them into practice. + +Many examples in this dataset have trailing quotation marks, e.g. _“the cat sat on the mat.”_. These trailing quotation marks do not change the actual meaning of the sentence, and it is near impossible to infer whether a sentence is a quotation or not from audio data alone. In these cases, it is advised to strip the quotation marks, leaving: _the cat sat on the mat_. + +In addition, the majority of training sentences end in punctuation ( . or ? or ! ), whereas just a small proportion do not. In the dev set, **almost all** sentences end in punctuation. Thus, it is recommended to append a full-stop ( . ) to the end of the small number of training examples that do not end in punctuation. + +```python +from datasets import load_dataset + +ds = load_dataset(""mozilla-foundation/common_voice_17_0"", ""en"", use_auth_token=True) + +def prepare_dataset(batch): + """"""Function to preprocess the dataset with the .map method"""""" + transcription = batch[""sentence""] + + if transcription.startswith('""') and transcription.endswith('""'): + # we can remove trailing quotation marks as they do not affect the transcription + transcription = transcription[1:-1] + + if transcription[-1] not in [""."", ""?"", ""!""]: + # append a full-stop to sentences that do not end in punctuation + transcription = transcription + ""."" + + batch[""sentence""] = transcription + + return batch + +ds = ds.map(prepare_dataset, desc=""preprocess dataset"") +``` + +## Dataset Creation + +### Curation Rationale + +[Needs More Information] + +### Source Data + +#### Initial Data Collection and Normalization + +[Needs More Information] + +#### Who are the source language producers? + +[Needs More Information] + +### Annotations + +#### Annotation process + +[Needs More Information] + +#### Who are the annotators? + +[Needs More Information] + +### Personal and Sensitive Information + +The dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in the Common Voice dataset. 
+ +## Considerations for Using the Data + +### Social Impact of Dataset + +The dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in the Common Voice dataset. + +### Discussion of Biases + +[More Information Needed] + +### Other Known Limitations + +[More Information Needed] + +## Additional Information + +### Dataset Curators + +[More Information Needed] + +### Licensing Information + +Public Domain, [CC-0](https://creativecommons.org/share-your-work/public-domain/cc0/) + +### Citation Information + +``` +@inproceedings{commonvoice:2020, + author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.}, + title = {Common Voice: A Massively-Multilingual Speech Corpus}, + booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)}, + pages = {4211--4215}, + year = 2020 +} +```" +INK-USC/xcsr,"{""annotations_creators"": [""crowdsourced""], ""language_creators"": [""crowdsourced"", ""machine-generated""], ""language"": [""ar"", ""de"", ""en"", ""es"", ""fr"", ""hi"", ""it"", ""ja"", ""nl"", ""pl"", ""pt"", ""ru"", ""sw"", ""ur"", ""vi"", ""zh""], ""license"": [""mit""], ""multilinguality"": [""multilingual""], ""size_categories"": [""1K