Uploading tokenizer_robustness_completion_chinese_word_ordering subset
README.md CHANGED

@@ -790,6 +790,40 @@ dataset_info:
     num_examples: 1
   download_size: 5564
   dataset_size: 187
+- config_name: tokenizer_robustness_completion_chinese_word_ordering
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 548
+    num_examples: 4
+  download_size: 5586
+  dataset_size: 548
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -883,6 +917,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_transliteration_vs_translation/test-*
+- config_name: tokenizer_robustness_completion_chinese_word_ordering
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_word_ordering/test-*
 ---

 # Dataset Card for Tokenization Robustness
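With this config entry in place, the new subset should be loadable by name through the `datasets` library. A minimal sketch, assuming a hypothetical repo id (the actual Hub repository id is not shown in this diff):

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the Hub repository this card belongs to.
REPO_ID = "user/tokenization-robustness"

ds = load_dataset(
    REPO_ID,
    "tokenizer_robustness_completion_chinese_word_ordering",
    split="test",
)

print(ds.num_rows)             # 4, per the splits metadata above
print(ds.features["answer"])   # Value(dtype='int64'), per the features block
print(ds[0]["question"])       # first multiple-choice question
```

The `data_files` glob `tokenizer_robustness_completion_chinese_word_ordering/test-*` is what maps this config name to the parquet shard added below.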
tokenizer_robustness_completion_chinese_word_ordering/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a71b3b72254e3b8da01e0429c836eb057a0e795a2373e4ba4953f93b8559608d
+size 5586
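The parquet shard itself is stored via Git LFS, so the diff records only the pointer: the LFS spec version, the SHA-256 oid of the real file, and its size in bytes (5586, matching the `download_size` in the config above). A minimal sketch for checking a locally materialized copy against the pointer's oid, assuming the repo has been cloned and the LFS object fetched:

```python
import hashlib

# Path is an assumption about where the repo was cloned.
path = "tokenizer_robustness_completion_chinese_word_ordering/test-00000-of-00001.parquet"
expected_oid = "a71b3b72254e3b8da01e0429c836eb057a0e795a2373e4ba4953f93b8559608d"

# Hash the file in chunks; the digest should equal the pointer's oid.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected_oid, "content does not match the LFS pointer"
print("OK:", h.hexdigest())
```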