Uploading tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space subset
README.md CHANGED
@@ -858,6 +858,40 @@ dataset_info:
     num_examples: 3
   download_size: 6209
   dataset_size: 859
+- config_name: tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 5558
+    num_examples: 26
+  download_size: 7633
+  dataset_size: 5558
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -959,6 +993,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_word_reordering/test-*
+- config_name: tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
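The metadata block above registers the new `tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space` subset (one `test` split with 26 examples). As a minimal sketch, the subset should be loadable through the `datasets` library once this commit lands; the repository id below is a placeholder, since the namespace/name is not shown in this diff.

```python
from datasets import load_dataset

# Placeholder repository id -- the actual namespace/name is not part of this commit.
REPO_ID = "your-org/tokenization-robustness"

# Load the newly added subset by its config name and test split.
ds = load_dataset(
    REPO_ID,
    name="tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space",
    split="test",
)

print(ds.num_rows)   # 26 examples, per the splits metadata added above
print(ds.features)   # question, choices, answer, answer_label, split, ...
```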
tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b89c8545accd3b9b6bc50a3e8f3a5a3f9fb3e3dd5e55f6f6345cb6f39e658806
+size 7633
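The parquet shard itself is stored through Git LFS, so the file committed above is only a pointer recording the blob's sha256 digest and byte size. A minimal sketch for checking a locally downloaded shard against that pointer follows; the local path is assumed, not part of the commit.

```python
import hashlib
from pathlib import Path

# Values copied from the LFS pointer committed above.
EXPECTED_SHA256 = "b89c8545accd3b9b6bc50a3e8f3a5a3f9fb3e3dd5e55f6f6345cb6f39e658806"
EXPECTED_SIZE = 7633

# Assumed local path to the downloaded shard; adjust to wherever the file was fetched.
shard = Path(
    "tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space/"
    "test-00000-of-00001.parquet"
)

data = shard.read_bytes()
assert len(data) == EXPECTED_SIZE, "unexpected file size"
assert hashlib.sha256(data).hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("shard matches the LFS pointer")
```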