Uploading tokenizer_robustness_completion_chinese_word_reordering subset
README.md
CHANGED
@@ -824,6 +824,40 @@ dataset_info:
     num_examples: 4
   download_size: 5586
   dataset_size: 548
+- config_name: tokenizer_robustness_completion_chinese_word_reordering
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 859
+    num_examples: 3
+  download_size: 6209
+  dataset_size: 859
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -921,6 +955,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_word_ordering/test-*
+- config_name: tokenizer_robustness_completion_chinese_word_reordering
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_word_reordering/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
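Once this commit lands, the new subset is addressable by its config name. A minimal loading sketch with the Hugging Face `datasets` library; the repo id below is a placeholder, since the diff does not show the repository name:

```python
from datasets import load_dataset

# "org/tokenization-robustness" is a hypothetical repo id; substitute the
# actual dataset repository this commit belongs to.
ds = load_dataset(
    "org/tokenization-robustness",
    name="tokenizer_robustness_completion_chinese_word_reordering",
    split="test",
)

print(ds.num_rows)  # 3, per the splits metadata added above
print(ds.features)  # question, choices, answer, ... as declared in dataset_info
```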
tokenizer_robustness_completion_chinese_word_reordering/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7b6538aa177f8a326df89ff1d4b934cf7e40930da2c51ca477a479c93087596
+size 6209
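The three lines above are a Git LFS pointer rather than the parquet payload itself; git tracks the pointer while the 6209-byte file lives in LFS storage. A sketch of inspecting the file once it is materialized locally (e.g. after `git lfs pull`), assuming pandas with a parquet engine such as pyarrow is installed:

```python
import pandas as pd

# Path as added in this commit.
df = pd.read_parquet(
    "tokenizer_robustness_completion_chinese_word_reordering/test-00000-of-00001.parquet"
)

print(len(df))              # 3 rows, matching num_examples in the metadata
print(df.columns.tolist())  # question, choices, answer, answer_label, ...
```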