Uploading tokenizer_robustness_completion_chinese_transliteration_variations subset
README.md CHANGED
@@ -722,6 +722,40 @@ dataset_info:
     num_examples: 30
   download_size: 7704
   dataset_size: 5304
+- config_name: tokenizer_robustness_completion_chinese_transliteration_variations
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 374
+    num_examples: 2
+  download_size: 5636
+  dataset_size: 374
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -807,6 +841,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_traditional_vs_simplified/test-*
+- config_name: tokenizer_robustness_completion_chinese_transliteration_variations
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_transliteration_variations/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
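Once this commit lands, the new subset is addressable by its config name. Below is a minimal sketch of loading it with the Hugging Face datasets library; the repository id is a placeholder (the diff does not show it), while the config name and the expected split size come from the YAML above.

from datasets import load_dataset

# Placeholder repo id: the diff does not name the repository this README belongs to.
ds = load_dataset(
    "ORG/DATASET_REPO",  # assumption: replace with the actual dataset repo id
    "tokenizer_robustness_completion_chinese_transliteration_variations",
    split="test",
)

print(ds.num_rows)      # per the metadata above, the test split has 2 examples
print(ds.column_names)  # question, choices, answer, answer_label, split, ...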
tokenizer_robustness_completion_chinese_transliteration_variations/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d54e33e6ecba92f4bfe4af41c93c320d2e030a8928a6ff11754adfb7589359c
+size 5636
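The parquet file itself is stored via Git LFS, so only the pointer above lives in the repository. A quick sketch for checking a downloaded copy against the pointer's oid and size; the local path is an assumption about where the file was fetched.

import hashlib
from pathlib import Path

# Assumption: the file was downloaded to the same relative path as in the repo.
path = Path("tokenizer_robustness_completion_chinese_transliteration_variations/test-00000-of-00001.parquet")
expected_oid = "8d54e33e6ecba92f4bfe4af41c93c320d2e030a8928a6ff11754adfb7589359c"
expected_size = 5636  # bytes, from the LFS pointer above

data = path.read_bytes()
assert len(data) == expected_size, f"size mismatch: got {len(data)} bytes"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("file matches the LFS pointer")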