Uploading tokenizer_robustness_completion_chinese_technical_product_names subset
README.md
CHANGED
@@ -620,6 +620,40 @@ dataset_info:
     num_examples: 1
   download_size: 5450
   dataset_size: 155
+- config_name: tokenizer_robustness_completion_chinese_technical_product_names
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 191
+    num_examples: 1
+  download_size: 5602
+  dataset_size: 191
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -693,6 +727,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_symbol_vs_name/test-*
+- config_name: tokenizer_robustness_completion_chinese_technical_product_names
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_technical_product_names/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
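The added `dataset_info` entry and `configs` mapping above are what make the new subset loadable on its own. A minimal sketch with the `datasets` library, assuming a placeholder repository id (the commit does not name the dataset repo):

```python
from datasets import load_dataset

# "ORG/tokenization-robustness" is a hypothetical repo id; substitute the real one.
ds = load_dataset(
    "ORG/tokenization-robustness",
    "tokenizer_robustness_completion_chinese_technical_product_names",
    split="test",
)

print(ds.features)  # question, choices, answer, answer_label, ... per the schema above
print(ds[0])        # the subset currently holds a single example (num_examples: 1)
```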
tokenizer_robustness_completion_chinese_technical_product_names/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e35bcc9d3f8c8ab39b62014a85f719df9847269f35ced8adc33bbad73a11a6f5
+size 5602