Uploading tokenizer_robustness_completion_chinese_sentence_boundaries subset
README.md CHANGED
@@ -552,6 +552,40 @@ dataset_info:
     num_examples: 24
   download_size: 7229
   dataset_size: 4166
+- config_name: tokenizer_robustness_completion_chinese_sentence_boundaries
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 379
+    num_examples: 1
+  download_size: 6073
+  dataset_size: 379
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -617,6 +651,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_romanization/test-*
+- config_name: tokenizer_robustness_completion_chinese_sentence_boundaries
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_sentence_boundaries/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
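Once this config entry is merged, the subset can be loaded by config name with the datasets library. A minimal sketch follows; the repository id is a placeholder, since the commit view does not show the full Hub path.

from datasets import load_dataset

# Placeholder repo id: substitute the actual <org>/<dataset> path on the Hub.
ds = load_dataset(
    "org/tokenizer-robustness",  # assumption, not shown in this commit view
    "tokenizer_robustness_completion_chinese_sentence_boundaries",
    split="test",
)

print(ds)  # expect 1 example, per the splits metadata above
print(ds[0]["question"], ds[0]["choices"], ds[0]["answer_label"])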
tokenizer_robustness_completion_chinese_sentence_boundaries/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b3a8562f166ce31ac5fd58cc023fd541fbe3ad71f8da8a75f345b1c1a34e6bb
+size 6073
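The parquet file itself is stored via Git LFS, so the commit records only this pointer: the spec version, the sha256 of the actual blob (oid), and its byte size. A minimal sketch of checking a downloaded file against such a pointer; the local file paths are hypothetical.

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, blob_path: Path) -> bool:
    # Each pointer line is "key value"; parse into a dict.
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = blob_path.read_bytes()
    # The blob must match both the recorded size and the sha256 digest.
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Hypothetical paths for the pointer text and the downloaded parquet blob.
pointer = Path("test-00000-of-00001.parquet.pointer").read_text()
print("oid/size match:", verify_lfs_pointer(pointer, Path("test-00000-of-00001.parquet")))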