Uploading tokenizer_robustness_completion_chinese_official_vs_common_names subset
README.md
CHANGED
@@ -484,6 +484,40 @@ dataset_info:
     num_examples: 7
   download_size: 5945
   dataset_size: 1104
+- config_name: tokenizer_robustness_completion_chinese_official_vs_common_names
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 362
+    num_examples: 2
+  download_size: 5622
+  dataset_size: 362
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -541,6 +575,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_numerical_formats/test-*
+- config_name: tokenizer_robustness_completion_chinese_official_vs_common_names
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_official_vs_common_names/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
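The diff registers the new subset in two places: a `dataset_info` block declaring its schema and size, and a `configs` entry mapping the config name to its parquet files. A minimal sketch of loading the subset with the `datasets` library, assuming a placeholder repo id (the actual `<namespace>/<dataset>` path is not shown in this commit):

```python
from datasets import load_dataset

# "user/tokenization-robustness" is a hypothetical repo id; substitute
# the real namespace/name of this dataset repository.
ds = load_dataset(
    "user/tokenization-robustness",
    "tokenizer_robustness_completion_chinese_official_vs_common_names",
    split="test",
)

print(ds.num_rows)   # 2, per the splits metadata above
print(ds.features)   # question, choices, answer, ... as declared in dataset_info
```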
tokenizer_robustness_completion_chinese_official_vs_common_names/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:569b71d8b0409a0155b9bc1c3343b1a043dd622b2da3a207afb49ef6468dfd3e
+size 5622
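The parquet file itself is stored through Git LFS, so the repository tracks only this three-line pointer: the spec version, the SHA-256 of the real file (`oid`), and its byte size (which matches the `download_size: 5622` declared in the README metadata). A minimal sketch of checking a downloaded copy against the pointer, assuming the file has already been fetched to the path below:

```python
import hashlib

# Path mirrors the repo layout; adjust to wherever the file was downloaded.
path = "tokenizer_robustness_completion_chinese_official_vs_common_names/test-00000-of-00001.parquet"
expected_oid = "569b71d8b0409a0155b9bc1c3343b1a043dd622b2da3a207afb49ef6468dfd3e"
expected_size = 5622

data = open(path, "rb").read()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("parquet file matches its LFS pointer")
```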