Uploading tokenizer_robustness_completion_chinese_homoglyphs subset
README.md
CHANGED
@@ -382,6 +382,40 @@ dataset_info:
     num_examples: 11
   download_size: 6441
   dataset_size: 1920
+- config_name: tokenizer_robustness_completion_chinese_homoglyphs
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 4251
+    num_examples: 24
+  download_size: 7378
+  dataset_size: 4251
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -427,6 +461,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_equivalent_expressions/test-*
+- config_name: tokenizer_robustness_completion_chinese_homoglyphs
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_homoglyphs/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
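With this commit applied, the homoglyphs subset becomes selectable as its own config via the datasets library. A minimal sketch of loading it, where "ORG/DATASET" is a hypothetical placeholder for this repository's actual "org/name" path (not given on this page):

from datasets import load_dataset

# "ORG/DATASET" is a placeholder; substitute this repository's real path.
ds = load_dataset(
    "ORG/DATASET",
    "tokenizer_robustness_completion_chinese_homoglyphs",
    split="test",
)
print(len(ds))            # 24 examples, per the splits metadata above
print(ds[0]["question"])  # fields follow the features schema in the diff

Each row carries the question/choices/answer fields plus the language and subcategory metadata declared in the features block above.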
tokenizer_robustness_completion_chinese_homoglyphs/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:664ae39e7dcf5e4972d65385ec84627bde9dc4a0feb80c076f6c784ce4a11c42
+size 7378
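The committed file is a Git LFS pointer (spec version, sha256 oid, byte size) rather than the raw parquet; the 7,378 bytes of actual data are fetched from LFS storage at download time. As a minimal sketch, assuming the repository has been cloned and `git lfs pull` has replaced the pointer with the real bytes, the shard can be inspected directly:

import pandas as pd

# Read the uploaded shard straight from disk (after `git lfs pull`).
df = pd.read_parquet(
    "tokenizer_robustness_completion_chinese_homoglyphs/test-00000-of-00001.parquet"
)
print(df.shape)  # expected (24, 13) given the features and splits metadata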