Uploading tokenizer_robustness_completion_chinese_equivalent_expressions subset
README.md CHANGED
@@ -348,6 +348,40 @@ dataset_info:
       num_examples: 2
   download_size: 5588
   dataset_size: 386
+- config_name: tokenizer_robustness_completion_chinese_equivalent_expressions
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 1920
+    num_examples: 11
+  download_size: 6441
+  dataset_size: 1920
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -389,6 +423,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_date_formats/test-*
+- config_name: tokenizer_robustness_completion_chinese_equivalent_expressions
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_equivalent_expressions/test-*
 ---

 # Dataset Card for Tokenization Robustness
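Once this commit lands, the new subset is addressable by the config and split names declared above. A minimal sketch of loading it with the `datasets` library; the repository id is a placeholder since the commit page does not show it:

```python
# Minimal sketch, assuming the standard `datasets` API.
# "<org>/<repo>" is a placeholder for this dataset's actual Hub id.
from datasets import load_dataset

ds = load_dataset(
    "<org>/<repo>",
    name="tokenizer_robustness_completion_chinese_equivalent_expressions",
    split="test",
)
print(ds.num_rows)      # 11, per num_examples in the split metadata
print(ds.column_names)  # question, choices, answer, answer_label, ...
```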
tokenizer_robustness_completion_chinese_equivalent_expressions/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b84cedaa966c1e47188853e1236458246f0a45349ebff996a9512af77c67ae1
+size 6441
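Note that the committed file is a Git LFS pointer, not the parquet data itself; the `size 6441` here matches the `download_size` recorded in the README metadata. After fetching the actual object (e.g. with `git lfs pull`), the shard can be inspected directly. A sketch, assuming `pyarrow` is installed and the file is present locally:

```python
# Sketch: inspect the uploaded parquet shard with pyarrow.
# Assumes the LFS object has been fetched, not just the pointer file.
import pyarrow.parquet as pq

table = pq.read_table(
    "tokenizer_robustness_completion_chinese_equivalent_expressions/"
    "test-00000-of-00001.parquet"
)
print(table.num_rows)      # expected: 11, matching the README metadata
print(table.schema.names)  # should mirror the features block above
```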