Uploading tokenizer_robustness_completion_chinese_compounds subset
README.md
CHANGED
@@ -246,6 +246,40 @@ dataset_info:
     num_examples: 11
   download_size: 6564
   dataset_size: 2159
+- config_name: tokenizer_robustness_completion_chinese_compounds
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 947
+    num_examples: 3
+  download_size: 6322
+  dataset_size: 947
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -275,6 +309,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_colloquial/test-*
+- config_name: tokenizer_robustness_completion_chinese_compounds
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_compounds/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
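Once this commit lands, the new subset is selectable by config name through the `datasets` library. A minimal sketch, assuming a hypothetical repo id `your-org/tokenizer-robustness` (substitute the actual dataset id):

```python
# Minimal sketch: load the newly added subset by its config name.
# NOTE: "your-org/tokenizer-robustness" is a placeholder repo id.
from datasets import load_dataset

ds = load_dataset(
    "your-org/tokenizer-robustness",
    "tokenizer_robustness_completion_chinese_compounds",
    split="test",
)

print(ds.num_rows)  # 3, per the splits metadata declared above
for ex in ds:
    # Fields follow the features schema in the README front matter.
    print(ex["id"], ex["question"], ex["choices"], ex["answer_label"])
```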
tokenizer_robustness_completion_chinese_compounds/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:951c8a21313e2b3c7447dcc91481c44060af3cf4b71a2f8808c0b32ecf6bb670
+size 6322
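The parquet file itself is committed as a Git LFS pointer: the three lines above record only the spec version, the sha256 `oid`, and the byte `size`, while the actual object lives in LFS storage. A hedged sketch for verifying a locally fetched copy against this pointer, using only the standard library:

```python
# Sketch: check a downloaded file against the Git LFS pointer above.
import hashlib
from pathlib import Path

EXPECTED_OID = "951c8a21313e2b3c7447dcc91481c44060af3cf4b71a2f8808c0b32ecf6bb670"
EXPECTED_SIZE = 6322  # bytes, from the pointer's "size" line

path = Path("tokenizer_robustness_completion_chinese_compounds/test-00000-of-00001.parquet")
data = path.read_bytes()

assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)}"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("LFS object matches pointer")
```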