Uploading tokenizer_robustness_completion_chinese_colloquial subset
README.md CHANGED
@@ -212,6 +212,40 @@ dataset_info:
     num_examples: 38
   download_size: 8204
   dataset_size: 6889
+- config_name: tokenizer_robustness_completion_chinese_colloquial
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: float64
+  - name: variation_id
+    dtype: float64
+  splits:
+  - name: test
+    num_bytes: 2159
+    num_examples: 11
+  download_size: 6564
+  dataset_size: 2159
 configs:
 - config_name: tokenizer_robustness_completion_chinese_borrowing
   data_files:
@@ -237,6 +271,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_code_language_script_switching/test-*
+- config_name: tokenizer_robustness_completion_chinese_colloquial
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_colloquial/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
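With this change, the new subset is selectable by config name through the `datasets` library. A minimal sketch, assuming the standard `load_dataset` API; the repo id "org/tokenization-robustness" is a placeholder, not this dataset's actual Hub path:

from datasets import load_dataset

# Load the newly added config; "org/tokenization-robustness" is a
# hypothetical repo id standing in for the actual repository name.
ds = load_dataset(
    "org/tokenization-robustness",
    "tokenizer_robustness_completion_chinese_colloquial",
    split="test",
)
print(len(ds))                        # 11 examples, per the splits metadata above
print(ds[0]["question"], ds[0]["answer_label"])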
tokenizer_robustness_completion_chinese_colloquial/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c88617a593db3eac3a6fc4281890cd62b21e6c11660ad9bc39de10609eaac7a9
+size 6564
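The added file is a Git LFS pointer, not the parquet payload itself; the real bytes are fetched on checkout or download. A minimal sketch for checking a downloaded copy against the pointer's oid and size (the local path below mirrors the repo layout and is an assumption):

import hashlib

# Path mirrors the repository layout; adjust to wherever the file was downloaded.
path = "tokenizer_robustness_completion_chinese_colloquial/test-00000-of-00001.parquet"
with open(path, "rb") as f:
    data = f.read()

# Both expected values come from the LFS pointer above.
assert len(data) == 6564, "size mismatch with LFS pointer"
expected = "c88617a593db3eac3a6fc4281890cd62b21e6c11660ad9bc39de10609eaac7a9"
assert hashlib.sha256(data).hexdigest() == expected, "oid mismatch with LFS pointer"
print("parquet matches its LFS pointer")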