Uploading tokenizer_robustness_completion_english_space_removal subset
README.md CHANGED
@@ -2860,6 +2860,130 @@ dataset_info:
     num_examples: 74
   download_size: 47994
   dataset_size: 42951
+- config_name: tokenizer_robustness_completion_english_space_removal
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 20441
+    num_examples: 40
+  download_size: 39744
+  dataset_size: 20441
 configs:
 - config_name: tokenizer_robustness_completion_english_abbreviations
   data_files:

@@ -2953,6 +3077,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_english_similar_words/test-*
+- config_name: tokenizer_robustness_completion_english_space_removal
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_english_space_removal/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
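Once this commit lands, the new subset is selectable by its config name. A minimal loading sketch with the 🤗 `datasets` library follows; the repository id does not appear in this diff, so `<org>/<dataset-repo>` is a placeholder, not the real id:

```python
from datasets import load_dataset

# NOTE: "<org>/<dataset-repo>" is a placeholder -- substitute the actual
# repository id of this dataset.
ds = load_dataset(
    "<org>/<dataset-repo>",
    "tokenizer_robustness_completion_english_space_removal",
    split="test",
)

print(ds.num_rows)  # 40, per the `splits` metadata in the YAML above

# The struct columns hold per-tokenizer values keyed by tokenizer name.
row = ds[0]
print(row["vanilla_cos_sim_to_canonical"]["gpt2"])  # float64
print(row["token_counts"]["gpt2"])                  # int64
```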
tokenizer_robustness_completion_english_space_removal/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fa2424e8229761cfd7c098eeebc3fba74343463c76a162335c145cdfec1fc54
+size 39744
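The parquet file itself is stored as a Git LFS pointer: the `oid` and `size` fields identify the real payload. A sketch of fetching the raw file and checking it against the pointer, again with a placeholder repo id:

```python
import hashlib
from huggingface_hub import hf_hub_download

# "<org>/<dataset-repo>" is a placeholder for the actual repository id.
path = hf_hub_download(
    repo_id="<org>/<dataset-repo>",
    repo_type="dataset",
    filename="tokenizer_robustness_completion_english_space_removal/test-00000-of-00001.parquet",
)

with open(path, "rb") as f:
    data = f.read()

# The LFS pointer records the payload's SHA-256 digest and byte size.
assert hashlib.sha256(data).hexdigest() == (
    "9fa2424e8229761cfd7c098eeebc3fba74343463c76a162335c145cdfec1fc54"
)
assert len(data) == 39744
```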