Update tokenizer_config.json
tokenizer_config.json CHANGED (+8 -1)

@@ -35,7 +35,14 @@
   "extra_special_tokens": {},
   "legacy": true,
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token":
+  "pad_token": {
+    "__type": "AddedToken",
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
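For reference, a minimal sketch of how this change shows up when the tokenizer is loaded with Transformers. The repo id below is a placeholder and not part of this commit; the expected values assume the config above is the one being served.

from transformers import AutoTokenizer

# Placeholder repo id, not part of this commit; substitute the actual repository.
tokenizer = AutoTokenizer.from_pretrained("org/model-name")

# With the updated config, pad_token is declared as an AddedToken whose content
# is the end-of-sentence token, so there is no need to set
# tokenizer.pad_token = tokenizer.eos_token by hand before padding.
print(tokenizer.pad_token)     # expected: "<|end▁of▁sentence|>"
print(tokenizer.pad_token_id)  # id assigned to that token in the vocabulary

# Batch padding now works out of the box.
batch = tokenizer(["hello", "a somewhat longer example sentence"], padding=True)
print(batch["input_ids"])      # shorter sequence is padded with the pad token id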