set model_max_length to the maximum length of model context (131072 tokens)
Browse files
- tokenizer_config.json +1 -1
tokenizer_config.json
CHANGED
@@ -9011,7 +9011,7 @@
|
|
9011 |
"eos_token": "</s>",
|
9012 |
"extra_special_tokens": {},
|
9013 |
"legacy": true,
|
9014 |
-
    "model_max_length": 1000000000000000019884624838656,
|
9015 |
"pad_token": "<pad>",
|
9016 |
"processor_class": "PixtralProcessor",
|
9017 |
"tokenizer_class": "LlamaTokenizerFast",
|
|
|
9011 |
"eos_token": "</s>",
|
9012 |
"extra_special_tokens": {},
|
9013 |
"legacy": true,
|
9014 |
+
"model_max_length": 131072,
|
9015 |
"pad_token": "<pad>",
|
9016 |
"processor_class": "PixtralProcessor",
|
9017 |
"tokenizer_class": "LlamaTokenizerFast",
|