Unable to run: crashes with an error after loading the model
Here is the error message:
Traceback (most recent call last):
File "D:\text-generation-webui\modules\ui_model_menu.py", line 231, in load_model_wrapper
shared.model, shared.tokenizer = load_model(selected_model, loader)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\text-generation-webui\modules\models.py", line 101, in load_model
tokenizer = load_tokenizer(model_name, model)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\text-generation-webui\modules\models.py", line 123, in load_tokenizer
tokenizer = AutoTokenizer.from_pretrained(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\text-generation-webui\installer_files\env\Lib\site-packages\transformers\models\auto\tokenization_auto.py", line 896, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\text-generation-webui\installer_files\env\Lib\site-packages\transformers\tokenization_utils_base.py", line 2291, in from_pretrained
return cls._from_pretrained(
^^^^^^^^^^^^^^^^^^^^^
File "D:\text-generation-webui\installer_files\env\Lib\site-packages\transformers\tokenization_utils_base.py", line 2525, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\text-generation-webui\installer_files\env\Lib\site-packages\transformers\models\qwen2\tokenization_qwen2_fast.py", line 120, in init
super().init(
File "D:\text-generation-webui\installer_files\env\Lib\site-packages\transformers\tokenization_utils_fast.py", line 115, in init
fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Exception: data did not match any variant of untagged enum ModelWrapper at line 757443 column 3
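For reference, this "data did not match any variant of untagged enum ModelWrapper" error comes from the Rust tokenizers library failing to parse the model's tokenizer.json, which usually means the installed tokenizers/transformers packages are older than the format the model was saved with (consistent with an update fixing it). A minimal diagnostic sketch to check the installed versions and reproduce the failure outside the web UI; the model path below is a placeholder, point it at your own download:

```python
import tokenizers
import transformers

print("tokenizers :", tokenizers.__version__)
print("transformers:", transformers.__version__)

# Hypothetical model folder -- replace with the directory of the model
# that fails to load.
tokenizer_json = r"D:\text-generation-webui\models\your-qwen2-model\tokenizer.json"

try:
    # transformers imports `Tokenizer` from the tokenizers package as
    # `TokenizerFast`, so calling from_file directly reproduces the same
    # parse step without starting the web UI.
    tokenizers.Tokenizer.from_file(tokenizer_json)
    print("tokenizer.json parsed fine")
except Exception as err:
    print("parse failed:", err)
```

If the direct parse fails too, updating tokenizers/transformers (or re-running the web UI's update script) is the usual fix.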
Updating seems to have fixed it... But why are the VRAM requirements so high? I can't even run past 6000 context with 24 GB + 8 GB cards.
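On the VRAM question: besides the model weights themselves, the loader keeps a KV cache that grows linearly with context length, layer count, KV-head count, and head dimension. A rough back-of-envelope sketch; the layer/head numbers below are assumptions for illustration, not read from any specific config.json, so substitute your model's values:

```python
# Rough KV-cache size estimate. Plug in your model's num_hidden_layers,
# num_key_value_heads, and head_dim (hidden_size // num_attention_heads).
def kv_cache_bytes(ctx_len, n_layers, n_kv_heads, head_dim, bytes_per_elem=2):
    # 2x for keys and values, one entry per token per layer, fp16 by default.
    return 2 * ctx_len * n_layers * n_kv_heads * head_dim * bytes_per_elem

# Assumed numbers for a large GQA model (80 layers, 8 KV heads, head_dim 128).
gib = kv_cache_bytes(ctx_len=6000, n_layers=80, n_kv_heads=8, head_dim=128) / 2**30
print(f"~{gib:.1f} GiB of KV cache on top of the weights")
```

For models without grouped-query attention, n_kv_heads equals the full attention-head count and the cache is several times larger; unquantized weights, activation buffers, and cross-GPU splitting overhead eat into the remaining headroom, which is why long contexts overflow a 24 GB + 8 GB split quickly.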