Getting ValueError when loading, any fixes?

#1
by drntt96 - opened

# Use a pipeline as a high-level helper
from transformers import pipeline

messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe = pipeline("image-text-to-text", model="NAMAA-Space/Qari-OCR-0.2.2.1-VL-2B-Instruct")
pipe(messages)


ValueError Traceback (most recent call last)
in <cell line: 0>()
5 {"role": "user", "content": "Who are you?"},
6 ]
----> 7 pipe = pipeline("image-text-to-text", model="NAMAA-Space/Qari-OCR-0.2.2.1-VL-2B-Instruct")
8 pipe(messages)

1 frames
/usr/local/lib/python3.11/dist-packages/transformers/pipelines/base.py in infer_framework_load_model(model, config, model_classes, task, framework, **model_kwargs)
302 for class_name, trace in all_traceback.items():
303 error += f"while loading with {class_name}, an error is thrown:\n{trace}\n"
--> 304 raise ValueError(
305 f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
306 )

ValueError: Could not load model NAMAA-Space/Qari-OCR-0.2.2.1-VL-2B-Instruct with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForImageTextToText'>, <class 'transformers.models.qwen2_vl.modeling_qwen2_vl.Qwen2VLForConditionalGeneration'>). See the original errors:

while loading with AutoModelForImageTextToText, an error is thrown:
Traceback (most recent call last):
File "/usr/local/lib/python3.11/dist-packages/transformers/pipelines/base.py", line 291, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/dist-packages/transformers/models/auto/auto_factory.py", line 571, in from_pretrained
return model_class.from_pretrained(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/dist-packages/transformers/modeling_utils.py", line 279, in _wrapper
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/dist-packages/transformers/modeling_utils.py", line 4482, in from_pretrained
model.load_adapter(
File "/usr/local/lib/python3.11/dist-packages/transformers/integrations/peft.py", line 220, in load_adapter
inject_adapter_in_model(peft_config, self, adapter_name, **peft_load_kwargs)
File "/usr/local/lib/python3.11/dist-packages/peft/mapping.py", line 260, in inject_adapter_in_model
File "/usr/local/lib/python3.11/dist-packages/peft/tuners/lora/model.py", line 141, in init
def init(self, model, config, adapter_name, low_cpu_mem_usage: bool = False) -> None:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/dist-packages/peft/tuners/tuners_utils.py", line 184, in init
File "/usr/local/lib/python3.11/dist-packages/peft/tuners/tuners_utils.py", line 437, in inject_adapter
File "/usr/local/lib/python3.11/dist-packages/peft/tuners/lora/model.py", line 488, in _prepare_adapter_config
"""

ValueError: Please specify target_modules in peft_config

while loading with Qwen2VLForConditionalGeneration, an error is thrown:
Traceback (most recent call last):
File "/usr/local/lib/python3.11/dist-packages/transformers/pipelines/base.py", line 291, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/dist-packages/transformers/modeling_utils.py", line 279, in _wrapper
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/dist-packages/transformers/modeling_utils.py", line 4482, in from_pretrained
model.load_adapter(
File "/usr/local/lib/python3.11/dist-packages/transformers/integrations/peft.py", line 220, in load_adapter
inject_adapter_in_model(peft_config, self, adapter_name, **peft_load_kwargs)
File "/usr/local/lib/python3.11/dist-packages/peft/mapping.py", line 260, in inject_adapter_in_model
File "/usr/local/lib/python3.11/dist-packages/peft/tuners/lora/model.py", line 141, in init
def init(self, model, config, adapter_name, low_cpu_mem_usage: bool = False) -> None:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/dist-packages/peft/tuners/tuners_utils.py", line 184, in init
File "/usr/local/lib/python3.11/dist-packages/peft/tuners/tuners_utils.py", line 437, in inject_adapter
File "/usr/local/lib/python3.11/dist-packages/peft/tuners/lora/model.py", line 488, in _prepare_adapter_config
"""

ValueError: Please specify target_modules in peft_config
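For context, this ValueError usually means from_pretrained found a PEFT adapter in the repo (an adapter_config.json) but the installed peft could not read any target_modules out of it, which is typically a version mismatch. A quick way to confirm what the repo actually ships, assuming it does contain an adapter_config.json as the traceback suggests, is a sketch like this:

# Diagnostic sketch (assumption: the repo ships an adapter_config.json;
# the repo id below is taken from the error above).
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    "NAMAA-Space/Qari-OCR-0.2.2.1-VL-2B-Instruct", "adapter_config.json"
)
with open(path) as f:
    adapter_cfg = json.load(f)
print(adapter_cfg.get("target_modules"))  # None here would explain the ValueError

If target_modules really is missing, or the installed peft is too old to parse it, upgrading peft (pip install -U peft) or loading the model class directly, as in the snippet further down, is a common workaround.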

Network for Advancing Modern ArabicNLP & AI org

That worked, thanks! Not sure why, since I had imported the same packages, but thanks.

drntt96 changed discussion status to closed

from PIL import Image
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
import torch
import os
from qwen_vl_utils import process_vision_info

model_name = "NAMAA-Space/Qari-OCR-0.2.2.1-VL-2B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
processor = AutoProcessor.from_pretrained(model_name)
max_tokens = 2000


Error:

None of the available devices available_devices = None are supported by the bitsandbytes version you have installed: bnb_supported_devices = {'npu', '"cpu" (needs an Intel CPU and intel_extension_for_pytorch installed and compatible with the PyTorch version)', 'xpu', 'hpu', 'mps', 'cuda'}. Please check the docs to see if the backend you intend to use is available and how to install it: https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend

RuntimeError Traceback (most recent call last)
in <cell line: 0>()
8
9 model_name = "NAMAA-Space/Qari-OCR-0.2.2.1-VL-2B-Instruct"
---> 10 model = Qwen2VLForConditionalGeneration.from_pretrained(
11 model_name,
12 torch_dtype="auto",

4 frames
/usr/local/lib/python3.11/dist-packages/transformers/integrations/bitsandbytes.py in _validate_bnb_multi_backend_availability(raise_exception)
515
516 logger.error(err_msg)
--> 517 raise RuntimeError(err_msg)
518
519 logger.warning("No supported devices found for bitsandbytes multi-backend.")

RuntimeError: None of the available devices available_devices = None are supported by the bitsandbytes version you have installed: bnb_supported_devices = {'npu', '"cpu" (needs an Intel CPU and intel_extension_for_pytorch installed and compatible with the PyTorch version)', 'xpu', 'hpu', 'mps', 'cuda'}. Please check the docs to see if the backend you intend to use is available and how to install it: https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend
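
This RuntimeError comes from the bitsandbytes integration when quantized loading is being set up (for example via a quantization_config embedded in the checkpoint's config) but no supported device, usually a CUDA GPU, is visible, which is what happens on a CPU-only Colab runtime. A quick check, just as a sketch:

# Sanity check (assumption: the error came from a runtime without a visible GPU;
# bitsandbytes quantization needs CUDA or another supported backend).
import torch

print(torch.cuda.is_available())   # False here would explain the RuntimeError above
print(torch.cuda.device_count())

If this prints False, switching to a GPU runtime (Runtime > Change runtime type in Colab) and re-running the same loading code is usually enough.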
