### Note: DO NOT use a quantized model or quantization_bit when merging LoRA adapters

### model
# model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
# model_name_or_path: llava-hf/llama3-llava-next-8b-hf
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/mol-instruct/checkpoint-4000
template: llama3
trust_remote_code: true

### export
export_dir: saves/merged_models/mol-instruct-llama3-checkpoint-4000
export_size: 5
export_device: cpu  # choices: [cpu, auto]
export_legacy_format: false
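
### Usage sketch (assumption: this file is consumed by the LLaMA-Factory export command;
### the exact path to this config is illustrative):
#   llamafactory-cli export path/to/this_merge_config.yaml
# The merged full-weight model is written to export_dir in shards of at most export_size GB.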