KaiChen1998 committed (verified)
Commit 694f7e2 · 1 Parent(s): 80524d9

use flash attention

Files changed (1): app.py (+2, -2)
app.py CHANGED

@@ -35,8 +35,8 @@ LLM_MODEL_PATH = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
 processor = AutoProcessor.from_pretrained(MLLM_MODEL_PATH)
 tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_PATH)
 
-mllm = Qwen2_5_VLForConditionalGeneration.from_pretrained(MLLM_MODEL_PATH, torch_dtype=torch.bfloat16, device_map="auto")
-llm = AutoModelForCausalLM.from_pretrained(LLM_MODEL_PATH, torch_dtype=torch.bfloat16, device_map="auto")
+mllm = Qwen2_5_VLForConditionalGeneration.from_pretrained(MLLM_MODEL_PATH, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto")
+llm = AutoModelForCausalLM.from_pretrained(LLM_MODEL_PATH, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto")
 
 mllm_sampling = dict(do_sample=False, temperature=0, max_new_tokens=8192)
 llm_sampling = dict(temperature=0.6, top_p=0.95, max_new_tokens=8192)
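
The change simply passes attn_implementation="flash_attention_2" to both from_pretrained calls. Below is a minimal sketch of the same loading pattern, with one assumption added that is not part of the commit: it falls back to PyTorch SDPA when the flash-attn package is not installed, and the MLLM_MODEL_PATH value is a guess (only LLM_MODEL_PATH is visible in the hunk header).

```python
# Sketch only, not part of the commit: load both models with FlashAttention-2
# when the flash-attn package is available, otherwise fall back to SDPA.
import importlib.util

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoProcessor,
    AutoTokenizer,
    Qwen2_5_VLForConditionalGeneration,
)

MLLM_MODEL_PATH = "Qwen/Qwen2.5-VL-7B-Instruct"  # assumed; not shown in the diff
LLM_MODEL_PATH = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"

processor = AutoProcessor.from_pretrained(MLLM_MODEL_PATH)
tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_PATH)

# Use FlashAttention-2 only if flash-attn is importable (assumed fallback, not in app.py).
attn_impl = "flash_attention_2" if importlib.util.find_spec("flash_attn") else "sdpa"

mllm = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MLLM_MODEL_PATH,
    torch_dtype=torch.bfloat16,
    attn_implementation=attn_impl,
    device_map="auto",
)
llm = AutoModelForCausalLM.from_pretrained(
    LLM_MODEL_PATH,
    torch_dtype=torch.bfloat16,
    attn_implementation=attn_impl,
    device_map="auto",
)
```

FlashAttention-2 requires the flash-attn package and a supported GPU; keeping torch.bfloat16 matters because the kernels only run in fp16/bf16.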