tingwei.wang commited on
Commit
7ab7ce6
·
1 Parent(s): db580a6

fix: upgrade model from Qwen2-VL-2B-Instruct to Qwen2.5-VL-3B-Instruct and move inference from CPU to GPU

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -6,13 +6,13 @@ import time
6
  import torch
7
  import spaces
8
 
9
- MODEL_ID = "Qwen/Qwen2-VL-2B-Instruct"
10
  processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
11
- model = Qwen2_VLForConditionalGeneration.from_pretrained(
12
  MODEL_ID,
13
  trust_remote_code=True,
14
  torch_dtype=torch.bfloat16
15
- ).to("cpu").eval()
16
 
17
  @spaces.GPU
18
  def model_inference(input_dict, history):
@@ -78,7 +78,7 @@ examples = [
78
 
79
  demo = gr.ChatInterface(
80
  fn=model_inference,
81
- description="# **Qwen2-VL-2B-Instruct**",
82
  examples=examples,
83
  textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
84
  stop_btn="Stop Generation",
 
6
  import torch
7
  import spaces
8
 
9
+ MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"
10
  processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
11
+ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
12
  MODEL_ID,
13
  trust_remote_code=True,
14
  torch_dtype=torch.bfloat16
15
+ ).to("cuda").eval()
16
 
17
  @spaces.GPU
18
  def model_inference(input_dict, history):
 
78
 
79
  demo = gr.ChatInterface(
80
  fn=model_inference,
81
+ description="# **Qwen2.5-VL-3B-Instruct**",
82
  examples=examples,
83
  textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
84
  stop_btn="Stop Generation",