RaushanTurganbay HF staff committed on
Commit
73cabdf
·
verified ·
1 Parent(s): 77c5ee4

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +15 -1
README.md CHANGED
@@ -7,6 +7,7 @@ tags:
7
  - image-text-to-text
8
  language:
9
  - en
 
10
  ---
11
 
12
  # LLaVa-Next Model Card
@@ -52,7 +53,20 @@ model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-next-1
52
  # prepare image and text prompt, using the appropriate prompt template
53
  url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
54
  image = Image.open(requests.get(url, stream=True).raw)
55
- prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:"
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
  inputs = processor(prompt, image, return_tensors="pt").to(model.device)
58
 
 
7
  - image-text-to-text
8
  language:
9
  - en
10
+ pipeline_tag: image-text-to-text
11
  ---
12
 
13
  # LLaVa-Next Model Card
 
53
  # prepare image and text prompt, using the appropriate prompt template
54
  url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
55
  image = Image.open(requests.get(url, stream=True).raw)
56
+
57
+ # Define a chat history and use `apply_chat_template` to get correctly formatted prompt
58
+ # Each value in "content" has to be a list of dicts with types ("text", "image")
59
+ conversation = [
60
+ {
61
+
62
+ "role": "user",
63
+ "content": [
64
+ {"type": "text", "text": "What is shown in this image?"},
65
+ {"type": "image"},
66
+ ],
67
+ },
68
+ ]
69
+ prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
70
 
71
  inputs = processor(prompt, image, return_tensors="pt").to(model.device)
72