ColorfulAI committed
Commit 332ec02 · Parent(s): 8b0c63a
Files changed (1): README.md (+110 -2)
README.md CHANGED
@@ -3,12 +3,120 @@ license: mit
---
# M4-Audio-LongVA-7B-Qwen2

- Enhancing Interactive Capabilities in VideoLLM
+ Enhancing Omni Interactive Capabilities in MLLM
+
+ ![images](./assets/framework.png)

M4-Audio-7B is an extension of [LongVA-7B](https://github.com/EvolvingLMMs-Lab/LongVA), further trained using the [M4-IT](https://huggingface.co/datasets/ColorfulAI/M4-IT) dataset, which comprises 9,963 visual-audio instruction-tuning instances. This training was conducted without any special modifications to the existing training pipeline.

+
## Usage

- ![images](./assets/framework.png)
+
+ *Please refer to [M4](https://github.com/patrick-tssn/M4) to install the relevant packages.*
+
+ ```python
+ import os
+ from PIL import Image
+ import numpy as np
+ import torchaudio
+ import torch
+ from decord import VideoReader, cpu
+ import whisper
+ # fix the random seed for reproducible sampling
+ torch.manual_seed(0)
+
+ from intersuit.model.builder import load_pretrained_model
+ from intersuit.mm_utils import tokenizer_image_speech_tokens, process_images
+ from intersuit.constants import IMAGE_TOKEN_INDEX, SPEECH_TOKEN_INDEX
+
+ import ChatTTS  # used to synthesize speech for the text queries below
+ chat = ChatTTS.Chat()
+ chat.load(source='local', compile=True)
+
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ model_path = "checkpoints/M4-Audio-LongVA-7B-Qwen2"
+ video_path = "local_demo/assets/water.mp4"
+ audio_path = "local_demo/wav/infer.wav"
+ new_audio_path = "local_demo/wav/new_infer.wav"
+ max_frames_num = 16  # you can raise this to several thousand frames, as long as your GPU memory can handle it :)
+ gen_kwargs = {"do_sample": True, "temperature": 0.5, "top_p": None, "num_beams": 1, "use_cache": True, "max_new_tokens": 1024}
+ tokenizer, model, image_processor, _ = load_pretrained_model(model_path, None, "llava_qwen", device_map="cuda:0", attn_implementation="eager")
+
+ # original query
+ query = "Give a detailed caption of the video as if I am blind."
+ query = None  # comment this line out to let ChatTTS convert the text query to audio
+ prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image><|im_end|>\n<|im_start|>user\n<speech>\n<|im_end|>\n<|im_start|>assistant\n"
+ input_ids = tokenizer_image_speech_tokens(prompt, tokenizer, IMAGE_TOKEN_INDEX, SPEECH_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(model.device)
+ pad_token_ids = (tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id)
+ attention_masks = input_ids.ne(pad_token_ids).to(input_ids.device)
+ # audio input: synthesize the query with ChatTTS (if given), then extract Whisper log-mel features
+ if query is not None:
+     audio_path = "./local_demo/wav/" + "infer.wav"
+     if os.path.exists(audio_path): os.remove(audio_path)  # refresh
+     if not os.path.exists(audio_path):
+         wav = chat.infer(query)
+         try:
+             torchaudio.save(audio_path, torch.from_numpy(wav).unsqueeze(0), 24000)
+         except:
+             torchaudio.save(audio_path, torch.from_numpy(wav), 24000)
+ speech = whisper.load_audio(audio_path)
+ speech = whisper.pad_or_trim(speech)
+ speech = whisper.log_mel_spectrogram(speech, n_mels=128).permute(1, 0).to(device=model.device, dtype=torch.float16)
+ speech_length = torch.LongTensor([speech.shape[0]]).to(model.device)
+
+ # new query (the interruption)
+ new_query = "How many people in the video?"  # example interrupting queries; the last assignment takes effect
+ new_query = "Okay, I see."
+ new_query = "Sorry to interrupt."
+ new_query_pos = 10  # the token position at which the new query is encountered
+ new_query = None  # comment this line out to let ChatTTS convert the text query to audio
+ new_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<speech>\n<|im_end|>\n<|im_start|>assistant\n"
+ new_input_ids = tokenizer_image_speech_tokens(new_prompt, tokenizer, IMAGE_TOKEN_INDEX, SPEECH_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(model.device)
+ # audio input: same ChatTTS + Whisper preprocessing for the interrupting query
+ if new_query is not None:
+     new_audio_path = "./local_demo/wav/" + "new_infer.wav"
+     if os.path.exists(new_audio_path): os.remove(new_audio_path)  # refresh
+     if not os.path.exists(new_audio_path):
+         wav = chat.infer(new_query)
+         try:
+             torchaudio.save(new_audio_path, torch.from_numpy(wav).unsqueeze(0), 24000)
+         except:
+             torchaudio.save(new_audio_path, torch.from_numpy(wav), 24000)
+ new_speech = whisper.load_audio(new_audio_path)
+ new_speech = whisper.pad_or_trim(new_speech)
+ new_speech = whisper.log_mel_spectrogram(new_speech, n_mels=128).permute(1, 0).to(device=model.device, dtype=torch.float16)
+ new_speech_length = torch.LongTensor([new_speech.shape[0]]).to(model.device)
+
+ # video input: uniformly sample max_frames_num frames from the clip
+ vr = VideoReader(video_path, ctx=cpu(0))
+ total_frame_num = len(vr)
+ uniform_sampled_frames = np.linspace(0, total_frame_num - 1, max_frames_num, dtype=int)
+ frame_idx = uniform_sampled_frames.tolist()
+ frames = vr.get_batch(frame_idx).asnumpy()
+ video_tensor = image_processor.preprocess(frames, return_tensors="pt")["pixel_values"].to(model.device, dtype=torch.bfloat16)
+
+
+ with torch.inference_mode():
+     output_ids = model.generate_parallel(input_ids,
+         attention_mask=attention_masks,
+         images=[video_tensor],
+         modalities=["video"],
+         speeches=speech.unsqueeze(0),
+         speech_lengths=speech_length,
+         new_query=new_input_ids,
+         new_query_pos=new_query_pos,
+         new_speeches=new_speech.unsqueeze(0),
+         new_speech_lengths=new_speech_length,
+         query_str=query,
+         new_query_str=new_query,
+         tokenizer=tokenizer,
+         **gen_kwargs)
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+
+ ```
+
 
  For more information about the interaction inference pipeline, please visit the [M4 GitHub repository](https://github.com/patrick-tssn/M4).
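
The usage snippet above runs the same preprocessing twice, once for the original query and once for the interrupting one: a text query is synthesized to audio with ChatTTS and then converted to Whisper log-mel features. When adapting the demo, the two passes could be folded into a small helper along the following lines. This is only a sketch that reuses the calls already present in the snippet; `query_to_speech` is an illustrative name, not part of the M4 codebase.

```python
import os

import torch
import torchaudio
import whisper


def query_to_speech(chat, query, wav_path, device):
    """Synthesize `query` with ChatTTS (when given) and return Whisper log-mel features and their length."""
    if query is not None:
        if os.path.exists(wav_path):
            os.remove(wav_path)  # refresh any stale audio file
        wav = torch.from_numpy(chat.infer(query))
        if wav.dim() == 1:  # ChatTTS may return a 1-D or 2-D waveform
            wav = wav.unsqueeze(0)
        torchaudio.save(wav_path, wav, 24000)
    # Whisper front-end: load, pad/trim to 30 s, and compute 128-bin log-mel features
    speech = whisper.load_audio(wav_path)
    speech = whisper.pad_or_trim(speech)
    speech = whisper.log_mel_spectrogram(speech, n_mels=128).permute(1, 0).to(device=device, dtype=torch.float16)
    speech_length = torch.LongTensor([speech.shape[0]]).to(device)
    return speech, speech_length
```

With such a helper, the two blocks above reduce to `speech, speech_length = query_to_speech(chat, query, audio_path, model.device)` and the analogous call for `new_query`.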