Update README.md
Browse files
README.md
CHANGED
@@ -74,7 +74,7 @@ from transformers import VoxtralForConditionalGeneration, AutoProcessor
|
|
74 |
import torch
|
75 |
|
76 |
device = "cuda"
|
77 |
-
repo_id = "mistralai/Voxtral-Mini-3B-2507"
|
78 |
|
79 |
processor = AutoProcessor.from_pretrained(repo_id)
|
80 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
@@ -118,7 +118,7 @@ from transformers import VoxtralForConditionalGeneration, AutoProcessor
|
|
118 |
import torch
|
119 |
|
120 |
device = "cuda"
|
121 |
-
repo_id = "mistralai/Voxtral-Mini-3B-2507"
|
122 |
|
123 |
processor = AutoProcessor.from_pretrained(repo_id)
|
124 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
@@ -176,7 +176,7 @@ from transformers import VoxtralForConditionalGeneration, AutoProcessor
|
|
176 |
import torch
|
177 |
|
178 |
device = "cuda"
|
179 |
-
repo_id = "mistralai/Voxtral-Mini-3B-2507"
|
180 |
|
181 |
processor = AutoProcessor.from_pretrained(repo_id)
|
182 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
@@ -215,7 +215,7 @@ from transformers import VoxtralForConditionalGeneration, AutoProcessor
|
|
215 |
import torch
|
216 |
|
217 |
device = "cuda"
|
218 |
-
repo_id = "mistralai/Voxtral-Mini-3B-2507"
|
219 |
|
220 |
processor = AutoProcessor.from_pretrained(repo_id)
|
221 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
@@ -254,7 +254,7 @@ from transformers import VoxtralForConditionalGeneration, AutoProcessor
|
|
254 |
import torch
|
255 |
|
256 |
device = "cuda"
|
257 |
-
repo_id = "mistralai/Voxtral-Mini-3B-2507"
|
258 |
|
259 |
processor = AutoProcessor.from_pretrained(repo_id)
|
260 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
@@ -317,7 +317,7 @@ from transformers import VoxtralForConditionalGeneration, AutoProcessor
|
|
317 |
import torch
|
318 |
|
319 |
device = "cuda"
|
320 |
-
repo_id = "mistralai/Voxtral-Mini-3B-2507"
|
321 |
|
322 |
processor = AutoProcessor.from_pretrained(repo_id)
|
323 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
|
|
74 |
import torch
|
75 |
|
76 |
device = "cuda"
|
77 |
+
repo_id = "MohamedRashad/Voxtral-Mini-3B-2507-transformers"
|
78 |
|
79 |
processor = AutoProcessor.from_pretrained(repo_id)
|
80 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
|
|
118 |
import torch
|
119 |
|
120 |
device = "cuda"
|
121 |
+
repo_id = "MohamedRashad/Voxtral-Mini-3B-2507-transformers"
|
122 |
|
123 |
processor = AutoProcessor.from_pretrained(repo_id)
|
124 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
|
|
176 |
import torch
|
177 |
|
178 |
device = "cuda"
|
179 |
+
repo_id = "MohamedRashad/Voxtral-Mini-3B-2507-transformers"
|
180 |
|
181 |
processor = AutoProcessor.from_pretrained(repo_id)
|
182 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
|
|
215 |
import torch
|
216 |
|
217 |
device = "cuda"
|
218 |
+
repo_id = "MohamedRashad/Voxtral-Mini-3B-2507-transformers"
|
219 |
|
220 |
processor = AutoProcessor.from_pretrained(repo_id)
|
221 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
|
|
254 |
import torch
|
255 |
|
256 |
device = "cuda"
|
257 |
+
repo_id = "MohamedRashad/Voxtral-Mini-3B-2507-transformers"
|
258 |
|
259 |
processor = AutoProcessor.from_pretrained(repo_id)
|
260 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|
|
|
317 |
import torch
|
318 |
|
319 |
device = "cuda"
|
320 |
+
repo_id = "MohamedRashad/Voxtral-Mini-3B-2507-transformers"
|
321 |
|
322 |
processor = AutoProcessor.from_pretrained(repo_id)
|
323 |
model = VoxtralForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map=device)
|