Upload folder using huggingface_hub
- .gitattributes +1 -0
- README.md +807 -0
- chat_template.jinja +32 -0
- config.json +96 -0
- configuration_nemotron_h.py +245 -0
- generation_config.json +11 -0
- model-00001-of-00003.safetensors +3 -0
- model-00002-of-00003.safetensors +3 -0
- model-00003-of-00003.safetensors +3 -0
- model.safetensors.index.json +589 -0
- modeling_nemotron_h.py +1638 -0
- nemotron_toolcall_parser_no_streaming.py +110 -0
- recipe.yaml +32 -0
- special_tokens_map.json +23 -0
- tokenizer.json +3 -0
- tokenizer_config.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,807 @@
---
license: other
license_name: nvidia-open-model-license
license_link: >-
  https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/
pipeline_tag: text-generation
datasets:
- nvidia/Nemotron-Post-Training-Dataset-v1
- nvidia/Nemotron-Post-Training-Dataset-v2
- nvidia/Nemotron-Pretraining-Dataset-sample
- nvidia/Nemotron-CC-v2
- nvidia/Nemotron-CC-Math-v1
- nvidia/Nemotron-Pretraining-SFT-v1
language:
- en
- es
- fr
- de
- it
- ja
library_name: transformers
tags:
- nvidia
- pytorch
track_downloads: true
base_model:
- nvidia/NVIDIA-Nemotron-Nano-9B-v2
---

# NVIDIA-Nemotron-Nano-9B-v2



**Model Developer:** NVIDIA Corporation

**Model Dates:** June 2025 - August 2025

**Data Freshness:** September 2024

The pretraining data has a cutoff date of September 2024.

## Model Overview

NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and tasks by first generating a reasoning trace and then concluding with a final response. The model's reasoning capabilities can be controlled via a system prompt. If the user prefers the model to provide its final answer without intermediate reasoning traces, it can be configured to do so, albeit with a slight decrease in accuracy for harder prompts that require reasoning. Conversely, allowing the model to generate reasoning traces first generally results in higher-quality final solutions to queries and tasks.

The model uses a hybrid architecture consisting primarily of Mamba-2 and MLP layers combined with just four Attention layers. For details on the architecture, please refer to the [Nemotron-H tech report](https://arxiv.org/abs/2504.03624).
The model was trained using [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) and [NeMo-RL](https://github.com/NVIDIA-NeMo/RL).

The supported languages include: English, German, Spanish, French, Italian, and Japanese. Improved using Qwen.

This model is ready for commercial use.

## License/Terms of Use

GOVERNING TERMS: This trial service is governed by the [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Use of this model is governed by the [NVIDIA Open Model License Agreement](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/).

## Evaluation Results

### Benchmark Results (Reasoning On)

We evaluated our model in **Reasoning-On** mode across all benchmarks, except RULER, which is evaluated in **Reasoning-Off** mode.

| Benchmark | Qwen3-8B | NVIDIA-Nemotron-Nano-9B-v2 |
| :---- | ----: | ----: |
| AIME25 | 69.3% | 72.1% |
| MATH500 | 96.3% | 97.8% |
| GPQA | 59.6% | 64.0% |
| LCB | 59.5% | 71.1% |
| BFCL v3 | 66.3% | 66.9% |
| IFEval (Instruction Strict) | 89.4% | 90.3% |
| HLE | 4.4% | 6.5% |
| RULER (128K) | 74.1% | 78.9% |

All evaluations were done using [NeMo-Skills](https://github.com/NVIDIA/NeMo-Skills). We published a [tutorial](https://nvidia.github.io/NeMo-Skills/tutorials/2025/08/22/reproducing-nvidia-nemotron-nano-9b-v2-evals/) with all the details necessary to reproduce our evaluation results.

## Reasoning Budget Control

This model supports runtime "thinking" budget control. During inference, the user can specify how many tokens the model is allowed to "think".



## Model Architecture

- Architecture Type: Mamba2-Transformer Hybrid
- Network Architecture: Nemotron-Hybrid

### Deployment Geography: Global

### Use Case

NVIDIA-Nemotron-Nano-9B-v2 is a general-purpose reasoning and chat model intended to be used in English and coding languages. Other non-English languages (German, French, Italian, Spanish and Japanese) are also supported. It is intended for developers designing AI agent systems, chatbots, RAG systems, and other AI-powered applications, and is also suitable for typical instruction-following tasks.

### Release Date: 08/18/2025

- Huggingface 08/18/2025 via https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-9B-v2
- API Catalog 08/18/2025 via https://build.nvidia.com/nvidia/nvidia-nemotron-nano-9b-v2

## References

- [NVIDIA Nemotron Nano 2: An Accurate and Efficient Hybrid Mamba-Transformer Reasoning Model](https://arxiv.org/abs/2508.14444)

## Input

- Input Type(s): Text
- Input Format(s): String
- Input Parameters: One-Dimensional (1D): Sequences
- Other Properties Related to Input: Context length up to 128K. Supported languages include German, Spanish, French, Italian, Korean, Portuguese, Russian, Japanese, Chinese and English.

## Output

- Output Type(s): Text
- Output Format: String
- Output Parameters: One-Dimensional (1D): Sequences up to 128K

Our models are designed and optimized to run on NVIDIA GPU-accelerated systems. By leveraging NVIDIA's hardware (e.g., GPU cores) and software frameworks (e.g., CUDA libraries), the model achieves faster training and inference times compared to CPU-only solutions.

## Software Integration

- Runtime Engine(s): NeMo 25.07.nemotron-nano-v2
- Supported Hardware Microarchitecture Compatibility: NVIDIA A10G, NVIDIA H100-80GB, NVIDIA A100
- Operating System(s): Linux

### **Use it with Transformers**

The snippet below shows how to use this model with Huggingface Transformers (tested on version 4.48.3).

```
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("nvidia/NVIDIA-Nemotron-Nano-9B-v2")
model = AutoModelForCausalLM.from_pretrained(
    "nvidia/NVIDIA-Nemotron-Nano-9B-v2",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto"
)
```

Case 1: `/think` is provided, or no reasoning signal is given in the system prompt; reasoning will be set to `True`.

```
messages = [
    {"role": "system", "content": "/think"},
    {"role": "user", "content": "Write a haiku about GPUs"},
]
```

Case 2: `/no_think` is provided; reasoning will be set to `False`.

```
messages = [
    {"role": "system", "content": "/no_think"},
    {"role": "user", "content": "Write a haiku about GPUs"},
]
```

Note: the `/think` or `/no_think` keywords can also be provided in "user" messages for turn-level reasoning control.
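
For example, a hypothetical multi-turn conversation (the assistant turn shown here is purely illustrative) could switch reasoning off for just the follow-up request:

```
messages = [
    {"role": "system", "content": "/think"},
    {"role": "user", "content": "Write a haiku about GPUs"},
    {"role": "assistant", "content": "Silent cores ablaze..."},  # illustrative prior answer
    {"role": "user", "content": "Now explain the haiku. /no_think"},  # disables reasoning for this turn
]
```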

The rest of the inference snippet remains the same:

```
tokenized_chat = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

outputs = model.generate(
    tokenized_chat,
    max_new_tokens=32,
    eos_token_id=tokenizer.eos_token_id
)
print(tokenizer.decode(outputs[0]))
```

We recommend setting `temperature` to `0.6` and `top_p` to `0.95` for reasoning True, using greedy search for reasoning False, and increasing `max_new_tokens` to `1024` or higher for reasoning True.
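
As a minimal sketch (reusing the `model`, `tokenizer`, and `tokenized_chat` objects from the snippets above), those recommendations translate to:

```
# Reasoning True (/think): sample with the recommended settings and a larger token budget.
outputs = model.generate(
    tokenized_chat,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.6,
    top_p=0.95,
    eos_token_id=tokenizer.eos_token_id,
)

# Reasoning False (/no_think): greedy decoding.
outputs = model.generate(
    tokenized_chat,
    max_new_tokens=1024,
    do_sample=False,
    eos_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0]))
```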

### **Use it with TRT-LLM**

The snippet below shows how to use this model with TRT-LLM. We tested this on the following [commit](https://github.com/NVIDIA/TensorRT-LLM/tree/46c5a564446673cdd0f56bcda938d53025b6d04e) and followed these [instructions](https://github.com/NVIDIA/TensorRT-LLM/blob/46c5a564446673cdd0f56bcda938d53025b6d04e/docs/source/installation/build-from-source-linux.md#option-2-build-tensorrt-llm-step-by-step) to build and install TRT-LLM in a docker container.

```
from tensorrt_llm import SamplingParams
from tensorrt_llm._torch import LLM
from tensorrt_llm._torch.pyexecutor.config import PyTorchConfig
from tensorrt_llm.llmapi import KvCacheConfig
from transformers import AutoTokenizer

pytorch_config = PyTorchConfig(
    disable_overlap_scheduler=True, enable_trtllm_decoder=True
)
kv_cache_config = KvCacheConfig(
    enable_block_reuse=False,
)
```

```
model_id = "nvidia/NVIDIA-Nemotron-Nano-9B-v2"
tokenizer = AutoTokenizer.from_pretrained(model_id)

llm = LLM(
    model=model_id,
    max_seq_len=32678,
    max_batch_size=4,
    pytorch_backend_config=pytorch_config,
    kv_cache_config=kv_cache_config,
    tensor_parallel_size=8,
)
messages = [
    {"role": "system", "content": "/think"},
    {"role": "user", "content": "Write a haiku about GPUs"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
sampling_params = SamplingParams(
    max_tokens=512,
    temperature=0.6,
    top_p=0.95,
    add_special_tokens=False,
)
outputs = llm.generate([prompt], sampling_params)
print(outputs[0].outputs[0].text)
```

### **Use it with vLLM**

The snippet below shows how to use this model with vLLM. Use the latest version of vLLM, which you can install as follows:

```shell
pip install -U "vllm>=0.10.1"
```

Now you can run the server with:

```shell
vllm serve nvidia/NVIDIA-Nemotron-Nano-9B-v2 \
    --trust-remote-code \
    --max-num-seqs 64 \
    --mamba_ssm_cache_dtype float32
```

Note:
- Remember to add `--mamba_ssm_cache_dtype float32` to preserve quality. Without this option, the model's accuracy may degrade.
- If you encounter a CUDA OOM issue, try `--max-num-seqs 64` and consider lowering the value further if the error persists.
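
Once the server is running, it exposes an OpenAI-compatible API (on port 8000 by default). A minimal client sketch, assuming the `openai` Python package is installed:

```py
from openai import OpenAI

# Sketch: query the OpenAI-compatible endpoint started by `vllm serve` (default port 8000).
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

completion = client.chat.completions.create(
    model="nvidia/NVIDIA-Nemotron-Nano-9B-v2",
    messages=[
        {"role": "system", "content": "/think"},
        {"role": "user", "content": "Write a haiku about GPUs"},
    ],
    temperature=0.6,
    top_p=0.95,
    max_tokens=1024,
)
print(completion.choices[0].message.content)
```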

Alternatively, you can use Docker to launch a vLLM server.

```
export TP_SIZE=1  # Adjust this value based on the number of GPUs you want to use
docker run --runtime nvidia --gpus all \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    --env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \
    -p 8000:8000 \
    --ipc=host \
    vllm/vllm-openai:v0.10.1 \
    --model nvidia/NVIDIA-Nemotron-Nano-9B-v2 \
    --tensor-parallel-size ${TP_SIZE} \
    --max-num-seqs 64 \
    --max-model-len 131072 \
    --trust-remote-code \
    --mamba_ssm_cache_dtype float32
```

#### Using Budget Control with a vLLM Server

The thinking budget lets developers keep accuracy high while meeting response-time targets, which is especially crucial for customer support, autonomous agent steps, and edge devices where every millisecond counts.

With budget control, you can set a limit for internal reasoning:

* `max_thinking_tokens`: This threshold attempts to end the reasoning trace at the next newline encountered in the trace. If no newline is encountered within 500 tokens, the reasoning trace is cut off abruptly at `max_thinking_tokens + 500`.

Start a vLLM server:

```shell
vllm serve nvidia/NVIDIA-Nemotron-Nano-9B-v2 \
    --trust-remote-code \
    --mamba_ssm_cache_dtype float32
```

Client for supporting budget control:

```py
from typing import Any, Dict, List

import openai
from transformers import AutoTokenizer


class ThinkingBudgetClient:
    def __init__(self, base_url: str, api_key: str, tokenizer_name_or_path: str):
        self.base_url = base_url
        self.api_key = api_key
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path)
        self.client = openai.OpenAI(base_url=self.base_url, api_key=self.api_key)

    def chat_completion(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        max_thinking_budget: int = 512,
        max_tokens: int = 1024,
        **kwargs,
    ) -> Dict[str, Any]:
        assert (
            max_tokens > max_thinking_budget
        ), f"thinking budget must be smaller than maximum new tokens. Given {max_tokens=} and {max_thinking_budget=}"

        # 1. first call chat completion to get reasoning content
        response = self.client.chat.completions.create(
            model=model, messages=messages, max_tokens=max_thinking_budget, **kwargs
        )
        content = response.choices[0].message.content

        reasoning_content = content
        if "</think>" not in reasoning_content:
            # reasoning content is too long, close it with a period (.)
            reasoning_content = f"{reasoning_content}.\n</think>\n\n"
        reasoning_tokens_len = len(
            self.tokenizer.encode(reasoning_content, add_special_tokens=False)
        )
        remaining_tokens = max_tokens - reasoning_tokens_len
        assert (
            remaining_tokens > 0
        ), f"remaining tokens must be positive. Given {remaining_tokens=}. Increase the max_tokens or lower the max_thinking_budget."

        # 2. append reasoning content to messages and call completion
        messages.append({"role": "assistant", "content": reasoning_content})
        prompt = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            continue_final_message=True,
        )
        response = self.client.completions.create(
            model=model, prompt=prompt, max_tokens=remaining_tokens, **kwargs
        )

        response_data = {
            "reasoning_content": reasoning_content.strip().strip("</think>").strip(),
            "content": response.choices[0].text,
            "finish_reason": response.choices[0].finish_reason,
        }
        return response_data
```

Calling the server with a budget (restricted to 32 tokens here as an example):

```py
tokenizer_name_or_path = "nvidia/NVIDIA-Nemotron-Nano-9B-v2"
client = ThinkingBudgetClient(
    base_url="http://localhost:8000/v1",  # Nano 9B v2 deployed in thinking mode
    api_key="EMPTY",
    tokenizer_name_or_path=tokenizer_name_or_path,
)

result = client.chat_completion(
    model="nvidia/NVIDIA-Nemotron-Nano-9B-v2",
    messages=[
        {"role": "system", "content": "You are a helpful assistant. /think"},
        {"role": "user", "content": "What is 2+2?"},
    ],
    max_thinking_budget=32,
    max_tokens=512,
    temperature=0.6,
    top_p=0.95,
)
print(result)
```

You should see output similar to the following:

```
{'reasoning_content': "Okay, the user asked, What is 2+2? Let me think. Well, 2 plus 2 equals 4. That's a basic.", 'content': '2 + 2 equals **4**.\n', 'finish_reason': 'stop'}
```

#### Using Tool-Calling with a vLLM Server

Start a vLLM server with native tool-calling:

```shell
git clone https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-9B-v2

vllm serve nvidia/NVIDIA-Nemotron-Nano-9B-v2 \
    --trust-remote-code \
    --mamba_ssm_cache_dtype float32 \
    --enable-auto-tool-choice \
    --tool-parser-plugin "NVIDIA-Nemotron-Nano-9B-v2/nemotron_toolcall_parser_no_streaming.py" \
    --tool-call-parser "nemotron_json"
```

After launching the vLLM server, you can call it with tool-call support using a Python script like the one below:

```py
from openai import OpenAI

client = OpenAI(
    base_url="http://0.0.0.0:5000/v1",
    api_key="dummy",
)

completion = client.chat.completions.create(
    model="nvidia/NVIDIA-Nemotron-Nano-9B-v2",
    messages=[
        {"role": "system", "content": ""},
        {"role": "user", "content": "My bill is $100. What will be the amount for 18% tip?"}
    ],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "calculate_tip",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "bill_total": {
                            "type": "integer",
                            "description": "The total amount of the bill"
                        },
                        "tip_percentage": {
                            "type": "integer",
                            "description": "The percentage of tip to be applied"
                        }
                    },
                    "required": ["bill_total", "tip_percentage"]
                }
            }
        },
        {
            "type": "function",
            "function": {
                "name": "convert_currency",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "amount": {
                            "type": "integer",
                            "description": "The amount to be converted"
                        },
                        "from_currency": {
                            "type": "string",
                            "description": "The currency code to convert from"
                        },
                        "to_currency": {
                            "type": "string",
                            "description": "The currency code to convert to"
                        }
                    },
                    "required": ["from_currency", "amount", "to_currency"]
                }
            }
        }
    ],
    temperature=0.6,
    top_p=0.95,
    max_tokens=32768,
    stream=False
)

print(completion.choices[0].message.content)
print(completion.choices[0].message.tool_calls)
```

You should see output similar to the following:

```
<think>
Okay, let's see. The user has a bill of $100 and wants to know the amount for an 18% tip. Hmm, I need to calculate the tip based on the bill total and the percentage. The tools provided include calculate_tip, which takes bill_total and tip_percentage as parameters. So the bill_total here is 100, and the tip_percentage is 18. I should call the calculate_tip function with these values. Wait, do I need to check if the parameters are integers? The bill is $100, which is an integer, and 18% is also an integer. So that fits the function's requirements. I don't need to convert any currency here because the user is asking about a tip in the same currency. So the correct tool to use is calculate_tip with those parameters.
</think>

[ChatCompletionMessageToolCall(id='chatcmpl-tool-e341c6954d2c48c2a0e9071c7bdefd8b', function=Function(arguments='{"bill_total": 100, "tip_percentage": 18}', name='calculate_tip'), type='function')]
```

## Model Version

- v1.0

## Prompt Format

We follow the jinja chat template provided below. This template conditionally adds `<think>\n` to the start of the Assistant response if `/think` is found in either the system prompt or any user message. If no reasoning signal is provided, the model defaults to reasoning "on" mode. The chat template adds `<think></think>` to the start of the Assistant response if `/no_think` is found in the system prompt, thus enforcing reasoning on/off behavior.

```
{%- set ns = namespace(enable_thinking = true) %}

{%- for message in messages -%}
{%- set content = message['content'] -%}
{%- if message['role'] == 'user' or message['role'] == 'system' -%}
{%- if '/think' in content -%}
{%- set ns.enable_thinking = true -%}
{%- elif '/no_think' in content -%}
{%- set ns.enable_thinking = false -%}
{%- endif -%}
{%- endif -%}
{%- endfor -%}

{%- if messages[0]['role'] != 'system' -%}
{%- set ns.non_tool_system_content = '' -%}
{{- '<SPECIAL_10>System\n' -}}
{%- else -%}
{%- set ns.non_tool_system_content = messages[0]['content']
.replace('/think', '')
.replace('/no_think', '')
.strip()
-%}
{{- '<SPECIAL_10>System\n' + ns.non_tool_system_content }}
{%- endif -%}

{%- if tools -%}
{%- if ns.non_tool_system_content is defined and ns.non_tool_system_content != '' -%}
{{- '\n\n' -}}
{%- endif -%}

{{- 'You can use the following tools to assist the user if required:' -}}
{{- '\n<AVAILABLE_TOOLS>[' -}}
{%- for tool in tools -%}
{{- (tool.function if tool.function is defined else tool) | tojson -}}
{{- ', ' if not loop.last else '' -}}
{%- endfor -%}
{{- ']</AVAILABLE_TOOLS>\n\n' -}}

{{- 'If you decide to call any tool(s), use the following format:\n' -}}
{{- '<TOOLCALL>[{{"name": "tool_name1", "arguments": "tool_args1"}}, ' -}}
{{- '{{"name": "tool_name2", "arguments": "tool_args2"}}]</TOOLCALL>\n\n' -}}

{{- 'The user will execute tool-calls and return responses from tool(s) in this format:\n' -}}
{{- '<TOOL_RESPONSE>[{{"tool_response1"}}, {{"tool_response2"}}]</TOOL_RESPONSE>\n\n' -}}

{{- 'Based on the tool responses, you can call additional tools if needed, correct tool calls if any errors are found, or just respond to the user.' -}}
{%- endif -%}

{{- '\n' -}}

{%- set messages = messages[1:] if messages[0]['role'] == 'system' else messages -%}

{%- if messages[-1]['role'] == 'assistant' -%}
{%- set ns.last_turn_assistant_content = messages[-1]['content'].strip() -%}
{%- set messages = messages[:-1] -%}
{%- endif -%}

{%- for message in messages -%}
{%- set content = message['content'] -%}

{%- if message['role'] == 'user' -%}
{{- '<SPECIAL_11>User\n' + content.replace('/think', '').replace('/no_think', '').strip() + '\n' }}

{%- elif message['role'] == 'tool' -%}
{%- if loop.first or (messages[loop.index0 - 1].role != 'tool') -%}
{{- '<SPECIAL_11>User\n' + '<TOOL_RESPONSE>[' }}
{%- endif -%}
{{- message['content'] -}}
{{- ', ' if not loop.last and (messages[loop.index0 + 1].role == 'tool') else '' -}}
{%- if loop.last or (messages[loop.index0 + 1].role != 'tool') -%}
{{- ']</TOOL_RESPONSE>\n' -}}
{%- endif -%}

{%- elif message['role'] == 'assistant' -%}
{%- if '</think>' in content -%}
{%- set content = content.split('</think>')[1].strip() %}
{%- endif -%}

{{- '<SPECIAL_11>Assistant\n' + content.strip() }}

{%- if message.tool_calls -%}
{%- if content.strip() != '' -%}
{{- '\n\n' -}}
{%- endif -%}
{{- '<TOOLCALL>[' -}}
{%- for call in message.tool_calls -%}
{%- set fn = call.function if call.function is defined else call -%}
{{- '{"name": "' + fn.name + '", "arguments": ' -}}
{%- if fn.arguments is string -%}
{{- fn.arguments -}}
{%- else -%}
{{- fn.arguments | tojson -}}
{%- endif -%}
{{- '}' + (', ' if not loop.last else '') -}}
{%- endfor -%}
{{- ']</TOOLCALL>' -}}
{%- endif -%}

{{- '\n<SPECIAL_12>\n' -}}
{%- endif -%}
{%- endfor -%}

{%- if add_generation_prompt -%}
{{- '<SPECIAL_11>Assistant\n' -}}
{%- if ns.enable_thinking is defined and ns.enable_thinking is false -%}
{{- '<think></think>' -}}
{%- else -%}
{{- '<think>\n' -}}
{%- endif -%}
{%- if ns.last_turn_assistant_content is defined and ns.last_turn_assistant_content != '' -%}
{{- ns.last_turn_assistant_content -}}
{%- endif -%}

{%- else -%}
{%- if ns.last_turn_assistant_content is defined and ns.last_turn_assistant_content != '' -%}
{{- '<SPECIAL_11>Assistant\n' -}}
{%- if ns.enable_thinking is defined and ns.enable_thinking is false -%}
{{- '<think></think>' -}}
{%- else -%}
{{- '<think>\n' -}}
{%- endif -%}
{{- ns.last_turn_assistant_content -}}

{%- if continue_final_message is defined -%}
{%- if continue_final_message is false -%}
{{- '\n<SPECIAL_12>\n' -}}
{%- endif -%}
{%- else -%}
{{- '\n<SPECIAL_12>\n' -}}
{%- endif -%}
{%- endif -%}
{%- endif -%}
```
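
As a quick sanity check (a sketch, assuming the tokenizer ships this chat template), you can render the template and observe how the reasoning prefill differs between the two modes:

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("nvidia/NVIDIA-Nemotron-Nano-9B-v2")

for signal in ("/think", "/no_think"):
    prompt = tokenizer.apply_chat_template(
        [
            {"role": "system", "content": signal},
            {"role": "user", "content": "Write a haiku about GPUs"},
        ],
        tokenize=False,
        add_generation_prompt=True,
    )
    # "/think" ends the prompt with "<think>\n"; "/no_think" ends it with "<think></think>".
    print(signal, "->", repr(prompt[-20:]))
```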

## Training, Testing, and Evaluation Datasets

### Training datasets

* Data Modality: Text
* Text Training Data Size: More than 10 Trillion Tokens
* Train/Test/Valid Split: We used 100% of the corpus for pre-training and relied on external benchmarks for testing.
* Data Collection Method by dataset: Hybrid: Automated, Human, Synthetic
* Labeling Method by dataset: Hybrid: Automated, Human, Synthetic

**Properties:** The post-training corpus for NVIDIA-Nemotron-Nano-9B-v2 consists of English and multilingual text (German, Spanish, French, Italian, Korean, Portuguese, Russian, Japanese, Chinese and English). Our sources cover a variety of document types such as webpages, dialogue, articles, and other written materials. The corpus spans domains including code, legal, math, science, finance, and more. We also include a small portion of question-answering and alignment-style data to improve model accuracy. For several of the domains listed above we used synthetic data, specifically reasoning traces, from DeepSeek R1/R1-0528, Qwen3-235B-A22B, Nemotron 4 340B, Qwen2.5-32B-Instruct-AWQ, Qwen2.5-14B-Instruct, and Qwen 2.5 72B.

The pre-training corpus for NVIDIA-Nemotron-Nano-9B-v2 consists of high-quality curated and synthetically-generated data. The model is trained on English as well as 15 additional natural languages and 43 programming languages. Our sources cover a variety of document types such as webpages, dialogue, articles, and other written materials. The corpus spans domains including legal, math, science, finance, and more. We also include a small portion of question-answering and alignment-style data to improve model accuracy. The model was pre-trained for approximately twenty trillion tokens.

Alongside the model, we release our [final pretraining data](https://huggingface.co/collections/nvidia/nemotron-pre-training-dataset-689d9de36f84279d83786b35), as outlined in this section. For ease of analysis, there is an ungated sample set. For all remaining code, math, and multilingual data, gating and approval are required; the dataset is permissively licensed for model training purposes.

More details on the datasets and synthetic data generation methods can be found in the technical report [NVIDIA Nemotron Nano 2: An Accurate and Efficient Hybrid Mamba-Transformer Reasoning Model](https://research.nvidia.com/labs/adlr/files/NVIDIA-Nemotron-Nano-2-Technical-Report.pdf).

## Public Datasets

| Dataset | Collection Period |
| :---- | :---- |
| [Problems in Elementary Mathematics for Home Study](https://archive.org/details/AntonovVygodskyNikitinSankinProblemsInElementaryMathematicsForHomeStudyMir1982) | 4/23/2025 |
| [GSM8K](https://github.com/openai/grade-school-math) | 4/23/2025 |
| [PRM800K](https://github.com/openai/prm800k) | 4/23/2025 |
| [CC-NEWS](https://commoncrawl.org/blog/news-dataset-available) | 4/23/2025 |
| [Common Crawl](https://commoncrawl.org/) | 4/23/2025 |
| [Wikimedia](https://dumps.wikimedia.org/) | 4/23/2025 |
| [Bespoke-Stratos-17k](https://huggingface.co/datasets/bespokelabs/Bespoke-Stratos-17k) | 4/23/2025 |
| [tigerbot-kaggle-leetcodesolutions-en-2k](https://huggingface.co/datasets/TigerResearch/tigerbot-kaggle-leetcodesolutions-en-2k) | 4/23/2025 |
| [glaive-function-calling-v2](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2) | 4/23/2025 |
| [APIGen Function-Calling](https://huggingface.co/datasets/Salesforce/xlam-function-calling-60k) | 4/23/2025 |
| [LMSYS-Chat-1M](https://huggingface.co/datasets/lmsys/lmsys-chat-1m) | 4/23/2025 |
| [Open Textbook Library - CC BY-SA & GNU subset](https://open.umn.edu/opentextbooks/textbooks/) and [OpenStax - CC BY-SA subset](https://openstax.org/) | 4/23/2025 |
| [Advanced Reasoning Benchmark](https://github.com/TheDuckAI/arb), [tigerbot-kaggle-leetcodesolutions-en-2k](https://huggingface.co/datasets/TigerResearch/tigerbot-kaggle-leetcodesolutions-en-2k), [PRM800K](https://github.com/openai/prm800k), and [SciBench](https://github.com/mandyyyyii/scibench) | 4/23/2025 |
| [FineWeb-2](https://huggingface.co/datasets/HuggingFaceFW/fineweb-2) | 4/23/2025 |
| [Court Listener](https://www.courtlistener.com/help/api/bulk-data/) | Legacy Download |
| [peS2o](https://huggingface.co/datasets/allenai/peS2o) | Legacy Download |
| [OpenWebMath](https://huggingface.co/datasets/open-web-math/open-web-math) | Legacy Download |
| [BioRxiv](https://www.biorxiv.org/tdm) | Legacy Download |
| [PMC Open Access Subset](https://pmc.ncbi.nlm.nih.gov/tools/openftlist/) | Legacy Download |
| [OpenWebText2](https://openwebtext2.readthedocs.io/en/latest/) | Legacy Download |
| [Stack Exchange Data Dump](https://archive.org/details/stackexchange) | Legacy Download |
| [PubMed Abstracts](https://github.com/thoppe/The-Pile-PubMed) | Legacy Download |
| [NIH ExPorter](https://exporter.nih.gov/ExPORTER_Catalog.aspx) | Legacy Download |
| [arXiv](https://info.arxiv.org/help/bulk_data/index.html) | Legacy Download |
| [BigScience Workshop Datasets](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml#datasets) | Legacy Download |
| [Reddit Dataset](https://files.pushshift.io/reddit/) | Legacy Download |
| [SEC's Electronic Data Gathering, Analysis, and Retrieval (EDGAR)](https://www.sec.gov/search-filings) | Legacy Download |
| [Public Software Heritage S3](https://docs.softwareheritage.org/devel/swh-export/graph/dataset.html#summary-of-dataset-versions) | Legacy Download |
| [The Stack](https://huggingface.co/datasets/bigcode/the-stack) | Legacy Download |
| [mC4](https://huggingface.co/datasets/legacy-datasets/mc4) | Legacy Download |
| [Advanced Mathematical Problem Solving](https://github.com/hendrycks/math?tab=readme-ov-file) | Legacy Download |
| [MathPile](https://github.com/GAIR-NLP/MathPile/) | Legacy Download |
| [NuminaMath CoT](https://huggingface.co/datasets/AI-MO/NuminaMath-CoT) | Legacy Download |
| [PMC Article](https://pmc.ncbi.nlm.nih.gov/tools/textmining/) | Legacy Download |
| [FLAN](https://github.com/google-research/FLAN) | Legacy Download |
| [Advanced Reasoning Benchmark](https://github.com/TheDuckAI/arb) | Legacy Download |
| [SciBench](https://github.com/mandyyyyii/scibench) | Legacy Download |
| [WikiTableQuestions](https://huggingface.co/datasets/wikitablequestions) | Legacy Download |
| [FinQA](https://finqasite.github.io/) | Legacy Download |
| [Riddles](https://github.com/crawsome/riddles) | Legacy Download |
| [Problems in Elementary Mathematics for Home Study](https://archive.org/details/AntonovVygodskyNikitinSankinProblemsInElementaryMathematicsForHomeStudyMir1982) | Legacy Download |
| [MedMCQA](https://huggingface.co/datasets/openlifescienceai/medmcqa) | Legacy Download |
| [Cosmos QA](https://huggingface.co/datasets/allenai/cosmos_qa) | Legacy Download |
| [MCTest](https://huggingface.co/datasets/sagnikrayc/mctest) | Legacy Download |
| [AI2's Reasoning Challenge](https://huggingface.co/datasets/ai2_arc) | Legacy Download |
| [OpenBookQA](https://github.com/allenai/OpenBookQA) | Legacy Download |
| [MMLU Auxiliary Train](https://huggingface.co/datasets/cais/mmlu/viewer/all/auxiliary_train) | Legacy Download |
| [social-chemestry-101](https://huggingface.co/datasets/tasksource/social-chemestry-101) | Legacy Download |
| [Moral Stories](https://huggingface.co/datasets/demelin/moral_stories) | Legacy Download |
| [The Common Pile v0.1](https://huggingface.co/common-pile) | Legacy Download |
| [FineMath](https://huggingface.co/datasets/HuggingFaceTB/finemath) | Legacy Download |
| [MegaMath](https://huggingface.co/datasets/LLM360/MegaMath) | Legacy Download |
| [FastChat](https://github.com/lm-sys/FastChat) | 6/30/2025 |

## Private Non-publicly Accessible Datasets of Third Parties

| Dataset |
| :---- |
| Global Regulation |
| Workbench |

## Online Dataset Sources

The English Common Crawl data was downloaded from the Common Crawl Foundation (see their [FAQ](https://commoncrawl.org/faq) for details on their crawling) and includes the snapshots CC-MAIN-2013-20 through CC-MAIN-2025-13. The data was subsequently deduplicated and filtered in various ways described in the [Nemotron-CC paper](https://arxiv.org/abs/2412.02595).

Additionally, we extracted data for fifteen languages from the following three Common Crawl snapshots: CC-MAIN-2024-51, CC-MAIN-2025-08, and CC-MAIN-2025-18. The fifteen languages were Arabic, Chinese, Danish, Dutch, French, German, Italian, Japanese, Korean, Polish, Portuguese, Russian, Spanish, Swedish, and Thai. As we did not have reliable multilingual model-based quality classifiers available, we applied heuristic filtering instead, similar to what we did for lower-quality English data in the Nemotron-CC pipeline, but selectively removed some filters for languages where they did not work well. Deduplication was done in the same way as for Nemotron-CC.

The GitHub Crawl was collected using the GitHub REST API and the Amazon S3 API. Each crawl was operated in accordance with the rate limits set by its respective source, either GitHub or S3. We collect raw source code and subsequently remove any having a license which does not exist in our permissive-license set (for additional details, refer to the technical report).

| Dataset | Modality | Dataset Size (Tokens) | Collection Period |
| :---- | :---- | :---- | :---- |
| English Common Crawl | Text | 3.360T | 4/8/2025 |
| Multilingual Common Crawl | Text | 812.7B | 5/1/2025 |
| GitHub Crawl | Text | 747.4B | 4/29/2025 |

## NVIDIA-Sourced Synthetic Datasets

| Dataset | Modality | Dataset Size (Tokens) | Seed Dataset | Model(s) used for generation |
| :---- | :---- | :---- | :---- | :---- |
| Synthetic Art of Problem Solving from DeepSeek-R1 | Text | 25.5B | [Art of Problem Solving](https://artofproblemsolving.com/company); [American Mathematics Competitions 8](https://artofproblemsolving.com/wiki/index.php/AMC_8_Problems_and_Solutions); [American Mathematics Competitions 10](https://artofproblemsolving.com/wiki/index.php/AMC_10_Problems_and_Solutions); | [DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1) |
| Synthetic Moral Stories and Social Chemistry from Mixtral-8x22B-v0.1 | Text | 327M | [social-chemestry-101](https://huggingface.co/datasets/tasksource/social-chemestry-101); [Moral Stories](https://huggingface.co/datasets/demelin/moral_stories) | [Mixtral-8x22B-v0.1](https://huggingface.co/mistralai/Mixtral-8x22B-v0.1) |
| Synthetic Social Sciences seeded with OpenStax from DeepSeek-V3, Mixtral-8x22B-v0.1, and Qwen2.5-72B | Text | 83.6M | [OpenStax \- CC BY-SA subset](https://openstax.org/) | [DeepSeek-V3](https://huggingface.co/deepseek-ai/DeepSeek-V3); [Mixtral-8x22B-v0.1](https://huggingface.co/mistralai/Mixtral-8x22B-v0.1); [Qwen2.5-72B](https://huggingface.co/Qwen/Qwen2.5-72B) |
| Synthetic Health Sciences seeded with OpenStax from DeepSeek-V3, Mixtral-8x22B-v0.1, and Qwen2.5-72B | Text | 9.7M | [OpenStax \- CC BY-SA subset](https://openstax.org/) | [DeepSeek-V3](https://huggingface.co/deepseek-ai/DeepSeek-V3); [Mixtral-8x22B-v0.1](https://huggingface.co/mistralai/Mixtral-8x22B-v0.1); [Qwen2.5-72B](https://huggingface.co/Qwen/Qwen2.5-72B) |
| Synthetic STEM seeded with OpenStax, Open Textbook Library, and GSM8K from DeepSeek-R1, DeepSeek-V3, DeepSeek-V3-0324, and Qwen2.5-72B | Text | 175M | [OpenStax \- CC BY-SA subset](https://openstax.org/); [GSM8K](https://github.com/openai/grade-school-math); [Open Textbook Library \- CC BY-SA & GNU subset](https://open.umn.edu/opentextbooks/textbooks/) | [DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1), [DeepSeek-V3](https://huggingface.co/deepseek-ai/DeepSeek-V3); [DeepSeek-V3-0324](https://huggingface.co/deepseek-ai/DeepSeek-V3-0324); [Qwen2.5-72B](https://huggingface.co/Qwen/Qwen2.5-72B) |
| [Nemotron-PrismMath](https://huggingface.co/datasets/nvidia/Nemotron-PrismMath) | Text | 4.6B | [Big-Math-RL-Verified](https://huggingface.co/datasets/SynthLabsAI/Big-Math-RL-Verified); [OpenR1-Math-220k](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k) | [Qwen2.5-0.5B-instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct), [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct); [DeepSeek-R1-Distill-Qwen-32B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B) |
| Synthetic Question Answering Data from Papers and Permissible Books from Qwen2.5-72B-Instruct | Text | 350M | [arXiv](https://info.arxiv.org/help/bulk_data/index.html); [National Institutes of Health ExPorter](https://www.nih.gov/); [BioRxiv](https://www.biorxiv.org/tdm); [PMC Article](https://pmc.ncbi.nlm.nih.gov/tools/textmining/); [USPTO Backgrounds](https://data.uspto.gov/apis/transition-guide/bdss#pats); [peS2o](https://huggingface.co/datasets/allenai/peS2o); Global Regulation; [CORE](https://core.ac.uk/documentation/dataset); [PG-19](https://github.com/google-deepmind/pg19); [DOAB CC BY & CC BY-SA subset](https://www.doabooks.org/en); [NDLTD](https://ndltd.org/thesis-resources/global-etd-search/) | [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) |
| Synthetic FineMath-4+ Reprocessed from DeepSeek-V3 | Text | 9.2B | [Common Crawl](https://commoncrawl.org/latest-crawl) | [DeepSeek-V3](https://huggingface.co/deepseek-ai/DeepSeek-V3) |
| Synthetic FineMath-3+ Reprocessed from phi-4 | Text | 27.6B | [Common Crawl](https://commoncrawl.org/latest-crawl) | [phi-4](https://huggingface.co/microsoft/phi-4) |
| Synthetic Union-3+ Reprocessed from phi-4 | Text | 93.1B | [Common Crawl](https://commoncrawl.org/latest-crawl) | [phi-4](https://huggingface.co/microsoft/phi-4) |
| Refreshed [Nemotron-MIND](https://huggingface.co/datasets/nvidia/Nemotron-MIND) from phi-4 | Text | 73B | [Common Crawl](https://commoncrawl.org/latest-crawl) | [phi-4](https://huggingface.co/microsoft/phi-4) |
| Synthetic Union-4+ Reprocessed from phi-4 | Text | 14.12B | [Common Crawl](https://commoncrawl.org/latest-crawl) | [phi-4](https://huggingface.co/microsoft/phi-4) |
| Synthetic Union-3+ minus 4+ Reprocessed from phi-4 | Text | 78.95B | [Common Crawl](https://commoncrawl.org/latest-crawl) | [phi-4](https://huggingface.co/microsoft/phi-4) |
| Synthetic Union-3 Refreshed from phi-4 | Text | 80.94B | [Common Crawl](https://commoncrawl.org/latest-crawl) | [phi-4](https://huggingface.co/microsoft/phi-4) |
| Synthetic Union-4+ Refreshed from phi-4 | Text | 52.32B | [Common Crawl](https://commoncrawl.org/latest-crawl) | [phi-4](https://huggingface.co/microsoft/phi-4) |
| Synthetic AGIEval seeded with AQUA-RAT, LogiQA, and AR-LSAT from DeepSeek-V3 and DeepSeek-V3-0324 | Text | 4.0B | [AQUA-RAT](https://huggingface.co/datasets/deepmind/aqua_rat); [LogiQA](https://huggingface.co/datasets/lucasmccabe/logiqa); [AR-LSAT](https://github.com/zhongwanjun/AR-LSAT) | [DeepSeek-V3](https://huggingface.co/deepseek-ai/DeepSeek-V3); [DeepSeek-V3-0324](https://huggingface.co/deepseek-ai/DeepSeek-V3-0324) |
| Synthetic AGIEval seeded with AQUA-RAT, LogiQA, and AR-LSAT from Qwen3-30B-A3B | Text | 4.2B | [AQUA-RAT](https://huggingface.co/datasets/deepmind/aqua_rat); [LogiQA](https://huggingface.co/datasets/lucasmccabe/logiqa); [AR-LSAT](https://github.com/zhongwanjun/AR-LSAT) | [Qwen3-30B-A3B](https://huggingface.co/Qwen/Qwen3-30B-A3B) |
| Synthetic Art of Problem Solving from Qwen2.5-32B-Instruct, Qwen2.5-Math-72B, Qwen2.5-Math-7B, and Qwen2.5-72B-Instruct | Text | 83.1B | [Art of Problem Solving](https://artofproblemsolving.com/company); [American Mathematics Competitions 8](https://artofproblemsolving.com/wiki/index.php/AMC_8_Problems_and_Solutions); [American Mathematics Competitions 10](https://artofproblemsolving.com/wiki/index.php/AMC_10_Problems_and_Solutions); [GSM8K](https://github.com/openai/grade-school-math); [PRM800K](https://github.com/openai/prm800k) | [Qwen2.5-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct); [Qwen2.5-Math-72B](https://huggingface.co/Qwen/Qwen2.5-Math-72B); [Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B); [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) |
| Synthetic MMLU Auxiliary Train from DeepSeek-R1 | Text | 0.5B | [MMLU Auxiliary Train](https://huggingface.co/datasets/cais/mmlu/viewer/all/auxiliary_train) | [DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1) |
| Synthetic Long Context Continued Post-Training Data from Papers and Permissible Books from Qwen2.5-72B-Instruct | Text | 5.4B | [arXiv](https://info.arxiv.org/help/bulk_data/index.html); [National Institutes of Health ExPorter](https://www.nih.gov/); [BioRxiv](https://www.biorxiv.org/tdm); [PMC Article](https://pmc.ncbi.nlm.nih.gov/tools/textmining/); [USPTO Backgrounds](https://data.uspto.gov/apis/transition-guide/bdss#pats); [peS2o](https://huggingface.co/datasets/allenai/peS2o); Global Regulation; [CORE](https://core.ac.uk/documentation/dataset); [PG-19](https://github.com/google-deepmind/pg19); [DOAB CC BY & CC BY-SA subset](https://www.doabooks.org/en); [NDLTD](https://ndltd.org/thesis-resources/global-etd-search/) | [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) |
| Synthetic Common Crawl from Qwen3-30B-A3B and Mistral-Nemo-12B-Instruct | Text | 1.949T | [Common Crawl](https://commoncrawl.org/) | [Qwen3-30B-A3B](https://huggingface.co/Qwen/Qwen3-30B-A3B); [Mistral-NeMo-12B-Instruct](https://huggingface.co/nvidia/Mistral-NeMo-12B-Instruct) |
| Synthetic Multilingual Data from Common Crawl from Qwen3-30B-A3B | Text | 997.3B | [Common Crawl](https://commoncrawl.org/) | [Qwen3-30B-A3B](https://huggingface.co/Qwen/Qwen3-30B-A3B) |
| Synthetic Multilingual Data from Wikimedia from Qwen3-30B-A3B | Text | 55.1B | [Wikimedia](https://dumps.wikimedia.org/) | [Qwen3-30B-A3B](https://huggingface.co/Qwen/Qwen3-30B-A3B) |
| Synthetic OpenMathReasoning from DeepSeek-R1-0528 | Text | 1.5M | [OpenMathReasoning](https://huggingface.co/datasets/nvidia/OpenMathReasoning) | [DeepSeek-R1-0528](https://huggingface.co/deepseek-ai/DeepSeek-R1-0528) |
| Synthetic OpenCodeReasoning from DeepSeek-R1-0528 | Text | 1.1M | [OpenCodeReasoning](https://huggingface.co/datasets/nvidia/OpenCodeReasoning) | [DeepSeek-R1-0528](https://huggingface.co/deepseek-ai/DeepSeek-R1-0528) |
| Synthetic Science Data from DeepSeek-R1-0528 | Text | 1.5M | \- | [DeepSeek-R1-0528](https://huggingface.co/deepseek-ai/DeepSeek-R1-0528) |
| Synthetic Humanity's Last Exam from DeepSeek-R1-0528 | Text | 460K | [Humanity's Last Exam](https://huggingface.co/datasets/cais/hle) | [DeepSeek-R1-0528](https://huggingface.co/deepseek-ai/DeepSeek-R1-0528) |
| Synthetic ToolBench from Qwen3-235B-A22B | Text | 400K | [ToolBench](https://github.com/OpenBMB/ToolBench) | [Qwen3-235B-A22B](https://huggingface.co/Qwen/Qwen3-235B-A22B) |
| Synthetic Nemotron Content Safety Dataset V2, eval-safety, Gretel Synthetic Safety Alignment, and RedTeam\_2K from DeepSeek-R1-0528 | Text | 52K | [Nemotron Content Safety Dataset V2](https://huggingface.co/datasets/nvidia/Aegis-AI-Content-Safety-Dataset-2.0); [eval-safety](https://github.com/CrystalEye42/eval-safety/blob/main/malicious_tasks_dataset.yaml); [Gretel Synthetic Safety Alignment](https://huggingface.co/datasets/gretelai/gretel-safety-alignment-en-v1); [RedTeam\_2K](https://huggingface.co/datasets/JailbreakV-28K/JailBreakV-28k/viewer/RedTeam_2K) | [DeepSeek-R1-0528](https://huggingface.co/deepseek-ai/DeepSeek-R1-0528) |
| Synthetic HelpSteer from Qwen3-235B-A22B | Text | 120K | [HelpSteer3](https://huggingface.co/datasets/nvidia/HelpSteer3); [HelpSteer2](https://huggingface.co/datasets/nvidia/HelpSteer2) | [Qwen3-235B-A22B](https://huggingface.co/Qwen/Qwen3-235B-A22B) |
| Synthetic Alignment data from Mixtral-8x22B-Instruct-v0.1, Mixtral-8x7B-Instruct-v0.1, and Nemotron-4 Family | Text | 400K | [HelpSteer2](https://huggingface.co/datasets/nvidia/HelpSteer2); [C4](https://huggingface.co/datasets/allenai/c4); [LMSYS-Chat-1M](https://huggingface.co/datasets/lmsys/lmsys-chat-1m); [ShareGPT52K](https://huggingface.co/datasets/RyokoAI/ShareGPT52K); [tigerbot-kaggle-leetcodesolutions-en-2k](https://huggingface.co/datasets/TigerResearch/tigerbot-kaggle-leetcodesolutions-en-2k); [GSM8K](https://github.com/openai/grade-school-math); [PRM800K](https://github.com/openai/prm800k); lm\_identity (NVIDIA internal); [FinQA](https://finqasite.github.io/); [WikiTableQuestions](https://huggingface.co/datasets/wikitablequestions); [Riddles](https://github.com/crawsome/riddles); ChatQA nvolve-multiturn (NVIDIA internal); [glaive-function-calling-v2](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2); [SciBench](https://github.com/mandyyyyii/scibench); [OpenBookQA](https://github.com/allenai/OpenBookQA); [Advanced Reasoning Benchmark](https://github.com/TheDuckAI/arb); [Public Software Heritage S3](https://docs.softwareheritage.org/devel/swh-export/graph/dataset.html#summary-of-dataset-versions); [Khan Academy Math Keywords](https://www.khanacademy.org/math) | Nemotron-4-15B-Base (NVIDIA internal); Nemotron-4-15B-Instruct (NVIDIA internal); [Nemotron-4-340B-Base](https://huggingface.co/nvidia/Nemotron-4-340B-Base); [Nemotron-4-340B-Instruct](https://huggingface.co/nvidia/Nemotron-4-340B-Instruct); [Nemotron-4-340B-Reward](https://huggingface.co/nvidia/Nemotron-4-340B-Reward); [Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1); [Mixtral-8x22B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1) |
| Synthetic LMSYS-Chat-1M from Qwen3-235B-A22B | Text | 1M | [LMSYS-Chat-1M](https://huggingface.co/datasets/lmsys/lmsys-chat-1m) | [Qwen3-235B-A22B](https://huggingface.co/Qwen/Qwen3-235B-A22B) |
| Synthetic Multilingual Reasoning data from DeepSeek-R1-0528, Qwen2.5-32B-Instruct-AWQ, and Qwen2.5-14B-Instruct | Text | 25M | [OpenMathReasoning](https://huggingface.co/datasets/nvidia/OpenMathReasoning); [OpenCodeReasoning](https://huggingface.co/datasets/nvidia/OpenCodeReasoning) | [DeepSeek-R1-0528](https://huggingface.co/deepseek-ai/DeepSeek-R1-0528); [Qwen2.5-32B-Instruct-AWQ](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct-AWQ) (translation); [Qwen2.5-14B-Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct) (translation); |
| Synthetic Multilingual Reasoning data from Qwen3-235B-A22B and Gemma 3 Post-Trained models | Text | 5M | [WildChat](https://huggingface.co/datasets/allenai/WildChat-1M) | [Qwen3-235B-A22B](https://huggingface.co/Qwen/Qwen3-235B-A22B); [Gemma 3 PT 12B](https://huggingface.co/google/gemma-3-12b-it); [Gemma 3 PT 27B](https://huggingface.co/google/gemma-3-27b-it) |

### Evaluation Dataset:

* Data Collection Method by dataset: Hybrid: Human, Synthetic
* Labeling Method by dataset: Hybrid: Automated, Human, Synthetic

## Inference

- Engines: HF, vLLM, TRT-LLM
- Test Hardware: NVIDIA A10G 24GB, H100 80GB

## Ethical Considerations

NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our [Trustworthy AI terms of service](https://www.nvidia.com/en-us/agreements/trustworthy-ai/terms/), developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.

For more detailed information on ethical considerations for this model, please see the Model Card++ [Bias](./bias.md), [Explainability](./explainability.md), [Safety & Security](./safety.md), and [Privacy](./privacy.md) Subcards.

Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).

## Citation

```
@misc{nvidia2025nvidianemotronnano2,
      title={NVIDIA Nemotron Nano 2: An Accurate and Efficient Hybrid Mamba-Transformer Reasoning Model},
      author={NVIDIA},
      year={2025},
      eprint={2508.14444},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2508.14444},
}
```
chat_template.jinja
ADDED
@@ -0,0 +1,32 @@
1 |
+
{%- set ns = namespace(enable_thinking=true) %}{%- for message in messages -%}{%- set content = message['content'] -%}{%- if message['role'] == 'user' or message['role'] == 'system' -%}{%- if '/think' in content -%}{%- set ns.enable_thinking = true -%}{%- elif '/no_think' in content -%}{%- set ns.enable_thinking = false -%}{%- endif -%}{%- endif -%}{%- endfor -%}{%- if messages[0]['role'] != 'system' -%}{%- set ns.non_tool_system_content = '' -%}{{- '<SPECIAL_10>System
|
2 |
+
' -}}{%- else -%}{%- set ns.non_tool_system_content = messages[0]['content'].replace('/think', '').replace('/no_think', '').strip() -%}{{- '<SPECIAL_10>System
|
3 |
+
' + ns.non_tool_system_content }}{%- endif -%}{%- if tools -%}{%- if ns.non_tool_system_content is defined and ns.non_tool_system_content != '' -%}{{- '
|
4 |
+
|
5 |
+
' -}}{%- endif -%}{{- 'You can use the following tools to assist the user if required:' -}}{{- '
|
6 |
+
<AVAILABLE_TOOLS>[' -}}{%- for tool in tools -%}{{- (tool.function if tool.function is defined else tool) | tojson -}}{{- ', ' if not loop.last else '' -}}{%- endfor -%}{{- ']</AVAILABLE_TOOLS>
|
7 |
+
|
8 |
+
' -}}{{- 'If you decide to call any tool(s), use the following format:
|
9 |
+
' -}}{{- '<TOOLCALL>[{{"name": "tool_name1", "arguments": "tool_args1"}}, ' -}}{{- '{{"name": "tool_name2", "arguments": "tool_args2"}}]</TOOLCALL>
|
10 |
+
|
11 |
+
' -}}{{- 'The user will execute tool-calls and return responses from tool(s) in this format:
|
12 |
+
' -}}{{- '<TOOL_RESPONSE>[{{"tool_response1"}}, {{"tool_response2"}}]</TOOL_RESPONSE>
|
13 |
+
|
14 |
+
' -}}{{- 'Based on the tool responses, you can call additional tools if needed, correct tool calls if any errors are found, or just respond to the user.' -}}{%- endif -%}{{- '
|
15 |
+
' -}}{%- set messages = messages[1:] if messages[0]['role'] == 'system' else messages -%}{%- if messages[-1]['role'] == 'assistant' -%}{%- set ns.last_turn_assistant_content = messages[-1]['content'].strip() -%}{%- set messages = messages[:-1] -%}{%- endif -%}{%- for message in messages %}{%- set content = message['content'] %}{%- if message['role'] == 'user' -%}{{- '<SPECIAL_11>User
|
16 |
+
' + content.replace('/think', '').replace('/no_think', '').strip() + '
|
17 |
+
' }}{%- elif message['role'] == 'tool' -%}{%- if loop.first or (messages[loop.index0 - 1].role != 'tool') -%}{{- '<SPECIAL_11>User
|
18 |
+
' + '<TOOL_RESPONSE>[' }}{%- endif -%}{{- message['content'] -}}{{- ', ' if not loop.last and (messages[loop.index0 + 1].role == 'tool') else '' -}}{%- if loop.last or (messages[loop.index0 + 1].role != 'tool') -%}{{- ']</TOOL_RESPONSE>
|
19 |
+
' -}}{%- endif -%}{%- elif message['role'] == 'assistant' -%}{%- if '</think>' in content -%}{%- set content = content.split('</think>')[1].strip() %}{%- endif -%}{{- '<SPECIAL_11>Assistant
|
20 |
+
' + content.strip() }}{%- if message.tool_calls -%}{%- if content.strip() != '' -%}{{- '
|
21 |
+
|
22 |
+
' -}}{%- endif -%}{{- '<TOOLCALL>[' -}}{%- for call in message.tool_calls -%}{%- set fn = call.function if call.function is defined else call -%}{{- '{"name": "' + fn.name + '", "arguments": ' -}}{%- if fn.arguments is string -%}{{- fn.arguments -}}{%- else -%}{{- fn.arguments | tojson -}}{%- endif -%}{{- '}' + (', ' if not loop.last else '') -}}{%- endfor -%}{{- ']</TOOLCALL>' -}}{%- endif -%}{{- '
|
23 |
+
<SPECIAL_12>
|
24 |
+
' -}}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{- '<SPECIAL_11>Assistant
|
25 |
+
' -}}{%- if ns.enable_thinking is defined and ns.enable_thinking is false -%}{{- '<think></think>' -}}{%- else -%}{{- '<think>
|
26 |
+
' -}}{%- endif -%}{%- if ns.last_turn_assistant_content is defined and ns.last_turn_assistant_content != '' -%}{{- ns.last_turn_assistant_content -}}{%- endif -%}{%- else -%}{%- if ns.last_turn_assistant_content is defined and ns.last_turn_assistant_content != '' -%}{{- '<SPECIAL_11>Assistant
|
27 |
+
' -}}{%- if ns.enable_thinking is defined and ns.enable_thinking is false -%}{{- '<think></think>' -}}{%- else -%}{{- '<think>
|
28 |
+
' -}}{%- endif -%}{{- ns.last_turn_assistant_content -}}{%- if continue_final_message is defined -%}{%- if continue_final_message is false -%}{{- '
|
29 |
+
<SPECIAL_12>
|
30 |
+
' -}}{%- endif -%}{%- else -%}{{- '
|
31 |
+
<SPECIAL_12>
|
32 |
+
' -}}{%- endif -%}{%- endif -%}{%- endif -%}
|
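The template above drives three behaviours: a `/think` or `/no_think` marker in any system or user turn toggles the `<think>` reasoning opener, tool definitions are serialized into an `<AVAILABLE_TOOLS>[...]` block, and role headers are emitted with the `<SPECIAL_10>`/`<SPECIAL_11>` tokens. A small sketch for inspecting the rendered prompt follows; the repo id is a placeholder and `get_weather` is an illustrative tool, not something shipped with this checkpoint.

```python
# Sketch: render the chat template to a string to inspect the /no_think toggle and
# the tool-declaration block. The repo id below is a hypothetical placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "<this-checkpoint-repo-or-local-path>", trust_remote_code=True
)

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # illustrative tool, not part of the repo
        "description": "Look up current weather for a city.",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}]
messages = [
    {"role": "system", "content": "You are a concise assistant. /no_think"},
    {"role": "user", "content": "What is the weather in Santa Clara?"},
]

prompt = tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
# Expect: <SPECIAL_10>System ... <AVAILABLE_TOOLS>[...]</AVAILABLE_TOOLS> ...
# <SPECIAL_11>Assistant followed by <think></think> because /no_think was set.
print(prompt)
```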
config.json
ADDED
@@ -0,0 +1,96 @@
1 |
+
{
|
2 |
+
"architectures": [
|
3 |
+
"NemotronHForCausalLM"
|
4 |
+
],
|
5 |
+
"attention_bias": false,
|
6 |
+
"attention_dropout": 0.0,
|
7 |
+
"auto_map": {
|
8 |
+
"AutoConfig": "configuration_nemotron_h.NemotronHConfig",
|
9 |
+
"AutoModelForCausalLM": "modeling_nemotron_h.NemotronHForCausalLM"
|
10 |
+
},
|
11 |
+
"bos_token_id": 1,
|
12 |
+
"chunk_size": 128,
|
13 |
+
"conv_kernel": 4,
|
14 |
+
"dtype": "bfloat16",
|
15 |
+
"eos_token_id": 12,
|
16 |
+
"expand": 2,
|
17 |
+
"head_dim": 128,
|
18 |
+
"hidden_dropout": 0.0,
|
19 |
+
"hidden_size": 4480,
|
20 |
+
"hybrid_override_pattern": "M-M-M-MM-M-M-M*-M-M-M*-M-M-M-M*-M-M-M-M*-M-MM-M-M-M-M-M-",
|
21 |
+
"initializer_range": 0.02,
|
22 |
+
"intermediate_size": 15680,
|
23 |
+
"layer_norm_epsilon": 1e-05,
|
24 |
+
"mamba_head_dim": 80,
|
25 |
+
"mamba_hidden_act": "silu",
|
26 |
+
"mamba_num_groups": 8,
|
27 |
+
"mamba_num_heads": 128,
|
28 |
+
"mamba_proj_bias": false,
|
29 |
+
"mamba_state_dim": 128,
|
30 |
+
"max_position_embeddings": 131072,
|
31 |
+
"mlp_bias": false,
|
32 |
+
"mlp_hidden_act": "relu2",
|
33 |
+
"model_type": "nemotron_h",
|
34 |
+
"n_groups": 8,
|
35 |
+
"num_attention_heads": 40,
|
36 |
+
"num_hidden_layers": 56,
|
37 |
+
"num_key_value_heads": 8,
|
38 |
+
"num_logits_to_keep": 1,
|
39 |
+
"num_query_groups": 8,
|
40 |
+
"pad_token_id": 0,
|
41 |
+
"quantization_config": {
|
42 |
+
"config_groups": {
|
43 |
+
"group_0": {
|
44 |
+
"format": "pack-quantized",
|
45 |
+
"input_activations": null,
|
46 |
+
"output_activations": null,
|
47 |
+
"targets": [
|
48 |
+
"Linear"
|
49 |
+
],
|
50 |
+
"weights": {
|
51 |
+
"actorder": null,
|
52 |
+
"block_structure": null,
|
53 |
+
"dynamic": false,
|
54 |
+
"group_size": 32,
|
55 |
+
"num_bits": 8,
|
56 |
+
"observer": "mse",
|
57 |
+
"observer_kwargs": {},
|
58 |
+
"strategy": "group",
|
59 |
+
"symmetric": true,
|
60 |
+
"type": "int"
|
61 |
+
}
|
62 |
+
}
|
63 |
+
},
|
64 |
+
"format": "pack-quantized",
|
65 |
+
"global_compression_ratio": null,
|
66 |
+
"ignore": [
|
67 |
+
"lm_head"
|
68 |
+
],
|
69 |
+
"kv_cache_scheme": null,
|
70 |
+
"quant_method": "compressed-tensors",
|
71 |
+
"quantization_status": "compressed",
|
72 |
+
"sparsity_config": {},
|
73 |
+
"transform_config": {},
|
74 |
+
"version": "0.10.3.dev47+ge463fe6"
|
75 |
+
},
|
76 |
+
"rescale_prenorm_residual": true,
|
77 |
+
"residual_in_fp32": false,
|
78 |
+
"rms_norm_eps": 1e-05,
|
79 |
+
"sliding_window": null,
|
80 |
+
"ssm_state_size": 128,
|
81 |
+
"tie_word_embeddings": false,
|
82 |
+
"time_step_floor": 0.0001,
|
83 |
+
"time_step_limit": [
|
84 |
+
0.0,
|
85 |
+
Infinity
|
86 |
+
],
|
87 |
+
"time_step_max": 0.1,
|
88 |
+
"time_step_min": 0.001,
|
89 |
+
"time_step_rank": 256,
|
90 |
+
"transformers_version": "4.56.0.dev0",
|
91 |
+
"use_bias": false,
|
92 |
+
"use_cache": true,
|
93 |
+
"use_conv_bias": true,
|
94 |
+
"use_mamba_kernels": true,
|
95 |
+
"vocab_size": 131072
|
96 |
+
}
|
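Two details in this config are worth unpacking: `hybrid_override_pattern` encodes the per-layer block type (`M` Mamba2, `*` attention, `-` MLP, as documented in `configuration_nemotron_h.py` below), and `quantization_config` declares int8, group-size-32, pack-quantized Linear weights with `lm_head` left unquantized. The sketch below derives the layer-type counts and a rough per-token KV-cache footprint from the file; treating the KV cache as bfloat16 is an assumption, consistent with `kv_cache_scheme: null` and `dtype: bfloat16`.

```python
# Sketch: derive per-layer block types and a rough per-token KV-cache size from config.json.
# Assumption: the unquantized KV cache is kept in bfloat16 (2 bytes per element).
import json
from collections import Counter

with open("config.json") as f:
    cfg = json.load(f)

pattern = cfg["hybrid_override_pattern"]  # 'M' = Mamba2, '*' = attention, '-' = MLP
assert len(pattern) == cfg["num_hidden_layers"]

counts = Counter(pattern)
print({"mamba2": counts["M"], "attention": counts["*"], "mlp": counts["-"]})

# Per-token KV-cache bytes: only the '*' (attention) layers keep K/V tensors.
kv_bytes_per_token = (
    counts["*"] * 2                                   # K and V
    * cfg["num_key_value_heads"] * cfg["head_dim"]
    * 2                                               # bf16 bytes per element (assumption)
)
print(f"KV cache per token: {kv_bytes_per_token / 1024:.1f} KiB")

# Weight quantization settings: int8, symmetric, group_size=32, pack-quantized.
print(cfg["quantization_config"]["config_groups"]["group_0"]["weights"])
```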
configuration_nemotron_h.py
ADDED
@@ -0,0 +1,245 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved.
|
3 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
"""NemotronH model configuration"""
|
17 |
+
|
18 |
+
import re
|
19 |
+
|
20 |
+
from transformers.configuration_utils import PretrainedConfig
|
21 |
+
from transformers.utils import logging
|
22 |
+
|
23 |
+
|
24 |
+
logger = logging.get_logger(__name__)
|
25 |
+
|
26 |
+
|
27 |
+
class NemotronHConfig(PretrainedConfig):
|
28 |
+
r"""
|
29 |
+
This is the configuration class to store the configuration of a [`NemotronHModel`]. It is used to instantiate a
|
30 |
+
NemotronH model according to the specified arguments, defining the model architecture. Instantiating a configuration
|
31 |
+
with the defaults will yield a similar configuration to that of the NemotronH-v0.1 model.
|
32 |
+
|
33 |
+
[todo](todo)
|
34 |
+
|
35 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
36 |
+
documentation from [`PretrainedConfig`] for more information.
|
37 |
+
|
38 |
+
|
39 |
+
Args:
|
40 |
+
vocab_size (`int`, *optional*, defaults to 131072):
|
41 |
+
Vocabulary size of the NemotronH model. Defines the number of different tokens that can be represented by the
|
42 |
+
`inputs_ids` passed when calling [`NemotronHModel`]
|
43 |
+
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
44 |
+
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
|
45 |
+
model has an output word embedding layer.
|
46 |
+
hidden_size (`int`, *optional*, defaults to 4096):
|
47 |
+
Dimension of the hidden representations.
|
48 |
+
intermediate_size (`int`, *optional*, defaults to 21504):
|
49 |
+
Dimension of the MLP representations.
|
50 |
+
num_hidden_layers (`int`, *optional*, defaults to 52):
|
51 |
+
Number of hidden layers in the Transformer encoder.
|
52 |
+
hybrid_override_pattern (`str`, *optional*, defaults to `"M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M-"`):
|
53 |
+
The pattern of the hybrid model. The pattern is a string in which each character denotes the layer type: M: Mamba2, *: Attention, -: MLP
|
54 |
+
num_attention_heads (`int`, *optional*, defaults to 32):
|
55 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
56 |
+
attention_head_dim (`int`, *optional*, defaults to 128):
|
57 |
+
Dimension of each attention head.
|
58 |
+
num_key_value_heads (`int`, *optional*, defaults to 8):
|
59 |
+
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
60 |
+
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
61 |
+
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
|
62 |
+
mlp_hidden_act (`str`, *optional*, defaults to "relu2"):
|
63 |
+
The non-linear activation function in the MLP layers.
|
64 |
+
attention_bias (`bool`, *optional*, defaults to `False`):
|
65 |
+
Whether to use bias in attention layers.
|
66 |
+
mlp_bias (`bool`, *optional*, defaults to `False`):
|
67 |
+
Whether to use bias in MLP layers.
|
68 |
+
use_bias (`bool`, *optional*, defaults to `False`):
|
69 |
+
Whether to use bias in the model.
|
70 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
71 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
72 |
+
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
|
73 |
+
The epsilon used by the layer normalization layers.
|
74 |
+
residual_in_fp32 (`bool`, *optional*, defaults to `False`):
|
75 |
+
Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model.
|
76 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
77 |
+
Whether or not the model should return the last key/values attentions (not used by all models). Only
|
78 |
+
relevant if `config.is_decoder=True`.
|
79 |
+
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
|
80 |
+
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
|
81 |
+
integer value, only last `num_logits_to_keep` logits will be calculated.
|
82 |
+
pad_token_id (`int`, *optional*, defaults to 0):
|
83 |
+
The id of the padding token.
|
84 |
+
bos_token_id (`int`, *optional*, defaults to 1):
|
85 |
+
The id of the "beginning-of-sequence" token.
|
86 |
+
eos_token_id (`int`, *optional*, defaults to 2):
|
87 |
+
The id of the "end-of-sequence" token.
|
88 |
+
sliding_window (`int`, *optional*, defaults to None):
|
89 |
+
Sliding window attention window size.
|
90 |
+
max_position_embeddings (`int`, *optional*, defaults to 4096):
|
91 |
+
The maximum sequence length that this model might ever be used with.
|
92 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
93 |
+
The dropout ratio for the attention probabilities.
|
94 |
+
hidden_dropout (`float`, *optional*, defaults to 0.0):
|
95 |
+
The dropout ratio for the hidden states.
|
96 |
+
use_mamba_kernels (`bool`, *optional*, defaults to `True`):
|
97 |
+
Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and
|
98 |
+
`causal-conv1d` are installed, and the mamba modules are running on a CUDA device.
|
99 |
+
ssm_state_size (`int`, *optional*, defaults to 128):
|
100 |
+
The dimension of the mamba state space latents.
|
101 |
+
mamba_num_heads (`int`, *optional*, defaults to 128):
|
102 |
+
Number of heads in Mamba layers.
|
103 |
+
mamba_n_groups (`int`, *optional*, defaults to 8):
|
104 |
+
Number of groups in Mamba layers.
|
105 |
+
mamba_head_dim (`int`, *optional*, defaults to 64):
|
106 |
+
Dimension of each Mamba head.
|
107 |
+
mamba_d_conv (`int`, *optional*, defaults to 4):
|
108 |
+
The size of the mamba convolution kernel.
|
109 |
+
mamba_expand (`int`, *optional*, defaults to 2):
|
110 |
+
Expanding factor used to determine the mamba intermediate size.
|
111 |
+
mamba_hidden_act (`str`, *optional*, defaults to "silu"):
|
112 |
+
The non-linear activation function in the Mamba layers.
|
113 |
+
mamba_dt_min (`float`, *optional*, defaults to 0.001):
|
114 |
+
Minimum value for the time step in Mamba.
|
115 |
+
mamba_dt_max (`float`, *optional*, defaults to 0.1):
|
116 |
+
Maximum value for the time step in Mamba.
|
117 |
+
mamba_dt_limit (`tuple`, *optional*, defaults to (0.0, float("inf"))):
|
118 |
+
Limits for the time step in Mamba.
|
119 |
+
mamba_dt_init_floor (`float`, *optional*, defaults to 1e-4):
|
120 |
+
Floor value for time step initialization in Mamba.
|
121 |
+
mamba_conv_bias (`bool`, *optional*, defaults to `True`):
|
122 |
+
Whether to use bias in the convolution layer of the mamba mixer block.
|
123 |
+
mamba_proj_bias (`bool`, *optional*, defaults to `False`):
|
124 |
+
Whether to use bias in the input and output projections of the mamba mixer block.
|
125 |
+
mamba_chunk_size (`int`, *optional*, defaults to 256):
|
126 |
+
Size of chunks for Mamba processing.
|
127 |
+
rescale_prenorm_residual (`bool`, *optional*, defaults to `True`):
|
128 |
+
Whether to rescale the pre-normalization residual connections.
|
129 |
+
"""
|
130 |
+
|
131 |
+
model_type = "nemotron_h"
|
132 |
+
keys_to_ignore_at_inference = ["past_key_values"]
|
133 |
+
|
134 |
+
def __init__(
|
135 |
+
self,
|
136 |
+
vocab_size=131072,
|
137 |
+
tie_word_embeddings=False,
|
138 |
+
hidden_size=4096,
|
139 |
+
intermediate_size=21504,
|
140 |
+
num_hidden_layers=52,
|
141 |
+
hybrid_override_pattern="M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M-",
|
142 |
+
num_attention_heads=32,
|
143 |
+
#attention_head_dim=128,
|
144 |
+
head_dim=128,
|
145 |
+
num_key_value_heads=8, # nemo: num_query_groups
|
146 |
+
mlp_hidden_act="relu2",
|
147 |
+
attention_bias=False,
|
148 |
+
mlp_bias=False,
|
149 |
+
use_bias=False,
|
150 |
+
initializer_range=0.02, # nemo: init_method_std
|
151 |
+
layer_norm_epsilon=1e-5, # nemo: layernorm_epsilon
|
152 |
+
residual_in_fp32=False, # Megatron Core default value
|
153 |
+
use_cache=True,
|
154 |
+
num_logits_to_keep=1,
|
155 |
+
pad_token_id=0,
|
156 |
+
bos_token_id=1,
|
157 |
+
eos_token_id=2,
|
158 |
+
sliding_window=None,
|
159 |
+
max_position_embeddings=4096,
|
160 |
+
attention_dropout=0.0,
|
161 |
+
hidden_dropout=0.0, # * ADDED
|
162 |
+
use_mamba_kernels=True,
|
163 |
+
ssm_state_size=128, # mamba_state_size
|
164 |
+
mamba_num_heads=128,
|
165 |
+
mamba_n_groups=8, # nemo: mamba_ssm_ngroups = num_heads
|
166 |
+
mamba_head_dim=64,
|
167 |
+
mamba_d_conv=4,
|
168 |
+
mamba_expand=2,
|
169 |
+
mamba_hidden_act="silu",
|
170 |
+
mamba_dt_min=0.001,
|
171 |
+
mamba_dt_max=0.1,
|
172 |
+
mamba_dt_limit=(0.0, float("inf")),
|
173 |
+
mamba_dt_init_floor=1e-4,
|
174 |
+
mamba_conv_bias=True,
|
175 |
+
mamba_proj_bias=False,
|
176 |
+
mamba_chunk_size=256,
|
177 |
+
rescale_prenorm_residual=True,
|
178 |
+
**kwargs,
|
179 |
+
):
|
180 |
+
self.vocab_size = vocab_size
|
181 |
+
self.tie_word_embeddings = tie_word_embeddings
|
182 |
+
self.hidden_size = hidden_size
|
183 |
+
self.intermediate_size = intermediate_size
|
184 |
+
self.num_hidden_layers = num_hidden_layers
|
185 |
+
self.hybrid_override_pattern = hybrid_override_pattern
|
186 |
+
self.num_attention_heads = num_attention_heads
|
187 |
+
#self.attention_head_dim = attention_head_dim
|
188 |
+
self.head_dim = head_dim
|
189 |
+
self.sliding_window = sliding_window
|
190 |
+
self.max_position_embeddings = max_position_embeddings
|
191 |
+
self.attention_dropout = attention_dropout
|
192 |
+
self.hidden_dropout = hidden_dropout
|
193 |
+
|
194 |
+
# Validate hybrid_override_pattern
|
195 |
+
# M: Mamba2, *: Attention, -: MLP
|
196 |
+
assert len(self.hybrid_override_pattern) == self.num_hidden_layers, "hybrid_override_pattern must have the same length as num_hidden_layers"
|
197 |
+
assert re.match(r"^[M*-]+$", self.hybrid_override_pattern), "hybrid_override_pattern must only contain characters 'M', '*', or '-'"
|
198 |
+
|
199 |
+
# for backward compatibility
|
200 |
+
if num_key_value_heads is None:
|
201 |
+
num_key_value_heads = num_attention_heads
|
202 |
+
|
203 |
+
self.num_key_value_heads = num_key_value_heads
|
204 |
+
self.mlp_hidden_act = mlp_hidden_act
|
205 |
+
self.attention_bias = attention_bias
|
206 |
+
self.mlp_bias = mlp_bias
|
207 |
+
self.use_bias = use_bias
|
208 |
+
self.initializer_range = initializer_range
|
209 |
+
self.layer_norm_epsilon = layer_norm_epsilon
|
210 |
+
self.residual_in_fp32 = residual_in_fp32
|
211 |
+
|
212 |
+
self.use_cache = use_cache
|
213 |
+
self.num_logits_to_keep = num_logits_to_keep
|
214 |
+
|
215 |
+
self.use_mamba_kernels = use_mamba_kernels
|
216 |
+
self.n_groups = mamba_n_groups
|
217 |
+
self.mamba_head_dim = mamba_head_dim
|
218 |
+
self.ssm_state_size = ssm_state_size
|
219 |
+
self.mamba_num_heads = mamba_num_heads
|
220 |
+
self.conv_kernel = mamba_d_conv
|
221 |
+
self.expand = mamba_expand
|
222 |
+
self.mamba_hidden_act = mamba_hidden_act
|
223 |
+
self.time_step_min = mamba_dt_min
|
224 |
+
self.time_step_max = mamba_dt_max
|
225 |
+
self.time_step_limit = mamba_dt_limit
|
226 |
+
self.time_step_floor = mamba_dt_init_floor
|
227 |
+
self.use_conv_bias = mamba_conv_bias
|
228 |
+
self.mamba_proj_bias = mamba_proj_bias
|
229 |
+
self.chunk_size = mamba_chunk_size
|
230 |
+
self.rescale_prenorm_residual = rescale_prenorm_residual
|
231 |
+
|
232 |
+
super().__init__(
|
233 |
+
pad_token_id=pad_token_id,
|
234 |
+
bos_token_id=bos_token_id,
|
235 |
+
eos_token_id=eos_token_id,
|
236 |
+
tie_word_embeddings=tie_word_embeddings,
|
237 |
+
**kwargs,
|
238 |
+
)
|
239 |
+
|
240 |
+
@property
|
241 |
+
def layers_block_type(self):
|
242 |
+
return [
|
243 |
+
"mamba" if self.hybrid_override_pattern[i] == "M" else
|
244 |
+
"attention" if self.hybrid_override_pattern[i] == "*" else "mlp"
|
245 |
+
for i in range(self.num_hidden_layers)]
|
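The `layers_block_type` property turns the pattern string into a per-layer list. Below is a minimal sketch, assuming it is run from this repository's directory so the local module is importable; the constructor values are copied from the config.json above.

```python
# Sketch: instantiate NemotronHConfig with this checkpoint's layer pattern and read
# back the per-layer block types via the layers_block_type property.
from collections import Counter

from configuration_nemotron_h import NemotronHConfig  # local file in this repo

config = NemotronHConfig(
    hidden_size=4480,
    intermediate_size=15680,
    num_hidden_layers=56,
    num_attention_heads=40,
    hybrid_override_pattern="M-M-M-MM-M-M-M*-M-M-M*-M-M-M-M*-M-M-M-M*-M-MM-M-M-M-M-M-",
)

print(config.layers_block_type[:5])      # ['mamba', 'mlp', 'mamba', 'mlp', 'mamba']
print(Counter(config.layers_block_type))  # totals of mamba / attention / mlp layers
```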
generation_config.json
ADDED
@@ -0,0 +1,11 @@
1 |
+
{
|
2 |
+
"_from_model_config": true,
|
3 |
+
"bos_token_id": 1,
|
4 |
+
"eos_token_id": [
|
5 |
+
2,
|
6 |
+
11,
|
7 |
+
12
|
8 |
+
],
|
9 |
+
"pad_token_id": 0,
|
10 |
+
"transformers_version": "4.56.0.dev0"
|
11 |
+
}
|
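The list-valued `eos_token_id` means `generate()` stops on whichever of ids 2, 11, or 12 is produced first. A tiny sketch of the same defaults built in code (values copied from the file above):

```python
# Sketch: the generation defaults above, constructed in code. generate() accepts a
# list for eos_token_id and halts when any of the listed ids is emitted.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    bos_token_id=1,
    eos_token_id=[2, 11, 12],
    pad_token_id=0,
)
print(gen_config.eos_token_id)  # [2, 11, 12]
```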
model-00001-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f100f6613ed78055471bcb03fe85a5f311aedea4d8cdc5d4d96e5e517849652f
|
3 |
+
size 4971148368
|
model-00002-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d56a4ceeb23e97ac92a21735762b16bfd90e3edc0b0b3792a846c5c267c9fc80
|
3 |
+
size 4401323672
|
model-00003-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:03b8b2b40a4fca2c6b5443160d09e30fce3da835df4411df90dce102c2d0d443
|
3 |
+
size 1174405248
|
model.safetensors.index.json
ADDED
@@ -0,0 +1,589 @@
1 |
+
{
|
2 |
+
"metadata": {
|
3 |
+
"total_parameters": 3345499888,
|
4 |
+
"total_size": 10546811776
|
5 |
+
},
|
6 |
+
"weight_map": {
|
7 |
+
"backbone.embeddings.weight": "model-00001-of-00003.safetensors",
|
8 |
+
"backbone.layers.0.mixer.A_log": "model-00001-of-00003.safetensors",
|
9 |
+
"backbone.layers.0.mixer.D": "model-00001-of-00003.safetensors",
|
10 |
+
"backbone.layers.0.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
11 |
+
"backbone.layers.0.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
12 |
+
"backbone.layers.0.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
13 |
+
"backbone.layers.0.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
14 |
+
"backbone.layers.0.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
15 |
+
"backbone.layers.0.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
16 |
+
"backbone.layers.0.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
17 |
+
"backbone.layers.0.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
18 |
+
"backbone.layers.0.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
19 |
+
"backbone.layers.0.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
20 |
+
"backbone.layers.0.norm.weight": "model-00001-of-00003.safetensors",
|
21 |
+
"backbone.layers.1.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
22 |
+
"backbone.layers.1.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
23 |
+
"backbone.layers.1.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
24 |
+
"backbone.layers.1.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
25 |
+
"backbone.layers.1.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
26 |
+
"backbone.layers.1.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
27 |
+
"backbone.layers.1.norm.weight": "model-00001-of-00003.safetensors",
|
28 |
+
"backbone.layers.10.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
29 |
+
"backbone.layers.10.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
30 |
+
"backbone.layers.10.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
31 |
+
"backbone.layers.10.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
32 |
+
"backbone.layers.10.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
33 |
+
"backbone.layers.10.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
34 |
+
"backbone.layers.10.norm.weight": "model-00001-of-00003.safetensors",
|
35 |
+
"backbone.layers.11.mixer.A_log": "model-00001-of-00003.safetensors",
|
36 |
+
"backbone.layers.11.mixer.D": "model-00001-of-00003.safetensors",
|
37 |
+
"backbone.layers.11.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
38 |
+
"backbone.layers.11.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
39 |
+
"backbone.layers.11.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
40 |
+
"backbone.layers.11.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
41 |
+
"backbone.layers.11.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
42 |
+
"backbone.layers.11.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
43 |
+
"backbone.layers.11.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
44 |
+
"backbone.layers.11.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
45 |
+
"backbone.layers.11.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
46 |
+
"backbone.layers.11.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
47 |
+
"backbone.layers.11.norm.weight": "model-00001-of-00003.safetensors",
|
48 |
+
"backbone.layers.12.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
49 |
+
"backbone.layers.12.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
50 |
+
"backbone.layers.12.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
51 |
+
"backbone.layers.12.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
52 |
+
"backbone.layers.12.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
53 |
+
"backbone.layers.12.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
54 |
+
"backbone.layers.12.norm.weight": "model-00001-of-00003.safetensors",
|
55 |
+
"backbone.layers.13.mixer.A_log": "model-00001-of-00003.safetensors",
|
56 |
+
"backbone.layers.13.mixer.D": "model-00001-of-00003.safetensors",
|
57 |
+
"backbone.layers.13.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
58 |
+
"backbone.layers.13.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
59 |
+
"backbone.layers.13.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
60 |
+
"backbone.layers.13.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
61 |
+
"backbone.layers.13.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
62 |
+
"backbone.layers.13.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
63 |
+
"backbone.layers.13.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
64 |
+
"backbone.layers.13.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
65 |
+
"backbone.layers.13.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
66 |
+
"backbone.layers.13.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
67 |
+
"backbone.layers.13.norm.weight": "model-00001-of-00003.safetensors",
|
68 |
+
"backbone.layers.14.mixer.k_proj.weight_packed": "model-00001-of-00003.safetensors",
|
69 |
+
"backbone.layers.14.mixer.k_proj.weight_scale": "model-00001-of-00003.safetensors",
|
70 |
+
"backbone.layers.14.mixer.k_proj.weight_shape": "model-00001-of-00003.safetensors",
|
71 |
+
"backbone.layers.14.mixer.o_proj.weight_packed": "model-00001-of-00003.safetensors",
|
72 |
+
"backbone.layers.14.mixer.o_proj.weight_scale": "model-00001-of-00003.safetensors",
|
73 |
+
"backbone.layers.14.mixer.o_proj.weight_shape": "model-00001-of-00003.safetensors",
|
74 |
+
"backbone.layers.14.mixer.q_proj.weight_packed": "model-00001-of-00003.safetensors",
|
75 |
+
"backbone.layers.14.mixer.q_proj.weight_scale": "model-00001-of-00003.safetensors",
|
76 |
+
"backbone.layers.14.mixer.q_proj.weight_shape": "model-00001-of-00003.safetensors",
|
77 |
+
"backbone.layers.14.mixer.v_proj.weight_packed": "model-00001-of-00003.safetensors",
|
78 |
+
"backbone.layers.14.mixer.v_proj.weight_scale": "model-00001-of-00003.safetensors",
|
79 |
+
"backbone.layers.14.mixer.v_proj.weight_shape": "model-00001-of-00003.safetensors",
|
80 |
+
"backbone.layers.14.norm.weight": "model-00001-of-00003.safetensors",
|
81 |
+
"backbone.layers.15.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
82 |
+
"backbone.layers.15.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
83 |
+
"backbone.layers.15.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
84 |
+
"backbone.layers.15.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
85 |
+
"backbone.layers.15.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
86 |
+
"backbone.layers.15.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
87 |
+
"backbone.layers.15.norm.weight": "model-00001-of-00003.safetensors",
|
88 |
+
"backbone.layers.16.mixer.A_log": "model-00001-of-00003.safetensors",
|
89 |
+
"backbone.layers.16.mixer.D": "model-00001-of-00003.safetensors",
|
90 |
+
"backbone.layers.16.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
91 |
+
"backbone.layers.16.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
92 |
+
"backbone.layers.16.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
93 |
+
"backbone.layers.16.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
94 |
+
"backbone.layers.16.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
95 |
+
"backbone.layers.16.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
96 |
+
"backbone.layers.16.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
97 |
+
"backbone.layers.16.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
98 |
+
"backbone.layers.16.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
99 |
+
"backbone.layers.16.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
100 |
+
"backbone.layers.16.norm.weight": "model-00001-of-00003.safetensors",
|
101 |
+
"backbone.layers.17.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
102 |
+
"backbone.layers.17.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
103 |
+
"backbone.layers.17.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
104 |
+
"backbone.layers.17.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
105 |
+
"backbone.layers.17.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
106 |
+
"backbone.layers.17.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
107 |
+
"backbone.layers.17.norm.weight": "model-00001-of-00003.safetensors",
|
108 |
+
"backbone.layers.18.mixer.A_log": "model-00001-of-00003.safetensors",
|
109 |
+
"backbone.layers.18.mixer.D": "model-00001-of-00003.safetensors",
|
110 |
+
"backbone.layers.18.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
111 |
+
"backbone.layers.18.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
112 |
+
"backbone.layers.18.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
113 |
+
"backbone.layers.18.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
114 |
+
"backbone.layers.18.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
115 |
+
"backbone.layers.18.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
116 |
+
"backbone.layers.18.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
117 |
+
"backbone.layers.18.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
118 |
+
"backbone.layers.18.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
119 |
+
"backbone.layers.18.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
120 |
+
"backbone.layers.18.norm.weight": "model-00001-of-00003.safetensors",
|
121 |
+
"backbone.layers.19.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
122 |
+
"backbone.layers.19.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
123 |
+
"backbone.layers.19.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
124 |
+
"backbone.layers.19.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
125 |
+
"backbone.layers.19.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
126 |
+
"backbone.layers.19.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
127 |
+
"backbone.layers.19.norm.weight": "model-00001-of-00003.safetensors",
|
128 |
+
"backbone.layers.2.mixer.A_log": "model-00001-of-00003.safetensors",
|
129 |
+
"backbone.layers.2.mixer.D": "model-00001-of-00003.safetensors",
|
130 |
+
"backbone.layers.2.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
131 |
+
"backbone.layers.2.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
132 |
+
"backbone.layers.2.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
133 |
+
"backbone.layers.2.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
134 |
+
"backbone.layers.2.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
135 |
+
"backbone.layers.2.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
136 |
+
"backbone.layers.2.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
137 |
+
"backbone.layers.2.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
138 |
+
"backbone.layers.2.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
139 |
+
"backbone.layers.2.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
140 |
+
"backbone.layers.2.norm.weight": "model-00001-of-00003.safetensors",
|
141 |
+
"backbone.layers.20.mixer.A_log": "model-00001-of-00003.safetensors",
|
142 |
+
"backbone.layers.20.mixer.D": "model-00001-of-00003.safetensors",
|
143 |
+
"backbone.layers.20.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
144 |
+
"backbone.layers.20.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
145 |
+
"backbone.layers.20.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
146 |
+
"backbone.layers.20.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
147 |
+
"backbone.layers.20.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
148 |
+
"backbone.layers.20.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
149 |
+
"backbone.layers.20.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
150 |
+
"backbone.layers.20.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
151 |
+
"backbone.layers.20.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
152 |
+
"backbone.layers.20.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
153 |
+
"backbone.layers.20.norm.weight": "model-00001-of-00003.safetensors",
|
154 |
+
"backbone.layers.21.mixer.k_proj.weight_packed": "model-00001-of-00003.safetensors",
|
155 |
+
"backbone.layers.21.mixer.k_proj.weight_scale": "model-00001-of-00003.safetensors",
|
156 |
+
"backbone.layers.21.mixer.k_proj.weight_shape": "model-00001-of-00003.safetensors",
|
157 |
+
"backbone.layers.21.mixer.o_proj.weight_packed": "model-00001-of-00003.safetensors",
|
158 |
+
"backbone.layers.21.mixer.o_proj.weight_scale": "model-00001-of-00003.safetensors",
|
159 |
+
"backbone.layers.21.mixer.o_proj.weight_shape": "model-00001-of-00003.safetensors",
|
160 |
+
"backbone.layers.21.mixer.q_proj.weight_packed": "model-00001-of-00003.safetensors",
|
161 |
+
"backbone.layers.21.mixer.q_proj.weight_scale": "model-00001-of-00003.safetensors",
|
162 |
+
"backbone.layers.21.mixer.q_proj.weight_shape": "model-00001-of-00003.safetensors",
|
163 |
+
"backbone.layers.21.mixer.v_proj.weight_packed": "model-00001-of-00003.safetensors",
|
164 |
+
"backbone.layers.21.mixer.v_proj.weight_scale": "model-00001-of-00003.safetensors",
|
165 |
+
"backbone.layers.21.mixer.v_proj.weight_shape": "model-00001-of-00003.safetensors",
|
166 |
+
"backbone.layers.21.norm.weight": "model-00001-of-00003.safetensors",
|
167 |
+
"backbone.layers.22.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
168 |
+
"backbone.layers.22.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
169 |
+
"backbone.layers.22.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
170 |
+
"backbone.layers.22.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
171 |
+
"backbone.layers.22.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
172 |
+
"backbone.layers.22.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
173 |
+
"backbone.layers.22.norm.weight": "model-00001-of-00003.safetensors",
|
174 |
+
"backbone.layers.23.mixer.A_log": "model-00001-of-00003.safetensors",
|
175 |
+
"backbone.layers.23.mixer.D": "model-00001-of-00003.safetensors",
|
176 |
+
"backbone.layers.23.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
177 |
+
"backbone.layers.23.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
178 |
+
"backbone.layers.23.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
179 |
+
"backbone.layers.23.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
180 |
+
"backbone.layers.23.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
181 |
+
"backbone.layers.23.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
182 |
+
"backbone.layers.23.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
183 |
+
"backbone.layers.23.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
184 |
+
"backbone.layers.23.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
185 |
+
"backbone.layers.23.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
186 |
+
"backbone.layers.23.norm.weight": "model-00001-of-00003.safetensors",
|
187 |
+
"backbone.layers.24.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
188 |
+
"backbone.layers.24.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
189 |
+
"backbone.layers.24.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
190 |
+
"backbone.layers.24.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
191 |
+
"backbone.layers.24.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
192 |
+
"backbone.layers.24.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
193 |
+
"backbone.layers.24.norm.weight": "model-00001-of-00003.safetensors",
|
194 |
+
"backbone.layers.25.mixer.A_log": "model-00001-of-00003.safetensors",
|
195 |
+
"backbone.layers.25.mixer.D": "model-00001-of-00003.safetensors",
|
196 |
+
"backbone.layers.25.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
|
197 |
+
"backbone.layers.25.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
|
198 |
+
"backbone.layers.25.mixer.dt_bias": "model-00001-of-00003.safetensors",
|
199 |
+
"backbone.layers.25.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
|
200 |
+
"backbone.layers.25.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
|
201 |
+
"backbone.layers.25.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
|
202 |
+
"backbone.layers.25.mixer.norm.weight": "model-00001-of-00003.safetensors",
|
203 |
+
"backbone.layers.25.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
|
204 |
+
"backbone.layers.25.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
|
205 |
+
"backbone.layers.25.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
|
206 |
+
"backbone.layers.25.norm.weight": "model-00001-of-00003.safetensors",
|
207 |
+
"backbone.layers.26.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
|
208 |
+
"backbone.layers.26.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
|
209 |
+
"backbone.layers.26.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
|
210 |
+
"backbone.layers.26.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
|
211 |
+
"backbone.layers.26.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
|
212 |
+
"backbone.layers.26.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
213 |
+
"backbone.layers.26.norm.weight": "model-00001-of-00003.safetensors",
|
214 |
+
"backbone.layers.27.mixer.A_log": "model-00002-of-00003.safetensors",
|
215 |
+
"backbone.layers.27.mixer.D": "model-00002-of-00003.safetensors",
|
216 |
+
"backbone.layers.27.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
|
217 |
+
"backbone.layers.27.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
|
218 |
+
"backbone.layers.27.mixer.dt_bias": "model-00002-of-00003.safetensors",
|
219 |
+
"backbone.layers.27.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
|
220 |
+
"backbone.layers.27.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
|
221 |
+
"backbone.layers.27.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
|
222 |
+
"backbone.layers.27.mixer.norm.weight": "model-00002-of-00003.safetensors",
|
223 |
+
"backbone.layers.27.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
|
224 |
+
"backbone.layers.27.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
|
225 |
+
"backbone.layers.27.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
|
226 |
+
"backbone.layers.27.norm.weight": "model-00002-of-00003.safetensors",
|
227 |
+
"backbone.layers.28.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
|
228 |
+
"backbone.layers.28.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
|
229 |
+
"backbone.layers.28.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
|
230 |
+
"backbone.layers.28.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
|
231 |
+
"backbone.layers.28.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
|
232 |
+
"backbone.layers.28.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
|
233 |
+
"backbone.layers.28.norm.weight": "model-00002-of-00003.safetensors",
|
234 |
+
"backbone.layers.29.mixer.A_log": "model-00002-of-00003.safetensors",
|
235 |
+
"backbone.layers.29.mixer.D": "model-00002-of-00003.safetensors",
|
236 |
+
"backbone.layers.29.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
|
237 |
+
"backbone.layers.29.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
|
238 |
+
"backbone.layers.29.mixer.dt_bias": "model-00002-of-00003.safetensors",
|
239 |
+
"backbone.layers.29.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
|
240 |
+
"backbone.layers.29.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
|
241 |
+
"backbone.layers.29.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
|
242 |
+
"backbone.layers.29.mixer.norm.weight": "model-00002-of-00003.safetensors",
|
243 |
+
"backbone.layers.29.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
|
244 |
+
"backbone.layers.29.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
|
245 |
+
"backbone.layers.29.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
|
246 |
+
"backbone.layers.29.norm.weight": "model-00002-of-00003.safetensors",
|
247 |
+
"backbone.layers.3.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
|
248 |
+
"backbone.layers.3.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
|
249 |
+
"backbone.layers.3.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
|
250 |
+
"backbone.layers.3.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
|
251 |
+
"backbone.layers.3.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
|
252 |
+
"backbone.layers.3.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
|
253 |
+
"backbone.layers.3.norm.weight": "model-00001-of-00003.safetensors",
|
254 |
+
"backbone.layers.30.mixer.k_proj.weight_packed": "model-00002-of-00003.safetensors",
|
255 |
+
"backbone.layers.30.mixer.k_proj.weight_scale": "model-00002-of-00003.safetensors",
|
256 |
+
"backbone.layers.30.mixer.k_proj.weight_shape": "model-00002-of-00003.safetensors",
|
257 |
+
"backbone.layers.30.mixer.o_proj.weight_packed": "model-00002-of-00003.safetensors",
|
258 |
+
"backbone.layers.30.mixer.o_proj.weight_scale": "model-00002-of-00003.safetensors",
|
259 |
+
"backbone.layers.30.mixer.o_proj.weight_shape": "model-00002-of-00003.safetensors",
|
260 |
+
"backbone.layers.30.mixer.q_proj.weight_packed": "model-00002-of-00003.safetensors",
|
261 |
+
"backbone.layers.30.mixer.q_proj.weight_scale": "model-00002-of-00003.safetensors",
|
262 |
+
"backbone.layers.30.mixer.q_proj.weight_shape": "model-00002-of-00003.safetensors",
|
263 |
+
"backbone.layers.30.mixer.v_proj.weight_packed": "model-00002-of-00003.safetensors",
|
264 |
+
"backbone.layers.30.mixer.v_proj.weight_scale": "model-00002-of-00003.safetensors",
|
265 |
+
"backbone.layers.30.mixer.v_proj.weight_shape": "model-00002-of-00003.safetensors",
|
266 |
+
"backbone.layers.30.norm.weight": "model-00002-of-00003.safetensors",
|
267 |
+
"backbone.layers.31.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
|
268 |
+
"backbone.layers.31.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
|
269 |
+
"backbone.layers.31.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
|
270 |
+
"backbone.layers.31.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
|
271 |
+
"backbone.layers.31.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
|
272 |
+
"backbone.layers.31.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
|
273 |
+
"backbone.layers.31.norm.weight": "model-00002-of-00003.safetensors",
|
274 |
+
"backbone.layers.32.mixer.A_log": "model-00002-of-00003.safetensors",
|
275 |
+
"backbone.layers.32.mixer.D": "model-00002-of-00003.safetensors",
|
276 |
+
"backbone.layers.32.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
|
277 |
+
"backbone.layers.32.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
|
278 |
+
"backbone.layers.32.mixer.dt_bias": "model-00002-of-00003.safetensors",
|
279 |
+
"backbone.layers.32.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
|
280 |
+
"backbone.layers.32.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
|
281 |
+
"backbone.layers.32.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
|
282 |
+
"backbone.layers.32.mixer.norm.weight": "model-00002-of-00003.safetensors",
|
283 |
+
"backbone.layers.32.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
|
284 |
+
"backbone.layers.32.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
|
285 |
+
"backbone.layers.32.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
|
286 |
+
"backbone.layers.32.norm.weight": "model-00002-of-00003.safetensors",
|
287 |
+
"backbone.layers.33.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
|
288 |
+
"backbone.layers.33.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
|
289 |
+
"backbone.layers.33.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
|
290 |
+
"backbone.layers.33.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
|
291 |
+
"backbone.layers.33.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
|
292 |
+
"backbone.layers.33.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
|
293 |
+
"backbone.layers.33.norm.weight": "model-00002-of-00003.safetensors",
|
294 |
+
"backbone.layers.34.mixer.A_log": "model-00002-of-00003.safetensors",
|
295 |
+
"backbone.layers.34.mixer.D": "model-00002-of-00003.safetensors",
|
296 |
+
"backbone.layers.34.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
|
297 |
+
"backbone.layers.34.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
|
298 |
+
"backbone.layers.34.mixer.dt_bias": "model-00002-of-00003.safetensors",
|
299 |
+
"backbone.layers.34.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
|
300 |
+
"backbone.layers.34.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
|
301 |
+
"backbone.layers.34.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
|
302 |
+
"backbone.layers.34.mixer.norm.weight": "model-00002-of-00003.safetensors",
|
303 |
+
"backbone.layers.34.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
|
304 |
+
"backbone.layers.34.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
|
305 |
+
"backbone.layers.34.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
|
306 |
+
"backbone.layers.34.norm.weight": "model-00002-of-00003.safetensors",
|
307 |
+
"backbone.layers.35.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
|
308 |
+
"backbone.layers.35.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
|
309 |
+
"backbone.layers.35.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
|
310 |
+
"backbone.layers.35.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
|
311 |
+
"backbone.layers.35.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
|
312 |
+
"backbone.layers.35.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
|
313 |
+
"backbone.layers.35.norm.weight": "model-00002-of-00003.safetensors",
|
314 |
+
"backbone.layers.36.mixer.A_log": "model-00002-of-00003.safetensors",
|
315 |
+
"backbone.layers.36.mixer.D": "model-00002-of-00003.safetensors",
|
316 |
+
"backbone.layers.36.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
|
317 |
+
"backbone.layers.36.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
|
318 |
+
"backbone.layers.36.mixer.dt_bias": "model-00002-of-00003.safetensors",
|
319 |
+
"backbone.layers.36.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
|
320 |
+
"backbone.layers.36.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.36.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.36.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.36.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.36.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.36.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.36.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.37.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.37.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.37.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.37.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.37.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.37.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.37.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.38.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.38.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.k_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.k_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.k_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.o_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.o_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.o_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.q_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.q_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.q_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.v_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.v_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.39.mixer.v_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.39.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.4.mixer.A_log": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.D": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.dt_bias": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.4.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.4.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.40.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.40.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.40.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.40.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.40.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.40.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.40.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.41.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.41.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.42.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.42.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.42.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.42.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.42.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.42.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.42.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.43.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.43.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.44.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.44.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.45.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.45.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.45.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.45.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.45.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.45.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.45.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.46.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.46.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.47.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.47.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.47.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.47.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.47.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.47.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.47.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.48.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.48.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.49.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.49.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.49.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.49.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.49.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.49.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.49.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.5.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.5.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.5.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.5.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.5.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.5.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.5.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.50.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.50.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.50.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.51.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.51.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.51.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.51.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.51.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.51.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.51.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.52.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.52.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.53.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.53.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.53.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.53.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.53.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.53.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.53.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.A_log": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.D": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.dt_bias": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.in_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.in_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.in_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.out_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.out_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.54.mixer.out_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.54.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.55.mixer.down_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.55.mixer.down_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.55.mixer.down_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.55.mixer.up_proj.weight_packed": "model-00002-of-00003.safetensors",
"backbone.layers.55.mixer.up_proj.weight_scale": "model-00002-of-00003.safetensors",
"backbone.layers.55.mixer.up_proj.weight_shape": "model-00002-of-00003.safetensors",
"backbone.layers.55.norm.weight": "model-00002-of-00003.safetensors",
"backbone.layers.6.mixer.A_log": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.D": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.dt_bias": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.6.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.6.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.A_log": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.D": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.dt_bias": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.7.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.7.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.8.mixer.down_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.8.mixer.down_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.8.mixer.down_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.8.mixer.up_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.8.mixer.up_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.8.mixer.up_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.8.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.A_log": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.D": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.dt_bias": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.in_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.in_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.in_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.norm.weight": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.out_proj.weight_packed": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.out_proj.weight_scale": "model-00001-of-00003.safetensors",
"backbone.layers.9.mixer.out_proj.weight_shape": "model-00001-of-00003.safetensors",
"backbone.layers.9.norm.weight": "model-00001-of-00003.safetensors",
"backbone.norm_f.weight": "model-00002-of-00003.safetensors",
"lm_head.weight": "model-00003-of-00003.safetensors"
}
}
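The weight_map above ties every tensor name to the shard file that stores it, so a loader only has to open
the shards it actually needs. A minimal sketch of how such an index can be consumed (illustrative, assuming
the index and the three shard files are available in the current directory):

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "backbone.layers.36.mixer.norm.weight"
shard_file = index["weight_map"][name]  # e.g. "model-00002-of-00003.safetensors"
with safe_open(shard_file, framework="pt") as shard:
    tensor = shard.get_tensor(name)  # loads only this tensor from its shard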
modeling_nemotron_h.py
ADDED
@@ -0,0 +1,1638 @@
# coding=utf-8
# Copyright 2024 HuggingFace Inc. team.
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch NemotronH model."""

import math
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.cache_utils import DynamicCache  # we need __iter__ and __len__ of pkv
from transformers.generation import GenerationMixin
from transformers.modeling_attn_mask_utils import (
    AttentionMaskConverter,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
)
from transformers.utils.import_utils import (
    is_causal_conv1d_available,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    is_mamba_2_ssm_available,
)
from .configuration_nemotron_h import NemotronHConfig


logger = logging.get_logger(__name__)


# Copied from transformers.models.mamba.modeling_mamba2.modeling_mamba2.py with MAMBA2->NEMOTRONH,Mamba2->NemotronH
# For Mamba2 components Mamba2->NemotronHMamba2
if is_mamba_2_ssm_available():
    from mamba_ssm.ops.triton.selective_state_update import selective_state_update
    from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
else:
    mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined, selective_state_update = None, None, None

try:
    #from mamba_ssm.ops.triton.layernorm_gated import RMSNorm as RMSNormGated
    from mamba_ssm.ops.triton.layernorm_gated import rmsnorm_fn
except ImportError:
    raise ImportError("mamba-ssm is required by the Mamba model but cannot be imported")

if is_causal_conv1d_available():
    from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
else:
    causal_conv1d_update, causal_conv1d_fn = None, None

if is_flash_attn_2_available():
    from transformers.modeling_flash_attention_utils import _flash_attention_forward

is_fast_path_available = all(
    (
        selective_state_update,
        mamba_chunk_scan_combined,
        mamba_split_conv1d_scan_combined,
        causal_conv1d_fn,
        causal_conv1d_update,
    )
)


_CHECKPOINT_FOR_DOC = "nvidia/Nemotron-H-56B-Base-8K"
_CONFIG_FOR_DOC = "NemotronHConfig"


# Helper methods for segment sum computation


def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
    """
    Padding x tensor with `pad_size` on the seq_len dim (dim=1)

    Assumes that we only have tensors of either size 4 or 3
    """
    pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)

    return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)


def reshape_into_chunks(input_tensor, pad_size, chunk_size):
    """
    Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and
    simultaneously splitting it into chunk sequences.

    Assumes that we only have tensors of either size 4 or 3
    """
    # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
    input_tensor = pad_tensor_by_size(input_tensor, pad_size)

    if len(input_tensor.shape) == 3:
        # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
        return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
    else:
        # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size]
        return input_tensor.reshape(
            input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
        )


def segment_sum(input_tensor):
    """
    More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
    """
    chunk_size = input_tensor.size(-1)
    # 1. expand input tensor to have an additional dimension and repeat along that dimension
    # [..., chunk_size] -> [..., chunk_size, chunk_size]
    input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
    # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag
    mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
    input_tensor = input_tensor.masked_fill(~mask, 0)
    # 3. compute actual cumsum
    tensor_segsum = torch.cumsum(input_tensor, dim=-2)

    # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
    mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
    tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
    return tensor_segsum


def apply_mask_to_padding_states(hidden_states, attention_mask):
    """
    Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
    """
    if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
        dtype = hidden_states.dtype
        hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)

    return hidden_states

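# Worked example for segment_sum above (illustrative): for a 1-D input A = [a0, a1, a2] it returns
#     [[      0, -inf, -inf],
#      [     a1,    0, -inf],
#      [a1 + a2,   a2,    0]]
# i.e. entry (i, j) holds sum(A[j + 1 : i + 1]) for i >= j and -inf elsewhere, so exp(segment_sum(A))
# gives the causal decay factors used as the attention-like mask L in the chunked scan further below.
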
# Copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/jamba/modeling_jamba.py
class HybridMambaAttentionDynamicCache(DynamicCache):
    """
    A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
    (which has a constant shape regardless of seq_len).

    This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors, and the expected shape of each
    tensor depends on the layer type.
    For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
    while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
    For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
    while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
    and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
    """

    def __init__(self, config, batch_size, dtype=torch.float16, device=None):
        super().__init__()
        self.dtype = dtype
        self.hybrid_override_pattern = config.hybrid_override_pattern
        self.has_previous_state = False  # only used by mamba
        #intermediate_size = config.expand * config.hidden_size
        intermediate_size = config.mamba_num_heads * config.mamba_head_dim
        ssm_state_size = config.ssm_state_size
        conv_kernel_size = config.conv_kernel
        self.conv_states = []
        self.ssm_states = []
        self.transformer_layers = []
        for i in range(config.num_hidden_layers):
            if self.hybrid_override_pattern[i] == "M":
                # Mamba layer
                self.conv_states += [
                    torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype)
                ]
                self.ssm_states += [
                    torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype)
                ]
            else:
                # Attention or MLP layer
                self.conv_states += [torch.tensor([[]] * batch_size, device=device)]
                self.ssm_states += [torch.tensor([[]] * batch_size, device=device)]
                self.transformer_layers.append(i)

        self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
        self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Update the cache
        if self.key_cache[layer_idx].shape[-1] == 0:
            self.key_cache[layer_idx] = key_states
            self.value_cache[layer_idx] = value_states
        else:
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)

        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        for layer_idx in range(len(self.key_cache)):
            device = self.key_cache[layer_idx].device
            self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
            device = self.value_cache[layer_idx].device
            self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))

            device = self.conv_states[layer_idx].device
            self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
            device = self.ssm_states[layer_idx].device
            self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        # take any layer that contains cache and not empty tensor
        layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
        if len(self.key_cache) <= layer_idx:
            return 0
        return self.key_cache[layer_idx].shape[-2]

    def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
        raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.")

    @classmethod
    def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
        raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.")

    # Copied from modeling_mamba2.py
    def update_conv_state(
        self, layer_idx: int, new_conv_state: torch.Tensor, cache_init: bool = False
    ) -> torch.Tensor:
        if cache_init:
            self.conv_states[layer_idx] = new_conv_state.to(self.conv_states.device)
        else:
            self.conv_states[layer_idx] = self.conv_states[layer_idx].roll(shifts=-1, dims=-1)
            self.conv_states[layer_idx][:, :, -1] = new_conv_state[:, 0, :].to(self.conv_states.device)
        return self.conv_states[layer_idx]

    def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
        self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device)
        return self.ssm_states[layer_idx]

    def reset(self):
        self.conv_states.zero_()
        self.ssm_states.zero_()

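# Usage sketch for the cache above (illustrative, assuming a loaded NemotronH model and tokenized inputs):
#
#     cache = HybridMambaAttentionDynamicCache(model.config, batch_size=1, dtype=model.dtype, device=model.device)
#     output_ids = model.generate(**inputs, past_key_values=cache, max_new_tokens=32)
#
# Layers marked "M" in hybrid_override_pattern keep fixed-size conv/ssm states, so their decode cost does not
# grow with sequence length, while the remaining (attention/MLP) layers use the ordinary growing key/value cache.
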
class MambaRMSNormGated(torch.nn.Module):
    def __init__(self, hidden_size, group_size, eps=1e-5):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
        self.group_size = group_size

    # jan28b version
    def forward(self, hidden_states, gate=None):
        return rmsnorm_fn(x=hidden_states,
                          weight=self.weight,
                          bias=None,  # No bias
                          z=gate,
                          eps=self.variance_epsilon,
                          group_size=self.group_size,
                          norm_before_gate=False
                          )

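# Reference math for the fused rmsnorm_fn call above (illustrative, eager-mode sketch of the
# norm_before_gate=False path): the gate is applied first, then an RMS normalization per channel group.
#
#     x = hidden_states * torch.nn.functional.silu(gate)            # gating branch
#     x = x.view(*x.shape[:-1], -1, group_size)                     # split channels into groups
#     x = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)    # per-group RMS normalization
#     out = x.view(*hidden_states.shape) * weight                   # scale by the learned weight
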
class NemotronHMamba2Mixer(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)
    """

    def __init__(self, config: NemotronHConfig, layer_idx: int):
        super().__init__()
        self.num_heads = config.mamba_num_heads
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.ssm_state_size
        self.conv_kernel_size = config.conv_kernel
        self.intermediate_size = config.mamba_num_heads * config.mamba_head_dim
        self.layer_idx = layer_idx
        self.use_conv_bias = config.use_conv_bias
        self.activation = config.mamba_hidden_act
        self.act = ACT2FN[config.mamba_hidden_act]

        self.layer_norm_epsilon = config.layer_norm_epsilon

        self.n_groups = config.n_groups
        self.head_dim = config.mamba_head_dim
        self.chunk_size = config.chunk_size

        self.time_step_limit = config.time_step_limit
        self.time_step_min = config.time_step_min
        self.time_step_max = config.time_step_max

        self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
        self.conv1d = nn.Conv1d(
            in_channels=self.conv_dim,
            out_channels=self.conv_dim,
            bias=config.use_conv_bias,
            kernel_size=config.conv_kernel,
            groups=self.conv_dim,
            padding=config.conv_kernel - 1,
        )

        # projection of the input hidden states
        projection_size = self.intermediate_size + self.conv_dim + self.num_heads
        self.in_proj = nn.Linear(
            self.hidden_size,
            projection_size,
            bias=config.use_bias,
        )
        # selective projection used to make dt, B and C input dependent

        # time step projection (discretization)
        # instantiate once and copy inv_dt in init_weights of PretrainedModel
        self.dt_bias = nn.Parameter(torch.ones(self.num_heads))

        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
        A = torch.arange(1, self.num_heads + 1)
        self.A_log = nn.Parameter(torch.log(A))
        self.A_log._no_weight_decay = True
        self.norm = MambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon, group_size=self.intermediate_size // self.n_groups)
        self.D = nn.Parameter(torch.ones(self.num_heads))
        self.D._no_weight_decay = True

        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
        self.use_bias = config.use_bias

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
                " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
                " https://github.com/Dao-AILab/causal-conv1d"
            )

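    # Layout note (illustrative): in_proj emits projection_size = intermediate_size + conv_dim + num_heads
    # channels, which the forward paths below split as
    #     [d_mlp | d_mlp | gate (intermediate_size) | x, B, C for the conv (conv_dim) | dt (num_heads)]
    # Since conv_dim = intermediate_size + 2 * n_groups * ssm_state_size, the derived d_mlp is 0 for this
    # projection size, so the first two slices are empty and only gate, convolution input and dt remain.
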
    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        # 1. Gated MLP's linear projection
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        projected_states = self.in_proj(hidden_states)

        # Set up dimensions for reshapes later
        batch_size, seq_len, _ = hidden_states.shape
        groups_time_state_size = self.n_groups * self.ssm_state_size
        d_mlp = (
            projected_states.shape[-1]
            - 2 * self.intermediate_size
            - 2 * self.n_groups * self.ssm_state_size
            - self.num_heads
        ) // 2

        # Single step calculations via cache
        if cache_params is not None and cache_position is not None and cache_position[0] > 0:
            _, _, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
                [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
            )

            # 2. Convolution sequence transformation
            hidden_states_B_C = causal_conv1d_update(
                hidden_states_B_C,
                cache_params.conv_states[self.layer_idx],
                self.conv1d.weight.squeeze(1),
                self.conv1d.bias,
                self.activation,
            )

            hidden_states, B, C = torch.split(
                hidden_states_B_C,
                [self.intermediate_size, groups_time_state_size, groups_time_state_size],
                dim=-1,
            )

            # 3. SSM transformation
            A = -torch.exp(self.A_log.float())  # (nheads,)
            A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            dt = dt[:, :, None].expand(-1, -1, self.head_dim)
            dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
            D = self.D[:, None, ...].expand(-1, self.head_dim)
            B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
            C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
            hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
            hidden_states = selective_state_update(
                cache_params.ssm_states[self.layer_idx],
                hidden_states_reshaped,
                dt,
                A,
                B,
                C,
                D,
                z=None,
                dt_bias=dt_bias,
                dt_softplus=True,
            )
            hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
            hidden_states = self.norm(hidden_states, gate)

            # 4. Final linear projection
            out = self.out_proj(hidden_states)[:, None, ...]

        # Fused calculations or step by step if no initialized cache is found
        else:
            A = -torch.exp(self.A_log.float())  # (num_heads) or (intermediate_size, state_size)
            dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}

            # 2-4. Fused kernel for conv1d, SSM, and the final projection
            if self.training and cache_params is None:
                out = mamba_split_conv1d_scan_combined(
                    projected_states,
                    self.conv1d.weight.squeeze(1),
                    self.conv1d.bias,
                    self.dt_bias,
                    A,
                    D=self.D,
                    chunk_size=self.chunk_size,
                    seq_idx=None,  # was seq_idx
                    activation=self.activation,
                    rmsnorm_weight=self.norm.weight,
                    rmsnorm_eps=self.norm.variance_epsilon,
                    outproj_weight=self.out_proj.weight,
                    outproj_bias=self.out_proj.bias,
                    headdim=self.head_dim,
                    ngroups=self.n_groups,
                    norm_before_gate=False,
                    return_final_states=False,
                    **dt_limit_kwargs,
                )

            else:
                _, _, gate, hidden_states_B_C, dt = projected_states.split(
                    [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
                )

                # 2. Convolution sequence transformation
                # Init cache
                if cache_params is not None:
                    hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                    conv_states = nn.functional.pad(
                        hidden_states_B_C_transposed,
                        (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0),
                    )
                    cache_params.update_conv_state(
                        layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True
                    )

                if self.activation not in ["silu", "swish"]:
                    hidden_states_B_C = self.act(
                        self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)
                    )
                else:
                    hidden_states_B_C = causal_conv1d_fn(
                        x=hidden_states_B_C.transpose(1, 2),
                        weight=self.conv1d.weight.squeeze(1),
                        bias=self.conv1d.bias,
                        activation=self.activation,
                    ).transpose(1, 2)
                hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
                hidden_states, B, C = torch.split(
                    hidden_states_B_C,
                    [self.intermediate_size, groups_time_state_size, groups_time_state_size],
                    dim=-1,
                )

                # 3. SSM transformation
                scan_output, ssm_state = mamba_chunk_scan_combined(
                    hidden_states.view(batch_size, seq_len, -1, self.head_dim),
                    dt,
                    A,
                    B.view(batch_size, seq_len, self.n_groups, -1),
                    C.view(batch_size, seq_len, self.n_groups, -1),
                    chunk_size=self.chunk_size,
                    D=self.D,
                    z=None,
                    seq_idx=None,
                    return_final_states=True,
                    dt_bias=self.dt_bias,
                    dt_softplus=True,
                    **dt_limit_kwargs,
                )

                # Init cache
                if ssm_state is not None and cache_params is not None:
                    cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)

                scan_output = scan_output.view(batch_size, seq_len, -1)

                # Multiply "gate" branch and apply extra normalization layer
                scan_output = self.norm(scan_output, gate)

                # 4. Final linear projection
                out = self.out_proj(scan_output)
        return out

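    # Path summary (illustrative): cuda_kernels_forward above covers three regimes.
    #   * Training / no cache: a single fused mamba_split_conv1d_scan_combined call performs the conv,
    #     the chunked scan, the gated norm and out_proj in one kernel.
    #   * Prefill with a cache: causal_conv1d_fn + mamba_chunk_scan_combined process the whole prompt and
    #     the resulting conv/ssm states are written into the cache.
    #   * Single-token decode (cache_position[0] > 0): causal_conv1d_update + selective_state_update advance
    #     the cached states by one step.
    # torch_forward below is the pure-PyTorch fallback used when the fused kernels are not available.
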
    # fmt: off
    def torch_forward(self, input_states, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position:Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype

        # 1. Gated MLP's linear projection
        input_states = apply_mask_to_padding_states(input_states, attention_mask)
        projected_states = self.in_proj(input_states)
        d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size-self.num_heads) // 2
        _, _, gate, hidden_states_B_C, dt = projected_states.split(
            [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
        )

        # 2. Convolution sequence transformation
        if cache_params is not None and cache_position is not None and cache_position[0] > 0:
            cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False)

            # We need to guarantee that anything regarding the cache is on the same device
            conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)

            hidden_states_B_C = torch.sum(
                conv_states * self.conv1d.weight.squeeze(1), dim=-1
            )
            if self.use_conv_bias:
                hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
            hidden_states_B_C = self.act(hidden_states_B_C)
        else:
            # Init cache
            if cache_params is not None:
                hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                conv_states = nn.functional.pad(
                    hidden_states_B_C_transposed, (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
                )
                cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True)

            hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))

        hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
        hidden_states, B, C = torch.split(
            hidden_states_B_C,
            [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
            dim=-1
        )

        # 3. SSM transformation
        A = -torch.exp(self.A_log.float())  # [num_heads]
        if cache_params is not None and cache_position is not None and cache_position[0] > 0:
            # We need to guarantee that anything regarding the cache is on the same device
            cache_device = cache_params.ssm_states.device

            # Note: there is no need to pad parameter matrices here, as there is just one new token
            # for batched generation
            dt = dt[:, 0, :][:, None, ...]
            dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
            # [num_heads] -> [num_heads, head_dim]
            dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)

            dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            # [bsz, num_heads, head_dim, state_size]
            dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)

            # Discretize B
            # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
            # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
            B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
            B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
            B = B.reshape(batch_size, -1, B.shape[-1])
            # [bsz, num_heads, head_dim, state_size]
            dB = dt[..., None] * B[..., None, :]

            # Discretize x into dB
            # [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
            hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
            dBx = (dB * hidden_states[..., None]).to(device=cache_device)

            # State calculation
            cache_params.update_ssm_state(
                layer_idx=self.layer_idx,
                new_ssm_state=cache_params.ssm_states[self.layer_idx] * dA + dBx
            )

            # Subsequent output
            # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
            C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
            C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
            C = C.reshape(batch_size, -1, C.shape[-1])
            # [bsz, num_heads, head_dim]

            ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype)  # Shape: [b, h, d, n]
            # Reshape ssm_states to merge the first two dimensions
            ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)  # Shape: [b*h, d, n]
            C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)  # Shape: [b*h, n, 1]
            y = torch.bmm(ssm_states_reshaped, C_reshaped)
            y = y.view(batch_size, self.num_heads, self.head_dim)

            # D skip connection
            # [num_heads] -> [num_heads, head_dim]
            D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
            y = (y + hidden_states * D).to(y.dtype)

            # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
            y = y.reshape(batch_size, -1)[:, None, ...]
        else:
            # begin ssd naive implementation without einsums
            dt = nn.functional.softplus(dt + self.dt_bias)
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
            B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            B = B.repeat(1, 1, self.num_heads // self.n_groups, 1)
            C = C.repeat(1, 1, self.num_heads // self.n_groups, 1)
            pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size

            D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)

            # Discretize x and A
            hidden_states = hidden_states * dt[..., None]
            A = A.to(hidden_states.dtype) * dt

            # Rearrange into blocks/chunks
            hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]

            # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
            A = A.permute(0, 3, 1, 2)
            A_cumsum = torch.cumsum(A, dim=-1)

            # 1. Compute the output for each intra-chunk (diagonal blocks)
            # This is the analog of a causal mask
            L = torch.exp(segment_sum(A))

            # Contraction of C and B to get G (attention-weights like)
            G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]  # shape: (b, c, l, s, h, n)
            G = G_intermediate.sum(dim=-1)  # shape: (b, c, l, s, h)

            # Compute M, equivalent to applying attention mask to weights
            M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
            M = M_intermediate.sum(dim=-1)

            # Compute Y_diag (apply to values)
            Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)

            # 2. Compute the state for each intra-chunk
            # (right term of low-rank factorization of off-diagonal blocks; B terms)
            decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
            B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
            states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)

            # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
            # (middle term of factorization of off-diag blocks; A terms)
            if cache_params is not None and cache_position is not None and cache_position[0] > 0:
                previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
            else:
                previous_states = torch.zeros_like(states[:, :1])
            states = torch.cat([previous_states, states], dim=1)
            decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
            decay_chunk = decay_chunk.transpose(1, 3)
            new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
            states, ssm_state = new_states[:, :-1], new_states[:, -1]

            # 4. Compute state -> output conversion per chunk
            # (left term of low-rank factorization of off-diagonal blocks; C terms)
            state_decay_out = torch.exp(A_cumsum)
            C_times_states = (C[..., None, :] * states[:, :, None, ...])
|
680 |
+
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
|
681 |
+
Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
|
682 |
+
|
683 |
+
# Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
|
684 |
+
y = Y_diag + Y_off
|
685 |
+
# [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
|
686 |
+
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
|
687 |
+
|
688 |
+
y = y + D_residual
|
689 |
+
# Cutting off padded chunks
|
690 |
+
if pad_size > 0:
|
691 |
+
y = y[:, :seq_len, :, :]
|
692 |
+
y = y.reshape(batch_size, seq_len, -1)
|
693 |
+
|
694 |
+
# Init cache
|
695 |
+
if ssm_state is not None and cache_params is not None:
|
696 |
+
cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
|
697 |
+
|
698 |
+
scan_output = self.norm(y, gate)
|
699 |
+
|
700 |
+
# end ssd naive
|
701 |
+
|
702 |
+
# 4. Final linear projection
|
703 |
+
contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
|
704 |
+
return contextualized_states
|
705 |
+
# fmt: on
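# Illustration (not part of the model code): the chunked branch above builds its intra-chunk
# decay matrix from `segment_sum`, i.e. L = exp(segsum(A)) acts like a causal attention mask
# over each chunk. A minimal, self-contained sketch of that construction, assuming a 1D
# per-step log-decay vector `a` (names and shapes here are illustrative, not the module's
# own helpers):
#
#   def segsum_sketch(a):                                            # a: [chunk_size]
#       T = a.shape[-1]
#       x = a[..., None].expand(T, T)                                 # x[k, j] = a[k]
#       x = x.masked_fill(~torch.tril(torch.ones(T, T, dtype=torch.bool), -1), 0)
#       s = x.cumsum(-2)                                              # s[i, j] = a[j+1] + ... + a[i]
#       return s.masked_fill(~torch.tril(torch.ones(T, T, dtype=torch.bool)), float("-inf"))
#
#   L = torch.exp(segsum_sketch(torch.log(torch.rand(4))))
#   # L is lower triangular: ones on the diagonal, cumulative decays below, zeros above.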
|
706 |
+
|
707 |
+
def forward(
|
708 |
+
self,
|
709 |
+
hidden_states,
|
710 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
711 |
+
cache_position: Optional[torch.LongTensor] = None,
|
712 |
+
attention_mask: Optional[torch.Tensor] = None,
|
713 |
+
):
|
714 |
+
if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
|
715 |
+
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
|
716 |
+
dtype = hidden_states.dtype
|
717 |
+
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
|
718 |
+
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
|
719 |
+
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
|
720 |
+
|
721 |
+
return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
|
722 |
+
|
723 |
+
|
724 |
+
class NemotronHRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        NemotronHRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # Weights are in float32
        return (self.weight.to(torch.float32) * hidden_states).to(input_dtype)

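# Illustrative only (not used by the model): a quick numeric check of the RMS normalization
# above. The helper name below is invented for this sketch; it verifies that the normalized
# activations have root-mean-square close to 1 while the learned scale is still at its
# initial value of ones.
def _demo_rmsnorm_unit_rms(hidden_size: int = 8) -> None:
    norm = NemotronHRMSNorm(hidden_size)
    x = torch.randn(2, 3, hidden_size) * 5.0
    y = norm(x)
    rms = y.pow(2).mean(-1).sqrt()
    assert torch.allclose(rms, torch.ones_like(rms), atol=1e-3)
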
class NemotronHBlock(nn.Module):
|
742 |
+
def __init__(self, config, layer_idx):
|
743 |
+
super().__init__()
|
744 |
+
self.config = config
|
745 |
+
self.layer_idx = layer_idx
|
746 |
+
self.residual_in_fp32 = config.residual_in_fp32
|
747 |
+
self.norm = NemotronHRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
|
748 |
+
|
749 |
+
# M: Mamba2, *: Attention, -: MLP
|
750 |
+
self.block_type = config.layers_block_type[layer_idx]
|
751 |
+
if self.block_type == "mamba":
|
752 |
+
self.mixer = NemotronHMamba2Mixer(config, layer_idx=layer_idx)
|
753 |
+
elif self.block_type == "attention":
|
754 |
+
self.mixer = NEMOTRONH_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
|
755 |
+
elif self.block_type == "mlp":
|
756 |
+
self.mixer = NemotronHMLP(config, layer_idx=layer_idx)
|
757 |
+
else:
|
758 |
+
raise ValueError(f"Invalid layer pattern {config.hybrid_override_pattern[layer_idx]}")
|
759 |
+
|
760 |
+
def forward(
|
761 |
+
self,
|
762 |
+
hidden_states,
|
763 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
764 |
+
cache_position: Optional[torch.LongTensor] = None,
|
765 |
+
attention_mask: Optional[torch.Tensor] = None,
|
766 |
+
):
|
767 |
+
with torch.cuda.stream(torch.cuda.default_stream(hidden_states.device)):
|
768 |
+
# * Use torch.cuda.stream() to avoid NaN issues when using multiple GPUs
|
769 |
+
residual = hidden_states
|
770 |
+
hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
|
771 |
+
if self.residual_in_fp32:
|
772 |
+
residual = residual.to(torch.float32)
|
773 |
+
|
774 |
+
if self.block_type == "mamba":
|
775 |
+
hidden_states = self.mixer(
|
776 |
+
hidden_states, cache_params=cache_params, cache_position=cache_position
|
777 |
+
)
|
778 |
+
elif self.block_type == "attention":
|
779 |
+
hidden_states = self.mixer(
|
780 |
+
hidden_states, cache_position=cache_position
|
781 |
+
)
|
782 |
+
hidden_states = hidden_states[0]
|
783 |
+
elif self.block_type == "mlp":
|
784 |
+
hidden_states = self.mixer(
|
785 |
+
hidden_states
|
786 |
+
)
|
787 |
+
else:
|
788 |
+
raise ValueError(f"Invalid block_type: {self.block_type}")
|
789 |
+
|
790 |
+
hidden_states = residual + hidden_states
|
791 |
+
return hidden_states
|
792 |
+
|
793 |
+
|
794 |
+
# Copied from transformers.models.nemotron.modeling_nemotron Nemotron->NemotronH
class NemotronHMLP(nn.Module):
    def __init__(self, config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )
        self.hidden_size = config.hidden_size
        # intermediate_size = config.expand * config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.mlp_hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.up_proj(x)))
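
# Note (descriptive, not from the original source): unlike the gated SwiGLU-style MLPs of
# Llama-family models, this block is a plain two-projection MLP: up_proj -> activation
# (config.mlp_hidden_act) -> down_proj, with optional biases controlled by config.mlp_bias.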
|
815 |
+
|
816 |
+
|
817 |
+
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
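
# Illustrative only (not part of the model): the docstring above claims equivalence with
# torch.repeat_interleave along the head dimension; this small self-check, with invented
# shapes, makes that concrete.
def _demo_repeat_kv_equivalence() -> None:
    kv = torch.randn(2, 4, 5, 8)  # [batch, num_key_value_heads, seq_len, head_dim]
    assert torch.equal(repeat_kv(kv, 3), torch.repeat_interleave(kv, repeats=3, dim=1))
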
|
828 |
+
|
829 |
+
|
830 |
+
class NemotronHAttention(nn.Module):
|
831 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
832 |
+
|
833 |
+
def __init__(self, config: NemotronHConfig, layer_idx: Optional[int] = None):
|
834 |
+
super().__init__()
|
835 |
+
self.config = config
|
836 |
+
self.layer_idx = layer_idx
|
837 |
+
if layer_idx is None:
|
838 |
+
logger.warning_once(
|
839 |
+
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
|
840 |
+
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
|
841 |
+
"when creating this class."
|
842 |
+
)
|
843 |
+
|
844 |
+
self.attention_dropout = config.attention_dropout
|
845 |
+
self.hidden_size = config.hidden_size
|
846 |
+
self.num_heads = config.num_attention_heads
|
847 |
+
if config.head_dim is not None:
|
848 |
+
self.head_dim = config.head_dim
|
849 |
+
else:
|
850 |
+
self.head_dim = config.hidden_size // config.num_attention_heads
|
851 |
+
self.num_key_value_heads = config.num_key_value_heads
|
852 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
853 |
+
self.max_position_embeddings = config.max_position_embeddings
|
854 |
+
self.is_causal = True
|
855 |
+
|
856 |
+
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
|
857 |
+
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
|
858 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
|
859 |
+
self.o_proj = nn.Linear(self.head_dim * self.num_heads, self.hidden_size, bias=config.attention_bias)
|
860 |
+
|
861 |
+
def forward(
|
862 |
+
self,
|
863 |
+
hidden_states: torch.Tensor,
|
864 |
+
# position_embeddings: Tuple[torch.Tensor, torch.Tensor], #TODO
|
865 |
+
attention_mask: Optional[torch.Tensor] = None,
|
866 |
+
position_ids: Optional[torch.LongTensor] = None,
|
867 |
+
past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
|
868 |
+
output_attentions: bool = False,
|
869 |
+
use_cache: bool = False,
|
870 |
+
cache_position: Optional[torch.LongTensor] = None,
|
871 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
872 |
+
bsz, q_len, _ = hidden_states.size()
|
873 |
+
|
874 |
+
query_states = self.q_proj(hidden_states)
|
875 |
+
key_states = self.k_proj(hidden_states)
|
876 |
+
value_states = self.v_proj(hidden_states)
|
877 |
+
|
878 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
879 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
880 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
881 |
+
|
882 |
+
if past_key_value is not None:
|
883 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
|
884 |
+
|
885 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
886 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
887 |
+
|
888 |
+
causal_mask = attention_mask
|
889 |
+
if attention_mask is not None: # no matter the length, we just slice it
|
890 |
+
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
891 |
+
|
892 |
+
if query_states.device.type == "cuda" and attention_mask is not None:
|
893 |
+
query_states = query_states.contiguous()
|
894 |
+
key_states = key_states.contiguous()
|
895 |
+
value_states = value_states.contiguous()
|
896 |
+
|
897 |
+
is_causal = True if causal_mask is None and q_len > 1 else False
|
898 |
+
|
899 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
900 |
+
query_states,
|
901 |
+
key_states,
|
902 |
+
value_states,
|
903 |
+
attn_mask=causal_mask,
|
904 |
+
dropout_p=self.attention_dropout if self.training else 0.0,
|
905 |
+
is_causal=is_causal,
|
906 |
+
)
|
907 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
908 |
+
#attn_output = attn_output.view(bsz, q_len, self.hidden_size)
|
909 |
+
attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
|
910 |
+
|
911 |
+
attn_output = self.o_proj(attn_output)
|
912 |
+
|
913 |
+
return attn_output, None, past_key_value
|
914 |
+
|
915 |
+
|
916 |
+
# Adapted from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Jamba
|
917 |
+
#class JambaFlashAttention2(JambaAttention):
|
918 |
+
class NemotronHFlashAttention2(NemotronHAttention):
|
919 |
+
"""
|
920 |
+
Jamba flash attention module. This module inherits from `JambaAttention` as the weights of the module stays
|
921 |
+
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
|
922 |
+
flash attention and deal with padding tokens in case the input contains any of them.
|
923 |
+
"""
|
924 |
+
def __init__(self, *args, **kwargs):
|
925 |
+
super().__init__(*args, **kwargs)
|
926 |
+
|
927 |
+
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
|
928 |
+
# flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
|
929 |
+
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
|
930 |
+
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
|
931 |
+
|
932 |
+
def forward(
|
933 |
+
self,
|
934 |
+
hidden_states: torch.Tensor,
|
935 |
+
attention_mask: Optional[torch.Tensor] = None,
|
936 |
+
position_ids: Optional[torch.LongTensor] = None,
|
937 |
+
past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
|
938 |
+
output_attentions: bool = False,
|
939 |
+
use_cache: bool = False,
|
940 |
+
cache_position: Optional[torch.LongTensor] = None,
|
941 |
+
**kwargs,
|
942 |
+
):
|
943 |
+
bsz, q_len, _ = hidden_states.size()
|
944 |
+
|
945 |
+
query_states = self.q_proj(hidden_states)
|
946 |
+
key_states = self.k_proj(hidden_states)
|
947 |
+
value_states = self.v_proj(hidden_states)
|
948 |
+
|
949 |
+
# Flash attention requires the input to have the shape
|
950 |
+
# batch_size x seq_length x head_dim x hidden_dim
|
951 |
+
# therefore we just need to keep the original shape
|
952 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
|
953 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
954 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
955 |
+
|
956 |
+
if past_key_value is not None:
|
957 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
|
958 |
+
|
959 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
960 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
961 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
962 |
+
dropout_rate = 0.0 if not self.training else self.attention_dropout
|
963 |
+
|
964 |
+
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
|
965 |
+
# therefore the input hidden states gets silently casted in float32. Hence, we need
|
966 |
+
# cast them back in float16 just to be sure everything works as expected.
|
967 |
+
input_dtype = query_states.dtype
|
968 |
+
if input_dtype == torch.float32:
|
969 |
+
if torch.is_autocast_enabled():
|
970 |
+
target_dtype = torch.get_autocast_gpu_dtype()
|
971 |
+
# Handle the case where the model is quantized
|
972 |
+
elif hasattr(self.config, "_pre_quantization_dtype"):
|
973 |
+
target_dtype = self.config._pre_quantization_dtype
|
974 |
+
else:
|
975 |
+
target_dtype = self.q_proj.weight.dtype
|
976 |
+
|
977 |
+
logger.warning_once(
|
978 |
+
f"The input hidden states seems to be silently casted in float32, this might be related to"
|
979 |
+
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
980 |
+
f" {target_dtype}."
|
981 |
+
)
|
982 |
+
|
983 |
+
query_states = query_states.to(target_dtype)
|
984 |
+
key_states = key_states.to(target_dtype)
|
985 |
+
value_states = value_states.to(target_dtype)
|
986 |
+
|
987 |
+
# Reshape to the expected shape for Flash Attention
|
988 |
+
key_states = key_states.transpose(1, 2)
|
989 |
+
value_states = value_states.transpose(1, 2)
|
990 |
+
|
991 |
+
attn_output = _flash_attention_forward(
|
992 |
+
query_states,
|
993 |
+
key_states,
|
994 |
+
value_states,
|
995 |
+
attention_mask,
|
996 |
+
q_len,
|
997 |
+
dropout=dropout_rate,
|
998 |
+
sliding_window=getattr(self.config, "sliding_window", None),
|
999 |
+
is_causal=self.is_causal,
|
1000 |
+
use_top_left_mask=self._flash_attn_uses_top_left_mask,
|
1001 |
+
)
|
1002 |
+
|
1003 |
+
#attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
|
1004 |
+
attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous()
|
1005 |
+
attn_output = self.o_proj(attn_output)
|
1006 |
+
|
1007 |
+
if not output_attentions:
|
1008 |
+
attn_weights = None
|
1009 |
+
|
1010 |
+
return attn_output, attn_weights, past_key_value
|
1011 |
+
|
1012 |
+
|
1013 |
+
# Adapted from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Jamba
|
1014 |
+
#class JambaSdpaAttention(JambaAttention):
|
1015 |
+
class NemotronHSdpaAttention(NemotronHAttention):
|
1016 |
+
"""
|
1017 |
+
Jamba attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
|
1018 |
+
`JambaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
|
1019 |
+
SDPA API.
|
1020 |
+
"""
|
1021 |
+
|
1022 |
+
# Adapted from NemotronHAttention.forward
|
1023 |
+
def forward(
|
1024 |
+
self,
|
1025 |
+
hidden_states: torch.Tensor,
|
1026 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1027 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1028 |
+
past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
|
1029 |
+
output_attentions: bool = False,
|
1030 |
+
use_cache: bool = False,
|
1031 |
+
cache_position: Optional[torch.LongTensor] = None,
|
1032 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
1033 |
+
if output_attentions:
|
1034 |
+
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
|
1035 |
+
logger.warning_once(
|
1036 |
+
"NemotronHModel is using NemotronHSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
|
1037 |
+
'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
1038 |
+
)
|
1039 |
+
return super().forward(
|
1040 |
+
hidden_states=hidden_states,
|
1041 |
+
attention_mask=attention_mask,
|
1042 |
+
position_ids=position_ids,
|
1043 |
+
past_key_value=past_key_value,
|
1044 |
+
output_attentions=output_attentions,
|
1045 |
+
use_cache=use_cache,
|
1046 |
+
)
|
1047 |
+
|
1048 |
+
bsz, q_len, _ = hidden_states.size()
|
1049 |
+
|
1050 |
+
query_states = self.q_proj(hidden_states)
|
1051 |
+
key_states = self.k_proj(hidden_states)
|
1052 |
+
value_states = self.v_proj(hidden_states)
|
1053 |
+
|
1054 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
1055 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
1056 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
1057 |
+
|
1058 |
+
if past_key_value is not None:
|
1059 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
|
1060 |
+
|
1061 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
1062 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
1063 |
+
|
1064 |
+
causal_mask = attention_mask
|
1065 |
+
if attention_mask is not None:
|
1066 |
+
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
|
1067 |
+
|
1068 |
+
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
|
1069 |
+
# Reference: https://github.com/pytorch/pytorch/issues/112577.
|
1070 |
+
if query_states.device.type == "cuda" and attention_mask is not None:
|
1071 |
+
query_states = query_states.contiguous()
|
1072 |
+
key_states = key_states.contiguous()
|
1073 |
+
value_states = value_states.contiguous()
|
1074 |
+
|
1075 |
+
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
|
1076 |
+
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
|
1077 |
+
# The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
|
1078 |
+
is_causal = True if self.is_causal and causal_mask is None and q_len > 1 else False
|
1079 |
+
|
1080 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
1081 |
+
query_states,
|
1082 |
+
key_states,
|
1083 |
+
value_states,
|
1084 |
+
attn_mask=causal_mask,
|
1085 |
+
dropout_p=self.attention_dropout if self.training else 0.0,
|
1086 |
+
is_causal=is_causal,
|
1087 |
+
)
|
1088 |
+
|
1089 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
1090 |
+
attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)  # match o_proj input when head_dim * num_heads != hidden_size
|
1091 |
+
|
1092 |
+
attn_output = self.o_proj(attn_output)
|
1093 |
+
|
1094 |
+
return attn_output, None, past_key_value
|
1095 |
+
|
1096 |
+
|
1097 |
+
NEMOTRONH_ATTENTION_CLASSES = {
    "eager": NemotronHAttention,
    "flash_attention_2": NemotronHFlashAttention2,
    "sdpa": NemotronHSdpaAttention,
}
|
1102 |
+
|
1103 |
+
# Copied from transformers.models.mamba.modeling_mamba2.Mamba2PreTrainedModel
|
1104 |
+
class NemotronHPreTrainedModel(PreTrainedModel):
|
1105 |
+
"""
|
1106 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
1107 |
+
models.
|
1108 |
+
"""
|
1109 |
+
|
1110 |
+
config_class = NemotronHConfig
|
1111 |
+
base_model_prefix = "backbone"
|
1112 |
+
_no_split_modules = ["NemotronHBlock"]
|
1113 |
+
supports_gradient_checkpointing = True
|
1114 |
+
_is_stateful = True
|
1115 |
+
|
1116 |
+
def _init_weights(self, module):
|
1117 |
+
"""Initialize the weights."""
|
1118 |
+
if isinstance(module, NemotronHMamba2Mixer):
|
1119 |
+
module.A_log._no_weight_decay = True
|
1120 |
+
module.D._no_weight_decay = True
|
1121 |
+
|
1122 |
+
dt = torch.exp(
|
1123 |
+
torch.rand(self.config.mamba_num_heads)
|
1124 |
+
* (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
|
1125 |
+
+ math.log(self.config.time_step_min)
|
1126 |
+
).clamp(min=self.config.time_step_floor)
|
1127 |
+
|
1128 |
+
# # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
|
1129 |
+
inv_dt = dt + torch.log(-torch.expm1(-dt))
|
1130 |
+
with torch.no_grad():
|
1131 |
+
module.dt_bias.copy_(inv_dt)
|
1132 |
+
module.dt_bias._no_reinit = True
|
1133 |
+
|
1134 |
+
if isinstance(module, nn.Linear):
|
1135 |
+
if module.bias is not None:
|
1136 |
+
if not getattr(module.bias, "_no_reinit", False):
|
1137 |
+
nn.init.zeros_(module.bias)
|
1138 |
+
elif isinstance(module, nn.Embedding):
|
1139 |
+
nn.init.normal_(module.weight, std=self.config.initializer_range)
|
1140 |
+
|
1141 |
+
# TODO: Check
|
1142 |
+
if self.config.rescale_prenorm_residual:
|
1143 |
+
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
|
1144 |
+
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
|
1145 |
+
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
|
1146 |
+
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
|
1147 |
+
#
|
1148 |
+
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
|
1149 |
+
for name, p in module.named_parameters():
|
1150 |
+
if name in ["out_proj.weight"]:
|
1151 |
+
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
|
1152 |
+
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
|
1153 |
+
# We need to reinit p since this code could be called multiple times
|
1154 |
+
# Having just p *= scale would repeatedly scale it down
|
1155 |
+
nn.init.kaiming_uniform_(p, a=math.sqrt(5))
|
1156 |
+
with torch.no_grad():
|
1157 |
+
p /= math.sqrt(self.config.num_hidden_layers)
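# Sanity note (illustrative, not executed by the model): `inv_dt = dt + log(-expm1(-dt))`
# above is the inverse of softplus, so softplus(dt_bias) recovers the sampled dt at init:
#
#   dt = torch.exp(torch.rand(8) * (math.log(0.1) - math.log(0.001)) + math.log(0.001))
#   inv_dt = dt + torch.log(-torch.expm1(-dt))
#   assert torch.allclose(torch.nn.functional.softplus(inv_dt), dt, atol=1e-6)
#
# The bounds 0.001 and 0.1 in this sketch are placeholders for config.time_step_min /
# config.time_step_max.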
|
1158 |
+
|
1159 |
+
|
1160 |
+
@dataclass
|
1161 |
+
# Copied from transformers.models.mamba.modeling_mamba2.Mamba2Output with MAMBA2->NemotronH,Mamba2->NemotronH
|
1162 |
+
class NemotronHOutput(ModelOutput):
|
1163 |
+
"""
|
1164 |
+
Class for the NemotronH model outputs.
|
1165 |
+
|
1166 |
+
Args:
|
1167 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
1168 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
1169 |
+
cache_params (`HybridMambaAttentionDynamicCache`):
|
1170 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
1171 |
+
avoid providing the old `input_ids`.
|
1172 |
+
|
1173 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
1174 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
1175 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
1176 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
1177 |
+
|
1178 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
1179 |
+
"""
|
1180 |
+
|
1181 |
+
last_hidden_state: Optional[torch.FloatTensor] = None
|
1182 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None
|
1183 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
1184 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
1185 |
+
|
1186 |
+
|
1187 |
+
@dataclass
|
1188 |
+
# Copied from transformers.models.mamba2.modeling_mamba2.MambaCausalLMOutput with Mamba2->NemotronH
|
1189 |
+
class NemotronHCausalLMOutput(ModelOutput):
|
1190 |
+
"""
|
1191 |
+
Base class for causal language model (or autoregressive) outputs.
|
1192 |
+
|
1193 |
+
Args:
|
1194 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
1195 |
+
Language modeling loss (for next-token prediction).
|
1196 |
+
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
1197 |
+
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
1198 |
+
cache_params (`HybridMambaAttentionDynamicCache`):
|
1199 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
1200 |
+
avoid providing the old `input_ids`.
|
1201 |
+
|
1202 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
1203 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
1204 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
1205 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
1206 |
+
|
1207 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
1208 |
+
"""
|
1209 |
+
|
1210 |
+
loss: Optional[torch.FloatTensor] = None
|
1211 |
+
logits: Optional[torch.FloatTensor] = None
|
1212 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None
|
1213 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
1214 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
1215 |
+
|
1216 |
+
|
1217 |
+
NEMOTRONH_START_DOCSTRING = r"""
|
1218 |
+
|
1219 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
1220 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
1221 |
+
etc.)
|
1222 |
+
|
1223 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
1224 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
1225 |
+
and behavior.
|
1226 |
+
|
1227 |
+
Parameters:
|
1228 |
+
config ([`NemotronHConfig`]): Model configuration class with all the parameters of the model.
|
1229 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
1230 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
1231 |
+
"""
|
1232 |
+
|
1233 |
+
NEMOTRONH_INPUTS_DOCSTRING = r"""
|
1234 |
+
Args:
|
1235 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
|
1236 |
+
Indices of input sequence tokens in the vocabulary.
|
1237 |
+
|
1238 |
+
If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as
|
1239 |
+
`input_ids`.
|
1240 |
+
|
1241 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
1242 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
1243 |
+
|
1244 |
+
[What are input IDs?](../glossary#input-ids)
|
1245 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
1246 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
1247 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
1248 |
+
model's internal embedding lookup matrix.
|
1249 |
+
position_ids (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1250 |
+
Indices of positions of each input sequence tokens in the position embeddings.
|
1251 |
+
cache_params (`HybridMambaAttentionDynamicCache`, *optional*):
|
1252 |
+
If passed along, the model uses the previous state in all the blocks (which will give the output for the
|
1253 |
+
`input_ids` provided as if the model add `state_input_ids + input_ids` as context).
|
1254 |
+
use_cache (`bool`, *optional*):
|
1255 |
+
If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
|
1256 |
+
output_attentions (`bool`, *optional*):
|
1257 |
+
Whether or not to return the attentions tensors of all attention layers.
|
1258 |
+
output_hidden_states (`bool`, *optional*):
|
1259 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
1260 |
+
more detail.
|
1261 |
+
return_dict (`bool`, *optional*):
|
1262 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
1263 |
+
cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1264 |
+
The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
|
1265 |
+
If `cache_params` is passed, `cache_position` should also be passed.
|
1266 |
+
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1267 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
1268 |
+
|
1269 |
+
- 1 for tokens that are **not masked**,
|
1270 |
+
- 0 for tokens that are **masked**.
|
1271 |
+
|
1272 |
+
[What are attention masks?](../glossary#attention-mask)
|
1273 |
+
"""
|
1274 |
+
|
1275 |
+
|
1276 |
+
@add_start_docstrings(
|
1277 |
+
"The bare NemotronH Model transformer outputting raw hidden-states without any specific head on top.",
|
1278 |
+
NEMOTRONH_START_DOCSTRING,
|
1279 |
+
)
|
1280 |
+
class NemotronHModel(NemotronHPreTrainedModel):
|
1281 |
+
def __init__(self, config):
|
1282 |
+
super().__init__(config)
|
1283 |
+
|
1284 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
|
1285 |
+
self.layers = nn.ModuleList([NemotronHBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
|
1286 |
+
|
1287 |
+
self.gradient_checkpointing = False
|
1288 |
+
self.norm_f = NemotronHRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
|
1289 |
+
# Initialize weights and apply final processing
|
1290 |
+
self._register_load_state_dict_pre_hook(self.load_hook)
|
1291 |
+
self.post_init()
|
1292 |
+
|
1293 |
+
def load_hook(self, state_dict, prefix, *args):
|
1294 |
+
for k in state_dict:
|
1295 |
+
if "embedding." in k:
|
1296 |
+
state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k)
|
1297 |
+
break
|
1298 |
+
|
1299 |
+
def get_input_embeddings(self):
|
1300 |
+
return self.embeddings
|
1301 |
+
|
1302 |
+
def set_input_embeddings(self, new_embeddings):
|
1303 |
+
self.embeddings = new_embeddings
|
1304 |
+
|
1305 |
+
@add_start_docstrings_to_model_forward(NEMOTRONH_INPUTS_DOCSTRING)
|
1306 |
+
@add_code_sample_docstrings(
|
1307 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
1308 |
+
output_type=NemotronHOutput,
|
1309 |
+
config_class=_CONFIG_FOR_DOC,
|
1310 |
+
)
|
1311 |
+
def forward(
|
1312 |
+
self,
|
1313 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1314 |
+
inputs_embeds: Optional[torch.LongTensor] = None,
|
1315 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1316 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
1317 |
+
use_cache: Optional[bool] = None,
|
1318 |
+
output_attentions: Optional[bool] = None,
|
1319 |
+
output_hidden_states: Optional[bool] = None,
|
1320 |
+
return_dict: Optional[bool] = None,
|
1321 |
+
cache_position: Optional[torch.LongTensor] = None,
|
1322 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1323 |
+
**kwargs,
|
1324 |
+
) -> Union[Tuple, NemotronHOutput]:
|
1325 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1326 |
+
output_hidden_states = (
|
1327 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1328 |
+
)
|
1329 |
+
# use_cache = use_cache if use_cache is not None else self.config.use_cache
|
1330 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
1331 |
+
|
1332 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1333 |
+
|
1334 |
+
if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
|
1335 |
+
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
|
1336 |
+
|
1337 |
+
if inputs_embeds is None:
|
1338 |
+
inputs_embeds = self.embeddings(input_ids)
|
1339 |
+
|
1340 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
1341 |
+
logger.warning_once(
|
1342 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
|
1343 |
+
)
|
1344 |
+
use_cache = False
|
1345 |
+
|
1346 |
+
# From zamba_modeling.py
|
1347 |
+
if use_cache and cache_params is None:
|
1348 |
+
logger.warning_once(
|
1349 |
+
"NemotronH requires an initialized `NemotronHHybridDynamicCache` to return a cache. None was "
|
1350 |
+
"provided, so no cache will be returned."
|
1351 |
+
)
|
1352 |
+
|
1353 |
+
hidden_states = inputs_embeds
|
1354 |
+
|
1355 |
+
if cache_position is None:
|
1356 |
+
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
|
1357 |
+
if position_ids is None:
|
1358 |
+
position_ids = cache_position.unsqueeze(0)
|
1359 |
+
|
1360 |
+
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
|
1361 |
+
mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
|
1362 |
+
|
1363 |
+
all_hidden_states = () if output_hidden_states else None
|
1364 |
+
all_self_attns = () if output_attentions else None
|
1365 |
+
# Until HERE
|
1366 |
+
|
1367 |
+
for layer_idx, mixer_block in enumerate(self.layers):
|
1368 |
+
# Depending on the layer type we opt for 2D base attention mask (Mamba) or 4D causal mask (Attention)
|
1369 |
+
if mixer_block.block_type == "mamba":
|
1370 |
+
layer_mask = mamba_mask
|
1371 |
+
elif mixer_block.block_type == "attention":
|
1372 |
+
layer_mask = causal_mask
|
1373 |
+
elif mixer_block.block_type == "mlp":
|
1374 |
+
layer_mask = None
|
1375 |
+
else:
|
1376 |
+
raise ValueError(f"Invalid block_type: {self.block_type}")
|
1377 |
+
|
1378 |
+
if output_hidden_states:
|
1379 |
+
all_hidden_states += (hidden_states,)
|
1380 |
+
|
1381 |
+
if self.gradient_checkpointing and self.training:
|
1382 |
+
hidden_states = self._gradient_checkpointing_func(
|
1383 |
+
mixer_block.__call__, hidden_states, cache_params, cache_position, layer_mask
|
1384 |
+
)
|
1385 |
+
else:
|
1386 |
+
hidden_states = mixer_block(
|
1387 |
+
hidden_states,
|
1388 |
+
cache_params=cache_params,
|
1389 |
+
cache_position=cache_position,
|
1390 |
+
attention_mask=layer_mask,
|
1391 |
+
)
|
1392 |
+
|
1393 |
+
# TODO: Store attentions
|
1394 |
+
# if output_attentions:
|
1395 |
+
# if layer_outputs[1] is not None:
|
1396 |
+
# # append attentions only of attention layers. Mamba layers return `None` as the attention weights
|
1397 |
+
# all_self_attns += (layer_outputs[1],)
|
1398 |
+
|
1399 |
+
# TODO (Check): should it happen before the forward pass?
|
1400 |
+
# if output_hidden_states:
|
1401 |
+
# all_hidden_states = all_hidden_states + (hidden_states,)
|
1402 |
+
|
1403 |
+
hidden_states = self.norm_f(hidden_states)
|
1404 |
+
|
1405 |
+
if output_hidden_states:
|
1406 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
1407 |
+
|
1408 |
+
if not return_dict:
|
1409 |
+
return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
|
1410 |
+
|
1411 |
+
return NemotronHOutput(
|
1412 |
+
last_hidden_state=hidden_states,
|
1413 |
+
cache_params=cache_params if use_cache else None,
|
1414 |
+
hidden_states=all_hidden_states,
|
1415 |
+
attentions=all_self_attns,
|
1416 |
+
)
|
1417 |
+
|
1418 |
+
# Copied from transformers.models.jamba.modeling_jamba.JambaModel._update_causal_mask
|
1419 |
+
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
|
1420 |
+
if self.config._attn_implementation == "flash_attention_2":
|
1421 |
+
if attention_mask is not None and 0.0 in attention_mask:
|
1422 |
+
return attention_mask
|
1423 |
+
return None
|
1424 |
+
|
1425 |
+
dtype, device = input_tensor.dtype, input_tensor.device
|
1426 |
+
min_dtype = torch.finfo(dtype).min
|
1427 |
+
sequence_length = input_tensor.shape[1]
|
1428 |
+
target_length = cache_position[-1] + 1
|
1429 |
+
|
1430 |
+
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
|
1431 |
+
if sequence_length != 1:
|
1432 |
+
causal_mask = torch.triu(causal_mask, diagonal=1)
|
1433 |
+
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
|
1434 |
+
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
|
1435 |
+
if attention_mask is not None:
|
1436 |
+
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
|
1437 |
+
if attention_mask.dim() == 2:
|
1438 |
+
mask_length = attention_mask.shape[-1]
|
1439 |
+
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
|
1440 |
+
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
|
1441 |
+
|
1442 |
+
if (
|
1443 |
+
self.config._attn_implementation == "sdpa"
|
1444 |
+
and attention_mask is not None
|
1445 |
+
and attention_mask.device.type == "cuda"
|
1446 |
+
):
|
1447 |
+
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
|
1448 |
+
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
|
1449 |
+
# Details: https://github.com/pytorch/pytorch/issues/110213
|
1450 |
+
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
|
1451 |
+
|
1452 |
+
return causal_mask
|
1453 |
+
|
1454 |
+
def _update_mamba_mask(self, attention_mask, cache_position):
|
1455 |
+
"""
|
1456 |
+
No need for zeroing states when
|
1457 |
+
1. Cached forward
|
1458 |
+
2. Attending to all inputs
|
1459 |
+
"""
|
1460 |
+
mamba_mask = attention_mask
|
1461 |
+
if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
|
1462 |
+
mamba_mask = None
|
1463 |
+
return mamba_mask
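# Illustrative note (not executed): the mask is only needed to zero out padded positions
# before the state-space scan, so it is dropped both for cached single-token decoding
# (cache_position[0] > 0) and when no position is actually masked, e.g.:
#
#   attention_mask = torch.ones(2, 16, dtype=torch.long)
#   # torch.all(attention_mask == 1) is True -> the mamba layers receive mask=None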
|
1464 |
+
|
1465 |
+
|
1466 |
+
@add_start_docstrings(
|
1467 |
+
"""
|
1468 |
+
The NEMOTRONH Model transformer with a language modeling head on top (linear layer with weights not tied to the input
|
1469 |
+
embeddings).
|
1470 |
+
""",
|
1471 |
+
NEMOTRONH_START_DOCSTRING,
|
1472 |
+
)
|
1473 |
+
class NemotronHForCausalLM(NemotronHPreTrainedModel, GenerationMixin):
|
1474 |
+
_tied_weights_keys = ["lm_head.weight"]
|
1475 |
+
|
1476 |
+
def __init__(self, config):
|
1477 |
+
super().__init__(config)
|
1478 |
+
self.backbone = NemotronHModel(config)
|
1479 |
+
self.vocab_size = config.vocab_size
|
1480 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
1481 |
+
|
1482 |
+
# Initialize weights and apply final processing
|
1483 |
+
self.post_init()
|
1484 |
+
|
1485 |
+
def get_input_embeddings(self):
|
1486 |
+
return self.backbone.get_input_embeddings()
|
1487 |
+
|
1488 |
+
def set_input_embeddings(self, new_embeddings):
|
1489 |
+
return self.backbone.set_input_embeddings(new_embeddings)
|
1490 |
+
|
1491 |
+
def get_output_embeddings(self):
|
1492 |
+
return self.lm_head
|
1493 |
+
|
1494 |
+
def set_output_embeddings(self, new_embeddings):
|
1495 |
+
self.lm_head = new_embeddings
|
1496 |
+
|
1497 |
+
def get_decoder(self):
|
1498 |
+
return self.model
|
1499 |
+
|
1500 |
+
def set_decoder(self, decoder):
|
1501 |
+
self.model = decoder
|
1502 |
+
|
1503 |
+
def prepare_inputs_for_generation(
|
1504 |
+
self,
|
1505 |
+
input_ids,
|
1506 |
+
past_key_values=None,
|
1507 |
+
attention_mask=None,
|
1508 |
+
inputs_embeds=None,
|
1509 |
+
cache_position=None,
|
1510 |
+
position_ids=None,
|
1511 |
+
use_cache=True,
|
1512 |
+
**kwargs,
|
1513 |
+
):
|
1514 |
+
# Copy from https://github.com/huggingface/transformers/blob/main/src/transformers/models/jamba/modeling_jamba.py
|
1515 |
+
# Overwritten -- uses `cache_params` as opposed to `past_key_values`
|
1516 |
+
empty_past_kv = past_key_values is None
|
1517 |
+
|
1518 |
+
# If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
|
1519 |
+
# Exception 1: when passing input_embeds, input_ids may be missing entries
|
1520 |
+
# Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
|
1521 |
+
# Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case.
|
1522 |
+
# (we can't check exception 3 while compiling)
|
1523 |
+
if not empty_past_kv:
|
1524 |
+
if (
|
1525 |
+
inputs_embeds is not None # Exception 1
|
1526 |
+
or cache_position[-1] >= input_ids.shape[1] # Exception 3
|
1527 |
+
):
|
1528 |
+
input_ids = input_ids[:, -cache_position.shape[0] :]
|
1529 |
+
elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
|
1530 |
+
input_ids = input_ids[:, cache_position]
|
1531 |
+
else:
|
1532 |
+
past_key_values = HybridMambaAttentionDynamicCache(
|
1533 |
+
self.config, input_ids.shape[0], self.dtype, device=self.device
|
1534 |
+
)
|
1535 |
+
|
1536 |
+
if attention_mask is not None and position_ids is None:
|
1537 |
+
# create position_ids on the fly for batch generation
|
1538 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
1539 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
1540 |
+
if not empty_past_kv:
|
1541 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
1542 |
+
|
1543 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
1544 |
+
if inputs_embeds is not None and empty_past_kv:
|
1545 |
+
# TODO(pjin): workaround fix for properly extending inputs_embeds;
|
1546 |
+
# longer term, may be better handled elsewhere in .generate().
|
1547 |
+
if input_ids is not None and inputs_embeds.shape[1] < input_ids.shape[1]:
|
1548 |
+
new_token_embeds = self.get_input_embeddings()(input_ids[:,inputs_embeds.shape[1]:])
|
1549 |
+
inputs_embeds = torch.cat([inputs_embeds, new_token_embeds], dim=1)
|
1550 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
1551 |
+
else:
|
1552 |
+
model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
|
1553 |
+
|
1554 |
+
model_inputs.update(
|
1555 |
+
{
|
1556 |
+
"position_ids": position_ids,
|
1557 |
+
"past_key_values": past_key_values,
|
1558 |
+
"use_cache": use_cache,
|
1559 |
+
"attention_mask": attention_mask,
|
1560 |
+
"logits_to_keep": self.config.num_logits_to_keep,
|
1561 |
+
"cache_position": cache_position,
|
1562 |
+
}
|
1563 |
+
)
|
1564 |
+
return model_inputs
|
1565 |
+
|
1566 |
+
@add_start_docstrings_to_model_forward(NEMOTRONH_INPUTS_DOCSTRING)
|
1567 |
+
@add_code_sample_docstrings(
|
1568 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
1569 |
+
output_type=NemotronHCausalLMOutput,
|
1570 |
+
config_class=_CONFIG_FOR_DOC,
|
1571 |
+
)
|
1572 |
+
def forward(
|
1573 |
+
self,
|
1574 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1575 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1576 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1577 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
1578 |
+
labels: Optional[torch.LongTensor] = None,
|
1579 |
+
output_attentions: Optional[bool] = None,
|
1580 |
+
output_hidden_states: Optional[bool] = None,
|
1581 |
+
return_dict: Optional[bool] = None,
|
1582 |
+
use_cache: Optional[bool] = None,
|
1583 |
+
cache_position: Optional[torch.Tensor] = None,
|
1584 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1585 |
+
**kwargs, # for now we need this for generation
|
1586 |
+
) -> Union[Tuple, NemotronHCausalLMOutput]:
|
1587 |
+
r"""
|
1588 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1589 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
1590 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
1591 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
1592 |
+
"""
|
1593 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1594 |
+
|
1595 |
+
output_hidden_states = (
|
1596 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1597 |
+
)
|
1598 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1599 |
+
|
1600 |
+
nemotron_h_outputs = self.backbone(
|
1601 |
+
input_ids,
|
1602 |
+
cache_params=cache_params,
|
1603 |
+
inputs_embeds=inputs_embeds,
|
1604 |
+
output_attentions=output_attentions,
|
1605 |
+
output_hidden_states=output_hidden_states,
|
1606 |
+
return_dict=return_dict,
|
1607 |
+
use_cache=use_cache,
|
1608 |
+
cache_position=cache_position,
|
1609 |
+
attention_mask=attention_mask,
|
1610 |
+
)
|
1611 |
+
hidden_states = nemotron_h_outputs[0]
|
1612 |
+
|
1613 |
+
# TODO: Check zamba_modeling.py: https://github.com/huggingface/transformers/blob/d7188ba600e36d3fd191b12e19f1b3bb81a8404f/src/transformers/models/zamba/modeling_zamba.py#L1284C1-L1286C2
|
1614 |
+
#logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
|
1615 |
+
logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
|
1616 |
+
|
1617 |
+
loss = None
|
1618 |
+
if labels is not None:
|
1619 |
+
# move labels to correct device to enable model parallelism
|
1620 |
+
labels = labels.to(logits.device)
|
1621 |
+
# Shift so that tokens < n predict n
|
1622 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
1623 |
+
shift_labels = labels[..., 1:].contiguous()
|
1624 |
+
# Flatten the tokens
|
1625 |
+
loss_fct = CrossEntropyLoss()
|
1626 |
+
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
1627 |
+
|
1628 |
+
if not return_dict:
|
1629 |
+
output = (logits,) + nemotron_h_outputs[1:]
|
1630 |
+
return ((loss,) + output) if loss is not None else output
|
1631 |
+
|
1632 |
+
return NemotronHCausalLMOutput(
|
1633 |
+
loss=loss,
|
1634 |
+
logits=logits,
|
1635 |
+
cache_params=nemotron_h_outputs.cache_params,
|
1636 |
+
hidden_states=nemotron_h_outputs.hidden_states,
|
1637 |
+
attentions=nemotron_h_outputs.attentions,
|
1638 |
+
)
|
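A minimal usage sketch for loading this checkpoint with the custom modeling code above; the repo id below is a placeholder for wherever this checkpoint is hosted, and the generation arguments are illustrative, not prescribed by the repository.

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "<this-repo-id>"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True, torch_dtype="auto", device_map="auto")

messages = [{"role": "user", "content": "Give me one sentence about state-space models."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))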
nemotron_toolcall_parser_no_streaming.py
ADDED
@@ -0,0 +1,110 @@
# SPDX-License-Identifier: Apache-2.0

import ast
import json
import re
from collections.abc import Sequence
from typing import Union

import partial_json_parser
from partial_json_parser.core.options import Allow

from vllm.entrypoints.openai.protocol import (
    ChatCompletionRequest,
    DeltaFunctionCall, DeltaMessage,
    DeltaToolCall,
    ExtractedToolCallInformation,
    FunctionCall,
    ToolCall,
)
from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
    ToolParser,
    ToolParserManager,
)
from vllm.logger import init_logger
from vllm.transformers_utils.tokenizer import AnyTokenizer
from vllm.utils import random_uuid

logger = init_logger(__name__)


@ToolParserManager.register_module("nemotron_json")
class NemotronJSONToolParser(ToolParser):

    def __init__(self, tokenizer: AnyTokenizer):
        super().__init__(tokenizer)

        self.current_tool_name_sent: bool = False
        self.prev_tool_call_arr: list[dict] = []
        self.current_tool_id: int = -1
        self.streamed_args_for_tool: list[str] = []

        self.tool_call_start_token: str = "<TOOLCALL>"
        self.tool_call_end_token: str = "</TOOLCALL>"

        self.tool_call_regex = re.compile(r"<TOOLCALL>(.*?)</TOOLCALL>", re.DOTALL)

    def extract_tool_calls(
        self,
        model_output: str,
        request: ChatCompletionRequest,
    ) -> ExtractedToolCallInformation:

        if self.tool_call_start_token not in model_output:
            return ExtractedToolCallInformation(
                tools_called=False,
                tool_calls=[],
                content=model_output,
            )

        else:

            try:
                str_tool_calls = self.tool_call_regex.findall(model_output)[0].strip()
                if not str_tool_calls.startswith("["):
                    str_tool_calls = "[" + str_tool_calls
                if not str_tool_calls.endswith("]"):
                    # close the JSON list if the model stopped before emitting "]"
                    str_tool_calls = str_tool_calls + "]"
                json_tool_calls = json.loads(str_tool_calls)
                tool_calls = []
                for tool_call in json_tool_calls:
                    try:
                        tool_calls.append(ToolCall(
                            type="function",
                            function=FunctionCall(
                                name=tool_call["name"],
                                arguments=json.dumps(tool_call["arguments"], ensure_ascii=False) \
                                    if isinstance(tool_call["arguments"], dict) else tool_call["arguments"],
                            ),
                        ))
                    except Exception:
                        continue

                content = model_output[:model_output.rfind(self.tool_call_start_token)]

                return ExtractedToolCallInformation(
                    tools_called=True,
                    tool_calls=tool_calls,
                    content=content if content else None,
                )

            except Exception:
                logger.exception(f"Error in extracting tool call from response. Response: {model_output}")
                return ExtractedToolCallInformation(
                    tools_called=False,
                    tool_calls=[],
                    content=model_output,
                )

    def extract_tool_calls_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
        request: ChatCompletionRequest,
    ) -> Union[DeltaMessage, None]:

        raise NotImplementedError("Tool calling is not supported in streaming mode!")
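A standalone sketch of the extraction logic implemented above. The sample model output below is invented for illustration, and the real parser returns vLLM ToolCall objects rather than plain dicts.

import json
import re

sample = 'Let me check that. <TOOLCALL>[{"name": "get_weather", "arguments": {"city": "Paris"}}]</TOOLCALL>'
payload = re.search(r"<TOOLCALL>(.*?)</TOOLCALL>", sample, re.DOTALL).group(1).strip()
calls = json.loads(payload)
print(calls[0]["name"])                   # get_weather
print(json.dumps(calls[0]["arguments"]))  # {"city": "Paris"}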
recipe.yaml
ADDED
@@ -0,0 +1,32 @@
quant_stage:
  quant_modifiers:
    AWQModifier:
      config_groups:
        group_0:
          targets: [Linear]
          weights:
            num_bits: 8
            type: int
            symmetric: true
            group_size: 32
            strategy: group
            block_structure: null
            dynamic: false
            actorder: null
            observer: mse
            observer_kwargs: {}
            input_activations: null
            output_activations: null
            format: null
      targets: [Linear]
      ignore: [backbone.embeddings, 're:.*mixer.A_log', 're:.*conv1d.bias', 're:.*mixer.D',
        're:.*mixer.dt_bias', 're:.*norm', 're:backbone.norm_f', lm_head]
      mappings:
      - smooth_layer: re:.*norm$
        balance_layers: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$', 're:.*up_proj$',
          're:.*in_proj$']
      - smooth_layer: re:.*v_proj$
        balance_layers: ['re:.*o_proj$']
      - smooth_layer: re:.*up_proj$
        balance_layers: ['re:.*down_proj$']
      duo_scaling: true
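The recipe above appears to follow llmcompressor's AWQModifier schema. A minimal sketch of how such a recipe is typically applied; the import path, oneshot arguments, source model id, and calibration dataset below are assumptions for illustration, not taken from this repository.

from llmcompressor import oneshot

oneshot(
    model="<source-model-id>",   # placeholder: the unquantized Nemotron-H checkpoint
    recipe="recipe.yaml",
    dataset="open_platypus",     # placeholder calibration dataset
    max_seq_length=2048,
    num_calibration_samples=512,
    output_dir="./nemotron-h-awq-int8",
)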
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<SPECIAL_12>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3277c00fe5fb3963b3cb7c07b7f183722d2af4d775a4aea7cfb3684d7cccbc2f
size 17078330
tokenizer_config.json
ADDED
The diff for this file is too large to render.
See raw diff