Commit 16426c6
Parent(s): 0
Initial commit
- .gitattributes +37 -0
- README.md +187 -0
- added_tokens.json +28 -0
- chat_template.jinja +85 -0
- config.json +131 -0
- generation_config.json +13 -0
- hf_quant_config.json +14 -0
- merges.txt +0 -0
- model-00001-of-00005.safetensors +3 -0
- model-00002-of-00005.safetensors +3 -0
- model-00003-of-00005.safetensors +3 -0
- model-00004-of-00005.safetensors +3 -0
- model-00005-of-00005.safetensors +3 -0
- model.safetensors.index.json +0 -0
- special_tokens_map.json +19 -0
- tokenizer.json +3 -0
- tokenizer_config.json +239 -0
- vocab.json +0 -0
    	
.gitattributes
ADDED

@@ -0,0 +1,37 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+
    	
README.md
ADDED

@@ -0,0 +1,187 @@
+---
+pipeline_tag: text-generation
+base_model:
+- Qwen/Qwen3-32B
+license: apache-2.0
+library_name: Model Optimizer
+tags:
+- nvidia
+- ModelOpt
+- Qwen3
+- quantized
+- FP4
+- fp4
+---
+
+# Model Overview
+
+## Description:
+The NVIDIA Qwen3-32B FP4 model is the quantized version of Alibaba's Qwen3-32B model, which is an auto-regressive language model that uses an optimized transformer architecture. For more information, please check [here](https://huggingface.co/Qwen/Qwen3-32B). The NVIDIA Qwen3-32B FP4 model is quantized with [TensorRT Model Optimizer](https://github.com/NVIDIA/TensorRT-Model-Optimizer).
+
+This model is ready for commercial/non-commercial use. <br>
+
+## Third-Party Community Consideration
+This model is not owned or developed by NVIDIA. This model has been developed and built to a third-party’s requirements for this application and use case; see link to Non-NVIDIA [(Qwen3-32B) Model Card](https://huggingface.co/Qwen/Qwen3-32B).
+
+### License/Terms of Use:
+[Apache license 2.0](https://huggingface.co/datasets/choosealicense/licenses/blob/main/markdown/apache-2.0.md)
+
+### Deployment Geography:
+Global <br>
+
+### Use Case: <br>
+Developers looking to take off-the-shelf pre-quantized models for deployment in AI agent systems, chatbots, RAG systems, and other AI-powered applications. <br>
+
+### Release Date: <br>
+Hugging Face 09/15/2025 via https://huggingface.co/nvidia/Qwen3-32B-FP4 <br>
+
+## Model Architecture:
+**Architecture Type:** Transformer <br>
+**Network Architecture:** Qwen3-32B <br>
+
+**This model was developed based on:** Qwen3-32B <br>
+**Number of model parameters:** 32.8B <br>
+
+## Input:
+**Input Type(s):** Text <br>
+**Input Format(s):** String <br>
+**Input Parameters:** 1D (One-Dimensional): Sequences <br>
+**Other Properties Related to Input:** Context length up to 131K <br>
+
+## Output:
+**Output Type(s):** Text <br>
+**Output Format:** String <br>
+**Output Parameters:** 1D (One-Dimensional): Sequences <br>
+**Other Properties Related to Output:** N/A <br>
+
+Our AI models are designed and/or optimized to run on NVIDIA GPU-accelerated systems. By leveraging NVIDIA’s hardware (e.g. GPU cores) and software frameworks (e.g., CUDA libraries), the model achieves faster training and inference times compared to CPU-only solutions. <br>
+
+## Software Integration:
+**Supported Runtime Engine(s):** <br>
+* TensorRT-LLM <br>
+
+**Supported Hardware Microarchitecture Compatibility:** <br>
+* NVIDIA Blackwell <br>
+
+**Preferred Operating System(s):** <br>
+* Linux <br>
+
+## Model Version(s):
+The model is quantized with nvidia-modelopt **v0.35.0** <br>
+
+## Post Training Quantization
+This model was obtained by quantizing the weights and activations of Qwen3-32B to the FP4 data type, ready for inference with TensorRT-LLM. Only the weights and activations of the linear operators within transformer blocks are quantized.
+
+## Training, Testing, and Evaluation Datasets:
+**Data Modality:**
+* Text
+
+
+## Calibration Dataset:
+* Link: [cnn_dailymail](https://huggingface.co/datasets/abisee/cnn_dailymail) <br>
+* Data collection method: Automated. <br>
+* Labeling method: Automated. <br>
+
+## Training Datasets:
+* Data Collection Method by Dataset: Undisclosed <br>
+* Labeling Method by Dataset: Undisclosed <br>
+* Properties: Undisclosed
+
+## Testing Dataset:
+* Data Collection Method by Dataset: Undisclosed <br>
+* Labeling Method by Dataset: Undisclosed <br>
+* Properties: Undisclosed <br>
+
+## Evaluation Dataset:
+* Datasets: MMLU Pro, GPQA Diamond, HLE, LiveCodeBench, SciCode, HumanEval, AIME 2024, MATH-500 <br>
+* Data collection method: Hybrid: Automated, Human <br>
+* Labeling method: Hybrid: Human, Automated <br>
+
+## Inference:
+**Engine:** TensorRT-LLM <br>
+**Test Hardware:** B200 <br>
+
+## Usage
+
+### Deploy with TensorRT-LLM
+
+To deploy the quantized checkpoint with the [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) LLM API, follow the sample code below:
+
+* LLM API sample usage:
+```
+from tensorrt_llm import LLM, SamplingParams
+
+
+def main():
+
+    prompts = [
+        "Hello, my name is",
+        "The president of the United States is",
+        "The capital of France is",
+        "The future of AI is",
+    ]
+    sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
+
+    llm = LLM(model="nvidia/Qwen3-32B-FP4")
+
+    outputs = llm.generate(prompts, sampling_params)
+
+    # Print the outputs.
+    for output in outputs:
+        prompt = output.prompt
+        generated_text = output.outputs[0].text
+        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+
+
+# The entry point of the program needs to be protected for spawning processes.
+if __name__ == '__main__':
+    main()
+
+```
+
+### Evaluation
+The accuracy benchmark results are presented in the table below:
+<table>
+  <tr>
+   <td><strong>Precision</strong></td>
+   <td><strong>MMLU Pro</strong></td>
+   <td><strong>SCICODE</strong></td>
+   <td><strong>MATH-500</strong></td>
+   <td><strong>AIME 2024</strong></td>
+  </tr>
+  <tr>
+   <td>BF16 (AA Ref)</td>
+   <td>0.80</td>
+   <td>0.35</td>
+   <td>0.96</td>
+   <td>0.81</td>
+  </tr>
+  <tr>
+   <td>FP4</td>
+   <td>0.78</td>
+   <td>0.36</td>
+   <td>0.96</td>
+   <td>0.80</td>
+  </tr>
+</table>
+
+## Ethical Considerations
+
+NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse.
+Please report model quality, risk, security vulnerabilities or NVIDIA AI Concerns here.
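For context on the README's Post Training Quantization section: a typical NVFP4 PTQ flow with TensorRT Model Optimizer looks like the sketch below. The calibration specifics are not part of this commit, so the `NVFP4_DEFAULT_CFG` config name and the toy calibration loop are assumptions based on the ModelOpt documentation, not the exact recipe used for this checkpoint.

```
# Hypothetical sketch of an NVFP4 post-training-quantization flow with
# nvidia-modelopt; calibration details are assumed, not taken from this commit.
import modelopt.torch.quantization as mtq
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen3-32B", torch_dtype="bfloat16", device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-32B")

def forward_loop(m):
    # Feed a few calibration prompts (the card cites cnn_dailymail) through
    # the model so ModelOpt can collect static activation scales.
    inputs = tokenizer("Calibration sample text.", return_tensors="pt").to(m.device)
    m(**inputs)

# NVFP4: 4-bit float weights/activations, group size 16, lm_head excluded --
# matching hf_quant_config.json further down in this commit.
model = mtq.quantize(model, mtq.NVFP4_DEFAULT_CFG, forward_loop)
```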
    	
added_tokens.json
ADDED

@@ -0,0 +1,28 @@
+{
+  "</think>": 151668,
+  "</tool_call>": 151658,
+  "</tool_response>": 151666,
+  "<think>": 151667,
+  "<tool_call>": 151657,
+  "<tool_response>": 151665,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
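A quick way to sanity-check the IDs above against the shipped tokenizer (a minimal sketch using `transformers`):

```
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("nvidia/Qwen3-32B-FP4")

# These IDs mirror added_tokens.json above.
assert tok.convert_tokens_to_ids("<think>") == 151667
assert tok.convert_tokens_to_ids("</think>") == 151668
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
```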
    	
chat_template.jinja
ADDED

@@ -0,0 +1,85 @@
+{%- if tools %}
+    {{- '<|im_start|>system\n' }}
+    {%- if messages[0].role == 'system' %}
+        {{- messages[0].content + '\n\n' }}
+    {%- endif %}
+    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+    {%- for tool in tools %}
+        {{- "\n" }}
+        {{- tool | tojson }}
+    {%- endfor %}
+    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+{%- else %}
+    {%- if messages[0].role == 'system' %}
+        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+    {%- endif %}
+{%- endif %}
+{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+{%- for message in messages[::-1] %}
+    {%- set index = (messages|length - 1) - loop.index0 %}
+    {%- if ns.multi_step_tool and message.role == "user" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+        {%- set ns.multi_step_tool = false %}
+        {%- set ns.last_query_index = index %}
+    {%- endif %}
+{%- endfor %}
+{%- for message in messages %}
+    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+    {%- elif message.role == "assistant" %}
+        {%- set content = message.content %}
+        {%- set reasoning_content = '' %}
+        {%- if message.reasoning_content is defined and message.reasoning_content is not none %}
+            {%- set reasoning_content = message.reasoning_content %}
+        {%- else %}
+            {%- if '</think>' in message.content %}
+                {%- set content = message.content.split('</think>')[-1].lstrip('\n') %}
+                {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+            {%- endif %}
+        {%- endif %}
+        {%- if loop.index0 > ns.last_query_index %}
+            {%- if loop.last or (not loop.last and reasoning_content) %}
+                {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+            {%- else %}
+                {{- '<|im_start|>' + message.role + '\n' + content }}
+            {%- endif %}
+        {%- else %}
+            {{- '<|im_start|>' + message.role + '\n' + content }}
+        {%- endif %}
+        {%- if message.tool_calls %}
+            {%- for tool_call in message.tool_calls %}
+                {%- if (loop.first and content) or (not loop.first) %}
+                    {{- '\n' }}
+                {%- endif %}
+                {%- if tool_call.function %}
+                    {%- set tool_call = tool_call.function %}
+                {%- endif %}
+                {{- '<tool_call>\n{"name": "' }}
+                {{- tool_call.name }}
+                {{- '", "arguments": ' }}
+                {%- if tool_call.arguments is string %}
+                    {{- tool_call.arguments }}
+                {%- else %}
+                    {{- tool_call.arguments | tojson }}
+                {%- endif %}
+                {{- '}\n</tool_call>' }}
+            {%- endfor %}
+        {%- endif %}
+        {{- '<|im_end|>\n' }}
+    {%- elif message.role == "tool" %}
+        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+            {{- '<|im_start|>user' }}
+        {%- endif %}
+        {{- '\n<tool_response>\n' }}
+        {{- message.content }}
+        {{- '\n</tool_response>' }}
+        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+            {{- '<|im_end|>\n' }}
+        {%- endif %}
+    {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+    {{- '<|im_start|>assistant\n' }}
+    {%- if enable_thinking is defined and enable_thinking is false %}
+        {{- '<think>\n\n</think>\n\n' }}
+    {%- endif %}
+{%- endif %}
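Near its end, the template keys off an `enable_thinking` flag: when it is passed and false, the generation prompt is pre-filled with an empty `<think>` block so the model answers without a reasoning trace. A minimal sketch of exercising this through `transformers`, which forwards extra keyword arguments to the template:

```
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("nvidia/Qwen3-32B-FP4")
messages = [{"role": "user", "content": "What is the capital of France?"}]

# Default: the prompt ends with '<|im_start|>assistant\n' and the model may
# open a <think> block itself.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# enable_thinking=False: the template pre-fills an empty think block.
prompt_nothink = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
)
print(prompt_nothink.endswith("<think>\n\n</think>\n\n"))  # True
```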
    	
config.json
ADDED

@@ -0,0 +1,131 @@
+{
+    "architectures": [
+        "Qwen3ForCausalLM"
+    ],
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "eos_token_id": 151645,
+    "head_dim": 128,
+    "hidden_act": "silu",
+    "hidden_size": 5120,
+    "initializer_range": 0.02,
+    "intermediate_size": 25600,
+    "layer_types": [
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention",
+        "full_attention"
+    ],
+    "max_position_embeddings": 40960,
+    "max_window_layers": 64,
+    "model_type": "qwen3",
+    "num_attention_heads": 64,
+    "num_hidden_layers": 64,
+    "num_key_value_heads": 8,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": null,
+    "rope_theta": 1000000,
+    "sliding_window": null,
+    "tie_word_embeddings": false,
+    "torch_dtype": "bfloat16",
+    "transformers_version": "4.53.1",
+    "use_cache": true,
+    "use_sliding_window": false,
+    "vocab_size": 151936,
+    "quantization_config": {
+        "config_groups": {
+            "group_0": {
+                "input_activations": {
+                    "dynamic": false,
+                    "num_bits": 4,
+                    "type": "float",
+                    "group_size": 16
+                },
+                "weights": {
+                    "dynamic": false,
+                    "num_bits": 4,
+                    "type": "float",
+                    "group_size": 16
+                },
+                "targets": [
+                    "Linear"
+                ]
+            }
+        },
+        "ignore": [
+            "lm_head"
+        ],
+        "quant_algo": "NVFP4",
+        "kv_cache_scheme": {
+            "dynamic": false,
+            "num_bits": 8,
+            "type": "float"
+        },
+        "producer": {
+            "name": "modelopt",
+            "version": "0.35.0"
+        },
+        "quant_method": "modelopt"
+    }
+}
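The README's 32.8B parameter figure can be reproduced from the dimensions in this config. A quick back-of-the-envelope check (a sketch; norm weights and other small tensors are ignored):

```
# Approximate parameter count from config.json above.
hidden, inter, layers = 5120, 25600, 64
heads, kv_heads, head_dim = 64, 8, 128
vocab = 151936

attn = hidden * heads * head_dim           # q_proj
attn += 2 * hidden * kv_heads * head_dim   # k_proj + v_proj
attn += heads * head_dim * hidden          # o_proj
mlp = 3 * hidden * inter                   # gate_proj + up_proj + down_proj
emb = 2 * vocab * hidden                   # embed_tokens + untied lm_head

print(f"~{(layers * (attn + mlp) + emb) / 1e9:.1f}B parameters")  # ~32.8B
```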
    	
generation_config.json
ADDED

@@ -0,0 +1,13 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "temperature": 0.6,
+  "top_k": 20,
+  "top_p": 0.95,
+  "transformers_version": "4.53.1"
+}
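These decoding defaults (temperature 0.6, top-p 0.95, top-k 20) are picked up automatically by `transformers`' `generate()`. With the TensorRT-LLM API shown in the README they are not read from this file and would need to be passed explicitly (an assumption based on the LLM API sample above); a minimal sketch:

```
from tensorrt_llm import LLM, SamplingParams

# Mirror generation_config.json's defaults explicitly.
params = SamplingParams(temperature=0.6, top_p=0.95, top_k=20)
llm = LLM(model="nvidia/Qwen3-32B-FP4")
out = llm.generate(["The capital of France is"], params)
print(out[0].outputs[0].text)
```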
    	
hf_quant_config.json
ADDED

@@ -0,0 +1,14 @@
+{
+    "producer": {
+        "name": "modelopt",
+        "version": "0.35.0"
+    },
+    "quantization": {
+        "quant_algo": "NVFP4",
+        "kv_cache_quant_algo": "FP8",
+        "group_size": 16,
+        "exclude_modules": [
+            "lm_head"
+        ]
+    }
+}
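For context on the settings above: NVFP4 stores each weight as a 4-bit float (E2M1, whose representable magnitudes are 0, 0.5, 1, 1.5, 2, 3, 4 and 6) plus one scale per group of 16 values, with the KV cache quantized separately to FP8. The sketch below fake-quantizes a tensor onto that grid to illustrate `group_size: 16`; it is a simplified model (real NVFP4 uses FP8 group scales plus a global scale), not the ModelOpt kernel:

```
import numpy as np

# Positive FP4 (E2M1) magnitudes; the signed grid has 16 code points.
E2M1 = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0])
GRID = np.concatenate([-E2M1[::-1], E2M1])

def fake_nvfp4(w, group_size=16):
    """Round each group of `group_size` weights to the nearest scaled FP4 value."""
    g = w.reshape(-1, group_size)
    scale = np.abs(g).max(axis=1, keepdims=True) / 6.0  # map each group's max to +/-6
    scale = np.where(scale == 0, 1.0, scale)
    idx = np.abs(g / scale - GRID[:, None, None]).argmin(axis=0)
    return (GRID[idx] * scale).reshape(w.shape)

w = np.random.randn(64).astype(np.float32)
print("mean abs quantization error:", np.abs(w - fake_nvfp4(w)).mean())
```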
    	
merges.txt
ADDED

The diff for this file is too large to render.
    	
model-00001-of-00005.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8648ed7353da4d1c713aa6b01eba69c372a8bdfb0f3adbcfaade8111592d11b7
+size 4974154840
    	
model-00002-of-00005.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a17d0318dfd131d9fda6efaad7f1d20774b8c99d05da9e2fd9d17cda03c745b
+size 4937271816
    	
model-00003-of-00005.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9cfa615230e1968f9c0c1f643eb0e6e2e3e336016d0da3723719f121494e9f9
+size 4937271816
    	
model-00004-of-00005.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c9025b63244ec1ce185c0d351abfd91391a436694923421914905ae50f01315
+size 4261880984
    	
model-00005-of-00005.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbd6a7104a886641f322332c121ef34cfaa312ed757e3cb019b14c26002a8fb6
+size 1555824768
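The five entries above are Git LFS pointer files, not the weights themselves; the actual shards are fetched through the LFS filters configured in .gitattributes. Their `size` fields sum to 20,666,404,224 bytes (about 20.7 GB), roughly what ~31.2B quantized linear-layer weights at 4.5 effective bits (4-bit values plus one 8-bit scale per 16 weights) plus BF16 embeddings and lm_head work out to. A quick check:

```
# Sum the `size` fields of the five LFS pointer files above.
shards = [4974154840, 4937271816, 4937271816, 4261880984, 1555824768]
total = sum(shards)
print(total, f"bytes = {total / 1e9:.2f} GB")  # 20666404224 bytes = 20.67 GB
```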
    	
model.safetensors.index.json
ADDED

The diff for this file is too large to render.
    	
special_tokens_map.json
ADDED

@@ -0,0 +1,19 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>"
+}
    	
tokenizer.json
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+size 11422654
    	
tokenizer_config.json
ADDED

@@ -0,0 +1,239 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
         | 
| 168 | 
            +
                  "normalized": false,
         | 
| 169 | 
            +
                  "rstrip": false,
         | 
| 170 | 
            +
                  "single_word": false,
         | 
| 171 | 
            +
                  "special": false
         | 
| 172 | 
            +
                },
         | 
| 173 | 
            +
                "151664": {
         | 
| 174 | 
            +
                  "content": "<|file_sep|>",
         | 
| 175 | 
            +
                  "lstrip": false,
         | 
| 176 | 
            +
                  "normalized": false,
         | 
| 177 | 
            +
                  "rstrip": false,
         | 
| 178 | 
            +
                  "single_word": false,
         | 
| 179 | 
            +
                  "special": false
         | 
| 180 | 
            +
                },
         | 
| 181 | 
            +
                "151665": {
         | 
| 182 | 
            +
                  "content": "<tool_response>",
         | 
| 183 | 
            +
                  "lstrip": false,
         | 
| 184 | 
            +
                  "normalized": false,
         | 
| 185 | 
            +
                  "rstrip": false,
         | 
| 186 | 
            +
                  "single_word": false,
         | 
| 187 | 
            +
                  "special": false
         | 
| 188 | 
            +
                },
         | 
| 189 | 
            +
                "151666": {
         | 
| 190 | 
            +
                  "content": "</tool_response>",
         | 
| 191 | 
            +
                  "lstrip": false,
         | 
| 192 | 
            +
                  "normalized": false,
         | 
| 193 | 
            +
                  "rstrip": false,
         | 
| 194 | 
            +
                  "single_word": false,
         | 
| 195 | 
            +
                  "special": false
         | 
| 196 | 
            +
                },
         | 
| 197 | 
            +
                "151667": {
         | 
| 198 | 
            +
                  "content": "<think>",
         | 
| 199 | 
            +
                  "lstrip": false,
         | 
| 200 | 
            +
                  "normalized": false,
         | 
| 201 | 
            +
                  "rstrip": false,
         | 
| 202 | 
            +
                  "single_word": false,
         | 
| 203 | 
            +
                  "special": false
         | 
| 204 | 
            +
                },
         | 
| 205 | 
            +
                "151668": {
         | 
| 206 | 
            +
                  "content": "</think>",
         | 
| 207 | 
            +
                  "lstrip": false,
         | 
| 208 | 
            +
                  "normalized": false,
         | 
| 209 | 
            +
                  "rstrip": false,
         | 
| 210 | 
            +
                  "single_word": false,
         | 
| 211 | 
            +
                  "special": false
         | 
| 212 | 
            +
                }
         | 
| 213 | 
            +
              },
         | 
| 214 | 
            +
              "additional_special_tokens": [
         | 
| 215 | 
            +
                "<|im_start|>",
         | 
| 216 | 
            +
                "<|im_end|>",
         | 
| 217 | 
            +
                "<|object_ref_start|>",
         | 
| 218 | 
            +
                "<|object_ref_end|>",
         | 
| 219 | 
            +
                "<|box_start|>",
         | 
| 220 | 
            +
                "<|box_end|>",
         | 
| 221 | 
            +
                "<|quad_start|>",
         | 
| 222 | 
            +
                "<|quad_end|>",
         | 
| 223 | 
            +
                "<|vision_start|>",
         | 
| 224 | 
            +
                "<|vision_end|>",
         | 
| 225 | 
            +
                "<|vision_pad|>",
         | 
| 226 | 
            +
                "<|image_pad|>",
         | 
| 227 | 
            +
                "<|video_pad|>"
         | 
| 228 | 
            +
              ],
         | 
| 229 | 
            +
              "bos_token": null,
         | 
| 230 | 
            +
              "clean_up_tokenization_spaces": false,
         | 
| 231 | 
            +
              "eos_token": "<|endoftext|>",
         | 
| 232 | 
            +
              "errors": "replace",
         | 
| 233 | 
            +
              "extra_special_tokens": {},
         | 
| 234 | 
            +
              "model_max_length": 131072,
         | 
| 235 | 
            +
              "pad_token": "<|endoftext|>",
         | 
| 236 | 
            +
              "split_special_tokens": false,
         | 
| 237 | 
            +
              "tokenizer_class": "Qwen2Tokenizer",
         | 
| 238 | 
            +
              "unk_token": null
         | 
| 239 | 
            +
            }
         | 
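The tokenizer_config.json above fixes how the checkpoint's tokenizer behaves at load time: `<|endoftext|>` serves as both EOS and pad token, the context window is declared as 131072 tokens, and each added token carries a `special` flag. Below is a minimal sketch of inspecting these fields with Hugging Face transformers; `path/to/this-repo` is a placeholder for a local checkout of this repository, not a real model id.

# Minimal sketch (assumed placeholder path): inspect the fields defined in
# tokenizer_config.json above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")

print(tok.eos_token)         # "<|endoftext|>" (also used as the pad token)
print(tok.model_max_length)  # 131072

# Tokens flagged "special": true (e.g. <|vision_start|>, listed under
# additional_special_tokens) are removed by skip_special_tokens=True;
# tokens flagged "special": false (e.g. <think>, <tool_call>) are kept,
# so reasoning and tool-call markup survives decoding.
ids = tok.encode("<think>draft</think>", add_special_tokens=False)
print(tok.decode(ids, skip_special_tokens=True))  # "<think>draft</think>"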
    	
vocab.json
    ADDED
    The diff for this file is too large to render. See raw diff.
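The vocab.json that was too large to render is the base BPE vocabulary that Qwen2Tokenizer loads alongside merges.txt, mapping token strings to integer ids. A minimal sketch of inspecting it directly, assuming a local checkout of this repository:

# Minimal sketch: load the raw BPE vocabulary read by Qwen2Tokenizer.
# Assumes vocab.json from this repo is in the working directory.
import json

with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)  # token string -> integer id

print(len(vocab))  # base vocabulary size, before the added tokens above
print(min(vocab.values()), max(vocab.values()))  # id range of the base vocab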
