README.md CHANGED
@@ -323,7 +323,7 @@ This code repository is licensed under [the MIT License](LICENSE-CODE). The use
323
  ```
324
  @misc{deepseekai2024deepseekv3technicalreport,
325
  title={DeepSeek-V3 Technical Report},
326
- author={DeepSeek-AI},
327
  year={2024},
328
  eprint={2412.19437},
329
  archivePrefix={arXiv},
 
323
  ```
324
  @misc{deepseekai2024deepseekv3technicalreport,
325
  title={DeepSeek-V3 Technical Report},
326
+ author={DeepSeek-AI and Aixin Liu and Bei Feng and Bing Xue and Bingxuan Wang and Bochao Wu and Chengda Lu and Chenggang Zhao and Chengqi Deng and Chenyu Zhang and Chong Ruan and Damai Dai and Daya Guo and Dejian Yang and Deli Chen and Dongjie Ji and Erhang Li and Fangyun Lin and Fucong Dai and Fuli Luo and Guangbo Hao and Guanting Chen and Guowei Li and H. Zhang and Han Bao and Hanwei Xu and Haocheng Wang and Haowei Zhang and Honghui Ding and Huajian Xin and Huazuo Gao and Hui Li and Hui Qu and J. L. Cai and Jian Liang and Jianzhong Guo and Jiaqi Ni and Jiashi Li and Jiawei Wang and Jin Chen and Jingchang Chen and Jingyang Yuan and Junjie Qiu and Junlong Li and Junxiao Song and Kai Dong and Kai Hu and Kaige Gao and Kang Guan and Kexin Huang and Kuai Yu and Lean Wang and Lecong Zhang and Lei Xu and Leyi Xia and Liang Zhao and Litong Wang and Liyue Zhang and Meng Li and Miaojun Wang and Mingchuan Zhang and Minghua Zhang and Minghui Tang and Mingming Li and Ning Tian and Panpan Huang and Peiyi Wang and Peng Zhang and Qiancheng Wang and Qihao Zhu and Qinyu Chen and Qiushi Du and R. J. Chen and R. L. Jin and Ruiqi Ge and Ruisong Zhang and Ruizhe Pan and Runji Wang and Runxin Xu and Ruoyu Zhang and Ruyi Chen and S. S. Li and Shanghao Lu and Shangyan Zhou and Shanhuang Chen and Shaoqing Wu and Shengfeng Ye and Shengfeng Ye and Shirong Ma and Shiyu Wang and Shuang Zhou and Shuiping Yu and Shunfeng Zhou and Shuting Pan and T. Wang and Tao Yun and Tian Pei and Tianyu Sun and W. L. Xiao and Wangding Zeng and Wanjia Zhao and Wei An and Wen Liu and Wenfeng Liang and Wenjun Gao and Wenqin Yu and Wentao Zhang and X. Q. Li and Xiangyue Jin and Xianzu Wang and Xiao Bi and Xiaodong Liu and Xiaohan Wang and Xiaojin Shen and Xiaokang Chen and Xiaokang Zhang and Xiaosha Chen and Xiaotao Nie and Xiaowen Sun and Xiaoxiang Wang and Xin Cheng and Xin Liu and Xin Xie and Xingchao Liu and Xingkai Yu and Xinnan Song and Xinxia Shan and Xinyi Zhou and Xinyu Yang and Xinyuan Li and Xuecheng Su and Xuheng Lin and Y. K. Li and Y. Q. Wang and Y. X. Wei and Y. X. Zhu and Yang Zhang and Yanhong Xu and Yanhong Xu and Yanping Huang and Yao Li and Yao Zhao and Yaofeng Sun and Yaohui Li and Yaohui Wang and Yi Yu and Yi Zheng and Yichao Zhang and Yifan Shi and Yiliang Xiong and Ying He and Ying Tang and Yishi Piao and Yisong Wang and Yixuan Tan and Yiyang Ma and Yiyuan Liu and Yongqiang Guo and Yu Wu and Yuan Ou and Yuchen Zhu and Yuduan Wang and Yue Gong and Yuheng Zou and Yujia He and Yukun Zha and Yunfan Xiong and Yunxian Ma and Yuting Yan and Yuxiang Luo and Yuxiang You and Yuxuan Liu and Yuyang Zhou and Z. F. Wu and Z. Z. Ren and Zehui Ren and Zhangli Sha and Zhe Fu and Zhean Xu and Zhen Huang and Zhen Zhang and Zhenda Xie and Zhengyan Zhang and Zhewen Hao and Zhibin Gou and Zhicheng Ma and Zhigang Yan and Zhihong Shao and Zhipeng Xu and Zhiyu Wu and Zhongyu Zhang and Zhuoshu Li and Zihui Gu and Zijia Zhu and Zijun Liu and Zilin Li and Ziwei Xie and Ziyang Song and Ziyi Gao and Zizheng Pan},
327
  year={2024},
328
  eprint={2412.19437},
329
  archivePrefix={arXiv},
README_WEIGHTS.md CHANGED
@@ -18,7 +18,7 @@ The DeepSeek-V3 weight file consists of two main components: **Main Model Weight
18
  - Input/output embedding layers and a complete set of 61 Transformer hidden layers.
19
  - **Parameter Count**:
20
  - Total parameters: **671B**
21
- - Activation parameters: **36.6B** (including 0.9B for the output Head).
22
 
23
  #### Structural Details
24
 
@@ -35,8 +35,8 @@ The DeepSeek-V3 weight file consists of two main components: **Main Model Weight
35
  - **Composition**:
36
  - Additional MTP Modules defined by the `num_nextn_predict_layers` field. In this model, the value is set to 1.
37
  - **Parameter Count**:
38
- - Parameters: **11.5B unique parameters** (excluding the shared 0.9B Embedding and 0.9B output Head).
39
- - Activation parameters: **1.5B** (including 0.9B for the output Head).
40
 
41
  #### Structural Details
42
 
 
18
  - Input/output embedding layers and a complete set of 61 Transformer hidden layers.
19
  - **Parameter Count**:
20
  - Total parameters: **671B**
21
+ - Activation parameters: **36.7B** (including 0.9B for Embedding and 0.9B for the output Head).
22
 
23
  #### Structural Details
24
 
 
35
  - **Composition**:
36
  - Additional MTP Modules defined by the `num_nextn_predict_layers` field. In this model, the value is set to 1.
37
  - **Parameter Count**:
38
+ - Parameters: **11.5B unique parameters** (excluding the shared 0.9B Embedding and 0.9B output Head).
39
+ - Activation parameters: **2.4B** (including the shared 0.9B Embedding and 0.9B output Head).
40
 
41
  #### Structural Details
42
 
config.json CHANGED
@@ -9,6 +9,7 @@
9
  "AutoModel": "modeling_deepseek.DeepseekV3Model",
10
  "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
11
  },
 
12
  "bos_token_id": 0,
13
  "eos_token_id": 1,
14
  "ep_size": 1,
@@ -31,6 +32,7 @@
31
  "num_hidden_layers": 61,
32
  "num_key_value_heads": 128,
33
  "num_nextn_predict_layers": 1,
 
34
  "q_lora_rank": 1536,
35
  "qk_nope_head_dim": 128,
36
  "qk_rope_head_dim": 64,
@@ -56,6 +58,7 @@
56
  "rope_theta": 10000,
57
  "routed_scaling_factor": 2.5,
58
  "scoring_func": "sigmoid",
 
59
  "tie_word_embeddings": false,
60
  "topk_group": 4,
61
  "topk_method": "noaux_tc",
 
9
  "AutoModel": "modeling_deepseek.DeepseekV3Model",
10
  "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
11
  },
12
+ "aux_loss_alpha": 0.001,
13
  "bos_token_id": 0,
14
  "eos_token_id": 1,
15
  "ep_size": 1,
 
32
  "num_hidden_layers": 61,
33
  "num_key_value_heads": 128,
34
  "num_nextn_predict_layers": 1,
35
+ "pretraining_tp": 1,
36
  "q_lora_rank": 1536,
37
  "qk_nope_head_dim": 128,
38
  "qk_rope_head_dim": 64,
 
58
  "rope_theta": 10000,
59
  "routed_scaling_factor": 2.5,
60
  "scoring_func": "sigmoid",
61
+ "seq_aux": true,
62
  "tie_word_embeddings": false,
63
  "topk_group": 4,
64
  "topk_method": "noaux_tc",
configuration_deepseek.py CHANGED
@@ -82,6 +82,11 @@ class DeepseekV3Config(PretrainedConfig):
82
  Beginning of stream token id.
83
  eos_token_id (`int`, *optional*, defaults to 2):
84
  End of stream token id.
 
 
 
 
 
85
  tie_word_embeddings (`bool`, *optional*, defaults to `False`):
86
  Whether to tie weight embeddings
87
  rope_theta (`float`, *optional*, defaults to 10000.0):
@@ -136,6 +141,8 @@ class DeepseekV3Config(PretrainedConfig):
136
  first_k_dense_replace = 3,
137
  norm_topk_prob = True,
138
  scoring_func = 'sigmoid',
 
 
139
  hidden_act="silu",
140
  max_position_embeddings=4096,
141
  initializer_range=0.02,
@@ -144,6 +151,7 @@ class DeepseekV3Config(PretrainedConfig):
144
  pad_token_id=None,
145
  bos_token_id=0,
146
  eos_token_id=1,
 
147
  tie_word_embeddings=False,
148
  rope_theta=10000.0,
149
  rope_scaling=None,
@@ -176,6 +184,8 @@ class DeepseekV3Config(PretrainedConfig):
176
  self.first_k_dense_replace = first_k_dense_replace
177
  self.norm_topk_prob = norm_topk_prob
178
  self.scoring_func = scoring_func
 
 
179
  # for backward compatibility
180
  if num_key_value_heads is None:
181
  num_key_value_heads = num_attention_heads
@@ -184,6 +194,7 @@ class DeepseekV3Config(PretrainedConfig):
184
  self.hidden_act = hidden_act
185
  self.initializer_range = initializer_range
186
  self.rms_norm_eps = rms_norm_eps
 
187
  self.use_cache = use_cache
188
  self.rope_theta = rope_theta
189
  self.rope_scaling = rope_scaling
 
82
  Beginning of stream token id.
83
  eos_token_id (`int`, *optional*, defaults to 2):
84
  End of stream token id.
85
+ pretraining_tp (`int`, *optional*, defaults to 1):
86
+ Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
87
+ document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
88
+ necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
89
+ issue](https://github.com/pytorch/pytorch/issues/76232).
90
  tie_word_embeddings (`bool`, *optional*, defaults to `False`):
91
  Whether to tie weight embeddings
92
  rope_theta (`float`, *optional*, defaults to 10000.0):
 
141
  first_k_dense_replace = 3,
142
  norm_topk_prob = True,
143
  scoring_func = 'sigmoid',
144
+ aux_loss_alpha = 0.001,
145
+ seq_aux = True,
146
  hidden_act="silu",
147
  max_position_embeddings=4096,
148
  initializer_range=0.02,
 
151
  pad_token_id=None,
152
  bos_token_id=0,
153
  eos_token_id=1,
154
+ pretraining_tp=1,
155
  tie_word_embeddings=False,
156
  rope_theta=10000.0,
157
  rope_scaling=None,
 
184
  self.first_k_dense_replace = first_k_dense_replace
185
  self.norm_topk_prob = norm_topk_prob
186
  self.scoring_func = scoring_func
187
+ self.aux_loss_alpha = aux_loss_alpha
188
+ self.seq_aux = seq_aux
189
  # for backward compatibility
190
  if num_key_value_heads is None:
191
  num_key_value_heads = num_attention_heads
 
194
  self.hidden_act = hidden_act
195
  self.initializer_range = initializer_range
196
  self.rms_norm_eps = rms_norm_eps
197
+ self.pretraining_tp = pretraining_tp
198
  self.use_cache = use_cache
199
  self.rope_theta = rope_theta
200
  self.rope_scaling = rope_scaling
modeling_deepseek.py CHANGED
@@ -398,6 +398,7 @@ class MoEGate(nn.Module):
398
  self.n_routed_experts = config.n_routed_experts
399
  self.routed_scaling_factor = config.routed_scaling_factor
400
  self.scoring_func = config.scoring_func
 
401
  self.topk_method = config.topk_method
402
  self.n_group = config.n_group
403
  self.topk_group = config.topk_group
@@ -454,7 +455,7 @@ class MoEGate(nn.Module):
454
  )
455
  .reshape(bsz * seq_len, -1)
456
  ) # [n, e]
457
- tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), float("-inf")) # [n, e]
458
  _, topk_idx = torch.topk(
459
  tmp_scores, k=self.top_k, dim=-1, sorted=False
460
  )
 
398
  self.n_routed_experts = config.n_routed_experts
399
  self.routed_scaling_factor = config.routed_scaling_factor
400
  self.scoring_func = config.scoring_func
401
+ self.seq_aux = config.seq_aux
402
  self.topk_method = config.topk_method
403
  self.n_group = config.n_group
404
  self.topk_group = config.topk_group
 
455
  )
456
  .reshape(bsz * seq_len, -1)
457
  ) # [n, e]
458
+ tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), 0.0) # [n, e]
459
  _, topk_idx = torch.topk(
460
  tmp_scores, k=self.top_k, dim=-1, sorted=False
461
  )