import glob
import re
import shutil
import sys

import accelerate
import torch
from safetensors import safe_open

from configuration_bailing_shared_moe_v2 import BailingSharedMoeV2Config
from configuration_bailing_moe_v2 import BailingMoeV2Config
from modeling_bailing_moe_v2 import BailingMoeV2ForCausalLM
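
# Convert a BailingSharedMoeV2 checkpoint (fused per-layer expert tensors) into
# the standard BailingMoeV2 layout with separate per-expert gate/up/down weights.
# Assumed invocation (script name is illustrative):
#   python convert_shared_moe_to_standard.py <input_model_dir> <output_model_dir>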
input_model = sys.argv[1]
output_model_path = sys.argv[2]

auto_map = {
    "AutoConfig": "configuration_bailing_moe_v2.BailingMoeV2Config",
    "AutoModel": "modeling_bailing_moe_v2.BailingMoeV2Model",
    "AutoModelForCausalLM": "modeling_bailing_moe_v2.BailingMoeV2ForCausalLM",
}
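
# Build the standard config by carrying over every field from the shared-MoE
# config; shared_expert_intermediate_size is explicitly unset, while
# moe_shared_expert_intermediate_size keeps the shared-expert width.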
cfg_shared_moe = BailingSharedMoeV2Config.from_pretrained(input_model)
cfg_standard_moe = BailingMoeV2Config(
    auto_map=auto_map,
    vocab_size=cfg_shared_moe.vocab_size,
    hidden_size=cfg_shared_moe.hidden_size,
    intermediate_size=cfg_shared_moe.intermediate_size,
    num_hidden_layers=cfg_shared_moe.num_hidden_layers,
    num_attention_heads=cfg_shared_moe.num_attention_heads,
    num_key_value_heads=cfg_shared_moe.num_key_value_heads,
    hidden_act=cfg_shared_moe.hidden_act,
    max_position_embeddings=cfg_shared_moe.max_position_embeddings,
    initializer_range=cfg_shared_moe.initializer_range,
    rms_norm_eps=cfg_shared_moe.rms_norm_eps,
    use_cache=cfg_shared_moe.use_cache,
    tie_word_embeddings=cfg_shared_moe.tie_word_embeddings,
    rope_theta=cfg_shared_moe.rope_theta,
    rope_scaling=cfg_shared_moe.rope_scaling,
    max_window_layers=cfg_shared_moe.max_window_layers,
    attention_dropout=cfg_shared_moe.attention_dropout,
    moe_intermediate_size=cfg_shared_moe.moe_intermediate_size,
    num_experts_per_tok=cfg_shared_moe.num_experts_per_tok,
    num_experts=cfg_shared_moe.num_experts,
    num_shared_experts=cfg_shared_moe.num_shared_experts,
    norm_topk_prob=cfg_shared_moe.norm_topk_prob,
    output_router_logits=cfg_shared_moe.output_router_logits,
    shared_expert_intermediate_size=None,
    head_dim=cfg_shared_moe.head_dim,
    embedding_dropout=cfg_shared_moe.embedding_dropout,
    eos_token_id=cfg_shared_moe.eos_token_id,
    first_k_dense_replace=cfg_shared_moe.first_k_dense_replace,
    output_dropout=cfg_shared_moe.output_dropout,
    pad_token_id=cfg_shared_moe.pad_token_id,
    torch_dtype=cfg_shared_moe.torch_dtype,
    use_bias=cfg_shared_moe.use_bias,
    use_qkv_bias=cfg_shared_moe.use_qkv_bias,
    moe_router_enable_expert_bias=cfg_shared_moe.moe_router_enable_expert_bias,
    routed_scaling_factor=cfg_shared_moe.routed_scaling_factor,
    n_group=cfg_shared_moe.n_group,
    topk_group=cfg_shared_moe.topk_group,
    use_qk_norm=cfg_shared_moe.use_qk_norm,
    moe_shared_expert_intermediate_size=cfg_shared_moe.moe_shared_expert_intermediate_size,
    num_nextn_predict_layers=cfg_shared_moe.num_nextn_predict_layers,
    score_function=cfg_shared_moe.score_function,
    router_dtype=cfg_shared_moe.router_dtype,
    use_rmsnorm=cfg_shared_moe.use_rmsnorm,
    partial_rotary_factor=cfg_shared_moe.partial_rotary_factor,
)
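
# Instantiate the target model on the meta device so no real memory is
# allocated up front; the converted tensors are attached later via
# load_state_dict(..., assign=True).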
with accelerate.init_empty_weights():
    model_standard_moe = BailingMoeV2ForCausalLM(cfg_standard_moe)
model_standard_moe = model_standard_moe.to(torch.bfloat16)
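
# new_state_dict will hold the remapped weights; first, read every tensor
# from the input checkpoint's sharded safetensors files.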
new_state_dict = {}
pattern = f"{input_model}/model-*-of-*.safetensors"
files = sorted(glob.glob(pattern))
if len(files) == 0:
    raise FileNotFoundError(f"no safetensors shards found matching {pattern}")
tensors = {}
for file_path in files:
    print(f"processing {file_path}")
    with safe_open(file_path, framework="pt", device="cpu") as f:
        for key in f.keys():
            tensors[key] = f.get_tensor(key)
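
# Remap the fused shared-MoE expert tensors onto per-expert module weights.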
for key in tensors:
    if "moe_mlp" not in key:
        # Non-expert tensors keep their original names.
        new_state_dict[key] = tensors[key]
    elif "moe_mlp.output_experts" in key:
        # Stacked down-projection: unbind the leading expert dimension into
        # one down_proj weight per expert.
        layer_num = int(re.search(r"\d+", key).group())
        for i, tensor in enumerate(torch.unbind(tensors[key])):
            new_state_dict[
                f"model.layers.{layer_num}.mlp.experts.{i}.down_proj.weight"
            ] = tensor.contiguous()
    elif "moe_mlp.experts" in key:
        # Fused projection: each expert's slice is split in half along dim 0
        # into its up_proj and gate_proj weights.
        layer_num = int(re.search(r"\d+", key).group())
        for i, tensor in enumerate(torch.unbind(tensors[key])):
            (
                new_state_dict[
                    f"model.layers.{layer_num}.mlp.experts.{i}.up_proj.weight"
                ],
                new_state_dict[
                    f"model.layers.{layer_num}.mlp.experts.{i}.gate_proj.weight"
                ],
            ) = torch.chunk(tensor, 2, dim=0)
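
# Load the remapped weights into the empty model, then write the converted
# checkpoint, config, modeling code, and tokenizer files so the output
# directory is self-contained.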
model_standard_moe.load_state_dict(new_state_dict, strict=True, assign=True)
model_standard_moe.save_pretrained(output_model_path)
cfg_standard_moe.save_pretrained(output_model_path)
shutil.copy(
    "modeling_bailing_moe_v2.py",
    f"{output_model_path}/modeling_bailing_moe_v2.py",
)
shutil.copy(
    "configuration_bailing_moe_v2.py",
    f"{output_model_path}/configuration_bailing_moe_v2.py",
)
for name in ["special_tokens_map.json", "tokenizer_config.json", "tokenizer.json"]:
    shutil.copy(f"{input_model}/{name}", f"{output_model_path}/{name}")