guinansu committed (verified)
Commit 86524fa · 1 Parent(s): 3ceac2d

Upload folder using huggingface_hub
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[PAD]": 32000
+ }
config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "_name_or_path": "/fast/gsu/downloadmodels/7b/llama-2-7b-hf",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 4096,
+   "mlp_bias": false,
+   "model_type": "customllama",
+   "auto_map": {
+     "AutoConfig": "configuration_customllama.CustomLlamaConfig",
+     "AutoModelForCausalLM": "modeling_customllama.CustomLlamaForCausalLM"
+   },
+   "num_attention_heads": 32,
+   "num_hidden_layers": 23,
+   "num_key_value_heads": 32,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "scales": [
+     1.0,
+     1.0,
+     1.0278732834267,
+     1.0007125033084,
+     1.0372524019845,
+     1.0777305076805,
+     1.1949117035846,
+     0.8763010948154,
+     1.2810249285471,
+     0.8625149492519,
+     0.9961008444105,
+     1.0,
+     1.0,
+     1.0,
+     1.0,
+     1.1784337435162,
+     1.0,
+     1.0,
+     1.0,
+     1.0,
+     1.0,
+     1.0,
+     1.0
+   ],
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.2",
+   "use_cache": true,
+   "vocab_size": 32001
+ }
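Because `model_type` is `customllama` and `auto_map` points `AutoConfig`/`AutoModelForCausalLM` at the bundled `configuration_customllama.py` and `modeling_customllama.py`, loading this checkpoint through the Auto classes requires `trust_remote_code=True`. A minimal sketch of reading the config (the repo id below is a placeholder, not taken from this commit):

```python
from transformers import AutoConfig

# Placeholder repo id; substitute the actual Hub path of this upload.
repo_id = "guinansu/<this-repo>"

# trust_remote_code=True lets AutoConfig resolve the auto_map entry to
# configuration_customllama.CustomLlamaConfig shipped inside the repo.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)

print(config.model_type)         # "customllama"
print(config.num_hidden_layers)  # 23
print(config.scales)             # per-layer output scales, 23 entries
```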
configuration_customllama.py ADDED
@@ -0,0 +1,82 @@
+ from transformers import PretrainedConfig
+ from typing import List
+ from transformers.modeling_rope_utils import rope_config_validation
+
+
+ class CustomLlamaConfig(PretrainedConfig):
+     model_type = "customllama"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     # Default tensor parallel plan for base model `LlamaModel`
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",
+         "layers.*.self_attn.k_proj": "colwise",
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         mlp_bias=False,
+         head_dim=None,
+         scales=None,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         self.mlp_bias = mlp_bias
+         self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
+         self.scales = scales
+
+         if self.rope_scaling is not None and "type" in self.rope_scaling:
+             self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+         rope_config_validation(self)
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
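The only non-standard field this config adds over the stock Llama config is `scales`, one float per decoder layer; `head_dim` falls back to `hidden_size // num_attention_heads` when unset. A minimal local sketch, assuming `configuration_customllama.py` is importable from the working directory:

```python
from configuration_customllama import CustomLlamaConfig

# Construct a config matching the shapes in config.json above.
cfg = CustomLlamaConfig(
    num_hidden_layers=23,
    max_position_embeddings=4096,
    vocab_size=32001,
    scales=[1.0] * 23,  # one multiplicative scale per decoder layer
)

print(cfg.model_type)   # "customllama"
print(cfg.head_dim)     # 4096 // 32 = 128
print(len(cfg.scales))  # 23
```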
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0b526832ac4c4d2679491edcc73010c188542ac5fdf720ae97b62df98c5d01e
+ size 4962063400
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6413fedb9ef665f082890c84d80d16e78877add6347c13f5bec1405f320c3830
+ size 4991441304
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55ffe38176b97b0a6b8fb66b73ca57cd3716b732cab6bd8bd75630e3decdff2d
+ size 4857206904
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d34571c912d77cad5637f1423382f695484625b9bf841b8a325aabdbc03126b5
+ size 4857206888
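The four `.safetensors` entries above are Git LFS pointer files: each records only the SHA-256 and byte size of a shard that LFS stores out of band. A minimal sketch for checking a downloaded shard against its pointer (the local path is a placeholder):

```python
import hashlib
import os


def verify_lfs_pointer(shard_path: str, expected_sha256: str, expected_size: int) -> bool:
    """Compare a downloaded shard's byte size and SHA-256 against its LFS pointer."""
    if os.path.getsize(shard_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(shard_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256


# Values copied from the pointer for model-00001-of-00004.safetensors above;
# the local path is a placeholder for wherever the shard was downloaded.
ok = verify_lfs_pointer(
    "model-00001-of-00004.safetensors",
    "e0b526832ac4c4d2679491edcc73010c188542ac5fdf720ae97b62df98c5d01e",
    4962063400,
)
print(ok)
```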
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"total_size": 19667894272}, "weight_map": {"model.embed_tokens.weight": "model-00001-of-00004.safetensors", "lm_head.weight": "model-00001-of-00004.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", "model.layers.4.self_attn.q_proj.weight": 
"model-00002-of-00004.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", 
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.12.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", 
"model.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00004-of-00004.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00004-of-00004.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", "model.layers.18.input_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00004-of-00004.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00004-of-00004.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", "model.layers.19.input_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00004-of-00004.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00004-of-00004.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", 
"model.layers.19.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.input_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.input_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", "model.layers.22.input_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00004-of-00004.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00004-of-00004.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00004-of-00004.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00004-of-00004.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00004-of-00004.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00004-of-00004.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00004-of-00004.safetensors", "model.norm.weight": "model-00004-of-00004.safetensors"}}
modeling_customllama.py ADDED
@@ -0,0 +1,49 @@
+ from transformers.models.llama.modeling_llama import LlamaModel, LlamaDecoderLayer
+ from transformers import LlamaConfig, LlamaForCausalLM
+ import torch.nn as nn
+
+ from .configuration_customllama import CustomLlamaConfig
+
+
+ class CustomLlamaDecoderLayer(LlamaDecoderLayer):
+     def __init__(self, config, layer_idx, scale=1.0):
+         super().__init__(config, layer_idx)
+         self.scale = scale
+
+     def forward(self, hidden_states, attention_mask=None, position_ids=None, past_key_value=None, output_attentions=False, use_cache=True, cache_position=None, position_embeddings=None, **kwargs):
+         outputs = super().forward(
+             hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_value=past_key_value,
+             output_attentions=output_attentions,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             position_embeddings=position_embeddings
+         )
+
+         hidden_states = outputs[0]
+         hidden_states = hidden_states * self.scale
+         return (hidden_states, *outputs[1:])
+
+
+ class CustomLlama(LlamaModel):
+     config_class = CustomLlamaConfig
+     def __init__(self, config):
+         super().__init__(config)
+         self.scales = config.scales
+         assert len(self.scales) == config.num_hidden_layers
+
+         self.layers = nn.ModuleList([
+             CustomLlamaDecoderLayer(config, layer_idx=i, scale=self.scales[i]) for i in range(config.num_hidden_layers)
+         ])
+
+     def forward(self, *args, **kwargs):
+         return super().forward(*args, **kwargs)
+
+
+ class CustomLlamaForCausalLM(LlamaForCausalLM):
+     config_class = CustomLlamaConfig
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = CustomLlama(config)
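`CustomLlamaDecoderLayer` multiplies each layer's hidden-state output by its entry in `config.scales`; everything else is stock Llama. End-to-end use goes through the Auto classes with remote code enabled, which resolves the `auto_map` entries to this file. A minimal sketch (repo id and prompt are placeholders):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "guinansu/<this-repo>"  # placeholder for the actual Hub path

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# trust_remote_code=True loads CustomLlamaForCausalLM from modeling_customllama.py.
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float32,  # matches "torch_dtype" in config.json
    trust_remote_code=True,
)

inputs = tokenizer("The capital of France is", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```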
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
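The tokenizer is a standard `LlamaTokenizer` with `[PAD]` appended at id 32000, which is why `config.json` reports `vocab_size: 32001` (the base Llama-2 vocabulary plus one token). A minimal sketch checking that alignment (the repo id is a placeholder):

```python
from transformers import AutoTokenizer

repo_id = "guinansu/<this-repo>"  # placeholder for the actual Hub path
tokenizer = AutoTokenizer.from_pretrained(repo_id)

print(tokenizer.pad_token)                       # "[PAD]"
print(tokenizer.convert_tokens_to_ids("[PAD]"))  # 32000
print(len(tokenizer))                            # 32001, matches config vocab_size

# If instead starting from the base llama-2-7b-hf checkpoint (32000 tokens),
# the embeddings would need resizing after adding [PAD]:
# model.resize_token_embeddings(len(tokenizer))
```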