[email protected] committed on
Commit
110fd89
0 Parent(s):

Add hf weights

Files changed (2)
  1. config.json +99 -0
  2. tokenizer_config.json +11 -0
config.json ADDED
@@ -0,0 +1,99 @@
+ {
+   "model_type": "blt",
+   "vocab_size": 260,
+   "max_position_embeddings": 4096,
+   "patch_in_forward": true,
+   "realtime_patching": true,
+   "patching_mode": "entropy",
+   "patch_size": 4,
+   "patching_threshold": 1.335442066192627,
+   "patching_threshold_add": null,
+   "max_patch_length": null,
+   "patching_batch_size": 1,
+   "patching_device": "cuda",
+   "monotonicity": false,
+   "cross_attn_k": 2,
+   "encoder_hash_byte_group_size": [
+     3,
+     4,
+     5,
+     6,
+     7,
+     8
+   ],
+   "encoder_hash_byte_group_vocab": 500002,
+   "encoder_hash_byte_group_nb_functions": 1,
+   "pm_size": 0,
+   "patcher_config": {
+     "vocab_size": 260,
+     "hidden_size": 768,
+     "num_hidden_layers": 14,
+     "num_attention_heads": 12,
+     "num_key_value_heads": null,
+     "max_position_embeddings": 8192,
+     "norm_eps": 1e-05,
+     "dropout": 0.0,
+     "rope_theta": 10000.0,
+     "attn_impl": "xformers",
+     "attn_bias_type": "local_block_causal",
+     "intermediate_size": 2048
+   },
+   "encoder_config": {
+     "vocab_size": 260,
+     "cross_attn_all_layers": false,
+     "cross_attn_k": 2,
+     "hidden_size_global": 2048,
+     "pm_size": 0,
+     "hidden_size": 1024,
+     "num_attention_heads": 16,
+     "num_key_value_heads": null,
+     "num_hidden_layers": 1,
+     "norm_eps": 1e-05,
+     "dropout": 0.0,
+     "max_position_embeddings": 24576,
+     "rope_theta": 500000.0,
+     "rope_scaling": {
+       "rope_type": "default"
+     },
+     "hidden_act": "silu",
+     "_attn_implementation": "sdpa",
+     "intermediate_size": 2816
+   },
+   "decoder_config": {
+     "vocab_size": 260,
+     "cross_attn_all_layers": true,
+     "cross_attn_k": 2,
+     "hidden_size_global": 2048,
+     "hidden_size": 1024,
+     "num_attention_heads": 16,
+     "num_key_value_heads": null,
+     "num_hidden_layers": 9,
+     "norm_eps": 1e-05,
+     "dropout": 0.0,
+     "max_position_embeddings": 24576,
+     "rope_theta": 500000.0,
+     "rope_scaling": {
+       "rope_type": "default"
+     },
+     "hidden_act": "silu",
+     "_attn_implementation": "sdpa",
+     "intermediate_size": 2816
+   },
+   "global_config": {
+     "hidden_size": 2048,
+     "num_attention_heads": 16,
+     "num_key_value_heads": null,
+     "num_hidden_layers": 25,
+     "norm_eps": 1e-05,
+     "dropout": 0.0,
+     "max_position_embeddings": 4096,
+     "rope_theta": 500000.0,
+     "rope_scaling": {
+       "rope_type": "default"
+     },
+     "hidden_act": "silu",
+     "_attn_implementation": "sdpa",
+     "intermediate_size": 5632
+   },
+   "tie_word_embeddings": false
+ }
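
The config describes a BLT ("blt") style byte-level model with four nested sub-configs: an entropy patcher, a local byte encoder, a local byte decoder, and a global transformer. A minimal sketch for sanity-checking the nested sizes after cloning the repo, assuming config.json sits in the working directory (the local path is an assumption, not part of this commit):

```python
import json

# Read the raw config added in this commit.
with open("config.json") as f:
    cfg = json.load(f)

print("model_type:", cfg["model_type"], "| byte vocab:", cfg["vocab_size"])

# Summarize each transformer stack declared in the nested configs.
for name in ("patcher_config", "encoder_config", "decoder_config", "global_config"):
    sub = cfg[name]
    print(
        f"{name}: hidden={sub['hidden_size']}, "
        f"layers={sub['num_hidden_layers']}, heads={sub['num_attention_heads']}"
    )
```
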
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "tokenizer_class": "BltTokenizer",
+   "vocab_size": 260,
+   "model_max_length": 1024,
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
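
If the installed transformers build recognizes the "blt" model type, these two files are what AutoConfig and AutoTokenizer read from the Hub. A hedged sketch only: the repo id below is a placeholder, and whether trust_remote_code or a newer transformers release is needed depends on the environment.

```python
from transformers import AutoConfig, AutoTokenizer

repo_id = "<namespace>/<repo>"  # placeholder: the actual repo id is not shown in this commit

# Assumes the installed transformers version knows the "blt" model type;
# otherwise loading may require trust_remote_code=True or a newer release.
config = AutoConfig.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# vocab_size 260 is consistent with 256 raw byte values plus the four
# special tokens (<s>, </s>, <pad>, <unk>) declared in tokenizer_config.json.
print(config.vocab_size, tokenizer.model_max_length)
```
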