Upload config.json with huggingface_hub
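The commit message says the file was pushed with `huggingface_hub`. A minimal sketch of such an upload, assuming a logged-in client and a placeholder `repo_id` (the repo name is not taken from this commit):

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes prior authentication, e.g. `huggingface-cli login`
api.upload_file(
    path_or_fileobj="config.json",       # local file to push
    path_in_repo="config.json",          # destination path inside the repo
    repo_id="your-username/your-model",  # placeholder, not from this commit
    commit_message="Upload config.json with huggingface_hub",
)
```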
config.json  ADDED  +51 -0
@@ -0,0 +1,51 @@
+{
+    "_name_or_path": "facebook/nllb-200-distilled-600M",
+    "architectures": [
+        "M2M100ForConditionalGeneration"
+    ],
+    "model_type": "m2m_100",
+    "quantization_config": {
+        "method": "EfQAT-CWPN",
+        "bits": 4,
+        "adaptive_bits": true,
+        "critical_layers_bits": 8,
+        "normal_layers_bits": 4,
+        "compression_ratio": 6.3,
+        "precision_retention": 0.807
+    },
+    "efqat_config": {
+        "update_ratio": 0.08,
+        "update_frequency": 30,
+        "progressive_stages": 3,
+        "critical_layer_patterns": [
+            "self_attn.q_proj",
+            "self_attn.k_proj",
+            "self_attn.v_proj",
+            "encoder_attn.q_proj",
+            "encoder_attn.k_proj",
+            "encoder_attn.v_proj",
+            "lm_head"
+        ]
+    },
+    "torch_dtype": "float32",
+    "transformers_version": "4.36.0",
+    "use_cache": true,
+    "vocab_size": 256206,
+    "d_model": 1024,
+    "encoder_layers": 12,
+    "decoder_layers": 12,
+    "encoder_attention_heads": 16,
+    "decoder_attention_heads": 16,
+    "decoder_ffn_dim": 4096,
+    "encoder_ffn_dim": 4096,
+    "activation_function": "relu",
+    "dropout": 0.1,
+    "attention_dropout": 0.1,
+    "activation_dropout": 0.0,
+    "init_std": 0.02,
+    "decoder_start_token_id": 2,
+    "scale_embedding": true,
+    "pad_token_id": 1,
+    "bos_token_id": 0,
+    "eos_token_id": 2
+}
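The `quantization_config` and `efqat_config` blocks are custom keys that stock `transformers` does not interpret, so a consumer has to read them from the raw JSON. A sketch of how the adaptive bit widths could be resolved per weight, with a placeholder `repo_id`; matching parameter names against `critical_layer_patterns` by substring is an assumption about how the scheme is meant to be applied:

```python
import json
from huggingface_hub import hf_hub_download

# Fetch the raw config.json from the Hub (repo_id is a placeholder).
config_path = hf_hub_download(repo_id="your-username/your-model",
                              filename="config.json")
with open(config_path) as f:
    cfg = json.load(f)

qcfg = cfg["quantization_config"]
patterns = cfg["efqat_config"]["critical_layer_patterns"]

def bits_for(param_name: str) -> int:
    """Critical layers (attention projections and lm_head) keep 8 bits;
    all remaining weights drop to 4 bits, per the config above."""
    if any(p in param_name for p in patterns):
        return qcfg["critical_layers_bits"]  # 8
    return qcfg["normal_layers_bits"]        # 4

print(bits_for("model.encoder.layers.0.self_attn.q_proj.weight"))  # -> 8
print(bits_for("model.encoder.layers.0.fc1.weight"))               # -> 4
```

The stated `compression_ratio` of 6.3 is roughly consistent with this split: 4-bit weights compress 8x relative to float32 and the 8-bit critical projections 4x, so a mix lands between the two.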