DuongTrongChi committed on
Commit 6909dca · verified · 1 parent: d8a7a33

Upload Qwen2ForCausalLM

Files changed (2)
  1. config.json +1 -16
  2. model.safetensors +2 -2
config.json CHANGED
@@ -17,26 +17,11 @@
   "num_hidden_layers": 24,
   "num_key_value_heads": 16,
   "pad_token_id": 151645,
-  "quantization_config": {
-    "_load_in_4bit": true,
-    "_load_in_8bit": false,
-    "bnb_4bit_compute_dtype": "float16",
-    "bnb_4bit_quant_storage": "uint8",
-    "bnb_4bit_quant_type": "nf4",
-    "bnb_4bit_use_double_quant": true,
-    "llm_int8_enable_fp32_cpu_offload": false,
-    "llm_int8_has_fp16_weight": false,
-    "llm_int8_skip_modules": null,
-    "llm_int8_threshold": 6.0,
-    "load_in_4bit": true,
-    "load_in_8bit": false,
-    "quant_method": "bitsandbytes"
-  },
   "rms_norm_eps": 1e-06,
   "rope_theta": 1000000.0,
   "sliding_window": 32768,
   "tie_word_embeddings": false,
-  "torch_dtype": "float32",
+  "torch_dtype": "float16",
   "transformers_version": "4.40.2",
   "use_cache": true,
   "use_sliding_window": false,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:26f0354da4f18b31b50899f922482ede471ac071d0d665a4e2ed6b677a4cf4ff
-  size 3134844532
+  oid sha256:3890b526292709f739445489bd50d6891f4685ee4a241e3728291e0c5af9d89d
+  size 3671314720
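The file is a Git LFS pointer: the actual weights are addressed by their sha256 (oid) and byte size, and the new blob grows from about 3.13 GB to about 3.67 GB, consistent with storing unquantized float16 weights. A quick integrity-check sketch, assuming the weights have been downloaded to a local model.safetensors:

import hashlib

expected_oid = "3890b526292709f739445489bd50d6891f4685ee4a241e3728291e0c5af9d89d"
expected_size = 3671314720

h = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:  # local path is an assumption
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == expected_size, "size mismatch with LFS pointer"
assert h.hexdigest() == expected_oid, "sha256 mismatch with LFS pointer"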