Upload folder using huggingface_hub
- chat_template.jinja +6 -0
- config.json +8 -3
- generation_config.json +1 -1
- model-00001-of-00002.safetensors +2 -2
- model-00002-of-00002.safetensors +2 -2
- model.safetensors.index.json +258 -258
- special_tokens_map.json +7 -1
- tokenizer.json +0 -0
- tokenizer_config.json +4 -3
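
The commit title indicates the files were pushed with the huggingface_hub client. As a rough sketch of that workflow (the repo id and local folder name below are placeholders, not taken from this commit):

from huggingface_hub import snapshot_download, upload_folder

REPO_ID = "your-org/your-model"  # placeholder; the target repo is not named in this diff

# Push a local checkpoint folder as a single commit, mirroring the commit message above.
upload_folder(
    repo_id=REPO_ID,
    folder_path="./checkpoint",
    commit_message="Upload folder using huggingface_hub",
)

# Pull the uploaded snapshot back down for local use.
print(snapshot_download(repo_id=REPO_ID))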
chat_template.jinja
ADDED
@@ -0,0 +1,6 @@
+{% for message in messages %}{% if message['role'] == 'user' %}{{'<|user|>
+' + message['content'] + '<|end|>
+'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
+' + message['content'] + '<|end|>
+'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
+' }}{% else %}{{ eos_token }}{% endif %}
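
The added template covers only user and assistant turns: each user message is wrapped as '<|user|>\n...<|end|>\n', each assistant message as '<|assistant|>\n...<|end|>\n', and the rendering ends with '<|assistant|>\n' when add_generation_prompt is set, otherwise with eos_token. A minimal sketch that renders the file with plain Jinja2 to inspect the produced prompt (the conversation and eos value are illustrative):

from jinja2 import Template

template_src = open("chat_template.jinja").read()
messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4"},
    {"role": "user", "content": "And 3 + 3?"},
]
prompt = Template(template_src).render(
    messages=messages,
    add_generation_prompt=True,   # trailing '<|assistant|>\n' so the model answers next
    eos_token="<|endoftext|>",    # matches eos_token in tokenizer_config.json
)
print(prompt)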
config.json
CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "unsloth/Phi-3-medium-4k-instruct",
   "architectures": [
     "MistralForCausalLM"
   ],
@@ -26,7 +25,12 @@
   "bnb_4bit_use_double_quant": true,
   "llm_int8_enable_fp32_cpu_offload": false,
   "llm_int8_has_fp16_weight": false,
-  "llm_int8_skip_modules":
+  "llm_int8_skip_modules": [
+    "lm_head",
+    "multi_modal_projector",
+    "merger",
+    "modality_projection"
+  ],
   "llm_int8_threshold": 6.0,
   "load_in_4bit": true,
   "load_in_8bit": false,
@@ -37,7 +41,8 @@
   "sliding_window": 2048,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
+  "transformers_version": "4.52.4",
+  "unsloth_fixed": true,
   "unsloth_version": "2024.9",
   "use_cache": true,
   "vocab_size": 32064
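
The keys shown here (load_in_4bit, bnb_4bit_use_double_quant, llm_int8_skip_modules, ...) form the bitsandbytes quantization block stored inside config.json, so transformers picks the 4-bit settings up automatically at load time. For illustration only, roughly the same settings could be passed explicitly through BitsAndBytesConfig when loading an unquantized checkpoint; the repo id, quant type, and compute dtype below are assumptions rather than values read from this diff:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",              # assumption; the *.quant_state.bitsandbytes__nf4 index keys suggest NF4
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumption, chosen to match torch_dtype above
    llm_int8_skip_modules=["lm_head", "multi_modal_projector", "merger", "modality_projection"],
)
model = AutoModelForCausalLM.from_pretrained(
    "your-org/your-model",  # placeholder repo id
    quantization_config=bnb_config,
    device_map="auto",
)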
generation_config.json
CHANGED
@@ -8,5 +8,5 @@
   ],
   "max_length": 4096,
   "pad_token_id": 32009,
-  "transformers_version": "4.
+  "transformers_version": "4.52.4"
 }
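
Only the transformers version stamp changes here; the generation defaults themselves (max_length 4096, pad_token_id 32009) are untouched and are loaded automatically with the model. A minimal check, with a placeholder repo id:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("your-org/your-model")  # placeholder repo id
print(gen_cfg.max_length, gen_cfg.pad_token_id)  # expected: 4096 32009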
model-00001-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:48ea427b2e4813a780a8f2d48cce981b63a05e6a9e23f2596b044fa59c5c9cad
+size 4981224634
model-00002-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4a99aab3d6116f716f66760186c1ad07d07da157126654477d64bc2cc81e753b
+size 2708902797
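
Both .safetensors entries are Git LFS pointer files: the repository itself stores only the sha256 digest and byte size, while the shard data lives on the LFS backend. A small sketch for verifying a downloaded shard against its pointer:

import hashlib, os

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

path = "model-00001-of-00002.safetensors"
print(os.path.getsize(path) == 4981224634)  # size from the pointer
print(sha256_of(path) == "48ea427b2e4813a780a8f2d48cce981b63a05e6a9e23f2596b044fa59c5c9cad")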
model.safetensors.index.json
CHANGED
@@ -577,13 +577,13 @@
 "model.layers.2.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
 "model.layers.2.self_attn.v_proj.weight.quant_map": "model-00001-of-00002.safetensors",
 "model.layers.2.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
-"model.layers.20.input_layernorm.weight": "model-
-"model.layers.20.mlp.down_proj.weight": "model-
-"model.layers.20.mlp.down_proj.weight.absmax": "model-
-"model.layers.20.mlp.down_proj.weight.nested_absmax": "model-
-"model.layers.20.mlp.down_proj.weight.nested_quant_map": "model-
-"model.layers.20.mlp.down_proj.weight.quant_map": "model-
-"model.layers.20.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-
+"model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+"model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+"model.layers.20.mlp.down_proj.weight.absmax": "model-00001-of-00002.safetensors",
+"model.layers.20.mlp.down_proj.weight.nested_absmax": "model-00001-of-00002.safetensors",
+"model.layers.20.mlp.down_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
+"model.layers.20.mlp.down_proj.weight.quant_map": "model-00001-of-00002.safetensors",
+"model.layers.20.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
 "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
 "model.layers.20.mlp.gate_proj.weight.absmax": "model-00001-of-00002.safetensors",
 "model.layers.20.mlp.gate_proj.weight.nested_absmax": "model-00001-of-00002.safetensors",
@@ -596,7 +596,7 @@
 "model.layers.20.mlp.up_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
 "model.layers.20.mlp.up_proj.weight.quant_map": "model-00001-of-00002.safetensors",
 "model.layers.20.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
-"model.layers.20.post_attention_layernorm.weight": "model-
+"model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
 "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
 "model.layers.20.self_attn.k_proj.weight.absmax": "model-00001-of-00002.safetensors",
 "model.layers.20.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00002.safetensors",
@@ -621,226 +621,226 @@
 "model.layers.20.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
 "model.layers.20.self_attn.v_proj.weight.quant_map": "model-00001-of-00002.safetensors",
 "model.layers.20.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
 [220 changed entries: every weight_map key for model.layers.21 through model.layers.25 (input_layernorm.weight, post_attention_layernorm.weight, mlp.down_proj/gate_proj/up_proj.weight and self_attn.k/o/q/v_proj.weight, each with its .absmax, .nested_absmax, .nested_quant_map, .quant_map and .quant_state.bitsandbytes__nf4 companions) now points to "model-00001-of-00002.safetensors"]
 "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
 "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
 "model.layers.26.mlp.down_proj.weight.absmax": "model-00002-of-00002.safetensors",
@@ -848,12 +848,12 @@
 "model.layers.26.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00002.safetensors",
 "model.layers.26.mlp.down_proj.weight.quant_map": "model-00002-of-00002.safetensors",
 "model.layers.26.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00002.safetensors",
-"model.layers.26.mlp.gate_proj.weight": "model-
-"model.layers.26.mlp.gate_proj.weight.absmax": "model-
-"model.layers.26.mlp.gate_proj.weight.nested_absmax": "model-
-"model.layers.26.mlp.gate_proj.weight.nested_quant_map": "model-
-"model.layers.26.mlp.gate_proj.weight.quant_map": "model-
-"model.layers.26.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-
+"model.layers.26.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+"model.layers.26.mlp.gate_proj.weight.absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.mlp.gate_proj.weight.nested_absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.mlp.gate_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.mlp.gate_proj.weight.quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
 "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
 "model.layers.26.mlp.up_proj.weight.absmax": "model-00002-of-00002.safetensors",
 "model.layers.26.mlp.up_proj.weight.nested_absmax": "model-00002-of-00002.safetensors",
@@ -861,30 +861,30 @@
 "model.layers.26.mlp.up_proj.weight.quant_map": "model-00002-of-00002.safetensors",
 "model.layers.26.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00002.safetensors",
 "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-"model.layers.26.self_attn.k_proj.weight": "model-
-"model.layers.26.self_attn.k_proj.weight.absmax": "model-
-"model.layers.26.self_attn.k_proj.weight.nested_absmax": "model-
-"model.layers.26.self_attn.k_proj.weight.nested_quant_map": "model-
-"model.layers.26.self_attn.k_proj.weight.quant_map": "model-
-"model.layers.26.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
-"model.layers.26.self_attn.o_proj.weight": "model-
-"model.layers.26.self_attn.o_proj.weight.absmax": "model-
-"model.layers.26.self_attn.o_proj.weight.nested_absmax": "model-
-"model.layers.26.self_attn.o_proj.weight.nested_quant_map": "model-
-"model.layers.26.self_attn.o_proj.weight.quant_map": "model-
-"model.layers.26.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
-"model.layers.26.self_attn.q_proj.weight": "model-
-"model.layers.26.self_attn.q_proj.weight.absmax": "model-
-"model.layers.26.self_attn.q_proj.weight.nested_absmax": "model-
-"model.layers.26.self_attn.q_proj.weight.nested_quant_map": "model-
-"model.layers.26.self_attn.q_proj.weight.quant_map": "model-
-"model.layers.26.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
-"model.layers.26.self_attn.v_proj.weight": "model-
-"model.layers.26.self_attn.v_proj.weight.absmax": "model-
-"model.layers.26.self_attn.v_proj.weight.nested_absmax": "model-
-"model.layers.26.self_attn.v_proj.weight.nested_quant_map": "model-
-"model.layers.26.self_attn.v_proj.weight.quant_map": "model-
-"model.layers.26.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
+"model.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.k_proj.weight.absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.k_proj.weight.quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.o_proj.weight.absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.o_proj.weight.quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.q_proj.weight.absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.q_proj.weight.quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.v_proj.weight.absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.v_proj.weight.quant_map": "model-00001-of-00002.safetensors",
+"model.layers.26.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00002.safetensors",
 "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
 "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
 "model.layers.27.mlp.down_proj.weight.absmax": "model-00002-of-00002.safetensors",
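
The index maps every tensor name, together with its NF4 companions (.absmax, .nested_absmax, .nested_quant_map, .quant_map, .quant_state.bitsandbytes__nf4), to the shard file that stores it; the hunks above reassign the layer 20-26 entries. A quick sketch for inspecting the resulting shard layout:

import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]
print(Counter(weight_map.values()))  # number of entries per shard file
print(weight_map["model.layers.21.mlp.down_proj.weight"])  # expected: "model-00001-of-00002.safetensors"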
special_tokens_map.json
CHANGED
@@ -13,7 +13,13 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token":
+  "pad_token": {
+    "content": "<|placeholder6|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
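
The new entry promotes <|placeholder6|> to a dedicated pad token (generation_config.json already lists pad_token_id 32009), keeping padding distinct from the end-of-text token during batched generation or training. A minimal check, with a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model")  # placeholder repo id
print(tok.pad_token)  # expected: "<|placeholder6|>"
batch = tok(["short", "a much longer example"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape)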
tokenizer.json
CHANGED
The diff for this file is too large to render.
tokenizer_config.json
CHANGED
@@ -117,9 +117,9 @@
     }
   },
   "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
   "legacy": false,
   "model_max_length": 4096,
   "pad_token": "<|placeholder6|>",
@@ -127,5 +127,6 @@
   "sp_model_kwargs": {},
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
-  "use_default_system_prompt": false
-}
+  "use_default_system_prompt": false,
+  "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}"
+}
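
With the template now stored both as chat_template.jinja and under the "chat_template" key here, the standard tokenizer API can build prompts without any manual formatting. A minimal sketch (placeholder repo id):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model")  # placeholder repo id
messages = [{"role": "user", "content": "Hello!"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # expected: "<|user|>\nHello!<|end|>\n<|assistant|>\n"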