Upload folder using huggingface_hub
Browse files
- README.md +8 -88
- added_tokens.json +2 -2
- config.json +1 -3
- mergekit_config.yml +22 -0
- model-00001-of-00008.safetensors +2 -2
- model-00002-of-00008.safetensors +2 -2
- model-00003-of-00008.safetensors +2 -2
- model-00004-of-00008.safetensors +2 -2
- model-00005-of-00008.safetensors +2 -2
- model-00006-of-00008.safetensors +2 -2
- model-00007-of-00008.safetensors +2 -2
- model-00008-of-00008.safetensors +1 -1
- model.safetensors.index.json +1 -298
- special_tokens_map.json +9 -3
- tokenizer.json +2 -2
- tokenizer_config.json +5 -7
README.md
CHANGED

@@ -1,92 +1,16 @@
 ---
+base_model:
+- LeroyDyer/Mixtral_AI_CyberBrain_3_0
+- ezelikman/quietstar-8-ahead
-
-
-
 library_name: transformers
 tags:
 - mergekit
 - merge
-- unsloth
-base_model:
-- LeroyDyer/Mixtral_AI_CyberBrain_2.0
-- ezelikman/quietstar-8-ahead
----
-
-Actually, it's working! It just needs training data!
-
-
-This project is implemented by simply patching the base Mistral implementation in Hugging Face transformers with a new modeling_mistral.py and a new configuration_mistral.py, and otherwise applying standard transformers features (e.g. the default Trainer).
-
-I.e.: first clone the latest transformers,
-enter the models/mistral folder and add the new modeling_mistral.py,
-then cd transformers and install from the folder: pip install ./transformers
-
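A quick sanity check that the freshly installed build is the one Python imports, and that the custom Quiet-STaR keys round-trip through the config (a sketch; use_complex_talk_head is one of the keys carried by this repo's config.json, shown further down):

```
# Sketch: confirm which transformers install is active and that the custom
# config keys from this repo's config.json surface on the loaded config.
import transformers
print(transformers.__file__)  # should point at the newly installed build

from transformers import AutoConfig
cfg = AutoConfig.from_pretrained("LeroyDyer/Mixtral_AI_CyberBrain_3.0")
print(getattr(cfg, "use_complex_talk_head", None))  # True, per config.json
```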
-After that, it can be loaded normally for training:
-
-```
-from unsloth import FastLanguageModel
-from transformers import AutoTokenizer
-import torch
-
-max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
-dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
-load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
-
-# 4bit pre quantized models we support for 4x faster downloading + no OOMs.
-fourbit_models = [
-    "unsloth/mistral-7b-bnb-4bit",
-    "unsloth/mistral-7b-instruct-v0.2-bnb-4bit",
-    "unsloth/llama-2-7b-bnb-4bit",
-    "unsloth/llama-2-13b-bnb-4bit",
-    "unsloth/codellama-34b-bnb-4bit",
-    "unsloth/tinyllama-bnb-4bit",
-    "unsloth/gemma-7b-bnb-4bit", # New Google 6 trillion tokens model 2.5x faster!
-    "unsloth/gemma-2b-bnb-4bit",
-] # More models at https://huggingface.co/unsloth
-
-model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name = "LeroyDyer/Mixtral_AI_CyberBrain_3.0", # Choose ANY! eg teknium/OpenHermes-2.5-Mistral-7B
-    max_seq_length = max_seq_length,
-    dtype = dtype,
-    load_in_4bit = load_in_4bit,
-#    trust_remote_code = True,
-    ignore_mismatched_sizes = True,
-    merged_talk_heads=True,
-    merged_lm_and_talk_heads=False,
-    merged_lm_and_think_heads=True,
-    use_concat_talk_head=True,
-    use_shallow_think=True,
-    use_shallow_talk=False,
-    use_complex_think_head=False,
-    use_complex_talk_head=True,
-    use_weighted_talk_head=True,
-    # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
-)
-
-# Reload the tokenizer with truncation and right-padding for training.
-tokenizer = AutoTokenizer.from_pretrained("LeroyDyer/Mixtral_AI_CyberBrain_3.0", truncation=True, padding_side="right")
-tokenizer.pad_token_id = tokenizer.eos_token_id
-
-model.tokenizer = tokenizer
-
-model.train()  # put the model into training mode
-```
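Beyond the patch, standard transformers features apply (e.g. the default Trainer), so a training run can look like the following minimal sketch, reusing model, tokenizer and max_seq_length from the snippet above (the two-example dataset and the hyperparameters are illustrative placeholders, not the model's actual training setup):

```
# Minimal sketch: fine-tune the patched model with the stock Trainer.
from datasets import Dataset
from transformers import Trainer, TrainingArguments, DataCollatorForLanguageModeling

ds = Dataset.from_dict({"text": ["Hello world.", "Thoughts go between tokens."]})
ds = ds.map(lambda ex: tokenizer(ex["text"], truncation=True, max_length=max_seq_length),
            remove_columns=["text"])

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="outputs",
                           per_device_train_batch_size=1,
                           num_train_epochs=1),
    train_dataset=ds,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
```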
         
-
-
-Right now the modeling_mistral.py is still having problems loading remotely, hence the hacky way... but after it's fixed it will be fine.
-
-
+
+---
 # merge
 
 This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
-Yes, multiple versions of this model were merged in attempts to grab the necessary tensors,
-but somehow it did not build, as some parameters were not loading, i.e. it would not load the config file! Hopefully this will be rectified soon, so remote loading will be fine, enabling enhanced training.
-The model was trained to perfection, so it still works fine!
-The LoRA was made so that later it can be loaded with the model for further training of the affected tensors.
 
 ## Merge Details
 ### Merge Method
@@ -96,7 +20,7 @@ This model was merged using the SLERP merge method.
 ### Models Merged
 
 The following models were included in the merge:
-* [LeroyDyer/
+* [LeroyDyer/Mixtral_AI_CyberBrain_3_0](https://huggingface.co/LeroyDyer/Mixtral_AI_CyberBrain_3_0)
 * [ezelikman/quietstar-8-ahead](https://huggingface.co/ezelikman/quietstar-8-ahead)
 
 ### Configuration
@@ -107,18 +31,14 @@ The following YAML configuration was used to produce this model:
 
 slices:
   - sources:
-      - model: LeroyDyer/
+      - model: LeroyDyer/Mixtral_AI_CyberBrain_3_0
         layer_range: [0, 32]
       - model: ezelikman/quietstar-8-ahead
        layer_range: [0, 32]
 # or, the equivalent models: syntax:
 # models:
 #   - model: mistralai/Mistral-7B-Instruct-v0.2
-# LARGER MODEL MUST BE BASE
-# THE BASE MODEL MUST BE THE TOKENIZER YOU WISH TO ADOPT
-# so for models with customized processes they must be the base model
-# If the base model has remote code, then this must be collected and added
-# to the repo afterwards and the config file adjusted to allow automapping to your new repo
+# LARGER MODEL MUST BE BASE
 #   - model: yanismiraoui/Yarn-Mistral-7b-128k-sharded
 merge_method: slerp
 base_model: ezelikman/quietstar-8-ahead
@@ -131,4 +51,4 @@ parameters:
    - value: 0.5 # fallback for rest of tensors
 dtype: float16
 
-```
+```
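For intuition about the t schedule above: SLERP interpolates each pair of tensors along the arc between them, with t picked per tensor by the filter rules (layer-wise ramps for self_attn and mlp, 0.5 for everything else). A rough per-tensor sketch, not mergekit's actual implementation:

```
import torch

# Rough per-tensor SLERP sketch; mergekit's real code handles more edge cases.
def slerp(t, a, b, eps=1e-8):
    a_f, b_f = a.flatten().float(), b.flatten().float()
    a_n = a_f / (a_f.norm() + eps)          # direction of tensor a
    b_n = b_f / (b_f.norm() + eps)          # direction of tensor b
    omega = torch.acos((a_n * b_n).sum().clamp(-1.0, 1.0))  # angle between them
    so = torch.sin(omega)
    if so.abs() < eps:                      # near-parallel: plain lerp fallback
        out = (1 - t) * a_f + t * b_f
    else:
        out = (torch.sin((1 - t) * omega) / so) * a_f + (torch.sin(t * omega) / so) * b_f
    return out.reshape(a.shape).to(a.dtype)

# t = 0 keeps the base model's tensor (ezelikman/quietstar-8-ahead here),
# t = 1 keeps the other model's; mlp tensors ramp through [0.7, 0.4, 0.6, 0.4, 0.7].
```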
    	
added_tokens.json
CHANGED

@@ -1,4 +1,4 @@
 {
-  "<|endthought|>":
-  "<|startthought|>":
+  "<|endthought|>": 32000,
+  "<|startthought|>": 32001
 }
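The two Quiet-STaR thought markers sit at ids 32000 and 32001, immediately after the base Mistral vocabulary (0-31999). A quick check (a sketch, run against this repo):

```
# Sketch: the thought markers from added_tokens.json resolve to 32000/32001.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("LeroyDyer/Mixtral_AI_CyberBrain_3.0")
print(tok.convert_tokens_to_ids("<|endthought|>"))    # 32000
print(tok.convert_tokens_to_ids("<|startthought|>"))  # 32001
```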
    	
config.json
CHANGED

@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "LeroyDyer/
+  "_name_or_path": "LeroyDyer/Mixtral_AI_CyberBrain_3_0",
   "architectures": [
     "MistralForCausalLM"
   ],
@@ -20,13 +20,11 @@
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
   "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
   "rope_theta": 10000.0,
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
   "transformers_version": "4.40.0.dev0",
-  "unsloth_version": "2024.3",
   "use_cache": true,
   "use_complex_talk_head": true,
   "use_complex_think_head": false,
    	
mergekit_config.yml
ADDED

@@ -0,0 +1,22 @@
+
+slices:
+  - sources:
+      - model: LeroyDyer/Mixtral_AI_CyberBrain_3_0
+        layer_range: [0, 32]
+      - model: ezelikman/quietstar-8-ahead
+        layer_range: [0, 32]
+# or, the equivalent models: syntax:
+# models:
+#   - model: mistralai/Mistral-7B-Instruct-v0.2
+# LARGER MODEL MUST BE BASE
+#   - model: yanismiraoui/Yarn-Mistral-7b-128k-sharded
+merge_method: slerp
+base_model: ezelikman/quietstar-8-ahead
+parameters:
+  t:
+    - filter: self_attn
+      value: [0.3, 0.6, 0.3786, 0.6, 0.6]
+    - filter: mlp
+      value: [0.7, 0.4, 0.6, 0.4, 0.7]
+    - value: 0.5 # fallback for rest of tensors
+dtype: float16
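To rerun this merge, mergekit's mergekit-yaml entry point consumes the file above directly. A sketch of the invocation (the output directory is a placeholder):

```
# Sketch: invoke mergekit on the config above; "./merged" is a placeholder path.
import subprocess

subprocess.run(["mergekit-yaml", "mergekit_config.yml", "./merged"], check=True)
```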
    	
model-00001-of-00008.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9aa1b16f05743b2f339f6e70521c166198221a90edc9a9559a2d666416020ca6
+size 1973489936
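Each shard diff here is a Git LFS pointer, not the weights themselves: oid is the SHA-256 of the blob and size is its byte count. A sketch for recomputing both from a locally downloaded shard:

```
# Sketch: recompute the two LFS pointer fields for a downloaded shard.
import hashlib, os

path = "model-00001-of-00008.safetensors"
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
print("oid sha256:" + h.hexdigest())   # matches the pointer's oid
print("size", os.path.getsize(path))   # matches the pointer's size, e.g. 1973489936
```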
    	
model-00002-of-00008.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:356d76c09d93a81c26368bb2fee8698d036ef21b6ee7007cf88e71463adc642e
+size 1979798000
    	
model-00003-of-00008.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:907a3d64fc26868db58b658da99d2aa49f979d8db0d459c703637416b671368c
+size 1946227312
    	
model-00004-of-00008.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:68dd86eba610c49dbdf57c29721f7d7040358eaf990fc3d6b1c0ce3d6fddb153
+size 1979798016
    	
model-00005-of-00008.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7abb6a3613474b4eae84e76fd4636149738b693d10eb12242250b37cd2af600b
+size 1946227336
    	
model-00006-of-00008.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:22c18413958dacaa1f724841221f89a772a9f2aca52ec92c375e78a054e30813
+size 1979798016
    	
model-00007-of-00008.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1725e5ba912edc98426340511e1984347d123ca4d92c66cbd6ba7e9d3ee1309a
+size 1862340784
    	
model-00008-of-00008.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:95e0b00098431ab705f87191f852793a1082b2f5e373c060f22c2ea3804ff51a
 size 815851048
    	
model.safetensors.index.json
CHANGED

@@ -1,298 +1 @@
-{
-  "metadata": {
-    "total_size": 14483496960
-  },
-  "weight_map": {
-    "lm_head.weight": "model-00008-of-00008.safetensors",
-    "model.embed_tokens.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.input_layernorm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.input_layernorm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.10.input_layernorm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.mlp.down_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.mlp.gate_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.mlp.up_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.post_attention_layernorm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.self_attn.k_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.self_attn.o_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.self_attn.q_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.self_attn.v_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.input_layernorm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.mlp.down_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.mlp.gate_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.mlp.up_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.self_attn.q_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.12.input_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.mlp.down_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.12.mlp.up_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.12.post_attention_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.self_attn.k_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.12.self_attn.q_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.12.self_attn.v_proj.weight": "model-00003-of-00008.safetensors",
-    "model.layers.13.input_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.mlp.down_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.mlp.gate_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.mlp.up_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.post_attention_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.self_attn.k_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.self_attn.o_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.self_attn.q_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.self_attn.v_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.input_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.mlp.down_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.mlp.gate_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.mlp.up_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.post_attention_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.self_attn.k_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.self_attn.o_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.self_attn.q_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.self_attn.v_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.input_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.mlp.down_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.mlp.gate_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.mlp.up_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.post_attention_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.self_attn.k_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.self_attn.o_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.self_attn.q_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.self_attn.v_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.input_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.mlp.down_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.mlp.gate_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.mlp.up_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.post_attention_layernorm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.self_attn.k_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.self_attn.o_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.self_attn.q_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.self_attn.v_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.17.input_layernorm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.mlp.down_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.mlp.gate_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.mlp.up_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.post_attention_layernorm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.self_attn.k_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.17.self_attn.o_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.17.self_attn.q_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.17.self_attn.v_proj.weight": "model-00004-of-00008.safetensors",
-    "model.layers.18.input_layernorm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.mlp.down_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.mlp.gate_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.mlp.up_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.post_attention_layernorm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.self_attn.k_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.self_attn.o_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.self_attn.q_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.self_attn.v_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.input_layernorm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.mlp.down_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.mlp.gate_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.mlp.up_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.post_attention_layernorm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.self_attn.k_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.self_attn.o_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.self_attn.q_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.self_attn.v_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.2.input_layernorm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.20.input_layernorm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.mlp.down_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.mlp.gate_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.mlp.up_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.post_attention_layernorm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.self_attn.k_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.self_attn.o_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.self_attn.q_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.self_attn.v_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.21.input_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.mlp.down_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.mlp.gate_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.21.mlp.up_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.21.post_attention_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.self_attn.k_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.21.self_attn.o_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.21.self_attn.q_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.21.self_attn.v_proj.weight": "model-00005-of-00008.safetensors",
-    "model.layers.22.input_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.mlp.down_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.mlp.gate_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.mlp.up_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.post_attention_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.self_attn.k_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.self_attn.o_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.self_attn.q_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.self_attn.v_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.input_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.mlp.down_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.mlp.gate_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.mlp.up_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.post_attention_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.self_attn.k_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.self_attn.o_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.self_attn.q_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.self_attn.v_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.input_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.mlp.down_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.mlp.gate_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.mlp.up_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.post_attention_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.self_attn.k_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.self_attn.o_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.self_attn.q_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.self_attn.v_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.input_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.mlp.down_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.mlp.gate_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.mlp.up_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.post_attention_layernorm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.self_attn.k_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.self_attn.o_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.self_attn.q_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.self_attn.v_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.26.input_layernorm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.mlp.down_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.mlp.gate_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.mlp.up_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.post_attention_layernorm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.self_attn.k_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.26.self_attn.o_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.26.self_attn.q_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.26.self_attn.v_proj.weight": "model-00006-of-00008.safetensors",
-    "model.layers.27.input_layernorm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.mlp.down_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.mlp.gate_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.mlp.up_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.post_attention_layernorm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.self_attn.k_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.self_attn.o_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.self_attn.q_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.self_attn.v_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.input_layernorm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.mlp.down_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.mlp.gate_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.mlp.up_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.post_attention_layernorm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.self_attn.k_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.self_attn.o_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.self_attn.q_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.self_attn.v_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.input_layernorm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.mlp.down_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.mlp.gate_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.mlp.up_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.post_attention_layernorm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.self_attn.k_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.self_attn.o_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.self_attn.q_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.self_attn.v_proj.weight": "model-00007-of-00008.safetensors",
-    "model.layers.3.input_layernorm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.mlp.down_proj.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00008.safetensors",
-    "model.layers.30.input_layernorm.weight": "model-00008-of-00008.safetensors",
         
     | 
| 225 | 
         
            -
                "model.layers.30.mlp.down_proj.weight": "model-00008-of-00008.safetensors",
         
     | 
| 226 | 
         
            -
                "model.layers.30.mlp.gate_proj.weight": "model-00007-of-00008.safetensors",
         
     | 
| 227 | 
         
            -
                "model.layers.30.mlp.up_proj.weight": "model-00007-of-00008.safetensors",
         
     | 
| 228 | 
         
            -
                "model.layers.30.post_attention_layernorm.weight": "model-00008-of-00008.safetensors",
         
     | 
| 229 | 
         
            -
                "model.layers.30.self_attn.k_proj.weight": "model-00007-of-00008.safetensors",
         
     | 
| 230 | 
         
            -
                "model.layers.30.self_attn.o_proj.weight": "model-00007-of-00008.safetensors",
         
     | 
| 231 | 
         
            -
                "model.layers.30.self_attn.q_proj.weight": "model-00007-of-00008.safetensors",
         
     | 
| 232 | 
         
            -
                "model.layers.30.self_attn.v_proj.weight": "model-00007-of-00008.safetensors",
         
     | 
| 233 | 
         
            -
                "model.layers.31.input_layernorm.weight": "model-00008-of-00008.safetensors",
         
     | 
| 234 | 
         
            -
                "model.layers.31.mlp.down_proj.weight": "model-00008-of-00008.safetensors",
         
     | 
| 235 | 
         
            -
                "model.layers.31.mlp.gate_proj.weight": "model-00008-of-00008.safetensors",
         
     | 
| 236 | 
         
            -
                "model.layers.31.mlp.up_proj.weight": "model-00008-of-00008.safetensors",
         
     | 
| 237 | 
         
            -
                "model.layers.31.post_attention_layernorm.weight": "model-00008-of-00008.safetensors",
         
     | 
| 238 | 
         
            -
                "model.layers.31.self_attn.k_proj.weight": "model-00008-of-00008.safetensors",
         
     | 
| 239 | 
         
            -
                "model.layers.31.self_attn.o_proj.weight": "model-00008-of-00008.safetensors",
         
     | 
| 240 | 
         
            -
                "model.layers.31.self_attn.q_proj.weight": "model-00008-of-00008.safetensors",
         
     | 
| 241 | 
         
            -
                "model.layers.31.self_attn.v_proj.weight": "model-00008-of-00008.safetensors",
         
     | 
| 242 | 
         
            -
                "model.layers.4.input_layernorm.weight": "model-00002-of-00008.safetensors",
         
     | 
| 243 | 
         
            -
                "model.layers.4.mlp.down_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 244 | 
         
            -
                "model.layers.4.mlp.gate_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 245 | 
         
            -
                "model.layers.4.mlp.up_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 246 | 
         
            -
                "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00008.safetensors",
         
     | 
| 247 | 
         
            -
                "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 248 | 
         
            -
                "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 249 | 
         
            -
                "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 250 | 
         
            -
                "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 251 | 
         
            -
                "model.layers.5.input_layernorm.weight": "model-00002-of-00008.safetensors",
         
     | 
| 252 | 
         
            -
                "model.layers.5.mlp.down_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 253 | 
         
            -
                "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 254 | 
         
            -
                "model.layers.5.mlp.up_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 255 | 
         
            -
                "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00008.safetensors",
         
     | 
| 256 | 
         
            -
                "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 257 | 
         
            -
                "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 258 | 
         
            -
                "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 259 | 
         
            -
                "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 260 | 
         
            -
                "model.layers.6.input_layernorm.weight": "model-00002-of-00008.safetensors",
         
     | 
| 261 | 
         
            -
                "model.layers.6.mlp.down_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 262 | 
         
            -
                "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 263 | 
         
            -
                "model.layers.6.mlp.up_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 264 | 
         
            -
                "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00008.safetensors",
         
     | 
| 265 | 
         
            -
                "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 266 | 
         
            -
                "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 267 | 
         
            -
                "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 268 | 
         
            -
                "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 269 | 
         
            -
                "model.layers.7.input_layernorm.weight": "model-00002-of-00008.safetensors",
         
     | 
| 270 | 
         
            -
                "model.layers.7.mlp.down_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 271 | 
         
            -
                "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 272 | 
         
            -
                "model.layers.7.mlp.up_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 273 | 
         
            -
                "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00008.safetensors",
         
     | 
| 274 | 
         
            -
                "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 275 | 
         
            -
                "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 276 | 
         
            -
                "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 277 | 
         
            -
                "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 278 | 
         
            -
                "model.layers.8.input_layernorm.weight": "model-00003-of-00008.safetensors",
         
     | 
| 279 | 
         
            -
                "model.layers.8.mlp.down_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 280 | 
         
            -
                "model.layers.8.mlp.gate_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 281 | 
         
            -
                "model.layers.8.mlp.up_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 282 | 
         
            -
                "model.layers.8.post_attention_layernorm.weight": "model-00003-of-00008.safetensors",
         
     | 
| 283 | 
         
            -
                "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 284 | 
         
            -
                "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 285 | 
         
            -
                "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 286 | 
         
            -
                "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00008.safetensors",
         
     | 
| 287 | 
         
            -
                "model.layers.9.input_layernorm.weight": "model-00003-of-00008.safetensors",
         
     | 
| 288 | 
         
            -
                "model.layers.9.mlp.down_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 289 | 
         
            -
                "model.layers.9.mlp.gate_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 290 | 
         
            -
                "model.layers.9.mlp.up_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 291 | 
         
            -
                "model.layers.9.post_attention_layernorm.weight": "model-00003-of-00008.safetensors",
         
     | 
| 292 | 
         
            -
                "model.layers.9.self_attn.k_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 293 | 
         
            -
                "model.layers.9.self_attn.o_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 294 | 
         
            -
                "model.layers.9.self_attn.q_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 295 | 
         
            -
                "model.layers.9.self_attn.v_proj.weight": "model-00003-of-00008.safetensors",
         
     | 
| 296 | 
         
            -
                "model.norm.weight": "model-00008-of-00008.safetensors"
         
     | 
| 297 | 
         
            -
              }
         
     | 
| 298 | 
         
            -
            }
         
+
            {"metadata": {"mergekit_version": "0.0.4.2", "total_size": 14483496960}, "weight_map": {"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00008.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00001-of-00008.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00008.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00008.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00008.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00008.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00001-of-00008.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00008.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00001-of-00008.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00008.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00008.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00008.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00008.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00008.safetensors", "model.layers.2.input_layernorm.weight": "model-00001-of-00008.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001-of-00008.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00008.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001-of-00008.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00008.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00008.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00008.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00008.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00008.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00008.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00008.safetensors", "model.embed_tokens.weight": "model-00001-of-00008.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00008.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00008.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00008.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00008.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00008.safetensors", "model.layers.7.self_attn.k_proj.weight": 
"model-00002-of-00008.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00008.safetensors", "model.layers.7.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00008.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00008.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00008.safetensors", "model.layers.6.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00002-of-00008.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00002-of-00008.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00002-of-00008.safetensors", "model.layers.5.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00002-of-00008.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00002-of-00008.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00002-of-00008.safetensors", "model.layers.4.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.3.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", 
"model.layers.11.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.11.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.10.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.9.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.8.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00004-of-00008.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00004-of-00008.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.16.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00004-of-00008.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00004-of-00008.safetensors", 
"model.layers.15.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.15.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00004-of-00008.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00004-of-00008.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.14.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00004-of-00008.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00004-of-00008.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.13.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.12.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00005-of-00008.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.20.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00005-of-00008.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", 
"model.layers.19.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.19.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00005-of-00008.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.18.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00005-of-00008.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.17.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.25.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.24.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", 
"model.layers.23.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.23.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.22.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.21.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.30.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.30.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.30.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00007-of-00008.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00007-of-00008.safetensors", "model.layers.29.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.29.mlp.gate_proj.weight": "model-00007-of-00008.safetensors", "model.layers.29.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.29.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.29.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00007-of-00008.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", "model.layers.29.self_attn.q_proj.weight": "model-00007-of-00008.safetensors", "model.layers.29.input_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.28.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.28.mlp.gate_proj.weight": "model-00007-of-00008.safetensors", "model.layers.28.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.28.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.28.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00007-of-00008.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00007-of-00008.safetensors", "model.layers.28.input_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00007-of-00008.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00007-of-00008.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", 
"model.layers.27.self_attn.q_proj.weight": "model-00007-of-00008.safetensors", "model.layers.27.input_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00007-of-00008.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.26.input_layernorm.weight": "model-00007-of-00008.safetensors", "lm_head.weight": "model-00008-of-00008.safetensors", "model.norm.weight": "model-00008-of-00008.safetensors", "model.layers.31.mlp.down_proj.weight": "model-00008-of-00008.safetensors", "model.layers.31.mlp.gate_proj.weight": "model-00008-of-00008.safetensors", "model.layers.31.mlp.up_proj.weight": "model-00008-of-00008.safetensors", "model.layers.31.post_attention_layernorm.weight": "model-00008-of-00008.safetensors", "model.layers.31.self_attn.o_proj.weight": "model-00008-of-00008.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00008-of-00008.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00008-of-00008.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00008-of-00008.safetensors", "model.layers.31.input_layernorm.weight": "model-00008-of-00008.safetensors", "model.layers.30.mlp.down_proj.weight": "model-00008-of-00008.safetensors", "model.layers.30.post_attention_layernorm.weight": "model-00008-of-00008.safetensors", "model.layers.30.input_layernorm.weight": "model-00008-of-00008.safetensors"}}
         
    	
        special_tokens_map.json
    CHANGED
    
@@ -1,7 +1,7 @@
 {
   "additional_special_tokens": [
-    "<|
-    "<|
+    "<|endthought|>",
+    "<|startthought|>"
   ],
   "bos_token": {
     "content": "<s>",
@@ -17,7 +17,13 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": 
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
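
One consequence worth calling out: the new file defines `pad_token` as a full `</s>` token object, so batched tokenization can pad out of the box. A quick sketch, assuming the tokenizer is loaded from this repo (the path below is a placeholder, not the actual repo id):

```

from transformers import AutoTokenizer

# Placeholder path; substitute this repo's Hub id or a local checkout.
tok = AutoTokenizer.from_pretrained("path/to/this-repo")

# pad_token is now "</s>", so padding works without extra setup.
print(tok.pad_token, tok.pad_token_id)
batch = tok(["short", "a somewhat longer input"], padding=True)
print([len(ids) for ids in batch["input_ids"]])  # equal lengths after padding
```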
    	
        tokenizer.json
    CHANGED
    
@@ -32,7 +32,7 @@
       },
       {
         "id": 32000,
-        "content": "<|
+        "content": "<|endthought|>",
         "single_word": false,
         "lstrip": false,
         "rstrip": false,
@@ -41,7 +41,7 @@
       },
       {
         "id": 32001,
-        "content": "<|
+        "content": "<|startthought|>",
         "single_word": false,
         "lstrip": false,
         "rstrip": false,
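
tokenizer.json pins the two thought delimiters to ids 32000 and 32001, which is what the quiet-star-style patched model expects. A small check, again with a placeholder path, that the added tokens round-trip to those ids:

```

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")  # placeholder path

# The special tokens registered at ids 32000/32001 in tokenizer.json.
print(tok.convert_tokens_to_ids("<|endthought|>"))    # expected: 32000
print(tok.convert_tokens_to_ids("<|startthought|>"))  # expected: 32001
```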
    	
        tokenizer_config.json
    CHANGED
    
@@ -27,7 +27,7 @@
       "special": true
     },
     "32000": {
-      "content": "<|
+      "content": "<|endthought|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -35,7 +35,7 @@
       "special": true
     },
     "32001": {
-      "content": "<|
+      "content": "<|startthought|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -44,20 +44,18 @@
     }
   },
   "additional_special_tokens": [
-    "<|
-    "<|
+    "<|endthought|>",
+    "<|startthought|>"
   ],
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": true,
-  "model_max_length": 
+  "model_max_length": 1000000000000000019884624838656,
   "pad_token": "</s>",
-  "padding_side": "right",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
-  "truncation": true,
   "unk_token": "<unk>",
   "use_default_system_prompt": false
 }
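
Note the removals in this last hunk: the old `"truncation": true` and `"padding_side": "right"` entries are gone, and `model_max_length` is now the effectively-unbounded sentinel value. In practice truncation becomes an opt-in, per-call choice (the dropped `padding_side` simply falls back to the tokenizer class default). A hedged sketch of what callers would now pass explicitly, using the same placeholder path as above:

```

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-repo")  # placeholder path

# Truncation is no longer baked into tokenizer_config.json, so request it per
# call; max_length must be given explicitly since model_max_length is a sentinel.
enc = tok("some very long prompt ...", truncation=True, max_length=2048)
print(len(enc["input_ids"]))
```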