Upload 25 files
- README.md +202 -3
- config_sentence_transformers.json +14 -0
- document_0_Transformer/config.json +25 -0
- document_0_Transformer/model.safetensors +3 -0
- document_0_Transformer/sentence_bert_config.json +7 -0
- document_0_Transformer/special_tokens_map.json +37 -0
- document_0_Transformer/tokenizer.json +0 -0
- document_0_Transformer/tokenizer_config.json +58 -0
- document_0_Transformer/vocab.txt +0 -0
- document_1_Pooling/config.json +10 -0
- logo.png +0 -0
- logo.webp +0 -0
- modules.json +14 -0
- query_0_Transformer/config.json +25 -0
- query_0_Transformer/model.safetensors +3 -0
- query_0_Transformer/sentence_bert_config.json +7 -0
- query_0_Transformer/special_tokens_map.json +37 -0
- query_0_Transformer/tokenizer.json +0 -0
- query_0_Transformer/tokenizer_config.json +65 -0
- query_0_Transformer/vocab.txt +0 -0
- query_1_Pooling/config.json +10 -0
- query_2_Dense/config.json +6 -0
- query_2_Dense/model.safetensors +3 -0
- router_config.json +24 -0
    	
README.md
CHANGED

@@ -1,3 +1,202 @@

The three-line placeholder front matter (`---` / `license: apache-2.0` / `---`) is replaced with the full model card:
---
license: apache-2.0
base_model: microsoft/MiniLM-L6-v2
tags:
- transformers
- sentence-transformers
- sentence-similarity
- feature-extraction
- text-embeddings-inference
- information-retrieval
- knowledge-distillation
language:
- en
---

<div style="display: flex; justify-content: center;">
    <div style="display: flex; align-items: center; gap: 10px;">
        <img src="logo.webp" alt="MongoDB Logo" style="height: 36px; width: auto; border-radius: 4px;">
        <span style="font-size: 32px; font-weight: bold">MongoDB/mdbr-leaf-mt-asym</span>
    </div>
</div>

# Content

1. [Introduction](#introduction)
2. [Technical Report](#technical-report)
3. [Highlights](#highlights)
4. [Benchmarks](#benchmark-comparison)
5. [Quickstart](#quickstart)
6. [Citation](#citation)

# Introduction

`mdbr-leaf-mt-asym` is a compact, high-performance text embedding model designed for classification, clustering, semantic sentence similarity, and summarization tasks.

This model is the asymmetric variant of `mdbr-leaf-mt`: it uses [`MongoDB/mdbr-leaf-mt`](https://huggingface.co/MongoDB/mdbr-leaf-mt) to encode queries and [`mixedbread-ai/mxbai-embed-large-v1`](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) to encode documents.

The model is robust to [vector quantization](#vector-quantization) and [MRL truncation](#mrl-truncation).

If you are looking to perform semantic search / information retrieval (e.g. for RAG), please check out our [`mdbr-leaf-ir`](https://huggingface.co/MongoDB/mdbr-leaf-ir) model, which is specifically trained for these tasks.

> [!NOTE]
> This model has been developed by the ML team of MongoDB Research. At the time of writing, it is not used in any of MongoDB's commercial product or service offerings.

# Technical Report

A technical report detailing our proposed `LEAF` training procedure is [available here](https://arxiv.org/abs/2509.12539).

# Highlights

* **State-of-the-Art Performance**: `mdbr-leaf-mt-asym` achieves state-of-the-art results for compact embedding models, **ranking #1** on the public [MTEB v2 (Eng) leaderboard](https://huggingface.co/spaces/mteb/leaderboard) for models with ≤30M parameters.
* **Flexible Architecture Support**: `mdbr-leaf-mt-asym` uses an asymmetric architecture in which queries and documents are encoded by different models, enabling even stronger retrieval performance.
* **MRL and Quantization Support**: embedding vectors generated by `mdbr-leaf-mt-asym` compress well when truncated (MRL) and can be stored using more efficient types such as `int8` and `binary`. [See below](#mrl-truncation) for more information.

## Benchmark Comparison

The table below shows the scores of `mdbr-leaf-mt` on the MTEB v2 (English) benchmark, compared to other embedding models.

`mdbr-leaf-mt` ranks #1 on this benchmark for models with ≤30M parameters.

| Model                              | Size    | MTEB v2 (Eng) |
|------------------------------------|---------|---------------|
| OpenAI text-embedding-3-large      | Unknown | 66.43         |
| OpenAI text-embedding-3-small      | Unknown | 64.56         |
| **mdbr-leaf-mt**                   | 23M     | **63.97**     |
| gte-small                          | 33M     | 63.22         |
| snowflake-arctic-embed-s           | 32M     | 61.59         |
| e5-small-v2                        | 33M     | 61.32         |
| granite-embedding-small-english-r2 | 47M     | 61.07         |
| all-MiniLM-L6-v2                   | 22M     | 59.03         |

# Quickstart

## Sentence Transformers

```python
from sentence_transformers import SentenceTransformer

# Load the model
model = SentenceTransformer("MongoDB/mdbr-leaf-mt-asym")

# Example queries and documents
queries = [
    "What is machine learning?",
    "How does neural network training work?",
]

documents = [
    "Machine learning is a subset of artificial intelligence that focuses on algorithms that can learn from data.",
    "Neural networks are trained through backpropagation, adjusting weights to minimize prediction errors.",
]

# Encode queries and documents
query_embeddings = model.encode_query(queries)
document_embeddings = model.encode_document(documents)

# Compute similarity scores
scores = model.similarity(query_embeddings, document_embeddings)

# Print results
for i, query in enumerate(queries):
    print(f"Query: {query}")
    for j, doc in enumerate(documents):
        print(f" Similarity: {scores[i, j]:.4f} | Document {j}: {doc[:80]}...")

# Query: What is machine learning?
#  Similarity: 0.8483 | Document 0: Machine learning is a subset of artificial intelligence that focuses on algorith...
#  Similarity: 0.6805 | Document 1: Neural networks are trained through backpropagation, adjusting weights to minimi...

# Query: How does neural network training work?
#  Similarity: 0.6050 | Document 0: Machine learning is a subset of artificial intelligence that focuses on algorith...
#  Similarity: 0.7689 | Document 1: Neural networks are trained through backpropagation, adjusting weights to minimi...
```

## Transformers Usage

See [here](https://huggingface.co/MongoDB/mdbr-leaf-mt/blob/main/transformers_example_mt.ipynb).

## Asymmetric Retrieval Setup

`mdbr-leaf-mt` is *aligned* to [`mxbai-embed-large-v1`](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1), the model it has been distilled from. This enables flexible architectures in which, for example, documents are encoded with the larger model while queries are encoded faster and more efficiently with the compact `leaf` model, as in this repository. This setup generally outperforms the symmetric one in which both queries and documents are encoded with `leaf`.

To use the `leaf` model for both queries and documents, use [`mdbr-leaf-mt`](https://huggingface.co/MongoDB/mdbr-leaf-mt).
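The same split can also be assembled from the two standalone checkpoints, which is convenient when documents are embedded offline by a different service than the one serving queries. Below is a minimal sketch, assuming both checkpoints are loaded with a recent `sentence-transformers` (≥ v5) so that `encode_query` / `encode_document` are available, and relying on the alignment described above to make the two 1024-dimensional embedding spaces directly comparable:

```python
from sentence_transformers import SentenceTransformer

# Compact model for queries, larger aligned model for documents.
query_model = SentenceTransformer("MongoDB/mdbr-leaf-mt")
document_model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")

queries = ["What is machine learning?"]
documents = [
    "Machine learning is a subset of artificial intelligence that focuses on "
    "algorithms that can learn from data.",
]

# Thanks to the teacher-aligned training, these embeddings live in the same
# space and can be compared directly.
query_embeddings = query_model.encode_query(queries)
document_embeddings = document_model.encode_document(documents)

scores = query_model.similarity(query_embeddings, document_embeddings)
print(scores)
```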
## MRL Truncation

Embeddings have been trained via [MRL](https://arxiv.org/abs/2205.13147) and can be truncated for more efficient storage:

```python
query_embeds = model.encode_query(queries, truncate_dim=256)
doc_embeds = model.encode_document(documents, truncate_dim=256)

similarities = model.similarity(query_embeds, doc_embeds)

print('After MRL:')
print(f"* Embeddings dimension: {query_embeds.shape[1]}")
print(f"* Similarities:\n{similarities}")

# After MRL:
# * Embeddings dimension: 256
# * Similarities:
# tensor([[0.8584, 0.6921],
#         [0.5973, 0.7893]])
```

## Vector Quantization

Vector quantization, for example to `int8` or `binary`, can be performed as follows:

**Note**: For vector quantization to types other than binary, we suggest performing a calibration to determine the optimal ranges, [see here](https://sbert.net/examples/sentence_transformer/applications/embedding-quantization/README.html#scalar-int8-quantization). Good initial values are -1.0 and +1.0.

```python
from sentence_transformers.quantization import quantize_embeddings
import torch

query_embeds = model.encode_query(queries)
doc_embeds = model.encode_document(documents)

# Quantize embeddings to int8 using the ranges -1.0 and +1.0
ranges = torch.tensor([[-1.0], [+1.0]]).expand(2, query_embeds.shape[1]).cpu().numpy()
query_embeds = quantize_embeddings(query_embeds, "int8", ranges=ranges)
doc_embeds = quantize_embeddings(doc_embeds, "int8", ranges=ranges)

# Calculate similarities; cast to int64 to avoid under/overflow
similarities = query_embeds.astype(int) @ doc_embeds.astype(int).T

print('After quantization:')
print(f"* Embeddings type: {query_embeds.dtype}")
print(f"* Similarities:\n{similarities}")

# After quantization:
# * Embeddings type: int8
# * Similarities:
# [[2202032 1422868]
#  [1421197 1845580]]
```
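Binary quantization, mentioned above, works in the same way but needs no calibration. The sketch below is illustrative rather than part of the model card: it re-encodes the Quickstart texts, packs each embedding down to one bit per dimension with `quantize_embeddings(..., "binary")`, and ranks documents by Hamming distance using a small NumPy helper (not a library function).

```python
from sentence_transformers.quantization import quantize_embeddings
import numpy as np

# Fresh float embeddings for the Quickstart queries / documents.
query_embeds = model.encode_query(queries)
doc_embeds = model.encode_document(documents)

# One sign bit per dimension, packed into bytes: 1024 floats -> 128 bytes.
query_bin = quantize_embeddings(query_embeds, "binary")
doc_bin = quantize_embeddings(doc_embeds, "binary")
print(f"* Embeddings type: {query_bin.dtype}, shape: {query_bin.shape}")

# Smaller Hamming distance = more similar: XOR the packed bytes, count set bits.
q = query_bin.view(np.uint8)
d = doc_bin.view(np.uint8)
hamming = np.unpackbits(np.bitwise_xor(q[:, None, :], d[None, :, :]), axis=-1).sum(axis=-1)
print(hamming)
```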
# Evaluation

Please [see here](https://huggingface.co/MongoDB/mdbr-leaf-mt/blob/main/evaluate_models.ipynb).

# Citation

If you use this model in your work, please cite:

```bibtex
@misc{mdbr_leaf,
      title={LEAF: Knowledge Distillation of Text Embedding Models with Teacher-Aligned Representations},
      author={Robin Vujanic and Thomas Rueckstiess},
      year={2025},
      eprint={2509.12539},
      archivePrefix={arXiv},
      primaryClass={cs.IR},
      url={https://arxiv.org/abs/2509.12539},
}
```

# License

This model is released under the Apache 2.0 License.

# Contact

For questions or issues, please open an issue or pull request. You can also contact the MongoDB ML research team at [email protected].
    	
config_sentence_transformers.json
ADDED

@@ -0,0 +1,14 @@
{
  "model_type": "SentenceTransformer",
  "__version__": {
    "sentence_transformers": "5.1.0",
    "transformers": "4.56.1",
    "pytorch": "2.8.0+cu126"
  },
  "prompts": {
    "query": "Represent this sentence for searching relevant passages: ",
    "document": ""
  },
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
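The `prompts` mapping above is what `encode_query` and `encode_document` rely on: queries get the retrieval instruction prepended before tokenization, while documents are encoded as-is. A small illustrative sketch of the effect (not taken from the model card):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("MongoDB/mdbr-leaf-mt-asym")

text = "What is machine learning?"

# Same text, two routes: encode_query applies the "query" prompt above and the
# compact query tower; encode_document applies the empty "document" prompt and
# the larger document tower. Both outputs are 1024-dimensional and comparable.
q = model.encode_query([text])
d = model.encode_document([text])

print(q.shape, d.shape)
print(model.similarity(q, d))  # high similarity, but not exactly 1.0
```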
    	
document_0_Transformer/config.json
ADDED

@@ -0,0 +1,25 @@
{
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "dtype": "float32",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "transformers_version": "4.56.1",
  "type_vocab_size": 2,
  "use_cache": false,
  "vocab_size": 30522
}
    	
document_0_Transformer/model.safetensors
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e86b2a89f7f8933cf7bd90586cdf69d0012140e412818234b234f807e51ee574
size 1340612432
    	
document_0_Transformer/sentence_bert_config.json
ADDED

@@ -0,0 +1,7 @@
{
    "max_seq_length": 512,
    "do_lower_case": false,
    "model_args": {
        "add_pooling_layer": false
    }
}
    	
document_0_Transformer/special_tokens_map.json
ADDED

@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
    	
document_0_Transformer/tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
    	
document_0_Transformer/tokenizer_config.json
ADDED

@@ -0,0 +1,58 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
    	
document_0_Transformer/vocab.txt
ADDED

The diff for this file is too large to render. See raw diff.
    	
document_1_Pooling/config.json
ADDED

@@ -0,0 +1,10 @@
{
    "word_embedding_dimension": 1024,
    "pooling_mode_cls_token": true,
    "pooling_mode_mean_tokens": false,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
    	
logo.png
ADDED
    	
logo.webp
ADDED
    	
modules.json
ADDED

@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Router"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
    	
query_0_Transformer/config.json
ADDED

@@ -0,0 +1,25 @@
{
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "dtype": "float32",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 384,
  "initializer_range": 0.02,
  "intermediate_size": 1536,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 6,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "transformers_version": "4.56.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}
    	
query_0_Transformer/model.safetensors
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:08a4fce05c16baee8b6ca143d1ae11535ababb05412f506e7a89a8b56916a6a5
size 90272656
    	
query_0_Transformer/sentence_bert_config.json
ADDED

@@ -0,0 +1,7 @@
{
    "max_seq_length": 512,
    "do_lower_case": false,
    "model_args": {
        "add_pooling_layer": false
    }
}
    	
query_0_Transformer/special_tokens_map.json
ADDED

@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
    	
query_0_Transformer/tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
    	
query_0_Transformer/tokenizer_config.json
ADDED

@@ -0,0 +1,65 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": false,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "max_length": 128,
  "model_max_length": 512,
  "never_split": null,
  "pad_to_multiple_of": null,
  "pad_token": "[PAD]",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "[SEP]",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}
    	
query_0_Transformer/vocab.txt
ADDED

The diff for this file is too large to render. See raw diff.
    	
query_1_Pooling/config.json
ADDED

@@ -0,0 +1,10 @@
{
    "word_embedding_dimension": 384,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
    	
query_2_Dense/config.json
ADDED

@@ -0,0 +1,6 @@
{
    "in_features": 384,
    "out_features": 1024,
    "bias": false,
    "activation_function": "torch.nn.modules.linear.Identity"
}
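This Dense module is a bias-free linear map from the query tower's 384-dimensional pooled output into the document tower's 1024-dimensional space (the activation is `Identity`). Functionally it amounts to the sketch below; the weights here are random placeholders, the real ones live in `query_2_Dense/model.safetensors`.

```python
import torch

# 384 -> 1024 linear projection, no bias, identity activation.
projection = torch.nn.Linear(in_features=384, out_features=1024, bias=False)

pooled_queries = torch.randn(2, 384)       # mean-pooled output of the query tower
aligned_queries = projection(pooled_queries)
print(aligned_queries.shape)               # torch.Size([2, 1024])
```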
    	
query_2_Dense/model.safetensors
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dfe95933b75110ca0c1650dc0a78f06d0a05a028892ac74ffc5aa3644283f16f
size 1572952
    	
router_config.json
ADDED

@@ -0,0 +1,24 @@
{
    "types": {
        "query_0_Transformer": "sentence_transformers.models.Transformer.Transformer",
        "query_1_Pooling": "sentence_transformers.models.Pooling.Pooling",
        "query_2_Dense": "sentence_transformers.models.Dense.Dense",
        "document_0_Transformer": "sentence_transformers.models.Transformer.Transformer",
        "document_1_Pooling": "sentence_transformers.models.Pooling.Pooling"
    },
    "structure": {
        "query": [
            "query_0_Transformer",
            "query_1_Pooling",
            "query_2_Dense"
        ],
        "document": [
            "document_0_Transformer",
            "document_1_Pooling"
        ]
    },
    "parameters": {
        "default_route": "document",
        "allow_empty_key": true
    }
}
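The `structure` above is what `encode_query` and `encode_document` select between: the query route runs the compact transformer, mean pooling, and the dense projection, while the document route runs the large transformer with CLS pooling; both are then L2-normalized by the `Normalize` module listed in `modules.json`. Because `default_route` is `"document"`, a plain `encode()` call should behave like `encode_document()`. A small sketch to check this (illustrative, not from the model card):

```python
from sentence_transformers import SentenceTransformer
import numpy as np

model = SentenceTransformer("MongoDB/mdbr-leaf-mt-asym")

text = "Neural networks are trained through backpropagation."

plain = model.encode([text])               # routed via default_route = "document"
as_document = model.encode_document([text])

print(np.allclose(plain, as_document, atol=1e-6))  # expected: True
```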

