zhichao-geng committed
Commit 29f9638 · verified · 1 Parent(s): 4c03a73

sentence_transformers_support (#1)


- Add support for Sentence Transformers (84e5191ddb886bb66563d9d0f267cbcc65c7c883)
- Update README.md (c2e42e091b774937c0040aaf53b072ed4c7f698d)
- Update README.md (64d9cfe5766d56a682b9f3a991b7eb1a051a94ca)
- Update README.md (337f5cd0639bfb4800ebdc348ba6b42214e27aa3)

README.md CHANGED
@@ -24,6 +24,14 @@ tags:
  - passage-retrieval
  - document-expansion
  - bag-of-words
+ - sentence-transformers
+ - sparse-encoder
+ - sparse
+ - asymmetric
+ - inference-free
+ - splade
+ pipeline_tag: feature-extraction
+ library_name: sentence-transformers
  datasets:
  - miracl/miracl
  ---
@@ -48,6 +56,47 @@ This is a learned sparse retrieval model. It encodes the documents to 105879 dim

  The OpenSearch neural sparse feature supports learned sparse retrieval with a Lucene inverted index. Link: https://opensearch.org/docs/latest/query-dsl/specialized/neural-sparse/. Indexing and search can be performed with the OpenSearch high-level API.

+ ## Usage (Sentence Transformers)
+
+ First install the Sentence Transformers library:
+
+ ```bash
+ pip install -U sentence-transformers
+ ```
+
+ Then you can load this model and run inference.
+
+ ```python
+ from sentence_transformers.sparse_encoder import SparseEncoder
+
+ # Download from the 🤗 Hub
+ model = SparseEncoder("opensearch-project/opensearch-neural-sparse-encoding-multilingual-v1")
+
+ query = "What's the weather in ny now?"
+ document = "Currently New York is rainy."
+
+ query_embed = model.encode_query(query)
+ document_embed = model.encode_document(document)
+
+ sim = model.similarity(query_embed, document_embed)
+ print(f"Similarity: {sim}")
+ # Similarity: tensor([[7.7400]])
+
+ decoded_query = model.decode(query_embed)
+ decoded_document = model.decode(document_embed)
+
+ for i in range(len(decoded_query)):
+     query_token, query_score = decoded_query[i]
+     doc_score = next((score for token, score in decoded_document if token == query_token), 0)
+     if doc_score != 0:
+         print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
+
+ # Token: weather, Query score: 3.0699, Document score: 1.2821
+ # Token: now, Query score: 1.6406, Document score: 0.9018
+ # Token: ?, Query score: 1.6108, Document score: 0.3141
+ # Token: ny, Query score: 1.2721, Document score: 1.3446
+ # Token: in, Query score: 0.6005, Document score: 0.1804
+ ```

  ## Usage (HuggingFace)
  This model is supposed to run inside an OpenSearch cluster, but you can also use it outside the cluster with the Hugging Face models API.
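The README above defers indexing and search to the OpenSearch high-level API. As a rough illustration of the search side only, here is a minimal sketch using the opensearch-py client and the `neural_sparse` query from the linked docs; the index name, field name, and model ID are placeholders, not values from this commit:

```python
from opensearchpy import OpenSearch

# Minimal sketch, assuming an index already set up per the linked
# neural-sparse docs. "my-nlp-index", "passage_embedding", and the
# model ID below are hypothetical placeholders.
client = OpenSearch(hosts=[{"host": "localhost", "port": 9200}])

response = client.search(
    index="my-nlp-index",
    body={
        "query": {
            "neural_sparse": {
                "passage_embedding": {
                    "query_text": "What's the weather in ny now?",
                    "model_id": "<model ID registered in the cluster>",
                }
            }
        }
    },
)
print(response["hits"]["hits"])
```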
config_sentence_transformers.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "model_type": "SparseEncoder",
+   "__version__": {
+     "sentence_transformers": "5.0.0",
+     "transformers": "4.50.3",
+     "pytorch": "2.6.0+cu124"
+   },
+   "prompts": {
+     "query": "",
+     "document": ""
+   },
+   "default_prompt_name": null,
+   "similarity_fn_name": "dot"
+ }
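`"similarity_fn_name": "dot"` pins `model.similarity` to a dot product over the sparse vectors, which is where the `tensor([[7.7400]])` score in the README example comes from. A toy sketch of that reduction (tensor values are illustrative, not model output):

```python
import torch

# With "similarity_fn_name": "dot", similarity reduces to a plain dot
# product across the vocabulary dimensions (toy 4-dim vectors here).
query_vec = torch.tensor([[0.0, 1.2, 0.0, 3.0]])
doc_vec = torch.tensor([[0.5, 0.9, 0.0, 1.3]])
print(query_vec @ doc_vec.T)  # tensor([[4.9800]]) = 1.2*0.9 + 3.0*1.3
```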
document_1_SpladePooling/config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "pooling_strategy": "max",
+   "activation_function": "relu",
+   "word_embedding_dimension": null
+ }
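`"pooling_strategy": "max"` with `"activation_function": "relu"` matches the standard SPLADE document pooling: ReLU the MLM logits, apply log saturation, then max-pool over sequence positions. A minimal sketch, assuming that standard formulation and the 105879-term vocabulary mentioned in the README:

```python
import torch

# Sketch of SPLADE pooling as configured above (assumed standard
# log(1 + relu(logits)) form, max-pooled over the sequence axis).
logits = torch.randn(1, 8, 105879)           # (batch, seq_len, vocab_size)
activated = torch.log1p(torch.relu(logits))  # "activation_function": "relu"
doc_vec = activated.max(dim=1).values        # "pooling_strategy": "max"
print(doc_vec.shape)                         # torch.Size([1, 105879])
```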
modules.json ADDED
@@ -0,0 +1,8 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Router"
+   }
+ ]
query_0_IDF/config.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "frozen": true
+ }
query_0_IDF/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:336ae862c6ee224095e2155085bbe79f076af77caa4b403a11e8cd0ec9f0ceb5
+ size 423596
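The `"frozen": true` flag and the small weight file fit the inference-free query path: 423596 bytes is roughly one float32 weight per vocabulary entry (105879 × 4 bytes, plus header), so a query vector can be built by table lookup rather than a transformer forward pass. A rough sketch of that idea, with illustrative weights and token IDs:

```python
import torch

# Rough sketch of an inference-free IDF query encoder. The real frozen
# weights live in query_0_IDF/model.safetensors; everything below is
# illustrative.
vocab_size = 105879
idf = torch.rand(vocab_size)                  # stand-in for the frozen IDF table
token_ids = torch.tensor([1996, 4633, 1999])  # hypothetical query token IDs
query_vec = torch.zeros(vocab_size)
query_vec[token_ids] = idf[token_ids]         # bag of tokens weighted by IDF
```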
query_0_IDF/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
query_0_IDF/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
query_0_IDF/tokenizer_config.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "max_length": 200,
+   "model_max_length": 512,
+   "pad_to_multiple_of": null,
+   "pad_token": "[PAD]",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "sep_token": "[SEP]",
+   "stride": 0,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
query_0_IDF/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
router_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "types": {
+     "query_0_IDF": "sentence_transformers.sparse_encoder.models.IDF.IDF",
+     "": "sentence_transformers.sparse_encoder.models.MLMTransformer.MLMTransformer",
+     "document_1_SpladePooling": "sentence_transformers.sparse_encoder.models.SpladePooling.SpladePooling"
+   },
+   "structure": {
+     "query": [
+       "query_0_IDF"
+     ],
+     "document": [
+       "",
+       "document_1_SpladePooling"
+     ]
+   },
+   "parameters": {
+     "default_route": "document",
+     "allow_empty_key": true
+   }
+ }
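Read together with modules.json, this makes the whole model a single `Router`: the `query` route runs only the frozen IDF lookup, while the `document` route runs the MLM transformer (the empty-string path, i.e. the repository root) followed by SPLADE pooling, and `"default_route": "document"` sends plain `encode` calls down the document path. In the Sentence Transformers API this routing is what `encode_query` and `encode_document` select between, as in this sketch (same calls as the README example):

```python
from sentence_transformers.sparse_encoder import SparseEncoder

model = SparseEncoder("opensearch-project/opensearch-neural-sparse-encoding-multilingual-v1")

# "query" route -> query_0_IDF: tokenize, then look up frozen IDF weights
query_vec = model.encode_query("What's the weather in ny now?")

# "document" route -> MLMTransformer ("" = repo root) + document_1_SpladePooling
doc_vec = model.encode_document("Currently New York is rainy.")

print(model.similarity(query_vec, doc_vec))  # dot product, per config_sentence_transformers.json
```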