olgab42 committed · verified
Commit 07105e0 · 1 Parent(s): e5c18c0

Create README.md

Files changed (1): README.md ADDED (+47 lines)
---
base_model:
- Alibaba-NLP/gte-multilingual-base
pipeline_tag: sentence-similarity
license: apache-2.0
---
This is the ONNX version of the [gte-multilingual-base](https://huggingface.co/Alibaba-NLP/gte-multilingual-base) model.

The following example is adapted from the original model repository for use with the ONNX model.
```python
# Requires transformers>=4.36.0
import onnxruntime as ort
import numpy as np
from transformers import AutoTokenizer

input_texts = [
    "what is the capital of China?",
    "how to implement quick sort in python?",
    "北京",
    "快排算法介绍"
]

# Load the tokenizer from the original model repository
tokenizer = AutoTokenizer.from_pretrained('Alibaba-NLP/gte-multilingual-base')

# Load the ONNX model
session = ort.InferenceSession("model.onnx")

# Tokenize the input texts
batch_dict = tokenizer(input_texts, max_length=8192, padding=True, truncation=True, return_tensors='np')

# Run inference
outputs = session.run(None, {
    "input_ids": batch_dict["input_ids"],
    "attention_mask": batch_dict["attention_mask"]
})

# The second output holds the last hidden states; the [CLS] token (first token)
# of each sequence is used as the sentence embedding
last_hidden_states = outputs[1]  # Shape: (batch_size, seq_len, hidden_size)
dimension = 768  # Output embedding dimension; may be truncated to any value in [128, 768]
embeddings = last_hidden_states[:, 0, :dimension]  # Shape: (batch_size, dimension)

# Inspect the raw embeddings
print(f"Embeddings shape: {embeddings.shape}")
print(f"First few values of first embedding: {embeddings[0][:5]}")
print(f"First few values of second embedding: {embeddings[1][:5]}")

# L2-normalize the embeddings
embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)

# Compute similarity scores between the first text and the others
scores = (embeddings[:1] @ embeddings[1:].T) * 100
print(scores.tolist())
# [[0.3016996383666992, 0.7503870129585266, 0.3203084468841553]]
```
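
Optionally, the same ONNX file can be run on a GPU by passing an explicit execution provider list when creating the session. This is a minimal sketch, assuming the `onnxruntime-gpu` package is installed and `model.onnx` is in the working directory as in the example above; onnxruntime falls back to the CPU provider when CUDA is unavailable.

```python
import onnxruntime as ort

# Prefer the CUDA execution provider when available, otherwise fall back to CPU
session = ort.InferenceSession(
    "model.onnx",
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
)
print(session.get_providers())  # lists the execution providers actually in use
```

The rest of the example (tokenization, inference, [CLS] pooling, and normalization) is unchanged.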