yilunzhang committed
Commit 9ae1ebe
1 Parent(s): b6da614

Initial Commit

Files changed (8)
  1. README.md +54 -3
  2. config.json +23 -0
  3. convert.py +22 -0
  4. model.onnx +3 -0
  5. special_tokens_map.json +1 -0
  6. tokenizer.json +0 -0
  7. tokenizer_config.json +1 -0
  8. vocab.txt +0 -0
README.md CHANGED
@@ -1,3 +1,54 @@
- ---
- license: apache-2.0
- ---
+ # ONNX version of `sentence-transformers/all-mpnet-base-v2`
+
+ This is the ONNX version of https://huggingface.co/sentence-transformers/all-mpnet-base-v2, verified to produce the same embeddings as the original model.
+
+ Optimized for CPU usage.
+
+ ## Convert
+
+ The same checkpoint can also be created with the `convert.py` script.
+
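+ To confirm that the exported graph reproduces the original model, the token embeddings of both can be compared directly. A minimal sketch, assuming `onnxruntime` is installed and `model.onnx` is in the working directory (the sample sentence and variable names are only illustrative):
+
+ ```python
+ import numpy as np
+ import onnxruntime as ort
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-mpnet-base-v2")
+ reference = AutoModel.from_pretrained("sentence-transformers/all-mpnet-base-v2")
+ reference.eval()
+
+ # One sample sentence; MPNet's tokenizer yields input_ids and attention_mask (both int64)
+ encoded = tokenizer(["A quick consistency check"], return_tensors="pt")
+
+ # Token embeddings from the original PyTorch model
+ with torch.no_grad():
+     torch_out = reference(**encoded).last_hidden_state.numpy()
+
+ # Token embeddings from the exported ONNX graph
+ session = ort.InferenceSession("model.onnx")
+ onnx_out = session.run(
+     ["last_hidden_state"],
+     {name: tensor.numpy() for name, tensor in encoded.items()},
+ )[0]
+
+ # The two should agree up to small numerical error
+ print("max abs diff:", np.abs(torch_out - onnx_out).max())
+ ```
+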
+ ## Usage - `transformers`
+
+ Usage is exactly the same as with `sentence-transformers/all-mpnet-base-v2`, except that the model is loaded through `ORTModelForFeatureExtraction` from optimum.
+
+ ```
+ pip install optimum[onnxruntime]
+ ```
+
+ ```python
+ from transformers import AutoTokenizer
+ from optimum.onnxruntime import ORTModelForFeatureExtraction
+ import torch
+ import torch.nn.functional as F
+
+ # Mean pooling - take the attention mask into account for correct averaging
+ def mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+
+
+ # Sentences we want sentence embeddings for
+ sentences = ['This is an example sentence', 'Each sentence is converted']
+
+ # Load model from HuggingFace Hub
+ tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-mpnet-base-v2')
+ model = ORTModelForFeatureExtraction.from_pretrained('sentence-transformers/all-mpnet-base-v2')
+
+ # Tokenize sentences
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
+
+ # Compute token embeddings
+ with torch.no_grad():
+     model_output = model(**encoded_input)
+
+ # Perform pooling
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+
+ # Normalize embeddings
+ sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
+
+ print("Sentence embeddings:")
+ print(sentence_embeddings)
+ ```
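+
+ Since the embeddings are L2-normalized, cosine similarity between sentences reduces to a dot product. A minimal follow-up sketch reusing `sentence_embeddings` from the snippet above (the `similarities` name is only illustrative):
+
+ ```python
+ # Pairwise cosine similarities; rows and columns follow the order of `sentences`
+ similarities = sentence_embeddings @ sentence_embeddings.T
+ print(similarities)
+ ```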
config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "_name_or_path": "microsoft/mpnet-base",
+   "architectures": [
+     "MPNetForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "mpnet",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "relative_attention_num_buckets": 32,
+   "transformers_version": "4.8.2",
+   "vocab_size": 30527
+ }
convert.py ADDED
@@ -0,0 +1,22 @@
+ from transformers import AutoModel
+ import torch
+
+ max_seq_length = 384
+
+ model = AutoModel.from_pretrained("sentence-transformers/all-mpnet-base-v2")
+ model.eval()
+
+ # Dummy inputs that define the export signature; the actual values don't matter
+ inputs = {
+     "input_ids": torch.ones(1, max_seq_length, dtype=torch.int64),
+     "attention_mask": torch.ones(1, max_seq_length, dtype=torch.int64),
+ }
+
+ # Mark batch and sequence dimensions as dynamic so the graph accepts any input shape
+ symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}
+
+ torch.onnx.export(
+     model,
+     args=tuple(inputs.values()),
+     f="model.onnx",
+     export_params=True,
+     input_names=["input_ids", "attention_mask"],
+     output_names=["last_hidden_state"],
+     dynamic_axes={"input_ids": symbolic_names, "attention_mask": symbolic_names},
+ )
model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91c3b3d55e18d17ed4657d4cc9207940ae14b526caa9ad79e55ae90cdc6f08ec
+ size 438158583
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "[UNK]", "pad_token": "<pad>", "mask_token": "<mask>", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "microsoft/mpnet-base", "tokenizer_class": "MPNetTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff