Jesse-marqo committed
Commit eb26513
Parent: 4750856

Update README.md

Files changed (1)
  1. README.md +38 -1
README.md CHANGED
@@ -1,4 +1,41 @@
  ---
  license: apache-2.0
  ---
- Rank-tuned e5-large-v2 on the Marqo-GS-10M dataset for ecommerce. Full details here https://github.com/marqo-ai/GCL
+ Rank-tuned e5-large-v2 on the Marqo-GS-10M dataset for ecommerce. Full details here https://github.com/marqo-ai/GCL
+ 
+ ```python
+ import torch.nn.functional as F
+ 
+ from torch import Tensor
+ from transformers import AutoTokenizer, AutoModel
+ 
+ 
+ def average_pool(last_hidden_states: Tensor,
+                  attention_mask: Tensor) -> Tensor:
+     last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
+     return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
+ 
+ 
+ # Each input text should start with "query: " or "passage: ".
+ # For tasks other than retrieval, you can simply use the "query: " prefix.
+ input_texts = ['query: Espresso Pitcher with Handle',
+                'query: Women’s designer handbag sale',
+                "passage: Dianoo Espresso Steaming Pitcher, Espresso Milk Frothing Pitcher Stainless Steel",
+                "passage: Coach Outlet Eliza Shoulder Bag - Black - One Size"]
+ 
+ tokenizer = AutoTokenizer.from_pretrained('Marqo/marqo-gcl-e5-large-v2-130')
+ model_new = AutoModel.from_pretrained('Marqo/marqo-gcl-e5-large-v2-130')
+ 
+ # Tokenize the input texts
+ batch_dict = tokenizer(input_texts, max_length=77, padding=True, truncation=True, return_tensors='pt')
+ 
+ outputs = model_new(**batch_dict)
+ embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
+ 
+ # normalize embeddings
+ embeddings = F.normalize(embeddings, p=2, dim=1)
+ scores = (embeddings[:2] @ embeddings[2:].T) * 100
+ print(scores.tolist())
+ 
+ 
+ ```
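
The README example above ends by printing a 2×2 matrix of scaled cosine similarities (rows are the two queries, columns the two passages). As an illustrative follow-on, the minimal sketch below turns those scores into a per-query ranking of passages; it assumes the `scores` and `input_texts` variables from that example are still in scope and is not part of the committed model card.

```python
import torch

# Assumes `scores` (2 x 2 tensor of query-passage similarities) and
# `input_texts` (two queries followed by two passages) from the README
# example above are already defined.
ranking = torch.argsort(scores, dim=1, descending=True)

for qi, order in enumerate(ranking):
    print(input_texts[qi])
    for pi in order.tolist():
        # Passages start at index 2 of input_texts; queries occupy the first two slots.
        print(f"  {scores[qi, pi].item():.2f}  {input_texts[2 + pi]}")
```

For each query, the matching passage (the espresso pitcher for the first query, the handbag for the second) is expected to rank first with a noticeably higher score than the unrelated one.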