Daniel Campos committed on
Commit
81768e5
1 Parent(s): be2b266
Files changed (1)
  1. README.md +64 -4
README.md CHANGED
@@ -42,9 +42,69 @@ Or you can also stream it without downloading it before:
  from datasets import load_dataset
  docs = load_dataset("Snowflake/msmarco-v2.1-snowflake-arctic-embed-l", split="train", streaming=True)
  for doc in docs:
-     doc_id = doc['_id']
-     title = doc['title']
-     header = doc['header']
+     doc_id = doc['docid']
+     url = doc['url']
      text = doc['text']
-     emb = doc['emb']
+     emb = doc['embedding']
  ```
+
+
+ Note: the full dataset corpus is ~620GB, so it will take a while to download and may not fit on some devices.
+
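+ If you only want to inspect a few records before committing to a large download, streaming mode can stop early. A minimal sketch (`take` on a streaming split is standard `datasets` API; the `docid`/`url` fields follow the example above):
+ ```python
+ from datasets import load_dataset
+
+ # Stream the corpus and read just the first 5 records; only the shards
+ # needed for these rows are fetched.
+ docs = load_dataset("Snowflake/msmarco-v2.1-snowflake-arctic-embed-l", split="train", streaming=True)
+ for doc in docs.take(5):
+     print(doc['docid'], doc['url'])
+ ```
+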
+ ## Search
+ A full search example (on the first 1,000 paragraphs):
+ ```python
+ from datasets import load_dataset
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+ import numpy as np
+
+
+ top_k = 100
+ docs_stream = load_dataset("Snowflake/msmarco-v2.1-snowflake-arctic-embed-l", split="train", streaming=True)
+
+ docs = []
+ doc_embeddings = []
+
+ # Collect the first 1,000 paragraphs and their precomputed embeddings
+ for doc in docs_stream:
+     docs.append(doc)
+     doc_embeddings.append(doc['embedding'])
+     if len(docs) >= 1000:
+         break
+
+ doc_embeddings = np.asarray(doc_embeddings)
+
+ tokenizer = AutoTokenizer.from_pretrained('Snowflake/snowflake-arctic-embed-l')
+ model = AutoModel.from_pretrained('Snowflake/snowflake-arctic-embed-l', add_pooling_layer=False)
+ model.eval()
+
+ query_prefix = 'Represent this sentence for searching relevant passages: '
+ queries = ['how do you clean smoke off walls']
+ queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
+ query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
+
+ # Compute query embeddings (CLS token of the last hidden state)
+ with torch.no_grad():
+     query_embeddings = model(**query_tokens)[0][:, 0]
+
+ # Normalize embeddings
+ query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
+ doc_embeddings = torch.nn.functional.normalize(torch.from_numpy(doc_embeddings), p=2, dim=1)
+
+ # Compute dot scores between the query embedding and all document embeddings
+ dot_scores = np.matmul(query_embeddings.numpy(), doc_embeddings.numpy().transpose())[0]
+ top_k_hits = np.argpartition(dot_scores, -top_k)[-top_k:].tolist()
+
+ # Sort top_k_hits by dot score
+ top_k_hits.sort(key=lambda x: dot_scores[x], reverse=True)
+
+ # Print results
+ print("Query:", queries[0])
+ for doc_id in top_k_hits:
+     print(docs[doc_id]['docid'])
+     print(docs[doc_id]['text'])
+     print(docs[doc_id]['url'], "\n")
+ ```
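+
+ The brute-force dot product above is fine for a thousand paragraphs. At larger scale you would typically put the embeddings in a vector index instead; this is not part of this dataset's tooling, just one common option. A minimal sketch with FAISS (assumes `pip install faiss-cpu` and reuses `doc_embeddings`, `query_embeddings`, and `docs` from the example above; `IndexFlatIP` is the exact baseline, and FAISS offers approximate variants for bigger corpora):
+ ```python
+ import faiss
+ import numpy as np
+
+ # Build an exact inner-product index; since the embeddings are already
+ # L2-normalized, inner product equals cosine similarity.
+ index = faiss.IndexFlatIP(doc_embeddings.shape[1])
+ index.add(doc_embeddings.numpy().astype(np.float32))
+
+ # Retrieve the top 10 documents for each query
+ scores, ids = index.search(query_embeddings.numpy().astype(np.float32), 10)
+ for score, idx in zip(scores[0], ids[0]):
+     print(round(float(score), 3), docs[idx]['docid'])
+ ```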
+
+
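+ If you prefer `sentence-transformers`, the query encoding can be written more compactly. A minimal sketch (assuming the model loads via sentence-transformers; the query prefix is passed manually rather than relying on any model-configured prompt):
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+ model = SentenceTransformer("Snowflake/snowflake-arctic-embed-l")
+ query_prefix = 'Represent this sentence for searching relevant passages: '
+ queries = ['how do you clean smoke off walls']
+
+ # encode() tokenizes, runs the model, and L2-normalizes in one call
+ query_embeddings = model.encode([query_prefix + q for q in queries], normalize_embeddings=True)
+ ```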