Mean pooling not max pooling (#1)
Browse files — Mean pooling not max pooling (commit 1588fa792ee9b44d4d320ca568e507dfa2f34581)
Co-authored-by: AJEGHRIR mustapha <[email protected]>
README.md
CHANGED
@@ -83,7 +83,7 @@ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tenso
|
|
83 |
with torch.no_grad():
|
84 |
model_output = model(**encoded_input)
|
85 |
|
86 |
-
# Perform pooling. In this case,
|
87 |
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
|
88 |
|
89 |
print("Sentence embeddings:")
|
|
|
83 |
with torch.no_grad():
|
84 |
model_output = model(**encoded_input)
|
85 |
|
86 |
+
# Perform pooling. In this case, mean pooling.
|
87 |
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
|
88 |
|
89 |
print("Sentence embeddings:")
|