ssmits committed
Commit 343e8e5
1 Parent(s): 2c18e56

Update README.md

Files changed (1):
README.md +0 -2
README.md CHANGED
```diff
@@ -66,14 +66,12 @@ Without sentence-transformers, you can use the model like this: First, you pass
 from transformers import AutoTokenizer, AutoModel
 import torch
 
-
 #Mean Pooling - Take attention mask into account for correct averaging
 def mean_pooling(model_output, attention_mask):
     token_embeddings = model_output[0] #First element of model_output contains all token embeddings
     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
 
-
 # Sentences we want sentence embeddings for
 sentences = ['This is an example sentence', 'Each sentence is converted']
 
```
 
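For context, the hunk above only shows the beginning of the README's transformers-only usage snippet. A minimal, self-contained sketch of the full recipe it belongs to is shown below; the checkpoint id `ssmits/<model-name>` is a placeholder for illustration, since the diff does not name the actual repository.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean pooling: average the token embeddings, weighting by the attention
# mask so that padding tokens do not contribute to the sentence embedding.
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element holds all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Placeholder checkpoint id -- substitute the actual model this README belongs to.
model_id = 'ssmits/<model-name>'
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform mean pooling over the token embeddings
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print(sentence_embeddings.shape)  # (2, hidden_size)
```

The removal of the two blank lines in the commit is purely cosmetic; the Python code is unaffected.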