xiaowu0162 committed on
Commit
7d51217
1 Parent(s): cfddce1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -41,7 +41,7 @@ Then you can use the model like this:
41
  from sentence_transformers import SentenceTransformer
42
  phrases = ["information retrieval", "text mining", "natural language processing"]
43
 
44
- model = SentenceTransformer('{MODEL_NAME}')
45
  embeddings = model.encode(phrases)
46
  print(embeddings)
47
  ```
@@ -65,8 +65,8 @@ def mean_pooling(model_output, attention_mask):
65
  phrases = ["information retrieval", "text mining", "natural language processing"]
66
 
67
  # Load model from HuggingFace Hub
68
- tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
69
- model = AutoModel.from_pretrained('{MODEL_NAME}')
70
 
71
  # Tokenize sentences
72
  encoded_input = tokenizer(phrases, padding=True, truncation=True, return_tensors='pt')
@@ -78,7 +78,7 @@ with torch.no_grad():
78
  # Perform pooling. In this case, mean pooling.
79
  sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
80
 
81
- print("Sentence embeddings:")
82
  print(sentence_embeddings)
83
  ```
84
 
 
41
  from sentence_transformers import SentenceTransformer
42
  phrases = ["information retrieval", "text mining", "natural language processing"]
43
 
44
+ model = SentenceTransformer('uclanlp/keyphrase-mpnet-v1')
45
  embeddings = model.encode(phrases)
46
  print(embeddings)
47
  ```
 
65
  phrases = ["information retrieval", "text mining", "natural language processing"]
66
 
67
  # Load model from HuggingFace Hub
68
+ tokenizer = AutoTokenizer.from_pretrained('uclanlp/keyphrase-mpnet-v1')
69
+ model = AutoModel.from_pretrained('uclanlp/keyphrase-mpnet-v1')
70
 
71
  # Tokenize sentences
72
  encoded_input = tokenizer(phrases, padding=True, truncation=True, return_tensors='pt')
 
78
  # Perform pooling. In this case, mean pooling.
79
  sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
80
 
81
+ print("Phrase embeddings:")
82
  print(sentence_embeddings)
83
  ```
84