---
license: apache-2.0
---
# Overview
This example is taken from the sentence-transformers library and is intended for research purposes only.

Adapted by Aisuko.
# Installation
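The example pins the sentence-transformers release it was written against: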
```bash
pip install sentence-transformers==2.3.1
```
# Computing embeddings for a large set of sentences
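The script below downloads the Quora duplicate-questions dataset, collects the unique questions, and encodes them with a multi-process pool so the work is spread across the available CUDA devices.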
```python
import os
import csv

from sentence_transformers import SentenceTransformer
from sentence_transformers.util import http_get

if __name__ == '__main__':
    url = 'http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv'
    dataset_path = 'quora_duplicate_questions.tsv'
    # max_corpus_size = 50000  # maximum number of sentences to deal with

    # Download the dataset if it is not present yet
    if not os.path.exists(dataset_path):
        http_get(url, dataset_path)

    # Collect all unique sentences from the file
    corpus_sentences = set()
    with open(dataset_path, encoding='utf8') as fIn:
        reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
        for row in reader:
            corpus_sentences.add(row['question1'])
            corpus_sentences.add(row['question2'])
            # if len(corpus_sentences) >= max_corpus_size:
            #     break
    corpus_sentences = list(corpus_sentences)

    model = SentenceTransformer('all-MiniLM-L6-v2').to('cuda')
    model.max_seq_length = 256

    # Start one worker process per available CUDA device
    pool = model.start_multi_process_pool()

    # Compute the embeddings using the multi-process pool
    emb = model.encode_multi_process(
        corpus_sentences,
        pool,
        batch_size=128,
        chunk_size=1024,
        normalize_embeddings=True,
    )
    print('Embeddings computed. Shape:', emb.shape)

    # Optional: stop the processes in the pool
    model.stop_multi_process_pool(pool)
```
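Once computed, the embeddings can be queried directly. A minimal sketch, assuming `model`, `emb`, and `corpus_sentences` from the script above are still in memory (the query string and `top_k` value are illustrative):

```python
from sentence_transformers import util

# Encode a single query on the main process; no pool is needed for one sentence
query_emb = model.encode('How do I learn Python?', normalize_embeddings=True)

# Retrieve the most similar corpus sentences by cosine similarity
hits = util.semantic_search(query_emb, emb, top_k=5)[0]
for hit in hits:
    print(round(hit['score'], 4), corpus_sentences[hit['corpus_id']])
```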
# Save the embeddings to a CSV file
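Each row of the CSV holds one embedding vector, in the same order as `corpus_sentences`: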
```python
import pandas as pd

# One row per sentence, one column per embedding dimension
corpus_embedding = pd.DataFrame(emb)
corpus_embedding.to_csv('quora_questions.csv', index=False)
```
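CSV is human-readable but slow and large for a float matrix of this size. A minimal alternative sketch using NumPy's binary format (the file names are illustrative):

```python
import numpy as np

# .npy round-trips the float32 matrix without any text conversion
np.save('quora_questions_emb.npy', emb)

# Keep the sentences alongside so rows can be mapped back to text
with open('quora_questions.txt', 'w', encoding='utf8') as fOut:
    fOut.write('\n'.join(corpus_sentences))
```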