aisuko committed on
Commit
0f0892b
1 Parent(s): 1296d6f

Add dataset file


Signed-off-by: Aisuko <[email protected]>

Files changed (3)
  1. .gitattributes +1 -0
  2. README.md +65 -0
  3. quora_questions.csv +3 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+*.csv filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,68 @@
 ---
 license: apache-2.0
 ---
+
+# Overview
+
+Originally from the sentence-transformers library.
+
+For research purposes only.
+
+Adapted by Aisuko.
+
+# Installation
+
+```python
+!pip install sentence-transformers==2.3.1
+```
+
+# Computing embeddings for a large set of sentences
+
+```python
+import os
+import csv
+
+from sentence_transformers import SentenceTransformer
+from sentence_transformers.util import http_get
+
+if __name__ == '__main__':
+    url = 'http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv'
+    dataset_path = 'quora_duplicate_questions.tsv'
+    # max_corpus_size = 50000  # maximum number of sentences to process
+
+    # download the Quora duplicate-questions dataset if it is not already cached
+    if not os.path.exists(dataset_path):
+        http_get(url, dataset_path)
+
+    # collect all unique sentences from the file
+    corpus_sentences = set()
+    with open(dataset_path, encoding='utf8') as fIn:
+        reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
+        for row in reader:
+            corpus_sentences.add(row['question1'])
+            corpus_sentences.add(row['question2'])
+            # if len(corpus_sentences) >= max_corpus_size:
+            #     break
+
+    corpus_sentences = list(corpus_sentences)
+    model = SentenceTransformer('all-MiniLM-L6-v2').to('cuda')
+    model.max_seq_length = 256
+
+    # start one worker process per available device
+    pool = model.start_multi_process_pool()
+
+    # compute the embeddings using the multi-process pool
+    emb = model.encode_multi_process(corpus_sentences, pool, batch_size=128, chunk_size=1024, normalize_embeddings=True)
+    print('Embeddings computed. Shape:', emb.shape)
+
+    # optional: stop the processes in the pool
+    model.stop_multi_process_pool(pool)
+```
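+
+For a corpus small enough to encode on a single device, the multi-process pool is unnecessary. A minimal single-process sketch, assuming the same `model` and `corpus_sentences` objects from above:
+
+```python
+# single-process alternative: encode directly on one device
+emb = model.encode(
+    corpus_sentences,
+    batch_size=128,
+    normalize_embeddings=True,
+    show_progress_bar=True,
+    convert_to_numpy=True,
+)
+```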
+
+# Save the CSV file
+
+```python
+import pandas as pd
+
+# wrap the embedding matrix in a DataFrame and write it out without the index column
+corpus_embedding = pd.DataFrame(emb)
+corpus_embedding.to_csv('quora_questions.csv', index=False)
+```
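+
+The saved embeddings can later be read back and queried. A minimal sketch, assuming the `model` and `corpus_sentences` objects from above are still in memory and the query text is arbitrary; since the embeddings were saved normalized, cosine similarity reduces to a dot product:
+
+```python
+import numpy as np
+import pandas as pd
+
+# reload the embedding matrix saved above
+emb = pd.read_csv('quora_questions.csv').to_numpy(dtype='float32')
+
+# embed a query with the same model and the same normalization
+query = model.encode('How can I learn Python?', normalize_embeddings=True)
+
+# dot product of normalized vectors equals cosine similarity
+scores = emb @ query
+for idx in np.argsort(-scores)[:5]:
+    print(round(float(scores[idx]), 4), corpus_sentences[idx])
+```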
quora_questions.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12013b9a3438ac3bd362508f8109965dfa319ab53a1fabbbfeb70ae4e7fd09af
+size 2524694588
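
Note that the CSV is stored as a Git LFS pointer, so a plain clone without LFS fetches only this stub. One way to retrieve the actual ~2.5 GB file programmatically; the repo id below is a placeholder, not confirmed by this commit:

```python
from huggingface_hub import hf_hub_download

# placeholder repo id; substitute the actual dataset repository
path = hf_hub_download(
    repo_id='<user>/<dataset-repo>',
    filename='quora_questions.csv',
    repo_type='dataset',
)
print(path)
```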