The code below was used to evaluate this model on the MTEB benchmark.
```python
# !pip install tensorflow tensorflow_hub tensorflow_text
import tensorflow_hub as hub
import tensorflow as tf
from tensorflow_text import SentencepieceTokenizer  # noqa: F401 -- registers the SentencePiece ops the model requires

# Load the multilingual Universal Sentence Encoder from TF Hub.
embedder = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")

class USE:
    def encode(self, sentences, batch_size=32, **kwargs):
        """Embed sentences in batches; returns one vector per input sentence."""
        embeddings = []
        for i in range(0, len(sentences), batch_size):
            batch_sentences = sentences[i:i + batch_size]
            batch_embeddings = embedder(batch_sentences)
            embeddings.extend(batch_embeddings.numpy())  # convert tensors to NumPy rows for downstream use
        return embeddings

model = USE()
```
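As a minimal sketch of how this wrapper plugs into the benchmark, assuming the `mteb` package and its `MTEB` runner API (the task selection and output folder here are illustrative, not part of the original card):

```python
# A minimal sketch, assuming `pip install mteb` and the MTEB 1.x runner API.
from mteb import MTEB

# Pick one of the tasks reported below; the choice here is illustrative.
evaluation = MTEB(tasks=["AmazonReviewsClassification"])

# Runs the task with model.encode() and writes one JSON result file per task.
evaluation.run(model, output_folder="results/use-multilingual-large-3")
```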
Evaluation results

All values are self-reported scores on the MTEB test sets.

| Task | Metric | Value |
|------|--------|-------|
| MTEB AmazonCounterfactualClassification (en) | accuracy | 69.836 |
| MTEB AmazonCounterfactualClassification (en) | ap | 31.783 |
| MTEB AmazonCounterfactualClassification (en) | f1 | 63.496 |
| MTEB AmazonPolarityClassification | accuracy | 65.243 |
| MTEB AmazonPolarityClassification | ap | 60.211 |
| MTEB AmazonPolarityClassification | f1 | 64.932 |
| MTEB AmazonReviewsClassification (en) | accuracy | 33.954 |
| MTEB AmazonReviewsClassification (en) | f1 | 33.543 |
| MTEB ArxivClusteringP2P | v_measure | 33.721 |
| MTEB ArxivClusteringS2S | v_measure | 23.990 |