Tom Aarsen committed on
Commit
7d3a9f6
β€’
2 Parent(s): 418d26a 7287938

Merge branch 'main' into model_size_parameters

Browse files
Files changed (2) hide show
  1. EXTERNAL_MODEL_RESULTS.json +0 -0
  2. app.py +40 -1
EXTERNAL_MODEL_RESULTS.json CHANGED
The diff for this file is too large to render. See raw diff
 
app.py CHANGED
@@ -215,6 +215,17 @@ TASK_LIST_RETRIEVAL_FR = [
215
  "XPQARetrieval (fr)",
216
  ]
217
 
 
 
 
 
 
 
 
 
 
 
 
218
  TASK_LIST_RETRIEVAL_PL = [
219
  "ArguAna-PL",
220
  "DBPedia-PL",
@@ -324,6 +335,7 @@ def make_clickable_model(model_name, link=None):
324
  # Models without metadata, thus we cannot fetch their results naturally
325
  EXTERNAL_MODELS = [
326
  "Baichuan-text-embedding",
 
327
  "Cohere-embed-multilingual-v3.0",
328
  "Cohere-embed-multilingual-light-v3.0",
329
  "DanskBERT",
@@ -342,6 +354,7 @@ EXTERNAL_MODELS = [
342
  "bert-base-swedish-cased",
343
  "bert-base-uncased",
344
  "bge-base-zh-v1.5",
 
345
  "bge-large-zh-v1.5",
346
  "bge-large-zh-noinstruct",
347
  "bge-small-zh-v1.5",
@@ -364,6 +377,8 @@ EXTERNAL_MODELS = [
364
  "gelectra-base",
365
  "gelectra-large",
366
  "glove.6B.300d",
 
 
367
  "gottbert-base",
368
  "gtr-t5-base",
369
  "gtr-t5-large",
@@ -434,6 +449,7 @@ EXTERNAL_MODELS = [
434
  ]
435
 
436
  EXTERNAL_MODEL_TO_LINK = {
 
437
  "Cohere-embed-multilingual-v3.0": "https://huggingface.co/Cohere/Cohere-embed-multilingual-v3.0",
438
  "Cohere-embed-multilingual-light-v3.0": "https://huggingface.co/Cohere/Cohere-embed-multilingual-light-v3.0",
439
  "allenai-specter": "https://huggingface.co/sentence-transformers/allenai-specter",
@@ -450,6 +466,7 @@ EXTERNAL_MODEL_TO_LINK = {
450
  "bert-base-swedish-cased": "https://huggingface.co/KB/bert-base-swedish-cased",
451
  "bert-base-uncased": "https://huggingface.co/bert-base-uncased",
452
  "bge-base-zh-v1.5": "https://huggingface.co/BAAI/bge-base-zh-v1.5",
 
453
  "bge-large-zh-v1.5": "https://huggingface.co/BAAI/bge-large-zh-v1.5",
454
  "bge-large-zh-noinstruct": "https://huggingface.co/BAAI/bge-large-zh-noinstruct",
455
  "bge-small-zh-v1.5": "https://huggingface.co/BAAI/bge-small-zh-v1.5",
@@ -480,6 +497,8 @@ EXTERNAL_MODEL_TO_LINK = {
480
  "gelectra-base": "https://huggingface.co/deepset/gelectra-base",
481
  "gelectra-large": "https://huggingface.co/deepset/gelectra-large",
482
  "glove.6B.300d": "https://huggingface.co/sentence-transformers/average_word_embeddings_glove.6B.300d",
 
 
483
  "gottbert-base": "https://huggingface.co/uklfr/gottbert-base",
484
  "gtr-t5-base": "https://huggingface.co/sentence-transformers/gtr-t5-base",
485
  "gtr-t5-large": "https://huggingface.co/sentence-transformers/gtr-t5-large",
@@ -553,6 +572,7 @@ EXTERNAL_MODEL_TO_LINK = {
553
  }
554
 
555
  EXTERNAL_MODEL_TO_DIM = {
 
556
  "Cohere-embed-multilingual-v3.0": 1024,
557
  "Cohere-embed-multilingual-light-v3.0": 384,
558
  "all-MiniLM-L12-v2": 384,
@@ -568,6 +588,7 @@ EXTERNAL_MODEL_TO_DIM = {
568
  "bert-base-swedish-cased": 768,
569
  "bert-base-uncased": 768,
570
  "bge-base-zh-v1.5": 768,
 
571
  "bge-large-zh-v1.5": 1024,
572
  "bge-large-zh-noinstruct": 1024,
573
  "bge-small-zh-v1.5": 512,
@@ -601,6 +622,8 @@ EXTERNAL_MODEL_TO_DIM = {
601
  "gelectra-base": 768,
602
  "gelectra-large": 1024,
603
  "glove.6B.300d": 300,
 
 
604
  "gottbert-base": 768,
605
  "gtr-t5-base": 768,
606
  "gtr-t5-large": 768,
@@ -671,6 +694,7 @@ EXTERNAL_MODEL_TO_DIM = {
671
  }
672
 
673
  EXTERNAL_MODEL_TO_SEQLEN = {
 
674
  "Cohere-embed-multilingual-v3.0": 512,
675
  "Cohere-embed-multilingual-light-v3.0": 512,
676
  "all-MiniLM-L12-v2": 512,
@@ -686,6 +710,7 @@ EXTERNAL_MODEL_TO_SEQLEN = {
686
  "bert-base-swedish-cased": 512,
687
  "bert-base-uncased": 512,
688
  "bge-base-zh-v1.5": 512,
 
689
  "bge-large-zh-v1.5": 512,
690
  "bge-large-zh-noinstruct": 512,
691
  "bge-small-zh-v1.5": 512,
@@ -715,6 +740,8 @@ EXTERNAL_MODEL_TO_SEQLEN = {
715
  "gbert-large": 512,
716
  "gelectra-base": 512,
717
  "gelectra-large": 512,
 
 
718
  "gottbert-base": 512,
719
  "glove.6B.300d": "N/A",
720
  "gtr-t5-base": 512,
@@ -904,6 +931,8 @@ PROPRIETARY_MODELS = {
904
  "voyage-code-2",
905
  "voyage-lite-01-instruct",
906
  "voyage-lite-02-instruct",
 
 
907
  }
908
  PROPRIETARY_MODELS = {
909
  make_clickable_model(model, link=EXTERNAL_MODEL_TO_LINK.get(model, "https://huggingface.co/spaces/mteb/leaderboard"))
@@ -1151,7 +1180,7 @@ def add_task(examples):
1151
  examples["mteb_task"] = "PairClassification"
1152
  elif examples["mteb_dataset_name"] in norm(TASK_LIST_RERANKING + TASK_LIST_RERANKING_FR + TASK_LIST_RERANKING_ZH):
1153
  examples["mteb_task"] = "Reranking"
1154
- elif examples["mteb_dataset_name"] in norm(TASK_LIST_RETRIEVAL_NORM + TASK_LIST_RETRIEVAL_FR + TASK_LIST_RETRIEVAL_PL + TASK_LIST_RETRIEVAL_ZH):
1155
  examples["mteb_task"] = "Retrieval"
1156
  elif examples["mteb_dataset_name"] in norm(TASK_LIST_STS + TASK_LIST_STS_FR + TASK_LIST_STS_PL + TASK_LIST_STS_ZH):
1157
  examples["mteb_task"] = "STS"
@@ -1569,6 +1598,7 @@ DATA_CLASSIFICATION_SV = get_mteb_data(["Classification"], [], TASK_LIST_CLASSIF
1569
  DATA_CLASSIFICATION_OTHER = get_mteb_data(["Classification"], [], TASK_LIST_CLASSIFICATION_OTHER)[["Rank", "Model", "Model Size (Million Parameters)", "Average"] + TASK_LIST_CLASSIFICATION_OTHER]
1570
  DATA_CLUSTERING_DE = get_mteb_data(["Clustering"], [], TASK_LIST_CLUSTERING_DE)[["Rank", "Model", "Model Size (Million Parameters)", "Average"] + TASK_LIST_CLUSTERING_DE]
1571
  DATA_STS_OTHER = get_mteb_data(["STS"], [], TASK_LIST_STS_OTHER)[["Rank", "Model", "Model Size (Million Parameters)", "Average"] + TASK_LIST_STS_OTHER]
 
1572
 
1573
  # Exact, add all non-nan integer values for every dataset
1574
  NUM_SCORES = 0
@@ -1602,6 +1632,7 @@ for d in [
1602
  DATA_RETRIEVAL_FR,
1603
  DATA_RETRIEVAL_PL,
1604
  DATA_RETRIEVAL_ZH,
 
1605
  DATA_STS_EN,
1606
  DATA_STS_FR,
1607
  DATA_STS_PL,
@@ -1893,6 +1924,14 @@ data = {
1893
  "data": DATA_RETRIEVAL_FR,
1894
  "refresh": partial(get_mteb_data, tasks=["Retrieval"], datasets=TASK_LIST_RETRIEVAL_FR)
1895
  },
 
 
 
 
 
 
 
 
1896
  {
1897
  "language": "Polish",
1898
  "description": "**Retrieval Polish Leaderboard** πŸ”ŽπŸ‡΅πŸ‡±",
 
215
  "XPQARetrieval (fr)",
216
  ]
217
 
218
+ TASK_LIST_RETRIEVAL_LAW = [
219
+ "AILACasedocs",
220
+ "AILAStatutes",
221
+ "GerDaLIRSmall",
222
+ "LeCaRDv2",
223
+ "LegalBenchConsumerContractsQA",
224
+ "LegalBenchCorporateLobbying",
225
+ "LegalQuAD",
226
+ "LegalSummarization",
227
+ ]
228
+
229
  TASK_LIST_RETRIEVAL_PL = [
230
  "ArguAna-PL",
231
  "DBPedia-PL",
 
335
  # Models without metadata, thus we cannot fetch their results naturally
336
  EXTERNAL_MODELS = [
337
  "Baichuan-text-embedding",
338
+ "Cohere-embed-english-v3.0",
339
  "Cohere-embed-multilingual-v3.0",
340
  "Cohere-embed-multilingual-light-v3.0",
341
  "DanskBERT",
 
354
  "bert-base-swedish-cased",
355
  "bert-base-uncased",
356
  "bge-base-zh-v1.5",
357
+ "bge-large-en-v1.5",
358
  "bge-large-zh-v1.5",
359
  "bge-large-zh-noinstruct",
360
  "bge-small-zh-v1.5",
 
377
  "gelectra-base",
378
  "gelectra-large",
379
  "glove.6B.300d",
380
+ "google-gecko.text-embedding-preview-0409",
381
+ "google-gecko-256.text-embedding-preview-0409",
382
  "gottbert-base",
383
  "gtr-t5-base",
384
  "gtr-t5-large",
 
449
  ]
450
 
451
  EXTERNAL_MODEL_TO_LINK = {
452
+ "Cohere-embed-english-v3.0": "https://huggingface.co/Cohere/Cohere-embed-english-v3.0",
453
  "Cohere-embed-multilingual-v3.0": "https://huggingface.co/Cohere/Cohere-embed-multilingual-v3.0",
454
  "Cohere-embed-multilingual-light-v3.0": "https://huggingface.co/Cohere/Cohere-embed-multilingual-light-v3.0",
455
  "allenai-specter": "https://huggingface.co/sentence-transformers/allenai-specter",
 
466
  "bert-base-swedish-cased": "https://huggingface.co/KB/bert-base-swedish-cased",
467
  "bert-base-uncased": "https://huggingface.co/bert-base-uncased",
468
  "bge-base-zh-v1.5": "https://huggingface.co/BAAI/bge-base-zh-v1.5",
469
+ "bge-large-en-v1.5": "https://huggingface.co/BAAI/bge-large-en-v1.5",
470
  "bge-large-zh-v1.5": "https://huggingface.co/BAAI/bge-large-zh-v1.5",
471
  "bge-large-zh-noinstruct": "https://huggingface.co/BAAI/bge-large-zh-noinstruct",
472
  "bge-small-zh-v1.5": "https://huggingface.co/BAAI/bge-small-zh-v1.5",
 
497
  "gelectra-base": "https://huggingface.co/deepset/gelectra-base",
498
  "gelectra-large": "https://huggingface.co/deepset/gelectra-large",
499
  "glove.6B.300d": "https://huggingface.co/sentence-transformers/average_word_embeddings_glove.6B.300d",
500
+ "google-gecko.text-embedding-preview-0409": "https://cloud.google.com/vertex-ai/generative-ai/docs/embeddings/get-text-embeddings#latest_models",
501
+ "google-gecko-256.text-embedding-preview-0409": "https://cloud.google.com/vertex-ai/generative-ai/docs/embeddings/get-text-embeddings#latest_models",
502
  "gottbert-base": "https://huggingface.co/uklfr/gottbert-base",
503
  "gtr-t5-base": "https://huggingface.co/sentence-transformers/gtr-t5-base",
504
  "gtr-t5-large": "https://huggingface.co/sentence-transformers/gtr-t5-large",
 
572
  }
573
 
574
  EXTERNAL_MODEL_TO_DIM = {
575
+ "Cohere-embed-english-v3.0": 1024,
576
  "Cohere-embed-multilingual-v3.0": 1024,
577
  "Cohere-embed-multilingual-light-v3.0": 384,
578
  "all-MiniLM-L12-v2": 384,
 
588
  "bert-base-swedish-cased": 768,
589
  "bert-base-uncased": 768,
590
  "bge-base-zh-v1.5": 768,
591
+ "bge-large-en-v1.5": 1024,
592
  "bge-large-zh-v1.5": 1024,
593
  "bge-large-zh-noinstruct": 1024,
594
  "bge-small-zh-v1.5": 512,
 
622
  "gelectra-base": 768,
623
  "gelectra-large": 1024,
624
  "glove.6B.300d": 300,
625
+ "google-gecko.text-embedding-preview-0409": 768,
626
+ "google-gecko-256.text-embedding-preview-0409": 256,
627
  "gottbert-base": 768,
628
  "gtr-t5-base": 768,
629
  "gtr-t5-large": 768,
 
694
  }
695
 
696
  EXTERNAL_MODEL_TO_SEQLEN = {
697
+ "Cohere-embed-english-v3.0": 512,
698
  "Cohere-embed-multilingual-v3.0": 512,
699
  "Cohere-embed-multilingual-light-v3.0": 512,
700
  "all-MiniLM-L12-v2": 512,
 
710
  "bert-base-swedish-cased": 512,
711
  "bert-base-uncased": 512,
712
  "bge-base-zh-v1.5": 512,
713
+ "bge-large-en-v1.5": 512,
714
  "bge-large-zh-v1.5": 512,
715
  "bge-large-zh-noinstruct": 512,
716
  "bge-small-zh-v1.5": 512,
 
740
  "gbert-large": 512,
741
  "gelectra-base": 512,
742
  "gelectra-large": 512,
743
+ "google-gecko.text-embedding-preview-0409": 2048,
744
+ "google-gecko-256.text-embedding-preview-0409": 2048,
745
  "gottbert-base": 512,
746
  "glove.6B.300d": "N/A",
747
  "gtr-t5-base": 512,
 
931
  "voyage-code-2",
932
  "voyage-lite-01-instruct",
933
  "voyage-lite-02-instruct",
934
+ "google-gecko.text-embedding-preview-0409",
935
+ "google-gecko-256.text-embedding-preview-0409",
936
  }
937
  PROPRIETARY_MODELS = {
938
  make_clickable_model(model, link=EXTERNAL_MODEL_TO_LINK.get(model, "https://huggingface.co/spaces/mteb/leaderboard"))
 
1180
  examples["mteb_task"] = "PairClassification"
1181
  elif examples["mteb_dataset_name"] in norm(TASK_LIST_RERANKING + TASK_LIST_RERANKING_FR + TASK_LIST_RERANKING_ZH):
1182
  examples["mteb_task"] = "Reranking"
1183
+ elif examples["mteb_dataset_name"] in norm(TASK_LIST_RETRIEVAL_NORM + TASK_LIST_RETRIEVAL_FR + TASK_LIST_RETRIEVAL_PL + TASK_LIST_RETRIEVAL_ZH + TASK_LIST_RETRIEVAL_LAW):
1184
  examples["mteb_task"] = "Retrieval"
1185
  elif examples["mteb_dataset_name"] in norm(TASK_LIST_STS + TASK_LIST_STS_FR + TASK_LIST_STS_PL + TASK_LIST_STS_ZH):
1186
  examples["mteb_task"] = "STS"
 
1598
  DATA_CLASSIFICATION_OTHER = get_mteb_data(["Classification"], [], TASK_LIST_CLASSIFICATION_OTHER)[["Rank", "Model", "Model Size (Million Parameters)", "Average"] + TASK_LIST_CLASSIFICATION_OTHER]
1599
  DATA_CLUSTERING_DE = get_mteb_data(["Clustering"], [], TASK_LIST_CLUSTERING_DE)[["Rank", "Model", "Model Size (Million Parameters)", "Average"] + TASK_LIST_CLUSTERING_DE]
1600
  DATA_STS_OTHER = get_mteb_data(["STS"], [], TASK_LIST_STS_OTHER)[["Rank", "Model", "Model Size (Million Parameters)", "Average"] + TASK_LIST_STS_OTHER]
1601
+ DATA_RETRIEVAL_LAW = get_mteb_data(["Retrieval"], [], TASK_LIST_RETRIEVAL_LAW)[["Rank", "Model", "Model Size (Million Parameters)", "Average"] + TASK_LIST_RETRIEVAL_LAW]
1602
 
1603
  # Exact, add all non-nan integer values for every dataset
1604
  NUM_SCORES = 0
 
1632
  DATA_RETRIEVAL_FR,
1633
  DATA_RETRIEVAL_PL,
1634
  DATA_RETRIEVAL_ZH,
1635
+ DATA_RETRIEVAL_LAW,
1636
  DATA_STS_EN,
1637
  DATA_STS_FR,
1638
  DATA_STS_PL,
 
1924
  "data": DATA_RETRIEVAL_FR,
1925
  "refresh": partial(get_mteb_data, tasks=["Retrieval"], datasets=TASK_LIST_RETRIEVAL_FR)
1926
  },
1927
+ {
1928
+ "language": "Law",
1929
+ "language_long": "English, German, Chinese",
1930
+ "description": "**Retrieval Law Leaderboard** πŸ”Žβš–οΈ",
1931
+ "credits": "[Voyage AI](https://www.voyageai.com/)",
1932
+ "data": DATA_RETRIEVAL_LAW,
1933
+ "refresh": partial(get_mteb_data, tasks=["Retrieval"], datasets=TASK_LIST_RETRIEVAL_LAW)
1934
+ },
1935
  {
1936
  "language": "Polish",
1937
  "description": "**Retrieval Polish Leaderboard** πŸ”ŽπŸ‡΅πŸ‡±",