Duplicate from mjwong/multilingual-e5-large-xnli
Co-authored-by: Ming Jie Wong <[email protected]>
- .gitattributes +36 -0
- README.md +130 -0
- config.json +39 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +3 -0
- tokenizer_config.json +19 -0
- training_args.bin +3 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,130 @@
---
language:
- multilingual
- en
- ar
- bg
- de
- el
- es
- fr
- hi
- ru
- sw
- th
- tr
- ur
- vi
- zh
license: mit
datasets:
- xnli
pipeline_tag: zero-shot-classification
widget:
- text: Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU
  candidate_labels: politics, economy, entertainment, environment
base_model: intfloat/multilingual-e5-large
model-index:
- name: multilingual-e5-large-xnli
  results: []
---

# multilingual-e5-large-xnli

This model is a fine-tuned version of [intfloat/multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) on the XNLI dataset.

## Model description

[Text Embeddings by Weakly-Supervised Contrastive Pre-training](https://arxiv.org/pdf/2212.03533.pdf).
Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, Furu Wei, arXiv 2022

## How to use the model

### With the zero-shot classification pipeline

The model can be loaded with the `zero-shot-classification` pipeline like so:

```python
from transformers import pipeline
classifier = pipeline("zero-shot-classification",
                      model="mjwong/multilingual-e5-large-xnli")
```

You can then use this pipeline to classify sequences into any of the class names you specify.

```python
sequence_to_classify = "Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU"
candidate_labels = ["politics", "economy", "entertainment", "environment"]
classifier(sequence_to_classify, candidate_labels)
```
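
The pipeline returns a dict containing the input `sequence` plus parallel `labels` and `scores` lists sorted by descending score, so the top prediction can be read off directly. A minimal sketch, continuing from the snippet above:

```python
result = classifier(sequence_to_classify, candidate_labels)
# labels/scores are sorted by descending score, so index 0 is the best match.
top_label, top_score = result["labels"][0], result["scores"][0]
print(f"{top_label}: {top_score:.3f}")
```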

If more than one candidate label can be correct, pass `multi_label=True` to score each class independently:

```python
candidate_labels = ["politics", "economy", "entertainment", "environment"]
classifier(sequence_to_classify, candidate_labels, multi_label=True)
```
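
With `multi_label=True` each label gets its own entailment-vs-contradiction decision, so the scores are independent probabilities rather than a distribution over the label set. A quick check, continuing from the example above:

```python
result = classifier(sequence_to_classify, candidate_labels, multi_label=True)
for label, score in zip(result["labels"], result["scores"]):
    print(f"{label}: {score:.3f}")
# Independent scores: the sum across labels need not equal 1.
print("sum:", round(sum(result["scores"]), 3))
```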

### With manual PyTorch

The model can also be applied to NLI tasks like so:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# device = "cuda:0" or "cpu"
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

model_name = "mjwong/multilingual-e5-large-xnli"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Move the model to the same device as the inputs.
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)

premise = "But I thought you'd sworn off coffee."
hypothesis = "I thought that you vowed to drink more coffee."

inputs = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt")
output = model(inputs["input_ids"].to(device))
prediction = torch.softmax(output["logits"][0], -1).tolist()
label_names = ["entailment", "neutral", "contradiction"]
prediction = {name: round(float(pred) * 100, 2) for pred, name in zip(prediction, label_names)}
print(prediction)
```
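
Rather than hardcoding `label_names`, the same mapping can be read from the checkpoint's config (the `config.json` added in this commit defines `id2label` as entailment/neutral/contradiction). A short sketch, continuing from the code above:

```python
# Derive the label order from the model config instead of hardcoding it.
probs = torch.softmax(output["logits"][0], -1)
prediction = {model.config.id2label[i]: round(float(p) * 100, 2)
              for i, p in enumerate(probs)}
print(prediction)
```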

### Eval results
The model was evaluated using the XNLI test sets on 15 languages: English (en), Arabic (ar), Bulgarian (bg), German (de), Greek (el), Spanish (es), French (fr), Hindi (hi), Russian (ru), Swahili (sw), Thai (th), Turkish (tr), Urdu (ur), Vietnamese (vi) and Chinese (zh). The metric used is accuracy.

|Datasets|en|ar|bg|de|el|es|fr|hi|ru|sw|th|tr|ur|vi|zh|
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
|[multilingual-e5-base-xnli](https://huggingface.co/mjwong/multilingual-e5-base-xnli)|0.849|0.768|0.803|0.800|0.792|0.809|0.805|0.738|0.782|0.728|0.756|0.766|0.713|0.787|0.785|
|[multilingual-e5-base-xnli-anli](https://huggingface.co/mjwong/multilingual-e5-base-xnli-anli)|0.811|0.711|0.751|0.759|0.746|0.778|0.765|0.685|0.728|0.662|0.705|0.716|0.683|0.736|0.740|
|[multilingual-e5-large-xnli](https://huggingface.co/mjwong/multilingual-e5-large-xnli)|0.867|0.791|0.832|0.825|0.823|0.837|0.824|0.778|0.806|0.749|0.787|0.793|0.738|0.813|0.808|
|[multilingual-e5-large-xnli-anli](https://huggingface.co/mjwong/multilingual-e5-large-xnli-anli)|0.865|0.765|0.811|0.811|0.795|0.823|0.816|0.743|0.785|0.713|0.765|0.774|0.706|0.788|0.787|

The model was also evaluated using the dev sets for MultiNLI and the test sets for ANLI. The metric used is accuracy.

|Datasets|mnli_dev_m|mnli_dev_mm|anli_test_r1|anli_test_r2|anli_test_r3|
| :---: | :---: | :---: | :---: | :---: | :---: |
|[multilingual-e5-base-xnli](https://huggingface.co/mjwong/multilingual-e5-base-xnli)|0.835|0.837|0.287|0.276|0.301|
|[multilingual-e5-base-xnli-anli](https://huggingface.co/mjwong/multilingual-e5-base-xnli-anli)|0.814|0.811|0.588|0.437|0.439|
|[multilingual-e5-large-xnli](https://huggingface.co/mjwong/multilingual-e5-large-xnli)|0.865|0.865|0.312|0.316|0.300|
|[multilingual-e5-large-xnli-anli](https://huggingface.co/mjwong/multilingual-e5-large-xnli-anli)|0.863|0.863|0.623|0.456|0.455|

### Training hyperparameters

The following hyperparameters were used during training:

- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1
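
For reference, these values map onto `transformers.TrainingArguments` roughly as below. This is a hypothetical sketch, not the actual training script (which is not part of this repository); the Adam betas/epsilon listed above are the `Trainer` defaults.

```python
from transformers import TrainingArguments

# Hypothetical reconstruction of the listed hyperparameters; the optimizer
# settings (betas, epsilon) match the Trainer's default AdamW configuration.
training_args = TrainingArguments(
    output_dir="multilingual-e5-large-xnli",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=1,
)
```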

### Framework versions

- Transformers 4.28.1
- Pytorch 1.12.1+cu116
- Datasets 2.11.0
- Tokenizers 0.12.1
config.json
ADDED
@@ -0,0 +1,39 @@
{
  "_name_or_path": "mjwong/multilingual-e5-large-xnli",
  "architectures": [
    "XLMRobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "id2label": {
    "0": "entailment",
    "1": "neutral",
    "2": "contradiction"
  },
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "label2id": {
    "contradiction": 2,
    "entailment": 0,
    "neutral": 1
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "xlm-roberta",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "output_past": true,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.28.1",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 250002
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6bd79736369b5b54176ba2886b15e1151e64b96245add03bdaaea6731b391a22
size 2239626972
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:21472a4ccf8f94cb26e02602d184d8effc1fd784f2e4d2e7bc658f9b61e98d49
size 2239711213
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
size 5069051
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
{
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f2c509a525eb51aebb33fb59c24ee923c1d4c1db23c3ae81fe05ccf354084f7b
size 17082758
tokenizer_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": {
    "__type": "AddedToken",
    "content": "<mask>",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "XLMRobertaTokenizer",
  "unk_token": "<unk>"
}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0d356b925ea075039011818775c79788eb9327de24fb17efa70e468f9b736f3f
size 3567